max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
export.py | fxkr/snmp-tftp-config | 3 | 12758551 | <reponame>fxkr/snmp-tftp-config
#!/usr/bin/env python2
import gevent.monkey; gevent.monkey.patch_all()
import argparse
import logging
import logging.config
import os
import random
import socket
import string
import StringIO
import struct
import sys
import gevent.socket
import gevent.server
import gevent.event
import pysnmp.entity.rfc3413.oneliner.cmdgen as pysnmp
import pysnmp.proto.rfc1902 as pysnmp_types
import yaml
__version__ = "0.1.0"
class TftpFile(object):
    """An in-memory file being transferred over TFTP.

    Completion is signalled through a gevent Event so another greenlet can
    block in wait() until the transfer has finished.
    """

    def __init__(self, name, buffer_io):
        self.name = name
        self.is_done = False
        self.event = gevent.event.Event()
        self.buffer_io = buffer_io  # StringIO holding the file content

    def done(self):
        # Mark the transfer finished and wake up any greenlets in wait().
        self.is_done = True
        self.event.set()

    def wait(self):
        # Block the calling greenlet until done() is invoked.
        self.event.wait()
class TftpReceiveFile(TftpFile):
    """A file being received over TFTP; read() blocks until the upload completes."""

    def read(self):
        # Wait for the sender to finish, then return the whole payload.
        self.wait()
        return self.buffer_io.getvalue()
class TftpConnection(gevent.server.DatagramServer):
    """Base class for a single TFTP transfer on its own UDP socket.

    Holds the RFC 1350 opcode constants and the shared send/retransmit logic.
    """

    BLOCK_SIZE = 512  # RFC 1350 fixed data block size
    RRQ_OP = 1 # Read request
    WRQ_OP = 2 # Write request
    DATA_OP = 3
    ACK_OP = 4
    ERR_OP = 5

    def __init__(self, socket, remote_addr, file_obj):
        super(TftpConnection, self).__init__(socket)
        self.socket = socket
        self.remote_addr = remote_addr
        self.file_obj = file_obj
        self.previous_packet = None  # last packet eligible for retransmission

    def send(self, raw_data, may_retransmit=False):
        self.socket.sendto(raw_data, self.remote_addr)
        # Only remember packets the caller marked as re-sendable.
        self.previous_packet = raw_data if may_retransmit else None

    def retransmit(self):
        # Re-send the last retransmittable packet, if any.
        # NOTE(review): send() is called without may_retransmit here, so the
        # stored packet is cleared after one retransmission — confirm intent.
        if self.previous_packet is not None:
            self.send(self.previous_packet)
class TftpSendConnection(TftpConnection):
def __init__(self, socket, remote_addr, file_obj):
super(TftpSendConnection, self).__init__(socket, remote_addr, file_obj)
self.block_num = 0
def handle(self, data, address):
# Common header
buf = buffer(data)
opcode = struct.unpack("!h", buf[:2])[0]
# Acknowledgement?
if opcode == TftpConnection.ACK_OP:
block = struct.unpack("!h", buf[2:4])[0]
if block != self.block_num:
raise Exception("wrong ack, expected %i, got %i" % (self.block_num, block))
if self.file_obj.is_done:
return
self._send_data()
# Error?
elif opcode == TftpConnection.ERR_OP:
err_num, err_text = struct.unpack("!h", buf[2:4])[0], buf[4:-1]
print err_num, repr(err_text)
self.retransmit()
def start(self):
super(TftpSendConnection, self).start()
self.send_data()
def send_data(self):
data = self.read_file.read(TftpConnection.BLOCK_SIZE)
self.last_packet = struct.pack("!hh", TftpConnection.DATA_OP, self.block_num)
self.send(self.last_packet)
if len(data) < TftpConnection.BLOCK_SIZE:
self.file_obj.done()
class TftpReceiveConnection(TftpConnection):
    """Server side of a TFTP write request (WRQ): receives DATA blocks into file_obj."""

    def __init__(self, socket, remote_addr, file_obj):
        super(TftpReceiveConnection, self).__init__(socket, remote_addr, file_obj)
        # ACK 0 is sent first; block_num then tracks the next expected DATA block.
        self.block_num = 0
        self.previous_packet = None

    def start(self):
        super(TftpReceiveConnection, self).start()
        # ACK the WRQ itself (block 0) to tell the client to start sending.
        self.send_ack()

    def handle(self, data, address):
        # Common header
        buf = buffer(data)
        opcode = struct.unpack("!h", buf[:2])[0]

        # Data transfer?
        if opcode == TftpConnection.DATA_OP:
            block, data = struct.unpack("!h", buf[2:4])[0], buf[4:]
            if not self.file_obj:
                raise Exception("still waiting for first packet")
            if block != self.block_num:
                raise Exception("wrong block, expected %i, got %i" % (self.block_num, block))
            self.file_obj.buffer_io.write(data)
            # A short block (< 512 bytes) marks the end of the transfer.
            if TftpConnection.BLOCK_SIZE != len(data):
                self.file_obj.done()
            self.send_ack()

        # Error?
        elif opcode == TftpConnection.ERR_OP:
            err_num, err_text = struct.unpack("!h", buf[2:4])[0], buf[4:-1]
            print err_num, repr(err_text)
            self.retransmit()

    def send_data(self):
        # NOTE(review): dead code — never called on the receive side, and it
        # references the undefined attribute self.read_file (would raise
        # AttributeError if ever invoked). Candidate for removal.
        data = self.read_file.read(TftpConnection.BLOCK_SIZE)
        self.file_obj.is_done = len(data) < TftpConnection.BLOCK_SIZE
        self.last_packet = struct.pack("!hh", TftpConnection.DATA_OP, self.block_num)
        self.send(self.last_packet)
        if self.file_obj.is_done:
            self.file_obj.done()

    def send_ack(self):
        # ACK the current block, then advance to the next expected block number.
        self.previous_packet = struct.pack("!hh", TftpConnection.ACK_OP, self.block_num)
        self.send(self.previous_packet)
        self.block_num += 1
class TftpServer(gevent.server.DatagramServer):
    """Minimal single-shot TFTP server.

    Files must be registered up front via receive()/send(); each incoming
    RRQ/WRQ spawns a dedicated connection on a fresh UDP socket (a new TID,
    as TFTP requires) and the registration is consumed.

    Fix: both "already ..." error messages contained a '%s' placeholder that
    was never interpolated with the filename.
    """

    def __init__(self, interface):
        super(TftpServer, self).__init__(interface)
        self.sendable = {}    # filename -> TftpFile to serve on RRQ
        self.receivable = {}  # filename -> TftpReceiveFile to fill on WRQ

    def receive(self, filename):
        """Register *filename* for one incoming upload and return its file object."""
        if filename in self.receivable:
            raise Exception('already receiving file: "%s"' % filename)
        file_obj = TftpReceiveFile(filename, StringIO.StringIO())
        self.receivable[filename] = file_obj
        return file_obj

    def send(self, filename, content):
        """Register *content* to be served once under *filename* and return its file object."""
        if filename in self.sendable:
            raise Exception('already sending file: "%s"' % filename)
        file_obj = TftpFile(filename, StringIO.StringIO(content))
        self.sendable[filename] = file_obj
        return file_obj

    def handle(self, data, address):
        # Common header
        buf = buffer(data)
        opcode = struct.unpack("!h", buf[:2])[0]

        # Read/write request? The request body is "filename\0mode\0".
        if opcode == TftpConnection.RRQ_OP:
            filename, mode, _ = string.split(data[2:], "\0")
            if filename not in self.sendable:
                raise Exception("invalid filename: %s" % filename)
            file_obj = self.sendable[filename]
            new_socket = gevent.socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            TftpSendConnection(new_socket, address, file_obj).start()
            del self.sendable[filename]
        elif opcode == TftpConnection.WRQ_OP:
            filename, mode, _ = string.split(data[2:], "\0")
            if filename not in self.receivable:
                raise Exception("invalid filename: %s" % filename)
            file_obj = self.receivable[filename]
            new_socket = gevent.socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            TftpReceiveConnection(new_socket, address, file_obj).start()
            del self.receivable[filename]
def main():
# Configure logging
for path in ('logging.yml', 'logging.default.yml'):
if not os.path.isfile(path):
continue
with open(path, 'rt') as file:
config = yaml.load(file)
logging.config.dictConfig(config)
# Parse arguments
par = argparse.ArgumentParser(
description='Export network device configuration via SNMP')
par.add_argument('-V', '--version', action='version', version=__version__)
par.add_argument('-c', '--community', default='public')
par.add_argument('--debug-local-port', dest='local_port', default=69, type=int)
par.add_argument('--debug-remote-port', dest='remote_port', default=161, type=int)
par.add_argument('--debug-filename', dest='filename', default=None, type=int)
par.add_argument('--debug-no-trigger', dest='no_trigger', action="store_true", default=False)
par.add_argument('local_addr')
par.add_argument('remote_addr')
args = par.parse_args()
# Determine random filename
if args.filename is None:
charset = (string.ascii_lowercase + string.digits)[:32]
assert 256 % len(charset) == 0 # even distribution
filename = "".join(charset[ord(x) % len(charset)] for x in os.urandom(16))
else:
filename = args.filename
# Start server
server = TftpServer((args.local_addr, args.local_port))
server.start()
file_obj = server.receive(filename)
# Tell switch to start upload
if not args.no_trigger:
i = random.randint(100000, 999999)
snmp = pysnmp.CommandGenerator()
community = pysnmp.CommunityData(args.community)
target = pysnmp.UdpTransportTarget((args.remote_addr, args.remote_port))
errIndication, errStatus, errIndex, varBinds = snmp.setCmd(community, target,
("1.3.6.1.4.192.168.127.12.1.1.1.1.2.%i" % i, pysnmp_types.Integer(1)),
("1.3.6.1.4.192.168.127.12.1.1.1.1.3.%i" % i, pysnmp_types.Integer(4)),
("1.3.6.1.4.192.168.127.12.1.1.1.1.4.%i" % i, pysnmp_types.Integer(1)),
("1.3.6.1.4.192.168.127.12.1.1.1.1.5.%i" % i, pysnmp_types.IpAddress(args.local_addr)),
("1.3.6.1.4.192.168.127.12.1.1.1.1.6.%i" % i, pysnmp_types.OctetString(filename)))
errIndication, errStatus, errIndex, varBinds = snmp.setCmd(community, target,
("1.3.6.1.4.1.9.9.96.1.1.1.1.14.%i" % i, pysnmp_types.Integer(1)))
else:
print("filename: %s" % filename)
# Wait for upload to finish
print file_obj.read()
if __name__ == '__main__':
main()
| 2.203125 | 2 |
cell_localization/models/unet/EDSR.py | ver228/cell_localization | 1 | 12758552 | <gh_stars>1-10
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 14 20:46:30 2019
@author: avelinojaver
#based on https://github.com/thstkdgus35/EDSR-PyTorch/
"""
from torch import nn
def default_conv(in_channels, out_channels, kernel_size, bias=True):
    """Build a 2-D convolution whose padding keeps the spatial size unchanged
    for odd kernel sizes ("same" padding)."""
    padding = kernel_size // 2
    return nn.Conv2d(in_channels, out_channels, kernel_size,
                     padding=padding, bias=bias)
class BasicBlock(nn.Sequential):
    """Conv -> optional BatchNorm -> optional activation, as an nn.Sequential.

    Note: ``stride`` is accepted but not forwarded to ``conv`` (kept for
    signature compatibility), and the default ``act`` module instance is
    shared across blocks — harmless because ReLU is stateless.
    """

    def __init__(self,
                 conv,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 bias=False,
                 bn=True,
                 act=nn.ReLU(inplace = True)
                 ):
        layers = [conv(in_channels, out_channels, kernel_size, bias=bias)]
        if bn:
            layers.append(nn.BatchNorm2d(out_channels))
        if act is not None:
            layers.append(act)
        super(BasicBlock, self).__init__(*layers)
class ResBlock(nn.Module):
    """EDSR residual block: ``x + res_scale * conv(act(conv(x)))``.

    BatchNorm is optional (EDSR removes it by default); the activation sits
    only between the two convolutions.
    """

    def __init__(self,
                 conv,
                 n_feats,
                 kernel_size,
                 bias=True,
                 bn=False,
                 act=nn.ReLU(inplace = True),
                 res_scale=1
                 ):
        super(ResBlock, self).__init__()
        layers = []
        for position in range(2):
            layers.append(conv(n_feats, n_feats, kernel_size, bias=bias))
            if bn:
                layers.append(nn.BatchNorm2d(n_feats))
            # Activation only after the first convolution.
            if position == 0:
                layers.append(act)
        self.body = nn.Sequential(*layers)
        self.res_scale = res_scale

    def forward(self, x):
        # Scaled residual branch plus identity shortcut.
        return x + self.body(x) * self.res_scale
# Reference configurations: baseline 16 blocks / 64 feats / scale 1.0,
# large model 32 blocks / 256 feats / scale 0.1.
class EDSRBody(nn.Module):
    """EDSR trunk (head conv -> residual blocks -> tail conv), without the
    up-sampling stage of the original super-resolution network."""

    def __init__(self,
                 n_inputs = 1,
                 n_outputs = 1,
                 n_resblocks = 16,
                 n_feats = 64,
                 res_scale = 1.
                 ):
        super(EDSRBody, self).__init__()
        self.n_inputs = n_inputs
        self.n_outputs = n_outputs
        kernel_size = 3
        activation = nn.ReLU(True)

        # Head lifts the input channels to the feature width.
        self.head = nn.Sequential(default_conv(n_inputs, n_feats, kernel_size))

        # Body: n_resblocks residual blocks followed by one plain convolution.
        body_layers = [
            ResBlock(default_conv, n_feats, kernel_size,
                     act=activation, res_scale=res_scale)
            for _ in range(n_resblocks)
        ]
        body_layers.append(default_conv(n_feats, n_feats, kernel_size))
        self.body = nn.Sequential(*body_layers)

        # Tail projects back to the requested output channels.
        self.tail = nn.Sequential(default_conv(n_feats, n_outputs, kernel_size))

    def forward(self, x):
        features = self.head(x)
        out = self.body(features)
        # Global residual connection around the whole body.
        out = out + features
        return self.tail(out)
python_full/HelloWorld/2.6 stringmethods.py | marcmanley/mosh | 0 | 12758553 | <filename>python_full/HelloWorld/2.6 stringmethods.py
# String methods tutorial script.
# len() is a free function; methods like course.upper() are functions attached
# to an object and are called with a "." after the value.
# In Python everything is an object, and objects expose methods.
course = "python programming"

# Case-changing methods return NEW strings; the original is unchanged.
print(course.upper())
print(course)
print(course.capitalize())
print(course.istitle())
print(course.title())

# A method result can be assigned to a new variable.
upper_course = course.upper()
print(upper_course)
lower_course = upper_course.lower()
print(lower_course)

# Stripping whitespace: strip() removes it from both ends;
# lstrip()/rstrip() remove it from only the left or right end.
unstriped_course = " The unstriped Python Course"
print(unstriped_course)
striped_course = unstriped_course.strip()
print(striped_course)

# find() returns the index of the first occurrence of a substring.
print(course.find("ra"))
# here "ra" starts at index 11 within the string

# replace() also returns a new string with the substitution applied.
print(course.replace("python", "Our new Python"),
      (course.replace("programming", "Programming Course")))

# Membership tests with "in" and "not in".
print(course)
print("py" in course)
# True, because "py" occurs inside "python"
print("meat balls" not in course)
# also True, because "meat balls" does not occur in course
| 4.625 | 5 |
policies.py | microsoft/multi-concept-sample | 1 | 12758554 | """
Fixed policies to test our sim integration with. These are intended to take
Brain states and return Brain actions.
"""
import random
def random_policy(state):
    """Return a random Brain action; the observed state is ignored."""
    return {'command': random.randint(1, 2)}
def coast(state):
    """Always return command 1 ("coast"); the observed state is ignored."""
    return {'command': 1}
# Registry mapping policy names (as used on the command line / config)
# to the policy callables defined above.
POLICIES = {"random": random_policy,
            "coast": coast}
allennlp/training/metrics/categorical_accuracy.py | unendin/allennlp | 1 | 12758555 | <gh_stars>1-10
from typing import Optional
from overrides import overrides
import torch
from allennlp.common.checks import ConfigurationError
from allennlp.training.metrics.metric import Metric
@Metric.register("categorical_accuracy")
class CategoricalAccuracy(Metric):
    """
    Categorical Top-K accuracy. Assumes integer labels, with
    each item to be classified having a single correct class.

    Fix: ``get_metric`` previously raised ``ZeroDivisionError`` when called
    before any predictions had been accumulated; it now returns 0.0 instead.
    """
    def __init__(self, top_k: int = 1) -> None:
        self._top_k = top_k
        self.correct_count = 0.
        self.total_count = 0.

    def __call__(self,
                 predictions: torch.Tensor,
                 gold_labels: torch.Tensor,
                 mask: Optional[torch.Tensor] = None):
        """
        Parameters
        ----------
        predictions : ``torch.Tensor``, required.
            A tensor of predictions of shape (batch_size, ..., num_classes).
        gold_labels : ``torch.Tensor``, required.
            A tensor of integer class label of shape (batch_size, ...). It must be the same
            shape as the ``predictions`` tensor without the ``num_classes`` dimension.
        mask: ``torch.Tensor``, optional (default = None).
            A masking tensor the same size as ``gold_labels``.
        """
        # Get the data from the Variables.
        predictions, gold_labels, mask = self.unwrap_to_tensors(predictions, gold_labels, mask)

        # Some sanity checks.
        num_classes = predictions.size(-1)
        if gold_labels.dim() != predictions.dim() - 1:
            raise ConfigurationError("gold_labels must have dimension == predictions.size() - 1 but "
                                     "found tensor of shape: {}".format(predictions.size()))
        if (gold_labels >= num_classes).any():
            raise ConfigurationError("A gold label passed to Categorical Accuracy contains an id >= {}, "
                                     "the number of classes.".format(num_classes))

        # Top K indexes of the predictions (or fewer, if there aren't K of them).
        # Special case topk == 1, because it's common and .max() is much faster than .topk().
        if self._top_k == 1:
            top_k = predictions.max(-1)[1].unsqueeze(-1)
        else:
            top_k = predictions.topk(min(self._top_k, predictions.shape[-1]), -1)[1]

        # This is of shape (batch_size, ..., top_k).
        correct = top_k.eq(gold_labels.long().unsqueeze(-1)).float()

        if mask is not None:
            # Masked positions contribute neither to correct nor total counts.
            correct *= mask.float().unsqueeze(-1)
            self.total_count += mask.sum()
        else:
            self.total_count += gold_labels.numel()
        self.correct_count += correct.sum()

    def get_metric(self, reset: bool = False):
        """
        Returns
        -------
        The accumulated accuracy (0.0 if no instances have been seen yet).
        """
        if self.total_count > 0:
            accuracy = float(self.correct_count) / float(self.total_count)
        else:
            # Guard against division by zero before the first batch.
            accuracy = 0.0
        if reset:
            self.reset()
        return accuracy

    @overrides
    def reset(self):
        self.correct_count = 0.0
        self.total_count = 0.0
| 2.5625 | 3 |
wapyce/accessibility/models.py | wapyce/wapyce | 0 | 12758556 | <filename>wapyce/accessibility/models.py
"""
Models of accessibility app.
"""
from django.db import models
from django.utils.translation import gettext as _
from wapyce.core.models import CoreModel
from wapyce.validation.models import Page
# Create your models here.
class IssueCode(models.Model):
    """
    The IssueCode class is a model that represents the code of an issue
    reported for a page (one row per distinct checker rule).
    """

    # Checker rule identifier; unique so each rule is stored only once.
    code = models.CharField(
        max_length=100,
        unique=True,
        verbose_name=_('Issue code')
    )

    class Meta:
        """
        Metadata class of the issue code model.
        """
        verbose_name = _('Issue code')

    def __str__(self):
        return self.code
class IssuePage(CoreModel):
    """
    The IssuePage class is a model that represents an accessibility issue
    found on a specific page.
    """

    # Severity levels used by issue_type.
    ERROR = 1
    WARNING = 2
    NOTICE = 3
    TYPE_CHOICES = (
        (ERROR, _('Error')),
        (WARNING, _('Warning')),
        (NOTICE, _('Notice')),
    )

    # PROTECT: pages/codes referenced by issues cannot be deleted.
    page = models.ForeignKey(
        Page,
        on_delete=models.PROTECT,
        verbose_name=_('Page URL')
    )
    code = models.ForeignKey(
        IssueCode,
        on_delete=models.PROTECT,
        verbose_name=_('Issue code')
    )
    # Optional surrounding markup of the offending element.
    context = models.CharField(
        max_length=255,
        verbose_name=_('Context'),
        null=True,
        blank=True,
    )
    message = models.CharField(max_length=255, verbose_name=_('Message'))
    # NOTE(review): selector allows blank but not NULL, while context allows
    # both — confirm whether that asymmetry is intentional.
    selector = models.CharField(
        max_length=200,
        verbose_name=_('Selector'),
        blank=True,
    )
    issue_type = models.IntegerField(
        choices=TYPE_CHOICES,
        verbose_name=_('Type')
    )

    class Meta:
        """
        Metadata class of the issue model.
        """
        verbose_name = _('Issue of page')
| 2.40625 | 2 |
server/webapp/extensions.py | ninjha01/scrobble | 0 | 12758557 | import os
import shelve
from typing import Dict, List, Optional, Set, Tuple
from flask import current_app
from google.cloud import datastore
class DatastoreAdapter:
    """Lazily builds and caches a Google Cloud Datastore client on the
    current Flask application object."""

    @property
    def ds_client(self):
        # Create the client once per application and memoise it on
        # current_app so later accesses reuse the same connection.
        if not hasattr(current_app, "_datastore_client"):
            config = current_app.config
            current_app._datastore_client = datastore.Client(
                project=config.get("DATASTORE_PROJECT"),
                credentials=config.get("GCP_CREDENTIALS"),
            )
        return current_app._datastore_client


# Module-level singleton used by the rest of the app as `db.ds_client`.
db = DatastoreAdapter()
class CredentialsAdapter:
    """Accessor for the GCP credentials stored in the Flask app config."""

    @property
    def credentials(self):
        # Fails fast (AssertionError) if GCP_CREDENTIALS was never configured.
        credentials = current_app.config.get("GCP_CREDENTIALS")
        assert credentials is not None
        return credentials


# Module-level singleton used by the rest of the app as `gcp.credentials`.
gcp = CredentialsAdapter()
| 2.703125 | 3 |
src/subscriber.py | vitvakatu/skelevisors | 4 | 12758558 | <filename>src/subscriber.py
#!/usr/bin/env python
import rospy
from std_msgs.msg import String
from os import system
import tf2_msgs.msg
import roslib
import cmath as math
import geometry_msgs.msg as msgs
import turtlesim.srv
from darwin_gazebo.darwin import Darwin
from tf.transformations import euler_from_quaternion
# Last known joint positions, updated in place by callback() as tf messages
# arrive. Coordinates are cached in (y, x, z) order (see callback()).
shoulder_coord_l = (0.0, 0.0, 0.0)
hand_coord_l = (0.0, 0.0, 0.0)
elbow_coord_l = (0.0, 0.0, 0.0)
shoulder_coord_r = (0.0, 0.0, 0.0)
hand_coord_r = (0.0, 0.0, 0.0)
elbow_coord_r = (0.0, 0.0, 0.0)
# Head orientation; NOTE(review): initialised with 4 elements, but
# euler_from_quaternion() returns a 3-tuple — verify which is intended.
head = (0.0, 0.0, 0.0, 0.0)
def sub(s, f):
    """Component-wise difference f - s of two 3-vectors."""
    sx, sy, sz = s
    fx, fy, fz = f
    return (fx - sx, fy - sy, fz - sz)
def length(v):
    """Euclidean norm of a 3-vector (complex-valued because the module
    imports cmath as `math`; the imaginary part is zero for real input)."""
    x, y, z = v
    return math.sqrt(x ** 2 + y ** 2 + z ** 2)
def angle(v):
    """Spherical-style angles (phi, theta) of a 3-vector; real parts only.

    phi is the elevation out of the x/y plane, theta the angle from the
    x axis within that plane. The x100 scaling does not change the angles.
    """
    x, y, z = (v[0] * 100, v[1] * 100, v[2] * 100)
    phi = math.atan(z / math.sqrt(x ** 2 + y ** 2))
    theta = math.acos(x / math.sqrt(y ** 2 + x ** 2))
    return (phi.real, theta.real)
def angle_vectors(v1, v2):
    """Angle between two 3-vectors via the dot-product formula (real part only)."""
    dot = v1[0] * v2[0] + v1[1] * v2[1] + v1[2] * v2[2]
    norm_product = math.sqrt(v1[0] ** 2 + v1[1] ** 2 + v1[2] ** 2)
    norm_product = norm_product * math.sqrt(v2[0] ** 2 + v2[1] ** 2 + v2[2] ** 2)
    return math.acos(dot / norm_product).real
def translate(value, leftMin, leftMax, rightMin, rightMax):
    """Linearly map *value* from [leftMin, leftMax] onto [rightMin, rightMax]."""
    # Width of each range.
    left_span = leftMax - leftMin
    right_span = rightMax - rightMin
    # Normalise into [0, 1], then rescale into the target range.
    normalised = float(value - leftMin) / float(left_span)
    return rightMin + (normalised * right_span)
def callback(data, darwin):
    """tf subscriber callback: mirror the tracked human skeleton onto Darwin.

    Caches the shoulder/elbow/hand transforms of both arms plus the head
    orientation, derives shoulder/arm joint angles from them and pushes the
    angles to the robot on every message.
    """
    # Joint positions live in module globals so frames that only update some
    # transforms still have values for every joint.
    global hand_coord_l
    global shoulder_coord_l
    global elbow_coord_l
    global hand_coord_r
    global shoulder_coord_r
    global elbow_coord_r
    global head
    for tr in data.transforms:
        # Cache each translation, reordered to (y, x, z).
        if tr.child_frame_id.startswith('left_hand'):
            translation = tr.transform.translation
            hand_coord_l = (translation.y, translation.x, translation.z)
        if tr.child_frame_id.startswith('left_shoulder'):
            translation = tr.transform.translation
            shoulder_coord_l = (translation.y, translation.x, translation.z)
        if tr.child_frame_id.startswith('left_elbow'):
            translation = tr.transform.translation
            elbow_coord_l = (translation.y, translation.x, translation.z)
        if tr.child_frame_id.startswith('right_hand'):
            translation = tr.transform.translation
            hand_coord_r = (translation.y, translation.x, translation.z)
        if tr.child_frame_id.startswith('right_shoulder'):
            translation = tr.transform.translation
            shoulder_coord_r = (translation.y, translation.x, translation.z)
        if tr.child_frame_id.startswith('right_elbow'):
            translation = tr.transform.translation
            elbow_coord_r = (translation.y, translation.x, translation.z)
        if tr.child_frame_id.startswith('head'):
            rotation = tr.transform.rotation
            head = euler_from_quaternion((rotation.w, rotation.x, rotation.y, rotation.z))
    # Arm vectors relative to each shoulder.
    relative_elbow_l = sub(shoulder_coord_l, elbow_coord_l)
    relative_hand_l = sub(shoulder_coord_l, hand_coord_l)
    relative_elbow_r = sub(shoulder_coord_r, elbow_coord_r)
    relative_hand_r = sub(shoulder_coord_r, hand_coord_r)
    (phi_elbow, theta_elbow) = angle(relative_elbow_l)
    # Elbow flexion: angle between upper arm and forearm.
    elbow_to_hand_l = sub(relative_elbow_l, relative_hand_l)
    angle_elbow_to_hand_l = angle_vectors(elbow_to_hand_l, relative_elbow_l)
    elbow_to_hand_r = sub(relative_elbow_r, relative_hand_r)
    angle_elbow_to_hand_r = angle_vectors(elbow_to_hand_r, relative_elbow_r)
    # Debug trace (Python 2 print statements, printing tuples).
    print('Rel.elbow ', relative_elbow_l)
    print('Rel.hand ', relative_hand_l)
    print('El 2 hand ', elbow_to_hand_l)
    print('Angle2hand ', angle_elbow_to_hand_l)
    print('Phi ', phi_elbow)
    print('Theta ', theta_elbow)
    print('HEAD: ', head)
    # Angles of the upper arm against the z and x axes; the -0.25/+0.25 rad
    # offsets look like empirical calibration — TODO confirm.
    elbow_to_axis_z_l = math.acos(relative_elbow_l[2] / math.sqrt(relative_elbow_l[0] ** 2 + relative_elbow_l[1] ** 2 + relative_elbow_l[2] ** 2)).real -0.25
    elbow_to_axis_y_l = math.acos(relative_elbow_l[0] / math.sqrt(relative_elbow_l[0] ** 2 + relative_elbow_l[1] ** 2 + relative_elbow_l[2] ** 2)).real
    if relative_elbow_l[2] < 0:
        elbow_to_axis_y_l = -elbow_to_axis_y_l
    elbow_to_axis_z_r = math.acos(relative_elbow_r[2] / math.sqrt(relative_elbow_r[0] ** 2 + relative_elbow_r[1] ** 2 + relative_elbow_r[2] ** 2)).real +0.25
    elbow_to_axis_y_r = math.acos(relative_elbow_r[0] / math.sqrt(relative_elbow_r[0] ** 2 + relative_elbow_r[1] ** 2 + relative_elbow_r[2] ** 2)).real
    if relative_elbow_r[2] < 0:
        elbow_to_axis_y_r = -elbow_to_axis_y_r
    print('ELBOW_TO_Z: ', elbow_to_axis_z_l)
    print('ELBOW_TO_Y: ', elbow_to_axis_y_l)
    # Map to Darwin joint conventions (right side mirrored).
    shoulder_l = (3.14 - elbow_to_axis_y_l)
    high_arm_l = 1.57 - elbow_to_axis_z_l
    shoulder_r = -(3.14 - elbow_to_axis_y_r)
    high_arm_r = 1.57 - elbow_to_axis_z_r
    print('Shoulder L: ', shoulder_l)
    print('HighArm L: ', high_arm_l)
    print('Shoulder R: ', shoulder_r)
    print('HighArm R: ', high_arm_r)
    print(' ')
    darwin.set_angles({"j_shoulder_l": shoulder_l})
    darwin.set_angles({"j_high_arm_l": high_arm_l})
    darwin.set_angles({"j_shoulder_r": shoulder_r})
    darwin.set_angles({"j_high_arm_r": high_arm_r})
    darwin.set_angles({"j_tilt": translate(head[2], 1.5, 2.0, -1, 1)})
    darwin.set_angles({"j_pan": translate(head[1], -0.1, 0.1, -1, 1)})
    # Lower-arm joints: clamp the elbow flexion into [0, 1.5] rad,
    # mirrored (negated) for the right arm.
    if angle_elbow_to_hand_l > 1.75:
        darwin.set_angles({"j_low_arm_l": 1.5})
    elif angle_elbow_to_hand_l < 0.25:
        darwin.set_angles({"j_low_arm_l": 0})
    else:
        darwin.set_angles({"j_low_arm_l": (angle_elbow_to_hand_l - 0.25)})
    if angle_elbow_to_hand_r > 1.75:
        darwin.set_angles({"j_low_arm_r": -1.5})
    elif angle_elbow_to_hand_r < 0.25:
        darwin.set_angles({"j_low_arm_r": 0})
    else:
        darwin.set_angles({"j_low_arm_r": -(angle_elbow_to_hand_r - 0.25)})
if __name__ == '__main__':
    # Initialise the ROS node, bring up the robot interface and forward all
    # tf messages to callback() until shutdown.
    rospy.init_node("walker_demo", anonymous=True)
    print('Hello')
    darwin = Darwin()
    rospy.loginfo("Darwin initialization finished")
    print("Hello")
    rospy.Subscriber('tf', tf2_msgs.msg.TFMessage, callback, darwin)
    rospy.spin()
| 2.3125 | 2 |
hard-gists/2c7b95beed642248487a/snippet.py | jjhenkel/dockerizeme | 21 | 12758559 | <filename>hard-gists/2c7b95beed642248487a/snippet.py<gh_stars>10-100
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Simple script illustrating how to perform embarrassingly parallel computations
in Python using MPI/mpi4py. I like this approach a lot as its very easy to get
right without having to deal with the complications arising from forked
processes which the multiprocessing module uses.
This script can be executed with or without `mpirun`; it will just run on one
core if not executed with it. With some more logic its also possible to make
MPI/mpi4py completely optional.
Run with (making sure MPI and mpi4py are installed):
$ mpirun -n X python embarrassingly_parallel.py
where X is the number of processes you want to run this on.
The MIT License (MIT)
Copyright (c) 2015 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from mpi4py import MPI
# Use the default (world) communicator — every launched rank participates.
COMM = MPI.COMM_WORLD
def split(container, count):
    """
    Partition *container* into *count* round-robin chunks of near-equal size.

    Order is not preserved across chunks, which can even be an advantage
    depending on the use case (it balances expensive items).
    """
    return [container[start::count] for start in range(count)]
# Collect whatever has to be done in a list. Here we'll just collect a list of
# numbers. Only the first rank has to do this.
if COMM.rank == 0:
    jobs = list(range(100))
    # Split into however many cores are available.
    jobs = split(jobs, COMM.size)
else:
    # Non-root ranks receive their share via scatter below.
    jobs = None

# Scatter jobs across cores.
jobs = COMM.scatter(jobs, root=0)

# Now each rank just does its jobs and collects everything in a results list.
# Make sure to not use super big objects in there as they will be pickled to be
# exchanged over MPI.
results = []
for job in jobs:
    # Do something meaningful here...
    results.append(job ** 2)

# Gather results on rank 0 (a list of per-rank result lists).
results = MPI.COMM_WORLD.gather(results, root=0)

if COMM.rank == 0:
    # Flatten list of lists.
    results = [_i for temp in results for _i in temp]
    print("Results:", results)
| 2.234375 | 2 |
python/FizzBuzz.py | campbe13/FizzBuzz | 0 | 12758560 | <reponame>campbe13/FizzBuzz
#!/usr/bin/python
"""
Fizz Buzz in python 3
<NAME>
February 2018
"""
def fizzbuzz(number):
    """Return "Fizz"/"Buzz"/"FizzBuzz" for multiples of 3/5/both, otherwise
    the number itself as a string."""
    msg = ""
    if number % 3 == 0:
        msg += "Fizz"
    if number % 5 == 0:
        msg += "Buzz"
    return msg if msg else str(number)


# Fix: the previous loop only reset `msg` after printing it, so the very
# first "Buzz" iteration depended on a reset from an earlier "Fizz"
# iteration (a latent NameError if the range were changed).
for i in range(1, 101):
    print(fizzbuzz(i))
| 3.53125 | 4 |
30_Cube/python/cube.py | serranojl/basic-computer-games | 1 | 12758561 | <filename>30_Cube/python/cube.py
#!/usr/bin/env python3
"""
CUBE
Converted from BASIC to Python by <NAME>
"""
import random
from typing import Tuple
def mine_position() -> Tuple[int, int, int]:
    """Return a uniformly random cell of the 3x3x3 cube (1-based coordinates)."""
    x = random.randint(1, 3)
    y = random.randint(1, 3)
    z = random.randint(1, 3)
    return (x, y, z)
def parse_move(move: str) -> Tuple[int, int, int]:
    """Parse a move string "x,y,z" into an int 3-tuple.

    Raises ValueError when the string does not contain exactly three
    comma-separated integers.
    """
    parts = move.split(",")
    if len(parts) != 3:
        raise ValueError
    return tuple(int(part) for part in parts)  # type: ignore
def play_game() -> None:
    """Play rounds of the game until the player quits or goes bust.

    Fix: the round loop previously had no exit on bust — with 0 dollars a
    0-dollar wager is still accepted, so the game looped forever. Busting
    now ends the session.
    """
    money = 500
    print("\nYou have", money, "dollars.")
    while True:
        # Plant 5 distinct mines, never on the start (1,1,1) or goal (3,3,3).
        mines = []
        for _ in range(5):
            while True:
                mine = mine_position()
                if not (mine in mines or mine == (1, 1, 1) or mine == (3, 3, 3)):
                    break
            mines.append(mine)

        # Ask for a wager between 0 and the current bankroll.
        wager = -1
        while wager == -1:
            try:
                wager = int(input("\nHow much do you want to wager? "))
                if not 0 <= wager <= money:
                    wager = -1
                    print("Tried to fool me; bet again")
            except ValueError:
                print("Please enter a number.")

        prompt = "\nIt's your move: "
        position = (1, 1, 1)
        while True:
            # Keep asking until the input parses as three coordinates.
            move = (-1, -1, -1)
            while move == (-1, -1, -1):
                try:
                    move = parse_move(input(prompt))
                except (ValueError, IndexError):
                    print("Please enter valid coordinates.")
            # Only one step along exactly one axis is legal.
            if (
                abs(move[0] - position[0])
                + abs(move[1] - position[1])
                + abs(move[2] - position[2])
            ) > 1:
                print("\nIllegal move. You lose")
                money = money - wager
                break
            elif (
                move[0] not in [1, 2, 3]
                or move[1] not in [1, 2, 3]
                or move[2] not in [1, 2, 3]
            ):
                print("\nIllegal move. You lose")
                money = money - wager
                break
            elif move == (3, 3, 3):
                print("\nCongratulations!")
                money = money + wager
                break
            elif move in mines:
                print("\n******BANG******")
                print("You lose!")
                money = money - wager
                break
            else:
                position = move
                prompt = "\nNext move: "

        if money > 0:
            print("\nYou now have", money, "dollars.")
            if not input("Do you want to try again ").lower().startswith("y"):
                break
        else:
            print("\nYou bust.")
            print("\nTough luck")
            break
    print("\nGoodbye.")
def print_instructions() -> None:
    """Print the rules of the game and the wager conventions."""
    instructions = (
        "\nThis is a game in which you will be playing against the",
        "random decisions of the computer. The field of play is a",
        "cube of side 3. Any of the 27 locations can be designated",
        "by inputing three numbers such as 2,3,1. At the start,",
        "you are automatically at location 1,1,1. The object of",
        "the game is to get to location 3,3,3. One minor detail:",
        "the computer will pick, at random, 5 locations at which",
        "it will plant land mines. If you hit one of these locations",
        "you lose. One other detail: You may move only one space",
        "in one direction each move. For example: From 1,1,2 you",
        "may move to 2,1,2 or 1,1,3. You may not change",
        "two of the numbers on the same move. If you make an illegal",
        "move, you lose and the computer takes the money you may",
        "have bet on that round.\n",
        "When stating the amount of a wager, print only the number",
        "of dollars (example: 250) you are automatically started with",
        "500 dollars in your account.\n",
        "Good luck!",
    )
    for line in instructions:
        print(line)
def main() -> None:
    """Show the banner, optionally the instructions, then play sessions
    until the player declines to continue."""
    print(" " * 34 + "CUBE")
    print(" " * 15 + "CREATIVE COMPUTING MORRISTOWN, NEW JERSEY\n")
    if input("Do you want to see the instructions ").lower().startswith("y"):
        print_instructions()
    keep_playing = True
    while keep_playing:
        play_game()
        keep_playing = input("\nPlay again? (yes or no) ").lower().startswith("y")


if __name__ == "__main__":
    main()
| 4.1875 | 4 |
pywde/spwde.py | carlosayam/PyWDE | 0 | 12758562 | import math
import itertools as itt
import numpy as np
from collections import namedtuple
from datetime import datetime
from scipy.special import gamma
from sklearn.neighbors import BallTree
import random
from pywde.pywt_ext import WaveletTensorProduct
from pywde.common import all_zs_tensor
class dictwithfactory(dict):
    """Dict that lazily builds missing values with a key-aware factory.

    Like ``collections.defaultdict``, except the factory receives the
    missing key as its argument.
    """

    def __init__(self, factory):
        super(dictwithfactory, self).__init__()
        self._factory = factory

    def __missing__(self, key):
        # dict.__getitem__ calls __missing__ only on an actual miss, so this
        # replaces the previous __getitem__ override that performed a
        # redundant membership test plus lookup on every access.
        val = self._factory(key)
        self[key] = val
        return val
class SPWDE(object):
    def __init__(self, waves, k=1):
        # waves: sequence of (wavelet_name, j0) descriptors, one per dimension.
        self.wave = WaveletTensorProduct([wave_desc[0] for wave_desc in waves])
        self.j0s = [wave_desc[1] for wave_desc in waves]
        # k-th nearest neighbour used for the ball-volume terms.
        self.k = k
        # Per-dimension data bounds; filled in by best_j()/best_c().
        self.minx = None
        self.maxx = None
# target distance
TARGET_NORMED = 'normed'
TARGET_DIFF = 'diff'
# threshold calculation
TH_CLASSIC = 'classic' # Donoho et al
TH_ADJUSTED = 'adjusted' # Delyon & Judistky
TH_EMP_STD = 'emp-var' # New
    def best_j(self, xs, mode, stop_on_max=False):
        """Scan resolution levels j = 0..7 and pick the one maximising the
        leave-one-out objective B-hat(j) for the given target `mode`.

        Side effects: sets self.minx/self.maxx and, unless stop_on_max is
        True, self.best_j_data with one tuple (j, is_best, b_hat, pdf,
        elapsed_secs) per level. With stop_on_max=True, returns the best j
        as soon as B-hat starts decreasing and stores it in self.the_best_j.
        """
        t0 = datetime.now()
        assert mode in [self.TARGET_NORMED, self.TARGET_DIFF], 'Wrong mode'
        best_j_data = []
        balls_info = calc_sqrt_vs(xs, self.k)
        self.minx = np.amin(xs, axis=0)
        self.maxx = np.amax(xs, axis=0)
        omega = calc_omega(xs.shape[0], self.k)
        best_b_hat_j = None
        best_j = None
        for j in range(8):
            # In practice, one would stop when the maximum is reached, i.e.
            # after the first decreasing value of B-hat.
            g_ring_no_i_xs = []
            wave_base_j_00_ZS, wave_base_j_00_ZS_at_xs, wave_dual_j_00_ZS_at_xs = self.calc_funs_at(j, (0, 0), xs)
            if mode == self.TARGET_DIFF:
                # Full-sample alpha coefficients; their <alpha, alpha_dual>
                # sum enters the 'diff' objective below.
                coeff_j_00_ZS = self.calc_coeffs(wave_base_j_00_ZS_at_xs, wave_dual_j_00_ZS_at_xs, j, xs, balls_info, (0, 0))
                coeffs = np.array(list(coeff_j_00_ZS.values()))
                alphas_norm_2 = (coeffs[:,0] * coeffs[:,1]).sum()
            for i, x in enumerate(xs):
                # Leave-one-out coefficients with sample i removed.
                coeff_no_i_j_00_ZS = self.calc_coeffs_no_i(wave_base_j_00_ZS_at_xs, wave_dual_j_00_ZS_at_xs, j, xs, i, balls_info, (0, 0))
                g_ring_no_i_at_xi = 0.0
                norm2 = 0.0
                for zs in coeff_no_i_j_00_ZS:
                    if zs not in wave_base_j_00_ZS_at_xs:
                        continue
                    alpha_zs, alpha_d_zs = coeff_no_i_j_00_ZS[zs]
                    g_ring_no_i_at_xi += alpha_zs * wave_base_j_00_ZS_at_xs[zs][i]
                    norm2 += alpha_zs * alpha_d_zs
                # q_ring_x ^ 2 / norm2 == f_at_x
                if norm2 == 0.0:
                    if g_ring_no_i_at_xi == 0.0:
                        g_ring_no_i_xs.append(0.0)
                    else:
                        raise RuntimeError('Got norms but no value')
                else:
                    if mode == self.TARGET_NORMED:
                        g_ring_no_i_xs.append(g_ring_no_i_at_xi * g_ring_no_i_at_xi / norm2)
                    else:  # mode == self.TARGET_DIFF
                        g_ring_no_i_xs.append(g_ring_no_i_at_xi * g_ring_no_i_at_xi)
            g_ring_no_i_xs = np.array(g_ring_no_i_xs)
            # Combine leave-one-out densities with the k-NN ball volumes.
            if mode == self.TARGET_NORMED:
                b_hat_j = omega * (np.sqrt(g_ring_no_i_xs) * balls_info.sqrt_vol_k).sum()
            else:  # mode == self.TARGET_DIFF
                b_hat_j = 2 * omega * (np.sqrt(g_ring_no_i_xs) * balls_info.sqrt_vol_k).sum() - alphas_norm_2
            print(mode, j, b_hat_j)
            if best_j is None:
                best_j = j
                best_b_hat_j = b_hat_j
            elif b_hat_j > best_b_hat_j:
                best_j = j
                best_b_hat_j = b_hat_j
            elif stop_on_max:
                # First decrease: the previous level was the maximum.
                self.the_best_j = best_j
                return best_j
            if stop_on_max:
                # When only the argmax is wanted, skip the pdf bookkeeping.
                continue
            # Build the pdf for this level so it can be reported alongside B-hat.
            name = 'WDE Alphas, dj=%d' % j
            if mode == self.TARGET_DIFF:
                pdf = self.calc_pdf(wave_base_j_00_ZS, coeff_j_00_ZS, name)
            else:
                coeff_j_00_ZS = self.calc_coeffs(wave_base_j_00_ZS_at_xs, wave_dual_j_00_ZS_at_xs, j, xs, balls_info, (0, 0))
                pdf = self.calc_pdf(wave_base_j_00_ZS, coeff_j_00_ZS, name)
            elapsed = (datetime.now() - t0).total_seconds()
            best_j_data.append((j, b_hat_j, pdf, elapsed))
        # Mark the level with the maximal B-hat in the collected data.
        best_b_hat = max([info_j[1] for info_j in best_j_data])
        best_j = list(filter(lambda info_j: info_j[1] == best_b_hat, best_j_data))[0][0]
        self.best_j_data = [
            tuple([info_j[0], info_j[0] == best_j, info_j[1], info_j[2], info_j[3]])
            for info_j in best_j_data]
def best_c(self, xs, delta_j, opt_target, th_mode):
    """best c - hard thresholding

    Computes level-0 alphas plus betas for levels 0..delta_j-1, ranks the
    betas by the key implied by `th_mode`, then scans the ranked list adding
    one beta at a time while incrementally updating a leave-one-out
    objective (`opt_target`).  The prefix that maximises the objective
    defines the threshold C.  Results land in self.best_c_found and
    self.best_c_data.
    """
    assert delta_j > 0, 'delta_j must be 1 or more'
    assert opt_target in [self.TARGET_NORMED, self.TARGET_DIFF], 'Wrong optimisation target'
    assert th_mode in [self.TH_CLASSIC, self.TH_ADJUSTED, self.TH_EMP_STD], 'Wrong threshold strategy'
    balls_info = calc_sqrt_vs(xs, self.k)
    self.minx = np.amin(xs, axis=0)
    self.maxx = np.amax(xs, axis=0)
    qqs = self.wave.qq
    # base funs for levels of interest
    dict_triple_J_QQ_ZS__wbase_wbase_at_wdual_at = {}
    dict_triple_J_QQ_ZS__wbase_wbase_at_wdual_at[(0, qqs[0])] = self.calc_funs_at(0, qqs[0], xs)
    for j, qq in itt.product(range(delta_j), qqs[1:]):
        dict_triple_J_QQ_ZS__wbase_wbase_at_wdual_at[(j, qq)] = self.calc_funs_at(j, qq, xs)
    # dict_triple_J_QQ_ZS__wbase_wbase_at_wdual_at [ (j, qq) ] => a triple with
    # wave_base_0_00_ZS, wave_base_0_00_ZS_at_xs, wave_dual_j_00_ZS_at_xs
    # memoise balls
    all_balls = []
    for i in range(len(xs)):
        balls = balls_no_i(balls_info, i)
        all_balls.append(balls)
    # rank betas from large to smallest; we will incrementaly calculate
    # the HD_i for each in turn
    beta_var = True
    all_betas = []
    for (j, qq), triple in dict_triple_J_QQ_ZS__wbase_wbase_at_wdual_at.items():
        _, wave_base_j_qq_ZS_at_xs, wave_dual_j_qq_ZS_at_xs = triple
        if qq == (0, 0):
            # the (0, 0) tensor index carries the scaling (alpha) coefficients
            alphas_dict = self.calc_coeffs(wave_base_j_qq_ZS_at_xs, wave_dual_j_qq_ZS_at_xs, 0, xs, balls_info, (0, 0))
            continue
        cc = self.calc_coeffs(wave_base_j_qq_ZS_at_xs, wave_dual_j_qq_ZS_at_xs, j, xs, balls_info, qq)
        for zs in cc:
            coeff_zs, coeff_d_zs = cc[zs]
            if coeff_zs == 0.0:
                continue
            if beta_var:
                # empirical spread of the leave-one-out betas; only consumed
                # by the TH_EMP_STD ranking (order4)
                coeff_i_vals = []
                for i, x in enumerate(xs):
                    coeff_i, coeff_d_i = self.calc_1_coeff_no_i(wave_base_j_qq_ZS_at_xs, wave_dual_j_qq_ZS_at_xs, j,
                                                                xs, i, all_balls[i], qq, zs)
                    coeff_i_vals.append(coeff_i)
                # coeff_i_std = np.array(coeff_i_vals).std()
                coeff_i_std = (np.array(coeff_i_vals) - coeff_zs).std()
            else:
                coeff_i_std = 0.
            all_betas.append((j, qq, zs, coeff_zs, coeff_d_zs, coeff_i_std))
    # NOTE(review): the two citation comments below appear swapped relative
    # to the lambdas they precede - verify against the papers.
    # order2 : 1995, Donoho, Johnstone, Kerkyacharian, Picard - Wavelet Shrinkage, Asymptopia
    order1 = lambda tt: math.fabs(tt[3])
    # order1 : 1996, Delyon, Juditsky - On Minimax Wavelet Estimators
    order2 = lambda tt: math.fabs(tt[3]) / math.sqrt(delta_j - tt[0])
    # order3 : New things
    # order3 = lambda tt: math.fabs(tt[3]) - 4 * tt[5] ## kind of work for low n
    # order3 = lambda tt: math.fabs(tt[3]) / (math.fabs(tt[3]) * 0.5 + tt[5]) # ??
    # order3 = lambda tt: tt[5]
    # order3 = lambda tt: math.fabs(tt[3]) / tt[5] / math.sqrt(delta_j - tt[0])
    order4 = lambda tt: math.fabs(tt[3]) / tt[5]
    if th_mode == self.TH_CLASSIC:
        key_order = order1
        subtitle = r"$\left| \beta_{j,q,z} \right| \geq C$"
    elif th_mode == self.TH_ADJUSTED:
        key_order = order2
        subtitle = r"$\left| \beta_{j,q,z} \right| \geq C \sqrt{j + 1}$"
    elif th_mode == self.TH_EMP_STD:
        key_order = order4
        subtitle = r"$\left| \beta_{j,q,z} \right| \geq C \hat{\sigma}\left[\beta_{j,q,z}^{(-i)}\right]$"
    else:
        raise RuntimeError('Unknown threshold mode')
    all_betas = sorted(all_betas, key=key_order, reverse=True)
    # get base line for acummulated values by computing alphas and the
    # target HD_i functions
    _, wave_base_0_00_ZS_at_xs, wave_dual_0_00_ZS_at_xs = dict_triple_J_QQ_ZS__wbase_wbase_at_wdual_at[(0, (0, 0))]
    g_ring_no_i_xs = np.zeros(xs.shape[0])
    norm2_xs = np.zeros(xs.shape[0])
    for i, x in enumerate(xs):
        coeff_no_i_0_00_ZS = self.calc_coeffs_no_i(wave_base_0_00_ZS_at_xs, wave_dual_0_00_ZS_at_xs, 0, xs, i,
                                                   balls_info, (0, 0))
        for zs in coeff_no_i_0_00_ZS:
            if zs not in wave_base_0_00_ZS_at_xs:
                continue
            alpha_zs, alpha_d_zs = coeff_no_i_0_00_ZS[zs]
            g_ring_no_i_xs[i] += alpha_zs * wave_base_0_00_ZS_at_xs[zs][i]
            norm2_xs[i] += alpha_zs * alpha_d_zs
    ## print('g_ring_no_i_xs', g_ring_no_i_xs * g_ring_no_i_xs) << !!! OK !!!
    # count the non-zero alphas - only used in the reported coefficient totals
    num_alphas = 0
    for zs in alphas_dict:
        alpha_zs, alpha_d_zs = alphas_dict[zs]
        if alpha_zs == 0.0 or alpha_d_zs == 0.0:
            continue
        num_alphas += 1
    omega_nk = calc_omega(xs.shape[0], self.k)
    best_c_data = []
    best_hat = None
    self.best_c_found = None
    # threshold scan: add ranked betas one at a time, mutating the
    # g_ring_no_i_xs / norm2_xs accumulators in place
    for cx, beta_info in enumerate(all_betas):
        j, qq, zs, coeff, coeff_d, coeff_i_std = beta_info
        _, wave_base_j_qq_ZS_at_xs, wave_dual_j_qq_ZS_at_xs = dict_triple_J_QQ_ZS__wbase_wbase_at_wdual_at[(j, qq)]
        coeff_i_vals = []
        for i, x in enumerate(xs):
            coeff_i, coeff_d_i = self.calc_1_coeff_no_i(wave_base_j_qq_ZS_at_xs, wave_dual_j_qq_ZS_at_xs, j, xs, i, all_balls[i], qq, zs)
            if zs not in wave_base_j_qq_ZS_at_xs:
                continue
            g_ring_no_i_xs[i] += coeff_i * wave_base_j_qq_ZS_at_xs[zs][i]
            norm2_xs[i] += coeff_i * coeff_d_i
            coeff_i_vals.append(coeff_i)
        if opt_target == self.TARGET_NORMED:
            b_hat_beta = omega_nk * (np.sqrt(g_ring_no_i_xs * g_ring_no_i_xs / norm2_xs) * balls_info.sqrt_vol_k).sum()
        else:  # mode == self.MODE_DIFF:
            b_hat_beta = 2 * omega_nk * (np.sqrt(g_ring_no_i_xs * g_ring_no_i_xs) * balls_info.sqrt_vol_k).sum() - norm2_xs.mean()
        best_c_data.append((key_order(beta_info), b_hat_beta, np.array(coeff_i_vals).std(), num_alphas + cx + 1))
    # calc best
    if len(best_c_data) > 0:
        pos_c = np.argmax(np.array([tt[1] for tt in best_c_data]))
        print('Best C', best_c_data[pos_c], '@ %d' % pos_c)
        name = 'WDE C = %f (%d + %d)' % (best_c_data[pos_c][0], num_alphas, pos_c + 1)
        the_betas = all_betas[:pos_c + 1]
    else:
        name = 'WDE C = None'
        the_betas = []
    pdf = self.calc_pdf_with_betas(dict_triple_J_QQ_ZS__wbase_wbase_at_wdual_at, alphas_dict, the_betas, name, subtitle)
    if len(best_c_data) > 0:
        self.best_c_found = (pdf, best_c_data[pos_c])
        self.best_c_data = best_c_data
    else:
        self.best_c_found = (pdf, None)
        self.best_c_data = best_c_data
def best_greedy_not_working(self, xs, delta_j, mode):
    "best c - greedy optimisation `go`"
    # NOTE(review): kept for reference only - the name says this strategy
    # does not work; see best_greedy for the current variant.
    assert delta_j > 0, 'delta_j must be 1 or more'
    assert mode in [self.MODE_NORMED, self.MODE_DIFF], 'Wrong mode'
    random.seed(1)
    balls_info = calc_sqrt_vs(xs, self.k)
    self.minx = np.amin(xs, axis=0)
    self.maxx = np.amax(xs, axis=0)
    qqs = self.wave.qq
    # base funs for levels of interest
    calc_funs_at = lambda key: self.calc_funs_at(key[0], key[1], xs)
    dict_triple_J_QQ_ZS__wbase_wbase_at_wdual_at = dictwithfactory(calc_funs_at)
    # dict_triple_J_QQ_ZS__wbase_wbase_at_wdual_at [ (j, qq) ] => a triple with
    # wave_base_0_00_ZS, wave_base_0_00_ZS_at_xs, wave_dual_j_00_ZS_at_xs
    # memoise balls
    all_balls = []
    for i in range(len(xs)):
        balls = balls_no_i(balls_info, i)
        all_balls.append(balls)
    # rank betas from large to smallest; we will incrementaly calculate
    # the HD_i for each in turn
    triple = dict_triple_J_QQ_ZS__wbase_wbase_at_wdual_at[(0, (0, 0))]
    _, wave_base_j_qq_ZS_at_xs, wave_dual_j_qq_ZS_at_xs = triple
    alphas_dict = self.calc_coeffs(wave_base_j_qq_ZS_at_xs, wave_dual_j_qq_ZS_at_xs, 0, xs, balls_info, (0, 0))
    # get base line for acummulated values by computing alphas and the
    # target HD_i functions
    # >> calculate alphas >> same as best_c
    _, wave_base_0_00_ZS_at_xs, wave_dual_0_00_ZS_at_xs = dict_triple_J_QQ_ZS__wbase_wbase_at_wdual_at[(0, (0, 0))]
    g_ring_no_i_xs = np.zeros(xs.shape[0])
    norm2_xs = np.zeros(xs.shape[0])
    for i, x in enumerate(xs):
        coeff_no_i_0_00_ZS = self.calc_coeffs_no_i(wave_base_0_00_ZS_at_xs, wave_dual_0_00_ZS_at_xs, 0, xs, i,
                                                   balls_info, (0, 0))
        for zs in coeff_no_i_0_00_ZS:
            if zs not in wave_base_0_00_ZS_at_xs:
                continue
            alpha_zs, alpha_d_zs = coeff_no_i_0_00_ZS[zs]
            g_ring_no_i_xs[i] += alpha_zs * wave_base_0_00_ZS_at_xs[zs][i]
            norm2_xs[i] += alpha_zs * alpha_d_zs
    ## print('g_ring_no_i_xs', g_ring_no_i_xs * g_ring_no_i_xs) << !!! OK !!!
    def populate_at(new_key, populate_mode):
        # refill curr_betas; 'by_j' loads a whole resolution level at a time
        if populate_mode == 'by_j':
            j, _, _ = new_key
            if len(curr_betas.keys()) == 0:
                # add new level
                j = j + 1
            print('populate_at - new level', j)
            for qq in qqs[1:]:
                triple = dict_triple_J_QQ_ZS__wbase_wbase_at_wdual_at[(j, qq)]
                _, wave_base_j_qq_ZS_at_xs, wave_dual_j_qq_ZS_at_xs = triple
                cc = self.calc_coeffs(wave_base_j_qq_ZS_at_xs, wave_dual_j_qq_ZS_at_xs, j, xs, balls_info, qq)
                for zs in cc:
                    coeff_zs, coeff_d_zs = cc[zs]
                    if coeff_zs == 0.0:
                        continue
                    curr_betas[(j, qq, zs)] = coeff_zs, coeff_d_zs
            print('curr_betas #', len(curr_betas))
            return
        if populate_mode == 'by_near_zs':
            raise RuntimeError('by_near_zs not implemented')
        raise RuntimeError('populate_mode_wrong')
    def beta_factory(key):
        # memoised leave-one-out beta for (j, qq, zs, i)
        j, qq, zs, i = key
        coeff_i, coeff_d_i = self.calc_1_coeff_no_i(wave_base_j_qq_ZS_at_xs, wave_dual_j_qq_ZS_at_xs, j, xs, i,
                                                    all_balls[i], qq, zs)
        return coeff_i, coeff_d_i
    betas_no_i_j_qq_zz_i = dictwithfactory(beta_factory)
    def g_ring_calc(j, qq, zs):
        # estimate obtained if beta (j, qq, zs) were added on top of the
        # current g_ring_no_i_xs / norm2_xs baselines (copies, no mutation)
        loc_g_ring_no_i_xs = g_ring_no_i_xs.copy()
        loc_norm2_xs = norm2_xs.copy()
        coeff_i_vals = []
        for i, x in enumerate(xs):
            coeff_i, coeff_d_i = betas_no_i_j_qq_zz_i[(j, qq, zs, i)]
            if zs not in wave_base_j_qq_ZS_at_xs:
                continue
            loc_g_ring_no_i_xs[i] += coeff_i * wave_base_j_qq_ZS_at_xs[zs][i]
            loc_norm2_xs[i] += coeff_i * coeff_d_i
            coeff_i_vals.append(coeff_i)
        return loc_g_ring_no_i_xs, loc_norm2_xs, np.array(coeff_i_vals)
    ball_std = balls_info.sqrt_vol_k.std()
    def get_all_betas():
        # score every candidate beta by the objective it would produce
        resp = []
        for k, v in curr_betas.items():
            j, qq, zs = k
            coeff_zs, coeff_d_zs = v
            loc_g_ring_no_i_xs, loc_norm2_xs, betas_j_qq_zs_no_i = g_ring_calc(j, qq, zs)
            if mode == self.MODE_NORMED:
                b_hat_beta = omega_nk * (np.sqrt(loc_g_ring_no_i_xs * loc_g_ring_no_i_xs / loc_norm2_xs) * balls_info.sqrt_vol_k).sum()
            else:  # mode == self.MODE_DIFF:
                b_hat_beta = 2 * omega_nk * (np.sqrt(loc_g_ring_no_i_xs * loc_g_ring_no_i_xs) * balls_info.sqrt_vol_k).sum() - loc_norm2_xs.mean()
            if len(betas_j_qq_zs_no_i) == 0:
                continue
            #print(j, qq, zs, b_hat_beta, coeff_zs, 3 * math.sqrt(betas_j_qq_zs_no_i.std()))
            correction = 2 * math.sqrt(betas_j_qq_zs_no_i.std()) ##np.abs(loc_g_ring_no_i_xs).std() ## * (j+1) ##* ball_std
            b_hat_std = betas_j_qq_zs_no_i.std()
            resp.append((j, qq, zs, coeff_zs, coeff_d_zs, b_hat_beta + correction, b_hat_beta, b_hat_std))
        return resp
    popu_mode = 'by_j'
    the_betas = []
    omega_nk = calc_omega(xs.shape[0], self.k)
    found = True
    curr_betas = {}
    curr_b_hat_beta = None
    # populate w/ j = 0, all QQ
    populate_at((-1, None, None), 'by_j')
    betas_num = 10
    ## << BEST !! count number of betas of current level as we know it
    ## 180 or 90 give very good results
    curr_j = 0
    used_level = False
    while curr_j < 6:
        all_betas = get_all_betas()
        if len(all_betas) == 0:
            populate_at((curr_j, None, None), popu_mode)
            curr_j += 1
            used_level = False
            continue
        fkey1 = lambda tt: tt[5]
        fkey2 = lambda tt: math.fabs(tt[3])*tt[5]
        fkey3 = lambda tt: tt[3]*tt[3]*tt[5]
        fkey4 = lambda tt: math.fabs(tt[3])*tt[5]/tt[6]
        fkey5 = lambda tt: math.fabs(tt[3]) * tt[5] - tt[6]
        fkey6 = lambda tt: tt[5] - tt[6] / (curr_j + 1)
        fkey7 = lambda tt: tt[5] / tt[6]
        fkey8 = lambda tt: math.fabs(tt[3])/tt[6]
        fkey = fkey1
        all_betas = sorted(all_betas, key=fkey, reverse=True)
        ##print(all_betas)
        # print(all_betas[0], ':', fkey(all_betas[0]), '..(%d)..' % len(all_betas), all_betas[-1], ':', fkey(all_betas[-1]))
        # import seaborn as sns
        # import matplotlib.pyplot as plt
        # xx = np.array([(tt[3], fkey(tt)) for tt in all_betas])
        # ##xx = xx - xx.min()
        # sns.scatterplot(xx[:,0], xx[:,1])
        # plt.show()
        # raise RuntimeError('blah')
        ## ix = random.choices(list(range(all_betas)), weights=[fkey(tt) for tt in all_betas])
        chosen_betas = all_betas[:betas_num]
        new_b_hat_beta = max([tt[5] for tt in chosen_betas])
        if curr_b_hat_beta is None or new_b_hat_beta > curr_b_hat_beta:
            ## print('.'*betas_num, end='')
            curr_b_hat_beta = min([tt[5] for tt in chosen_betas])
            used_level = True
            print(all_betas[0], curr_b_hat_beta)
            for ix_tuple in chosen_betas:
                the_betas.append(ix_tuple)
                del curr_betas[ix_tuple[:3]]
                ## populate_at(ix_tuple[:3], popu_mode)
                # commit the accepted beta into the running baselines
                g_ring_no_i_xs, norm2_xs, _ = g_ring_calc(*ix_tuple[:3])
            continue
        if not used_level:
            break
        if curr_j + 1 >= 6:
            break
        print('\n next level, # betas =', len(the_betas))
        for k in list(curr_betas.keys()):
            del curr_betas[k]
        populate_at((curr_j, None, None), popu_mode)
        curr_j += 1
        used_level = False
    print('')
    name = 'WDE greedy = %f' % curr_b_hat_beta
    the_betas_p = [tt[:6] for tt in the_betas]
    pdf = self.calc_pdf_with_betas(dict_triple_J_QQ_ZS__wbase_wbase_at_wdual_at, alphas_dict, the_betas_p, name)
    self.best_c_found = (pdf, curr_b_hat_beta)
    self.best_c_data = [(ix, tt[5]) for ix, tt in enumerate(the_betas)]
def best_greedy(self, xs, delta_j, j0, opt_target):
    "best c - greedy optimisation `go`"
    # Greedy forward selection: all candidate betas for levels
    # j0..j0+delta_j-1 are computed up front; at each step the beta that
    # most improves the leave-one-out objective is committed, stopping
    # when no candidate improves on the smoothed current best.
    assert delta_j > 0, 'delta_j must be 1 or more'
    assert opt_target in [self.TARGET_NORMED, self.TARGET_DIFF], 'Wrong optimisation target'
    balls_info = calc_sqrt_vs(xs, self.k)
    self.minx = np.amin(xs, axis=0)
    self.maxx = np.amax(xs, axis=0)
    qqs = self.wave.qq
    # base funs for levels of interest
    calc_funs_at = lambda key: self.calc_funs_at(key[0], key[1], xs)
    dict_triple_J_QQ_ZS__wbase_wbase_at_wdual_at = dictwithfactory(calc_funs_at)
    # dict_triple_J_QQ_ZS__wbase_wbase_at_wdual_at [ (j, qq) ] => a triple with
    # wave_base_0_00_ZS, wave_base_0_00_ZS_at_xs, wave_dual_j_00_ZS_at_xs
    # memoise balls
    all_balls = []
    for i in range(len(xs)):
        balls = balls_no_i(balls_info, i)
        all_balls.append(balls)
    # rank betas from large to smallest; we will incrementaly calculate
    # the HD_i for each in turn
    triple = dict_triple_J_QQ_ZS__wbase_wbase_at_wdual_at[(0, (0, 0))]
    _, wave_base_j_qq_ZS_at_xs, wave_dual_j_qq_ZS_at_xs = triple
    alphas_dict = self.calc_coeffs(wave_base_j_qq_ZS_at_xs, wave_dual_j_qq_ZS_at_xs, 0, xs, balls_info, (0, 0))
    # get base line for acummulated values by computing alphas and the
    # target HD_i functions
    # >> calculate alphas >> same as best_c
    _, wave_base_0_00_ZS_at_xs, wave_dual_0_00_ZS_at_xs = dict_triple_J_QQ_ZS__wbase_wbase_at_wdual_at[(0, (0, 0))]
    g_ring_no_i_xs = np.zeros(xs.shape[0])
    norm2_xs = np.zeros(xs.shape[0])
    for i, x in enumerate(xs):
        coeff_no_i_0_00_ZS = self.calc_coeffs_no_i(wave_base_0_00_ZS_at_xs, wave_dual_0_00_ZS_at_xs, 0, xs, i,
                                                   balls_info, (0, 0))
        for zs in coeff_no_i_0_00_ZS:
            if zs not in wave_base_0_00_ZS_at_xs:
                continue
            alpha_zs, alpha_d_zs = coeff_no_i_0_00_ZS[zs]
            g_ring_no_i_xs[i] += alpha_zs * wave_base_0_00_ZS_at_xs[zs][i]
            norm2_xs[i] += alpha_zs * alpha_d_zs
    ## print('g_ring_no_i_xs', g_ring_no_i_xs * g_ring_no_i_xs) << !!! OK !!!
    def populate_betas():
        # compute every non-zero beta for levels j0 .. j0 + delta_j - 1
        for dj in range(delta_j):
            j = j0 + dj
            print('Calc. betas for level %d' % j)
            for qq in qqs[1:]:
                triple = dict_triple_J_QQ_ZS__wbase_wbase_at_wdual_at[(j, qq)]
                _, wave_base_j_qq_ZS_at_xs, wave_dual_j_qq_ZS_at_xs = triple
                cc = self.calc_coeffs(wave_base_j_qq_ZS_at_xs, wave_dual_j_qq_ZS_at_xs, j, xs, balls_info, qq)
                for zs in cc:
                    coeff_zs, coeff_d_zs = cc[zs]
                    if coeff_zs == 0.0:
                        continue
                    curr_betas[(j, qq, zs)] = coeff_zs, coeff_d_zs
        print('curr_betas #', len(curr_betas))
    def beta_factory(key):
        # memoised leave-one-out beta for (j, qq, zs, i)
        j, qq, zs, i = key
        coeff_i, coeff_d_i = self.calc_1_coeff_no_i(wave_base_j_qq_ZS_at_xs, wave_dual_j_qq_ZS_at_xs, j, xs, i,
                                                    all_balls[i], qq, zs)
        return coeff_i, coeff_d_i
    betas_no_i_j_qq_zz_i = dictwithfactory(beta_factory)
    def g_ring_calc(j, qq, zs):
        # estimate obtained if beta (j, qq, zs) were added on top of the
        # current g_ring_no_i_xs / norm2_xs baselines (copies, no mutation)
        loc_g_ring_no_i_xs = g_ring_no_i_xs.copy()
        loc_norm2_xs = norm2_xs.copy()
        coeff_i_vals = []
        for i, x in enumerate(xs):
            coeff_i, coeff_d_i = betas_no_i_j_qq_zz_i[(j, qq, zs, i)]
            if zs not in wave_base_j_qq_ZS_at_xs:
                continue
            loc_g_ring_no_i_xs[i] += coeff_i * wave_base_j_qq_ZS_at_xs[zs][i]
            loc_norm2_xs[i] += coeff_i * coeff_d_i
            coeff_i_vals.append(coeff_i)
        return loc_g_ring_no_i_xs, loc_norm2_xs, np.array(coeff_i_vals)
    ball_std = balls_info.sqrt_vol_k.std()
    def calc_b_hat():
        # score every remaining candidate beta by its (corrected) objective
        resp = []
        for k, v in curr_betas.items():
            j, qq, zs = k
            coeff_zs, coeff_d_zs = v
            loc_g_ring_no_i_xs, loc_norm2_xs, betas_j_qq_zs_no_i = g_ring_calc(j, qq, zs)
            if opt_target == self.TARGET_NORMED:
                b_hat_beta = omega_nk * (np.sqrt(loc_g_ring_no_i_xs * loc_g_ring_no_i_xs / loc_norm2_xs) * balls_info.sqrt_vol_k).sum()
            else:  # mode == self.MODE_DIFF:
                b_hat_beta = 2 * omega_nk * (np.sqrt(loc_g_ring_no_i_xs * loc_g_ring_no_i_xs) * balls_info.sqrt_vol_k).sum() - loc_norm2_xs.mean()
            if len(betas_j_qq_zs_no_i) == 0:
                continue
            #print(j, qq, zs, b_hat_beta, coeff_zs, 3 * math.sqrt(betas_j_qq_zs_no_i.std()))
            ## correction = 3 * math.sqrt(betas_j_qq_zs_no_i.std()) ##np.abs(loc_g_ring_no_i_xs).std() ## * (j+1) ##* ball_std
            correction = math.fabs(coeff_zs) / betas_j_qq_zs_no_i.std()
            b_hat_std = betas_j_qq_zs_no_i.std()
            resp.append((j, qq, zs, coeff_zs, coeff_d_zs, b_hat_beta + correction, b_hat_beta, b_hat_std))
        return resp
    popu_mode = 'by_j'
    the_betas = []
    omega_nk = calc_omega(xs.shape[0], self.k)
    found = True
    curr_betas = {}
    curr_b_hat_beta = None
    # populate w/ j = 0, all QQ
    populate_betas()
    # betas_ref : position of b_hat that we will use to stop iteration. If we can't improve \hat{B}
    # beyond the value at position betas_ref next time, we consider the optimum reached.
    betas_ref = 3
    while True:
        curr_b_hat = calc_b_hat()
        if len(curr_b_hat) == 0:
            break
        fkey1 = lambda tt: tt[5]
        fkey = fkey1
        curr_b_hat = sorted(curr_b_hat, key=fkey, reverse=True)
        new_b_hat_beta = fkey(curr_b_hat[0])
        if curr_b_hat_beta is None or new_b_hat_beta > curr_b_hat_beta:
            ## we use a slightly less optimal value to smooth target a little bit
            curr_b_hat_beta = fkey(curr_b_hat[betas_ref - 1])
            print(curr_b_hat[0], curr_b_hat_beta)
            the_betas.append(curr_b_hat[0])
            del curr_betas[curr_b_hat[0][:3]]
            # commit the accepted beta into the running baselines
            g_ring_no_i_xs, norm2_xs, _ = g_ring_calc(*curr_b_hat[0][:3])
            continue
        else:
            break
    print('')
    name = 'WDE greedy = %f' % curr_b_hat_beta
    the_betas_p = [tt[:6] for tt in the_betas]
    pdf = self.calc_pdf_with_betas(dict_triple_J_QQ_ZS__wbase_wbase_at_wdual_at, alphas_dict, the_betas_p, name)
    self.best_c_found = (pdf, curr_b_hat_beta)
    self.best_c_data = [(ix, tt[5]) for ix, tt in enumerate(the_betas)]
def calc_pdf(self, base_fun, alphas, name):
    """Build the estimated density from alpha coefficients only.

    Returns a callable pdf(xs) computing (sum_z alpha_z * phi_z(xs))^2 / norm2,
    where norm2 = sum_z alpha_z * alpha_dual_z over the zs present in
    `base_fun`.  Raises RuntimeError when that norm is zero.  The returned
    function carries a `.name` attribute for plotting/reporting.
    """
    norm2 = sum(a_zs * a_d_zs
                for zs, (a_zs, a_d_zs) in alphas.items()
                if zs in base_fun)
    if norm2 == 0.0:
        raise RuntimeError('No norm')

    def pdf(xs, alphas=alphas, norm2=norm2, base_fun=base_fun):
        acc = np.zeros(xs.shape[0])
        for zs, (alpha_zs, _alpha_d_zs) in alphas.items():
            if zs in base_fun:
                acc = acc + alpha_zs * base_fun[zs](xs)
        # q_ring_x ^ 2 / norm2 == f_at_x
        return acc * acc / norm2

    pdf.name = name
    return pdf
def calc_pdf_with_betas(self, base_funs_j, alphas, betas, name, subtitle=None):
    "Calculate the pdf for given alphas and betas"
    # normalisation: alpha * alpha_dual terms plus beta * beta_dual terms,
    # counting only coefficients whose base function actually exists
    alpha_base, _, _ = base_funs_j[(0, (0, 0))]
    norm2 = sum(a_zs * a_d_zs
                for zs, (a_zs, a_d_zs) in alphas.items()
                if zs in alpha_base)
    for j, qq, zs, coeff_zs, coeff_d_zs, coeff_std in betas:
        beta_base, _, _ = base_funs_j[(j, qq)]
        if zs in beta_base:
            norm2 += coeff_zs * coeff_d_zs
    if norm2 == 0.0:
        raise RuntimeError('No norm')

    def pdf(xs, alphas=alphas, betas=betas, norm2=norm2, base_funs_j=base_funs_j):
        base_fun, _, _ = base_funs_j[(0, (0, 0))]
        acc = np.zeros(xs.shape[0])
        for zs, (alpha_zs, _a_d) in alphas.items():
            if zs in base_fun:
                acc = acc + alpha_zs * base_fun[zs](xs)
        for j, qq, zs, coeff_zs, _c_d, _c_std in betas:
            fun, _, _ = base_funs_j[(j, qq)]
            if zs in fun:
                acc = acc + coeff_zs * fun[zs](xs)
        # q_ring_x ^ 2 / norm2 == f_at_x
        return acc * acc / norm2

    pdf.name = name
    pdf.subtitle = subtitle
    return pdf
def calc_funs_at(self, j, qq, xs):
    """
    :param j: int, resolution level
    :param qq: tensor index in R^d
    :param xs: data in R^d
    :return: (base funs, base @ xs, dual @ xs)
        funs[zs] = base-wave _{j,zs}^{(qq)}
        base @ xs[zs] = base-wave _{j,zs}^{(qq)}(xs)
        dual @ xs[zs] = dual-wave _{j,zs}^{(qq)}(xs)
    """
    base_funs, dual_funs = self.calc_funs(j, qq)
    # pre-evaluate every translate at the sample points once
    base_at_xs = {zs: fun(xs) for zs, fun in base_funs.items()}
    dual_at_xs = {zs: fun(xs) for zs, fun in dual_funs.items()}
    return base_funs, base_at_xs, dual_at_xs
def calc_funs(self, j, qq):
    """
    :param j: int, resolution level
    :param qq: tensor index in R^d
    :return: (base funs, dual funs)
        funs[zs] = base|dual wave _{j,zs}^{(qq)}
    """
    # per-axis scale 2^(j + j0) for each base resolution offset j0
    jpow2 = np.array([2 ** (j + j0) for j0 in self.j0s])

    def funs_for(what):
        # every translate zs whose support intersects [minx, maxx]
        zs_min, zs_max = self.wave.z_range(what, (qq, jpow2, None), self.minx, self.maxx)
        return {zs: self.wave.fun_ix(what, (qq, jpow2, zs))
                for zs in itt.product(*all_zs_tensor(zs_min, zs_max))}

    # dual first to keep the original evaluation order
    dual_funs = funs_for('dual')
    base_funs = funs_for('base')
    return base_funs, dual_funs
def calc_coeffs(self, wave_base_j_qq_ZS_at_xs, wave_dual_j_qq_ZS_at_xs, j, xs, balls_info, qq):
    """Estimate (alpha, alpha_dual) pairs for every translate zs at level j.

    Alphas come from the dual functions, alpha-duals from the base ones;
    for orthogonal wavelets the two coincide and the second pass is skipped.
    """
    jpow2 = np.array([2 ** (j + j0) for j0 in self.j0s])
    omega = calc_omega(xs.shape[0], self.k)
    balls = balls_info.sqrt_vol_k

    resp = {}
    zs_min, zs_max = self.wave.z_range('dual', (qq, jpow2, None), self.minx, self.maxx)
    for zs in itt.product(*all_zs_tensor(zs_min, zs_max)):
        alpha_zs = omega * (wave_dual_j_qq_ZS_at_xs[zs] * balls).sum()
        resp[zs] = (alpha_zs, alpha_zs)
    if self.wave.orthogonal:
        # dual == base: the pair (alpha, alpha) is already correct
        return resp
    zs_min, zs_max = self.wave.z_range('base', (qq, jpow2, None), self.minx, self.maxx)
    for zs in itt.product(*all_zs_tensor(zs_min, zs_max)):
        if zs in resp:
            alpha_d_zs = omega * (wave_base_j_qq_ZS_at_xs[zs] * balls).sum()
            resp[zs] = (resp[zs][0], alpha_d_zs)
    return resp
def calc_coeffs_no_i(self, wave_base_j_qq_ZS_at_xs, wave_dual_j_qq_ZS_at_xs, j, xs, i, balls_info, qq):
    "Calculate alphas (w/ dual) and alpha-duals (w/ base)"
    jpow2 = np.array([2 ** (j + j0) for j0 in self.j0s])
    omega_no_i = calc_omega(xs.shape[0] - 1, self.k)
    vol_no_i = balls_no_i(balls_info, i)

    def loo_coeff(fun_at_xs, zs):
        # leave-one-out coefficient: full weighted sum minus sample i's own
        # term - much cheaper than re-summing the n-1 remaining terms
        vals = fun_at_xs[zs]
        return omega_no_i * ((vals * vol_no_i).sum() - vals[i] * vol_no_i[i])

    resp = {}
    zs_min, zs_max = self.wave.z_range('dual', (qq, jpow2, None), self.minx, self.maxx)
    for zs in itt.product(*all_zs_tensor(zs_min, zs_max)):
        alpha_zs = loo_coeff(wave_dual_j_qq_ZS_at_xs, zs)
        resp[zs] = (alpha_zs, alpha_zs)
    if self.wave.orthogonal:
        # dual == base: nothing further to compute
        return resp
    zs_min, zs_max = self.wave.z_range('base', (qq, jpow2, None), self.minx, self.maxx)
    for zs in itt.product(*all_zs_tensor(zs_min, zs_max)):
        if zs in resp:
            resp[zs] = (resp[zs][0], loo_coeff(wave_base_j_qq_ZS_at_xs, zs))
    return resp
def calc_1_coeff_no_i(self, base_fun_xs, dual_fun_xs, j, xs, i, balls, qq, zs):
    """Leave-one-out (coeff, coeff_dual) for a single translate zs.

    Missing translates contribute 0.0; for orthogonal wavelets the dual
    coefficient is reused as its own dual partner.
    """
    omega_no_i = calc_omega(xs.shape[0] - 1, self.k)

    def loo_sum(fun_xs):
        if zs not in fun_xs:
            return 0.0
        vals = fun_xs[zs]
        # full weighted sum minus sample i's own contribution
        return omega_no_i * ((vals * balls).sum() - vals[i] * balls[i])

    coeff = loo_sum(dual_fun_xs)
    if self.wave.orthogonal:
        return coeff, coeff
    return coeff, loo_sum(base_fun_xs)
def balls_no_i(balls_info, i):
    """Sqrt ball volumes to use when observation ``i`` is left out.

    For any sample whose k nearest neighbours include i, its k-th ball
    would shrink without i, so the (k+1)-th ball volume is substituted;
    all other samples keep their k-th ball volume.  The index i itself is
    removed at the caller's site.
    """
    neighbours = balls_info.nn_indexes[:, :-1]
    uses_i = (neighbours == i).any(axis=1)
    return np.where(uses_i, balls_info.sqrt_vol_k_plus_1, balls_info.sqrt_vol_k)
def calc_omega(n, k):
    "Bias correction for k-th nearest neighbours sum for sample size n"
    gamma_ratio = gamma(k) / gamma(k + 0.5)
    return math.sqrt(n - 1) * gamma_ratio / n
# Per-sample k-NN geometry: sqrt volumes of the k-th and (k+1)-th neighbour
# balls plus the neighbour index matrix returned by BallTree.query.
BallsInfo = namedtuple('BallsInfo', ['sqrt_vol_k', 'sqrt_vol_k_plus_1', 'nn_indexes'])
def calc_sqrt_vs(xs, k):
    "Returns BallsInfo object with sqrt of volumes of k-th balls and (k+1)-th balls"
    dim = xs.shape[1]
    ball_tree = BallTree(xs)
    # xs is both data and query set, so each point's nearest neighbour is
    # itself - ask for k + 2 neighbours to reach the k-th and (k+1)-th
    # genuine ones (the last two distance columns)
    dist, inx = ball_tree.query(xs, k + 2)
    # sqrt(volume) of a radius-r ball scales as r^(dim/2)
    sqrt_vols = np.power(dist[:, -2:], dim / 2) * sqrt_vunit(dim)
    return BallsInfo(sqrt_vols[:, 0], sqrt_vols[:, 1], inx)
def sqrt_vunit(dim):
    "Square root of Volume of unit hypersphere in d dimensions"
    volume = np.pi ** (dim / 2) / gamma(dim / 2 + 1)
    return math.sqrt(volume)
| 2.015625 | 2 |
JSON_to_Dict_Entities.py | rohankar/motif | 0 | 12758563 | <gh_stars>0
"""Collect entity names from per-document NLP-analysis JSON files.

Reads Entities/1.json .. Entities/123.json and writes one comma-separated
line of entity names per document to Entities/All_Entities.txt.
"""
import json
import os

ENTITY_DIR = 'Entities'
NUM_DOCS = 123  # documents are named 1.json .. 123.json


def entity_names(path):
    """Return the entity names from one analysis JSON file, in file order."""
    with open(path) as fh:
        data = json.load(fh)
    return [entity["name"] for entity in data["entities"]]


# context manager closes the output file (the original handle was never
# closed); os.path.join replaces Windows-only backslash paths
with open(os.path.join(ENTITY_DIR, 'All_Entities.txt'), 'w') as writer:
    for doc_num in range(1, NUM_DOCS + 1):
        print(doc_num)  # progress indicator
        names = entity_names(os.path.join(ENTITY_DIR, '%d.json' % doc_num))
        # join() fixes the stray leading comma the old string-concatenation
        # loop put at the start of every line, and avoids quadratic appends
        writer.write(','.join(names) + '\n')
examples/doc_model_savemodel.py | FaustinCarter/lmfit-py | 0 | 12758564 | #!/usr/bin/env python
# <examples/doc_mode_savemodel.py>
import numpy as np
from lmfit.model import Model, save_model
def mysine(x, amp, freq, shift):
    """Sine model used by the lmfit example: amp * sin(freq * x + shift)."""
    phase = x * freq + shift
    return amp * np.sin(phase)
# Wrap the plain function as an lmfit Model and set starting parameter values.
sinemodel = Model(mysine)
pars = sinemodel.make_params(amp=1, freq=0.25, shift=0)

# Serialise the model (function + parameter hints) so it can be reloaded
# later with lmfit.model.load_model.
save_model(sinemodel, 'sinemodel.sav')
# <end examples/doc_model_savemodel.py>
| 2.375 | 2 |
dft_workflow/job_analysis/get_init_slabs_bare_oh/get_init_slabs_bare_oh.py | raulf2012/PROJ_IrOx_OER | 1 | 12758565 | <filename>dft_workflow/job_analysis/get_init_slabs_bare_oh/get_init_slabs_bare_oh.py<gh_stars>1-10
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.4.2
# kernelspec:
# display_name: Python [conda env:PROJ_irox_oer] *
# language: python
# name: conda-env-PROJ_irox_oer-py
# ---
# # Extract the initial atoms objects for the bare and *OH slabs
# ---
# ### Import Modules
# +
import os
print(os.getcwd())
import sys
import time; ti = time.time()
import pickle
from IPython.display import display
import numpy as np
import pandas as pd
pd.set_option("display.max_columns", None)
pd.options.display.max_colwidth = 20
# pd.set_option('display.max_rows', None)
# #########################################################
from methods import (
get_df_jobs,
get_df_jobs_data,
get_df_active_sites,
)
# -
# Pick the tqdm flavour and verbosity depending on whether we are running
# inside a Jupyter notebook or as a plain script.
from methods import isnotebook
isnotebook_i = isnotebook()
if isnotebook_i:
    from tqdm.notebook import tqdm
    verbose = True
else:
    from tqdm import tqdm
    verbose = False
# ### Read Data

# +
df_jobs = get_df_jobs(exclude_wsl_paths=True)

df_jobs_data = get_df_jobs_data(exclude_wsl_paths=True)
# -

# ### Filtering dataframes to only include `oer_adsorbate` job types

# only OER adsorbate calculations are relevant for the initial-slab extraction
df_jobs = df_jobs[df_jobs.job_type == "oer_adsorbate"]
# ### Main loop
# +
# #########################################################
data_dict_list = []
# #########################################################
group_cols = [
    "compenv", "slab_id",
    "ads", "active_site", "att_num"]
grouped = df_jobs.groupby(group_cols)
# #########################################################
# One group per unique calculation; the rev_num == 1 row is the first
# revision, whose init_atoms is the initial (unrelaxed) slab we want.
for name, group in grouped:
    # for i in range(1):
    # name = ('sherlock', 'fogalonu_46', 'o', 16.0, 1)
    # group = grouped.get_group(name)

    # #####################################################
    compenv_i = name[0]
    slab_id_i = name[1]
    ads_i = name[2]
    active_site_i = name[3]
    att_num_i = name[4]
    # #####################################################

    group = group.drop(
        # cols_to_drop + ["num_revs", "job_id"],
        group_cols + ["num_revs", "job_id"],
        axis=1)

    # #####################################################
    row_i = group[group.rev_num == 1]
    mess_i = "Must only have one row in a group with rev_num=1"
    assert row_i.shape[0] == 1, mess_i
    row_i = row_i.iloc[0]
    job_id_min_i = row_i.name

    # #####################################################
    # debugging hook for a specific job id
    if job_id_min_i == "ruhusunu_66":
        print(name)

    # #####################################################
    row_data_i = df_jobs_data.loc[job_id_min_i]
    # #####################################################
    init_atoms_i = row_data_i.init_atoms
    # #####################################################

    # #####################################################
    data_dict_i = dict()
    # #####################################################
    data_dict_i["compenv"] = compenv_i
    data_dict_i["slab_id"] = slab_id_i
    data_dict_i["ads"] = ads_i
    data_dict_i["active_site"] = active_site_i
    data_dict_i["att_num"] = att_num_i
    data_dict_i["job_id_min"] = job_id_min_i
    data_dict_i["init_atoms"] = init_atoms_i
    # #####################################################
    data_dict_list.append(data_dict_i)
    # #####################################################

# #########################################################
df_init_slabs = pd.DataFrame(data_dict_list)
df_init_slabs = df_init_slabs.set_index(["compenv", "slab_id", "ads", "active_site", "att_num", ])
# #########################################################
# ### Get number of atoms
# +
def method(row_i):
    """Return the atom count of the row's initial slab, or NaN if missing.

    Used as a ``DataFrame.apply`` callback; ``row_i`` must expose
    ``init_atoms`` (an atoms object with ``get_global_number_of_atoms``,
    or None) and ``job_id_min``.
    """
    # #####################################################
    init_atoms_i = row_i.init_atoms
    job_id_min_i = row_i.job_id_min
    # #####################################################

    if init_atoms_i is None:
        print("Couldn't find init_atoms for this job_id")
        print("job_id_min:", job_id_min_i)
        # Bug fix: the original fell through after the warning and crashed
        # on None.get_global_number_of_atoms(); return NaN so the apply
        # over the whole frame can finish.
        return np.nan

    num_atoms_i = init_atoms_i.get_global_number_of_atoms()
    return num_atoms_i
df_init_slabs["num_atoms"] = df_init_slabs.apply(
method,
axis=1)
# -
# ### Save data to pickle

# Pickling data ###########################################
directory = os.path.join(
    os.environ["PROJ_irox_oer"],
    "dft_workflow/job_analysis/get_init_slabs_bare_oh",
    "out_data")
if not os.path.exists(directory): os.makedirs(directory)
with open(os.path.join(directory, "df_init_slabs.pickle"), "wb") as fle:
    pickle.dump(df_init_slabs, fle)
# #########################################################

# +
# Round-trip sanity check: re-read the pickle through the project accessor.
from methods import get_df_init_slabs

df_init_slabs_tmp = get_df_init_slabs()
df_init_slabs_tmp.head()
# -

# #########################################################
print(20 * "# # ")
print("All done!")
print("Run time:", np.round((time.time() - ti) / 60, 3), "min")
print("get_init_slabs_bare_oh.ipynb")
print(20 * "# # ")
# #########################################################
# + active=""
#
#
#
# + jupyter={"source_hidden": true}
# df_dft = get_df_dft()
# df_job_ids = get_df_job_ids()
# df_jobs_data_clusters = get_df_jobs_data_clusters()
# df_slab = get_df_slab()
# df_slab_ids = get_df_slab_ids()
# df_jobs_anal = get_df_jobs_anal()
# df_active_sites = get_df_active_sites()
# df_atoms_sorted_ind = get_df_atoms_sorted_ind()
# + jupyter={"source_hidden": true}
# df_job_ids
# df_jobs
# df_jobs_data
# df_jobs_data_clusters
# df_jobs_anal
# df_atoms_sorted_in
# + jupyter={"source_hidden": true}
# assert False
# + jupyter={"source_hidden": true}
# init_atoms_i
# # row_data_i =
# df_jobs_data.loc[job_id_min_i]
# + jupyter={"source_hidden": true}
# assert False
| 2.078125 | 2 |
anadama/skeleton.py | biobakery/anadama | 0 | 12758566 | <reponame>biobakery/anadama
import os
from os.path import join
import sys
from pprint import pformat
import yaml
from doit.exceptions import InvalidCommand
from .loader import PipelineLoader
from .util.help import print_pipeline_help
OPTIONS_DIR = "_options"
def logger_init(verbose=True):
    """Return a logging callable.

    When verbose, the callable formats its message with str.format and
    writes it to stderr; otherwise it is a no-op returning None.
    """
    def log(msg, *args):
        return sys.stderr.write(msg.format(*args))

    def silent(msg, *args):
        return None

    return log if verbose else silent
def skel_list(pipeline, the_dir):
    """Create *the_dir* containing an empty ``.placeholder`` file.

    The placeholder keeps the otherwise-empty directory visible to
    version control and directory scans.  ``pipeline`` is unused; it is
    part of the skeleton-builder callback signature (see skel_funcs).
    """
    os.mkdir(the_dir)
    placeholder = join(the_dir, ".placeholder")
    with open(placeholder, 'w'):
        pass
# Dispatch table: option value type -> skeleton builder.  list- and
# str-typed options both get a placeholder directory via skel_list.
skel_funcs = {
    list: skel_list,
    str: skel_list
}
YAML_HELP = \
"""# Options are defined as key: value pairs.
# For example, to set the ``nproc`` option to ``16``, do
# nproc: 16
#
# If a workflow expects a boolean value, just write in true or false. i.e:
# parallel: true
#
# Nested mappings or dictionaries are specified with indents, like so:
# qiime_opts:
# a: true
# jobs_to_start: 6
#
# For more information, check out:
# - http://pyyaml.org/wiki/PyYAMLDocumentation#YAMLsyntax
# - http://rschwager-hsph.bitbucket.org/documentation/anadama/your_own_pipeline.html#your-own-pipeline
"""
def commented(doc_str):
    """Return *doc_str* with every line prefixed by ``# `` (YAML comment)."""
    prefixed = ["# " + line for line in doc_str.split("\n")]
    return "\n".join(prefixed)
def write_options(options_dict, fname, workflow_func=None):
    """Write a commented YAML options file for one workflow.

    Args:
        options_dict: default option key/value pairs to dump as YAML.
        fname: destination file path.
        workflow_func: optional workflow function whose docstring is
            embedded (commented out) as inline documentation.
    """
    with open(fname, 'w') as f:
        # ``print >> f`` is Python-2-only syntax (SyntaxError on Python 3);
        # plain writes produce the same bytes on both interpreters.
        f.write(YAML_HELP + "\n")
        # all(...) short-circuits cleanly when workflow_func is None or has
        # an empty docstring.
        if all((workflow_func,
                hasattr(workflow_func, "__doc__"),
                workflow_func.__doc__)):
            f.write("# Workflow Documentation:\n")
            f.write(commented(workflow_func.__doc__) + "\n")
        if options_dict:
            yaml.safe_dump(options_dict, stream=f,
                           default_flow_style=False)
# Cache for the dodo.py skeleton template; filled on first use.
_default_template = None
def default_template():
    """Return the bundled dodo.py template, loading it from disk once.

    The text is cached in the module-level ``_default_template`` so the
    package data file is only read on the first call.
    """
    global _default_template
    if not _default_template:
        package_dir = os.path.abspath(os.path.dirname(__file__))
        template_path = join(package_dir, "default_skel_template.txt")
        with open(template_path) as handle:
            _default_template = handle.read()
    return _default_template
def format_optimports(optpipe_classes):
    """Render one ``from <module> import <name>`` line per pipeline class."""
    lines = []
    for cls in optpipe_classes:
        lines.append("from {0.__module__} import {0.__name__}".format(cls))
    return "\n".join(lines)
def format_optappends(optpipe_classes):
    """Render ``pipeline.append(<name>)`` statements, indented for dodo.py."""
    statements = []
    for cls in optpipe_classes:
        statements.append("pipeline.append({0.__name__})".format(cls))
    return "\n    ".join(statements)
def make_pipeline_skeleton(pipeline_name,
                           optional_pipelines=list(),
                           verbose=True, template=None):
    """Generate an input/ skeleton, options files, dodo.py and README for
    *pipeline_name* (plus any appended optional pipelines) in the current
    working directory.

    Returns the combined (products, default_options, workflows) dicts.

    NOTE(review): this function uses Python-2-only constructs
    (``print >>``, ``dict.iteritems``, list-returning ``map``); it will
    not run under Python 3 as written.
    """
    log = logger_init(verbose)
    PipelineClass = PipelineLoader._import(pipeline_name)
    optpipe_classes = map(PipelineLoader._import, optional_pipelines)
    here = os.getcwd()
    input_dir = join(here, "input")
    options_dir = join(input_dir, OPTIONS_DIR)
    def _combine(attr):
        # Merge the class-level dict *attr* from the main pipeline and all
        # optional pipelines; later pipelines win on key collisions.
        orig = list( getattr(PipelineClass, attr).items() )
        for p in optpipe_classes:
            orig += list( getattr(p, attr).items() )
        return dict(orig)
    attrs_to_combine = ("products", "default_options", "workflows")
    allprods, allopts, allworks = map(_combine, attrs_to_combine)
    opt_import_stmts, append_stmts = str(), str()
    if optpipe_classes:
        opt_import_stmts = format_optimports(optpipe_classes)
        append_stmts = format_optappends(optpipe_classes)
    # Refuse to clobber an existing skeleton.
    if os.path.exists(input_dir):
        raise InvalidCommand("Input directory already exists: "+input_dir)
    log("Constructing input skeleton at {}.\n", input_dir)
    os.mkdir(input_dir)
    os.mkdir(options_dir)
    if not template:
        template = default_template()
    # One input subdirectory per declared product.
    product_dirs = list()
    for name, prod in allprods.iteritems():
        skel_func = skel_funcs.get(type(prod))
        if not skel_func:
            # NOTE(review): the message formats ``skel_func`` (None here)
            # rather than ``type(prod)`` -- looks like a typo upstream.
            msg = "Unable to handle products of type {}"
            raise ValueError(msg.format(skel_func))
        skel_dir = join(input_dir, name)
        log("Creating input directory {} for {}...",
            skel_dir, str(type(prod)))
        skel_func(PipelineClass, skel_dir)
        log("Done.\n")
        product_dirs.append(skel_dir)
    # One commented YAML options file per workflow's default options.
    for name, opt_dict in allopts.iteritems():
        options_fname = join(options_dir, name+".txt")
        log("Writing default options for {}.{} into {}...",
            pipeline_name, name, options_fname)
        workflow_func = allworks.get(name)
        write_options(opt_dict, options_fname, workflow_func)
        log("Done.\n")
    # Render the doit entry-point from the template.
    log("Writing dodo.py file...")
    dodo_fname = "dodo.py"
    with open(dodo_fname, 'w') as dodo_f:
        print >> dodo_f, template.format(
            pipeline_class=PipelineClass,
            known_input_directories=pformat(product_dirs),
            options_dir=repr(options_dir),
            append_imports=opt_import_stmts,
            append_statements=append_stmts
        )
    log("Done.\n")
    help_fname = "README.rst"
    log("Writing help file to {}...", help_fname)
    with open(help_fname, 'w') as help_f:
        print_pipeline_help(PipelineClass, optpipe_classes, stream=help_f)
    log("Done.\n")
    log("Complete.\n")
    return allprods, allopts, allworks
| 2.234375 | 2 |
competition_trick/WBF_emsemble.py | G-Naughty/Fine-grained-OBB-Detection | 2 | 12758567 | <gh_stars>1-10
import cv2
import os
import math
import argparse
import codecs
import copy
import json
import numpy as np
from collections import defaultdict
from shapely.geometry import Polygon
from xml.dom.minidom import Document
from xml.dom import minidom
try:
import xml.etree.cElementTree as ET
except ImportError:
import xml.etree.ElementTree as ET
#from weighted_boxes_fusion.ensemble_boxes import *
#使用gpu计算iou可大大提高集成速度
def parse_args():
    """Parse command-line arguments for the WBF ensembling run."""
    parser = argparse.ArgumentParser(description='results style change')  # e.g. '/home/disk/FAIR1M_emsemble/OBB_epoch12_0.05_41', '/home/disk/FAIR1M_emsemble/redet_epoch24_0.001_45'
    # --models_result: list of per-model result directories to fuse
    parser.add_argument('--models_result', help='所有模型结果的路径列表', default=['/home/disk/FAIR1M_emsemble/OBB_epoch12_0.001_43',
                                                             '/home/disk/FAIR1M_emsemble/redet_epoch24_0.001_45'
                                                             ])
    # --save_path: output directory for the ensembled results
    parser.add_argument('--save_path', help='ensemble后模型结果列表', default='/home/disk/FAIR1M_emsemble/emsemble')
    # --weights: one weight per model, aligned with --models_result
    parser.add_argument('--weights', help='各模型权重', default=[1, 1])
    # --IOU_threshold: boxes overlapping above this IoU are clustered together
    parser.add_argument('--IOU_threshold', help='IOU阈值', default=0.5) #0.7
    # --conf_threshold: minimum (weighted) confidence to keep a box
    parser.add_argument('--conf_threshold', help='置信度阈值', default=0.001)
    args = parser.parse_args()
    # if 'LOCAL_RANK' not in os.environ:
    #     os.environ['LOCAL_RANK'] = str(args.local_rank)
    return args
def mkdirs_if_not_exists(path):
    """Create *path* (including parents) if it does not already exist.

    ``exist_ok=True`` makes the call idempotent and removes the
    check-then-create race of the original ``os.path.exists`` guard.
    """
    os.makedirs(path, exist_ok=True)
def prefilter_boxes(class_objs,weights, thr = 0):
    '''
    Collect, weight and sort all detections of one class.

    Args:
        class_objs: {model_index: [[conf, bbox], ...]} detections per model.
        weights: per-model weight sequence, indexed by model index.
        thr: minimum (already weighted) confidence a box must reach to be kept.
    Returns:
        numpy object array of [conf, bbox] rows, sorted by descending conf
        (empty list-like array when nothing survives the threshold).
    '''
    # Create dict with boxes stored by its label
    new_objs = []
    for model_name, objs in class_objs.items():
        weight = weights[model_name]
        for obj in objs:
            obj[0] = float(obj[0]) * weight  # apply the model weight (mutates the caller's obj in place)
            if obj[0] >= thr:
                new_objs.append(obj)
    # sort by confidence
    new_objs = np.array(new_objs)
    if len(new_objs) > 0:
        new_objs = new_objs[np.argsort(-new_objs[:, 0])]  # descending order
    return new_objs
def rotate_IOU(g,p):
    """Intersection-over-union of two rotated boxes, each given as a flat
    sequence of 8 coordinates (4 corner points)."""
    poly_g = Polygon(np.asarray(g)[:8].reshape((4, 2)))
    poly_p = Polygon(np.asarray(p)[:8].reshape((4, 2)))
    if not poly_g.is_valid or not poly_p.is_valid:
        return 0
    inter = poly_g.intersection(poly_p).area
    union = poly_g.area + poly_p.area - inter
    if union == 0:
        return 0
    return inter / union
#确定属于哪一类
def find_matching_box(boxes_list, new_box, match_iou):
    '''
    Find the existing fused box that best overlaps *new_box*.

    Args:
        boxes_list: fused boxes collected so far, each [conf, bbox]
        new_box: candidate box [conf, bbox]
        match_iou: minimum IoU required to count as a match
    Returns:
        (best_index, best_iou) -- best_index is -1 when nothing overlaps
        above *match_iou*.
    '''
    best_index, best_iou = -1, match_iou
    for idx, candidate in enumerate(boxes_list):
        overlap = rotate_IOU(candidate[1], new_box[1])
        if overlap > best_iou:
            best_index, best_iou = idx, overlap
    return best_index, best_iou
#对box进行整理
def get_weighted_box(boxes, conf_type='avg'):
    """
    Fuse a cluster of boxes into a single box.

    :param boxes: set of boxes to fuse, each in the format [conf, bbox]
    :param conf_type: 'avg' (mean of the confidences) or 'max' (highest)
    :return: [conf, bbox] where bbox is the box with the highest confidence
    :raises ValueError: for an unknown *conf_type* (the original silently
        fell through and crashed later with NameError on ``conf_final``)
    """
    conf = 0
    best_conf = 0
    bbox = None  # bbox of the most confident member wins
    for b in boxes:
        if b[0] > best_conf:
            bbox = b[1]
            best_conf = b[0]
        conf += b[0]
    if conf_type == 'avg':
        conf_final = conf / len(boxes)
    elif conf_type == 'max':
        conf_final = best_conf
    else:
        raise ValueError("conf_type must be 'avg' or 'max', got %r" % conf_type)
    return [conf_final, bbox]
#对一类对象进行WBF操作
def weighted_boxes_fusion(class_objs, weights=None, iou_thr=0.55, skip_box_thr=0.0, conf_type='avg', allows_overflow=False):
    '''
    Weighted-boxes-fusion over the detections of one class.

    :param class_objs: {model_index: [[conf, bbox], ...]} per-model detections
    :param weights: list of weights for each model (must align with class_objs keys)
    :param iou_thr: IoU value for boxes to be a match (clustered together)
    :param skip_box_thr: exclude boxes with weighted score lower than this value
    :param conf_type: how to combine confidences ('avg' or 'max' -- only the
        strategy check uses this; the cluster confidence below is always a mean)
    :param allows_overflow: false if we want confidence score not exceed 1.0
    :return: numpy object array of fused [conf, bbox] rows, or [] when no
        box survives prefiltering
    '''
    # if weights is None:
    #     weights = np.ones(len(class_objs))  # default to equal weights when none are given
    # if len(weights) != len(class_objs):
    #     print('Warning: incorrect number of weights {}. Must be: {}. Set weights equal to 1.'.format(len(weights), len(class_objs)))
    #     weights = np.ones(len(class_objs))
    weights = np.array(weights)
    if conf_type not in ['avg', 'max']:
        print('Unknown conf_type: {}. Must be "avg" or "max"'.format(conf_type))
        exit()
    boxes = prefilter_boxes(class_objs, weights, skip_box_thr)  # weighted + confidence-sorted [conf, bbox] rows
    if len(boxes) == 0:
        return []  # error
    overall_boxes = []
    new_boxes = []
    weighted_boxes = []
    # Clusterize boxes: greedily attach each box to the first cluster it
    # overlaps by more than iou_thr, otherwise start a new cluster.
    for j in range(0, len(boxes)):  # every box of this class
        index, best_iou = find_matching_box(new_boxes, boxes[j], iou_thr)  # which existing cluster (if any) does it overlap?
        if index != -1:
            # cluster slot 0 holds the list of member confidences
            new_boxes[index][0].append(boxes[j][0])
            #weighted_boxes[index] = get_weighted_box(new_boxes[index], conf_type)  # boundary fusion (disabled)
        else:
            new_boxes.append(boxes[j].copy())
            new_boxes[-1][0] = [new_boxes[-1][0]]
            #weighted_boxes.append(boxes[j].copy())
    # Rescale confidence based on number of models and boxes that predicted
    # each cluster.
    for i in range(len(new_boxes)):
        num_predicts = len(new_boxes[i][0])
        new_boxes[i][0] = np.mean(new_boxes[i][0])
        if not allows_overflow:
            new_boxes[i][0] = new_boxes[i][0] * min(weights.sum(),
                                                    num_predicts) / weights.sum()  # cap by total model weight
        else:
            new_boxes[i][0] = new_boxes[i][0] * num_predicts/weights.sum()
    overall_boxes.append(np.array(new_boxes))
    overall_boxes = np.concatenate(overall_boxes, axis=0)
    return overall_boxes
def cal_line_length(point1, point2):
    """Euclidean distance between two 2-D points.

    ``math.hypot`` replaces the manual sqrt(pow + pow): same result,
    clearer, and numerically safer against intermediate overflow.
    """
    return math.hypot(point1[0] - point2[0], point1[1] - point2[1])
def get_best_begin_point_single(coordinate):
    """Rotate a quadrilateral's 4 corners so the ordering best matches
    the canonical order [top-left, top-right, bottom-right, bottom-left]
    of its axis-aligned bounding box.

    *coordinate* is a flat sequence [x1, y1, ..., x4, y4]; the return
    value is the re-ordered flat list of 8 coordinates.
    """
    def _dist(a, b):
        # Same formula the original used via cal_line_length.
        return math.sqrt(math.pow(a[0] - b[0], 2) + math.pow(a[1] - b[1], 2))

    pts = [[coordinate[i], coordinate[i + 1]] for i in range(0, 8, 2)]
    xs = [p[0] for p in pts]
    ys = [p[1] for p in pts]
    # Canonical corner targets of the axis-aligned bounding box.
    dst = [[min(xs), min(ys)], [max(xs), min(ys)],
           [max(xs), max(ys)], [min(xs), max(ys)]]
    best_pts = pts
    best_force = 100000000.0
    for shift in range(4):
        rotated = pts[shift:] + pts[:shift]
        force = sum(_dist(rotated[k], dst[k]) for k in range(4))
        if force < best_force:
            best_force = force
            best_pts = rotated
    # Flatten back to [x1, y1, ..., x4, y4].
    return [coord for point in best_pts for coord in point]
#微调bbox
def anchor_finetune(object , min_anchor_size=0, img_size=(1024,1024)):
    """Clip a rotated box (8 flat coords) to the image rectangle.

    Intersects the box polygon with the image polygon; depending on how
    many vertices the intersection has, either takes it directly (<=4
    points), drops the vertex that minimizes area loss (5 points), or
    falls back to coordinate clamping on the original box (>6 points).

    NOTE(review): the validity guard tests ``img_bbox.is_valid`` twice --
    the second operand was presumably meant to be ``object_bbox.is_valid``.
    NOTE(review): when the guard fails or ``inter.area <= min_anchor_size``,
    ``object_bbox`` is still a shapely Polygon, so the function returns a
    Polygon instead of a coordinate list on those paths.
    """
    img_x, img_y = img_size
    img_bbox = np.asarray([0, 0, img_x, 0, img_x, img_y, 0, img_y])
    img_bbox = Polygon(img_bbox[:8].reshape((4, 2)))  # image rectangle as a polygon
    object_bbox = np.asarray(object)
    object_bbox = Polygon(object_bbox[:8].reshape((4, 2)))  # box as a polygon
    if img_bbox.is_valid and img_bbox.is_valid:
        inter = Polygon(img_bbox).intersection(Polygon(object_bbox))  # clip to the image
        if inter.area > min_anchor_size:
            object_bbox = inter.convex_hull.exterior.coords  # vertices of the clipped hull
            bbox_x, bbox_y = object_bbox.xy
            if len(bbox_x) <= 5:  # 4 or 3 vertices (ring repeats the first point)
                object_bbox = [int(bbox_x[0]), int(bbox_y[0]), int(bbox_x[1]), int(bbox_y[1]), int(bbox_x[2]),
                               int(bbox_y[2]), int(bbox_x[3]), int(bbox_y[3])]
            elif len(bbox_x) == 6: # 5 vertices: drop the one whose removal keeps the most area
                point = [[bbox_x[i], bbox_y[i]] for i in range(len(bbox_x) -1)]
                area_max = 0
                for i in range(np.shape(point)[0]):
                    bbox = copy.deepcopy(point)
                    bbox.pop(i)
                    area = Polygon(np.asarray(bbox)).area
                    if area > area_max:
                        bbox_now = copy.deepcopy(bbox)
                        area_max = area
                object_bbox = [float(bbox_now[0][0]), float(bbox_now[0][1]), float(bbox_now[1][0]), float(bbox_now[1][1]),
                               float(bbox_now[2][0]), float(bbox_now[2][1]), float(bbox_now[3][0]), float(bbox_now[3][1])]
            elif len(bbox_x) > 6:
                # Too many vertices: fall back to clamping the original
                # corner coordinates into [0, img_x] x [0, img_y].
                object_bbox = copy.deepcopy(object)
                for i in range(0, len(object_bbox), 2):
                    if object_bbox[i] < 0:
                        object_bbox[i] = 0
                    if object_bbox[i] > img_x:
                        object_bbox[i] = img_x
                    if object_bbox[i+1] < 0:
                        object_bbox[i+1] = 0
                    if object_bbox[i+1] > img_y:
                        object_bbox[i+1] = img_y
                    object_bbox[i] = int(object_bbox[i])
    object = object_bbox
    return object
def WBF(args, iou_thr=0.55, draw_image=True):
    """
    Ensemble the per-model XML detection results with weighted boxes fusion.

    Reads one XML per image from each directory in ``args.models_result``,
    groups detections by image and label, fuses each group with
    ``weighted_boxes_fusion``, and writes the fused detections back into the
    (pre-initialized) XML files under ``args.save_path``.

    NOTE(review): ``iou_thr`` and ``draw_image`` are unused -- the fusion
    thresholds come from ``args`` instead.
    """
    weights = args.weights
    # results[img_name][label_name][model_num] -> list of [conf, bbox]
    results = defaultdict(lambda: defaultdict(defaultdict))
    print('loading results:')
    # Extract all predictions from every model's result files.
    for model_num, model_result in enumerate(args.models_result):  # per model
        result_files = os.listdir(model_result)
        for result_file in result_files:  # per image
            result_path = os.path.join(model_result, result_file)
            tree = ET.parse(result_path)  # open the xml document
            root = tree.getroot()  # get the root node
            img_name = result_file.strip().split('.')[0]
            for object in root.find('objects').findall('object'):  # per detection
                label_name = object.find('possibleresult').find('name').text  # class name of this detection
                conf = float(object.find('possibleresult').find('probability').text)
                bbox = []
                for i, point in enumerate(object.find('points').findall('point')):  # corner points of the object
                    if i >= 4:
                        break  # the 5th point just closes the ring
                    x, y = point.text.strip().split(',')
                    bbox.append(float(x))
                    bbox.append(float(y))
                #bbox = anchor_finetune(bbox, img_size=img_size[img_name])
                obj_det = [conf, bbox]
                # First detection for this (img, label, model) needs the list created.
                try:
                    results[img_name][label_name][model_num].append(obj_det)
                except:
                    results[img_name][label_name][model_num] = []
                    results[img_name][label_name][model_num].append(obj_det)
    # else:
    #     print('error!')
    # init output xml
    # if draw_image:
    #     show_boxes(boxes_list, scores_list, labels_list)  # visualize
    # Pre-create one (empty) result XML per test image, then fill them in.
    print('init emsemble results:')
    filltxt(args.save_path)
    print('emsembleling:')
    i=0
    for img_name, value1 in results.items():
        i=i+1
        print('img_name=',img_name,' num:',i)
        root = minidom.parse(os.path.join(args.save_path, img_name + '.xml'))
        xmlBuilder = Document()
        objects = root.getElementsByTagName("objects").item(0)
        for label_name, value2 in value1.items():
            # Fuse all models' boxes for this image/label.
            boxes = weighted_boxes_fusion(value2, weights=weights, iou_thr=args.IOU_threshold,
                                          skip_box_thr=args.conf_threshold)
            for box in boxes:
                conf = str(box[0])
                bbox = [str(point) for point in box[1]]
                # update_xml(os.path.join(args.save_path, img_name+'.xml'), label_name, conf, bbox)
                # Build one <object> element describing this fused box.
                object = xmlBuilder.createElement("object")
                coordinate = xmlBuilder.createElement("coordinate")
                coordinateContent = xmlBuilder.createTextNode('pixel')
                coordinate.appendChild(coordinateContent)
                object.appendChild(coordinate)
                type = xmlBuilder.createElement("type")
                typeContent = xmlBuilder.createTextNode('rectangle')
                type.appendChild(typeContent)
                object.appendChild(type)
                description = xmlBuilder.createElement("description")
                descriptionContent = xmlBuilder.createTextNode('None')
                description.appendChild(descriptionContent)
                object.appendChild(description)
                possibleresult = xmlBuilder.createElement("possibleresult")
                objname = xmlBuilder.createElement("name")
                objnameContent = xmlBuilder.createTextNode(label_name)  # label_name
                objname.appendChild(objnameContent)
                possibleresult.appendChild(objname)
                probability = xmlBuilder.createElement("probability")
                # probabilityContent = xmlBuilder.createTextNode(oneline[1])
                probabilityContent = xmlBuilder.createTextNode(conf)  # score
                probability.appendChild(probabilityContent)
                possibleresult.appendChild(probability)
                object.appendChild(possibleresult)
                points = xmlBuilder.createElement("points")
                point = xmlBuilder.createElement("point")
                # pointContent = xmlBuilder.createTextNode(oneline[2]+','+oneline[3])
                pointContent = xmlBuilder.createTextNode(bbox[0] + ',' + bbox[1])  # point1
                point.appendChild(pointContent)
                points.appendChild(point)
                point = xmlBuilder.createElement("point")
                # pointContent = xmlBuilder.createTextNode(oneline[4]+','+oneline[5])
                pointContent = xmlBuilder.createTextNode(bbox[2] + ',' + bbox[3])  # point2
                point.appendChild(pointContent)
                points.appendChild(point)
                point = xmlBuilder.createElement("point")
                # pointContent = xmlBuilder.createTextNode(oneline[6]+','+oneline[7])
                pointContent = xmlBuilder.createTextNode(bbox[4] + ',' + bbox[5])  # point3
                point.appendChild(pointContent)
                points.appendChild(point)
                point = xmlBuilder.createElement("point")
                # pointContent = xmlBuilder.createTextNode(oneline[8]+','+oneline[9])
                pointContent = xmlBuilder.createTextNode(bbox[6] + ',' + bbox[7])  # point4
                point.appendChild(pointContent)
                points.appendChild(point)
                point = xmlBuilder.createElement("point")
                # pointContent = xmlBuilder.createTextNode(oneline[2]+','+oneline[3])
                pointContent = xmlBuilder.createTextNode(bbox[0] + ',' + bbox[1])  # close the ring (repeat point1)
                point.appendChild(pointContent)
                points.appendChild(point)
                object.appendChild(points)
                objects.appendChild(object)
                #objects.appendChild(object)
        root.writexml(open(os.path.join(args.save_path, img_name + '.xml'), "w"), encoding='utf-8')
def filltxt(save_path):
    """Pre-create one empty FAIR1M-format result XML per test image.

    Writes ``<i>.xml`` for i in 0..8286 into *save_path*, each containing
    the fixed <source>/<research> metadata and an empty <objects> element
    that WBF() later fills with fused detections.
    """
    for i in range(8287):  # one (initially empty) result file per test image
        xmlBuilder = Document()
        annotation = xmlBuilder.createElement("annotation")  # create the <annotation> root tag
        xmlBuilder.appendChild(annotation)
        # txtFile = open(os.path.join(txt_path, name))
        # txtList = txtFile.readlines()
        # img = cv2.imread(picPath+name[0:-4]+".jpg")
        # Pheight,Pwidth,Pdepth=img.shape
        source = xmlBuilder.createElement("source")  # <source> tag
        filename = xmlBuilder.createElement('filename')
        filenameContent = xmlBuilder.createTextNode(str(i) + '.tif')
        filename.appendChild(filenameContent)
        source.appendChild(filename)
        origin = xmlBuilder.createElement('origin')
        originContent = xmlBuilder.createTextNode('GF2/GF3')
        origin.appendChild(originContent)
        source.appendChild(origin)
        annotation.appendChild(source)
        research = xmlBuilder.createElement("research")  # <research> metadata tag
        version = xmlBuilder.createElement('version')
        versionContent = xmlBuilder.createTextNode('1.0')
        version.appendChild(versionContent)
        research.appendChild(version)
        provider = xmlBuilder.createElement('provider')
        providerContent = xmlBuilder.createTextNode('School of team')
        provider.appendChild(providerContent)
        research.appendChild(provider)
        author = xmlBuilder.createElement('author')
        authorContent = xmlBuilder.createTextNode('1111')
        author.appendChild(authorContent)
        research.appendChild(author)
        pluginname = xmlBuilder.createElement('pluginname')
        pluginnameContent = xmlBuilder.createTextNode('FAIR1M')
        pluginname.appendChild(pluginnameContent)
        research.appendChild(pluginname)
        pluginclass = xmlBuilder.createElement('pluginclass')
        pluginclassContent = xmlBuilder.createTextNode('object detection')
        pluginclass.appendChild(pluginclassContent)
        research.appendChild(pluginclass)
        time = xmlBuilder.createElement('time')
        timeContent = xmlBuilder.createTextNode('2021-03')
        time.appendChild(timeContent)
        research.appendChild(time)
        annotation.appendChild(research)
        # Empty container that the fusion step appends <object> entries to.
        objects = xmlBuilder.createElement("objects")
        annotation.appendChild(objects)
        f = open(os.path.join(save_path, str(i)) + ".xml", 'w')
        xmlBuilder.writexml(f, indent='', newl='\n', addindent='\t', encoding='utf-8')
        f.close()
def update_xml(path, label_name, conf, bbox):
    """Append a single <object> entry to the result XML at *path*.

    NOTE(review): only referenced from commented-out code in this file's
    ``__main__`` and inside WBF(); additionally the <point> text nodes are
    written as the literal placeholders 'x1,y1' ... 'x4,y4' instead of the
    values in *bbox* -- this function looks unfinished/dead.
    """
    root = minidom.parse(path)
    xmlBuilder = Document()
    objects = root.getElementsByTagName("objects").item(0)  # root.find("objects")
    object = xmlBuilder.createElement("object")
    coordinate = xmlBuilder.createElement("coordinate")
    coordinateContent = xmlBuilder.createTextNode('pixel')
    coordinate.appendChild(coordinateContent)
    object.appendChild(coordinate)
    type = xmlBuilder.createElement("type")
    typeContent = xmlBuilder.createTextNode('rectangle')
    type.appendChild(typeContent)
    object.appendChild(type)
    description = xmlBuilder.createElement("description")
    descriptionContent = xmlBuilder.createTextNode('None')
    description.appendChild(descriptionContent)
    object.appendChild(description)
    possibleresult = xmlBuilder.createElement("possibleresult")
    objname = xmlBuilder.createElement("name")
    objnameContent = xmlBuilder.createTextNode(label_name)  # label_name
    objname.appendChild(objnameContent)
    possibleresult.appendChild(objname)
    probability = xmlBuilder.createElement("probability")
    # probabilityContent = xmlBuilder.createTextNode(oneline[1])
    probabilityContent = xmlBuilder.createTextNode(conf)  # score
    probability.appendChild(probabilityContent)
    possibleresult.appendChild(probability)
    object.appendChild(possibleresult)
    points = xmlBuilder.createElement("points")
    point = xmlBuilder.createElement("point")
    # pointContent = xmlBuilder.createTextNode(oneline[2]+','+oneline[3])
    pointContent = xmlBuilder.createTextNode('x1,y1')  # point1 (placeholder text, not bbox[0:2])
    point.appendChild(pointContent)
    points.appendChild(point)
    point = xmlBuilder.createElement("point")
    # pointContent = xmlBuilder.createTextNode(oneline[4]+','+oneline[5])
    pointContent = xmlBuilder.createTextNode('x2,y2')  # point2
    point.appendChild(pointContent)
    points.appendChild(point)
    point = xmlBuilder.createElement("point")
    # pointContent = xmlBuilder.createTextNode(oneline[6]+','+oneline[7])
    pointContent = xmlBuilder.createTextNode('x3,y3')  # point3
    point.appendChild(pointContent)
    points.appendChild(point)
    point = xmlBuilder.createElement("point")
    # pointContent = xmlBuilder.createTextNode(oneline[8]+','+oneline[9])
    pointContent = xmlBuilder.createTextNode('x4,y4')  # point4
    point.appendChild(pointContent)
    points.appendChild(point)
    point = xmlBuilder.createElement("point")
    # pointContent = xmlBuilder.createTextNode(oneline[2]+','+oneline[3])
    pointContent = xmlBuilder.createTextNode('x1,y1')  # close the ring (repeat point1)
    point.appendChild(pointContent)
    points.appendChild(point)
    object.appendChild(points)
    objects.appendChild(object)
    root.writexml(open(path,"w" ), encoding='utf-8')
    # ET.dump(root)  # print the xml
    # root.write(path)
def get_imgs_size(imgs_dir,save_path):
    """Record the pixel size of every image in *imgs_dir*.

    Writes a JSON mapping of ``filename-stem -> [width, height]`` to
    *save_path*.
    """
    sizes = {}
    for file_name in os.listdir(imgs_dir):
        frame = cv2.imread(os.path.join(imgs_dir, file_name))
        height, width = frame.shape[:2]
        stem = file_name.strip().split('.')[0]
        sizes[stem] = [width, height]
    with open(save_path, 'w') as out_file:
        json.dump(sizes, out_file)
if __name__ == '__main__':
    args = parse_args()
    #mkdirs_if_not_exists(args.save_path)
    WBF(args)
    #filltxt('/home/ggm/GGM/competition_trick/try_em')
    #update_xml('/home/ggm/GGM/competition_trick/try_em/0.xml')
    #get_imgs_size('/home/disk/FAIR1M_FUll/test/images','/home/disk/FAIR1M_emsemble/imgs_size.json')
    # The final line previously had stray data residue appended after the
    # call ("| 1.914063 | 2 |"), which made the file a SyntaxError.
    print("done!")
SVHN/SVHN_data_preprocess/SVHN_class.py | IMBINGO95/FairMOT | 0 | 12758568 | <reponame>IMBINGO95/FairMOT
import pandas as pd
import json
import torch
import os
import numpy as np
from skimage import io, transform
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
from SVHN_data_preprocess.img_transforms import *
import cv2 as cv
from PIL import Image
from SVHN_data_preprocess.numberRecognize import crop_data_of_img
class SVHNDataset(Dataset):
    """SVHN-style detection dataset loaded from a JSON annotation file."""
    def __init__(self, json_file, root_dir, transform=None):
        """
        Args:
            json_file (string): Path (relative to root_dir) of the JSON
                file with annotations.
            root_dir (string): Directory with all the images.
            transform (callable, optional): Optional transform to be applied
                on a sample.
        """
        self.landmarks_frame = self.json_read(os.path.join(root_dir,json_file))
        self.root_dir = root_dir
        self.transform = transform
    def __len__(self):
        return len(self.landmarks_frame)
    def __getitem__(self, idx):
        # DataLoader samplers may hand us a tensor of indices.
        if torch.is_tensor(idx):
            idx = idx.tolist()
        img_name = os.path.join(self.root_dir,
                                self.landmarks_frame[idx]['name'])
        image = io.imread(img_name)
        landmarks = self.landmarks_frame[idx]['bbox']
        landmarks = np.array(landmarks)
        # landmarks arrange in this format [[left.x, left.y, width,height, label],...]
        landmarks = landmarks.astype('float').reshape(-1, 5)
        bboxes = landmarks[:,0:4] # (n_objects, 4)
        labels = landmarks[:,4]
        sample = {'image':image,'bboxes':bboxes,'labels':labels}
        if self.transform:
            sample = self.transform(sample)
        return sample
    def collate_fn(self,batch):
        """
        Since each image may have a different number of numbers, we need a collate function (to be passed to the DataLoader).
        This describes how to combine these tensors of different sizes. We use lists.
        Note: this need not be defined in this Class, can be standalone.

        :param batch: an iterable of N sets from __getitem__()
        :return: a tensor of stacked images plus lists of varying-size
            bounding-box and label arrays (one entry per image)
        """
        images = list()
        bboxes = list()
        labels = list()
        for b in batch:
            sub_img = torch.tensor(b['image'])
            images.append(sub_img)
            bboxes.append(b['bboxes'])
            labels.append(b['labels'])
        # Stacking requires all images in the batch to share a shape.
        images = torch.stack(images, dim=0)
        '''What kinds of data type that you want to batch together, depends on the data type that you return'''
        return images,bboxes,labels
    def json_read(self,json_file):
        '''Load the JSON annotation file and return its parsed contents.'''
        with open(json_file,'r') as f:
            landmarks = json.load(f)
        return landmarks
class DatasetV1(Dataset):
    """Digit-sequence dataset: crops the digit region (optionally) and
    returns (image_tensor, sequence_length, 5 digit labels; 10 = blank)."""
    def __init__(self, path_to_data_dir,mode,crop = False):
        # mode selects the split subdirectory and its <mode>.json annotations.
        self.mode = mode
        self.crop = crop
        self.img_dir = os.path.join(path_to_data_dir, mode)
        self.data_file = os.path.join(path_to_data_dir, mode, mode + '.json')
        with open(self.data_file,'r') as f:
            self.data = json.load(f)
        self._length = len(self.data)
    def __len__(self):
        return self._length
    def __getitem__(self, index):
        img_name = self.data[index]['name']
        bbox = self.data[index]['bbox']
        image = Image.open(os.path.join(self.img_dir,img_name))
        if self.crop == True:
            # Crop to the union region covering all digit boxes.
            region = crop_data_of_img(bbox)
            image = image.crop(tuple(region[0:4]))
        # Deterministic center crop for evaluation, random crop for training.
        if self.mode == 'test':
            trans_crop = transforms.CenterCrop([54,54])
        else:
            trans_crop = transforms.RandomCrop([54,54])
        transform = transforms.Compose([
            transforms.Resize([64,64]),
            trans_crop,
            transforms.ToTensor(),
            transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
        ])
        image = transform(image)
        length = len(bbox)
        # 10 is the "no digit" filler label; at most 5 digits are kept.
        digits = [10 for i in range(5)]
        # print('img_name : {}'.format(os.path.join(self.img_dir,img_name)))
        for index in range(length):
            if index >= 5 :
                break
            # print('index : {}'.format(index) )
            digits[index] = bbox[index][-1]
        return image, length, digits
class SJNDataset(Dataset):
    """Jersey-number dataset mixing positive (labelled) and negative
    (no-number) player crops; returns (image, length, [digit1, digit2]).

    NOTE(review): ``self.data_neg_label`` is only assigned when the
    negative-image directory exists; with label_type 'neg' or 'both' and a
    missing directory, __init__ raises AttributeError.
    """
    def __init__(self, path_to_data_dir,mode,label_type = 'both',crop = False):
        self.mode = mode
        self.crop = crop
        # Positive samples: images plus JSON labels.
        self.img_dir_pos = os.path.join(path_to_data_dir, mode, mode)
        self.data_file_pos_label = os.path.join(path_to_data_dir, mode, mode + '.json')
        with open(self.data_file_pos_label,'r') as f:
            self.data_pos_label = json.load(f)
        # Negative samples: bare image filenames, no label file.
        self.img_dir_neg = os.path.join(path_to_data_dir, mode, mode + '_neg_image')
        if os.path.exists(self.img_dir_neg):
            self.data_neg_label = os.listdir(self.img_dir_neg)
        if label_type == 'pos':
            self.data = self.data_pos_label
        elif label_type == 'neg':
            self.data = self.data_neg_label
        else:
            self.data = self.data_pos_label + self.data_neg_label
        self._length = len(self.data)
    def __len__(self):
        return self._length
    def __getitem__(self, index):
        sub_data = self.data[index]
        # A plain string entry is a negative-sample filename.
        if type(sub_data) == str:
            digits = [10,10]
            length = 0
            image = Image.open(os.path.join(self.img_dir_neg,sub_data))
        else:
            digits = self.data[index][1:3]
            digits = [int(digits[0]),int(digits[1])]
            '''If the first one is 10 then there is no digit in img'''
            if digits[0] == 10:
                length =0
            elif digits[1] == 10:
                length = 1
            else:
                length = 2
            img_name = sub_data[0]
            image = Image.open(os.path.join(self.img_dir_pos,img_name))
        size = image.size
        if self.crop == True:
            '''only get the upper half of the player.'''
            region = (0,0,size[0],int(0.5*size[1]))
            image = image.crop(tuple(region[0:4]))
        # Deterministic crop for evaluation, random crop for training.
        if self.mode == 'test':
            trans_crop = transforms.CenterCrop([54,54])
        else:
            trans_crop = transforms.RandomCrop([54,54])
        # IMG = image
        transform = transforms.Compose([
            transforms.Resize([64,64]),
            trans_crop,
            transforms.ToTensor(),
            transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
        ])
        image = transform(image)
        return image, length, digits
class SWDataset(Dataset):
    """Jersey-number dataset built from software-exported annotations
    (Software.json rows: [x_l, y_l, x_r, y_r, ?, number, file_name]).

    __getitem__ returns (image, length, digits, sub_data_tensor, file_name).
    NOTE(review): num == 10 takes the single-digit branch (length 1,
    digits[0] = 10) even though 10 elsewhere means "no digit" -- confirm
    the label convention against the annotation tool.
    """
    def __init__(self, path_to_data_dir,mode,crop = False):
        self.mode = mode
        self.crop = crop
        self.path_to_data_dir = path_to_data_dir
        self.data_file = os.path.join(path_to_data_dir, 'Software' + '.json')
        with open(self.data_file,'r') as f:
            self.data = json.load(f)
        self._length = len(self.data)
    def __len__(self):
        return self._length
    def __getitem__(self, index):
        sub_data = self.data[index]
        # Player bounding box corners.
        x_l = int(sub_data[0])
        y_l = int(sub_data[1])
        x_r = int(sub_data[2])
        y_r = int(sub_data[3])
        '''If the first one is 10 then there is no digit in img'''
        digits = [10,10]
        num = sub_data[5]
        # Split a two-digit number into tens and units.
        if num> 10 :
            digits[0] = int(num/10)
            digits[1] = num % 10
            length = 2
        else:
            digits[0] = num
            length = 1
        file_name = sub_data[6]
        # Images live in a subdirectory named after the filename prefix.
        dir_name= file_name.split('.')[0].split('_')[0]
        img_path = os.path.join(os.path.join(self.path_to_data_dir,dir_name,file_name))
        image = Image.open(img_path)
        '''get the bbox region of the target player.'''
        region1 = (x_l, y_l, x_r, y_r)
        image = image.crop(region1)
        size = image.size
        if self.crop == True:
            '''only get the upper half of the player.'''
            region2 = (0,0,size[0],int(0.5*size[1]))
            image = image.crop(tuple(region2[0:4]))
        # Deterministic crop for evaluation, random crop for training.
        if self.mode == 'test':
            trans_crop = transforms.CenterCrop([54,54])
        else:
            trans_crop = transforms.RandomCrop([54,54])
        # IMG = image
        transform = transforms.Compose([
            transforms.Resize([64,64]),
            trans_crop,
            transforms.ToTensor(),
            transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
        ])
        image = transform(image)
        # Everything but the filename, as a tensor, for downstream use.
        sub_data = torch.tensor(sub_data[:-1])
        return image, length, digits, sub_data, file_name
if __name__ == '__main__':
    file_path = '/datanew/hwb/data/Football/SoftWare/From_Software'
    SW = SWDataset(file_path,'train',True)
    # SWDataset.__getitem__ returns 5 items (image, length, digits,
    # sub_data, file_name); the original 3-name unpack raised
    # "ValueError: too many values to unpack" on the first iteration.
    for image, length, digits, sub_data, file_name in SW:
        print(length)
| 2.546875 | 3 |
timm/data/dataset.py | yang-ruixin/pytorch-image-models-with-simclr | 14 | 12758569 | <reponame>yang-ruixin/pytorch-image-models-with-simclr<filename>timm/data/dataset.py<gh_stars>10-100
""" Quick n Simple Image Folder, Tarfile based DataSet
Hacked together by / Copyright 2020 <NAME>
Modified by <NAME> for multi-label classification with SimCLR
2021/09/07
https://github.com/yang-ruixin
<EMAIL> (in China)
<EMAIL> (out of China)
"""
# ================================
import csv
import numpy as np
# ================================
import torch.utils.data as data
import os
import torch
import logging
from PIL import Image
from .parsers import create_parser
# Module-level logger used to report unreadable samples.
_logger = logging.getLogger(__name__)
# Number of consecutive decode failures tolerated before re-raising.
_ERROR_RETRY = 50
class ImageDataset(data.Dataset):
    """Map-style dataset that loads images through a pluggable parser.

    Unreadable samples are skipped (up to ``_ERROR_RETRY`` in a row) by
    transparently advancing to the next index.
    """
    def __init__(
            self,
            root,
            parser=None,
            class_map='',
            load_bytes=False,
            transform=None,
    ):
        # A parser may be passed directly or named by string; create_parser
        # resolves strings (and None -> the default folder parser).
        if parser is None or isinstance(parser, str):
            parser = create_parser(parser or '', root=root, class_map=class_map)
        self.parser = parser
        self.load_bytes = load_bytes
        self.transform = transform
        self._consecutive_errors = 0
    def __getitem__(self, index):
        img, target = self.parser[index]
        try:
            # load_bytes returns the raw file contents; otherwise decode to RGB.
            img = img.read() if self.load_bytes else Image.open(img).convert('RGB')
        except Exception as e:
            _logger.warning(f'Skipped sample (index {index}, file {self.parser.filename(index)}). {str(e)}')
            self._consecutive_errors += 1
            if self._consecutive_errors < _ERROR_RETRY:
                # Try the next sample instead of failing the whole epoch.
                return self.__getitem__((index + 1) % len(self.parser))
            else:
                raise e
        self._consecutive_errors = 0
        if self.transform is not None:
            img = self.transform(img)
        if target is None:
            # -1 marks "no label" (e.g. inference-only datasets).
            target = torch.tensor(-1, dtype=torch.long)
        return img, target
    def __len__(self):
        return len(self.parser)
    def filename(self, index, basename=False, absolute=False):
        return self.parser.filename(index, basename, absolute)
    def filenames(self, basename=False, absolute=False):
        return self.parser.filenames(basename, absolute)
class IterableImageDataset(data.IterableDataset):
    """Iterable (stream-style) counterpart of ImageDataset; the parser
    supplies already-decoded images.

    NOTE(review): ``load_bytes`` is accepted for signature parity but is
    never used by this class.
    """
    def __init__(
            self,
            root,
            parser=None,
            split='train',
            is_training=False,
            batch_size=None,
            class_map='',
            load_bytes=False,
            transform=None,
    ):
        # Unlike ImageDataset, a parser (or parser name) is mandatory here.
        assert parser is not None
        if isinstance(parser, str):
            self.parser = create_parser(
                parser, root=root, split=split, is_training=is_training, batch_size=batch_size)
        else:
            self.parser = parser
        self.transform = transform
        self._consecutive_errors = 0
    def __iter__(self):
        for img, target in self.parser:
            if self.transform is not None:
                img = self.transform(img)
            if target is None:
                # -1 marks "no label" (e.g. inference-only datasets).
                target = torch.tensor(-1, dtype=torch.long)
            yield img, target
    def __len__(self):
        # Streamed sources may not know their length; report 0 in that case.
        if hasattr(self.parser, '__len__'):
            return len(self.parser)
        else:
            return 0
    def filename(self, index, basename=False, absolute=False):
        assert False, 'Filename lookup by index not supported, use filenames().'
    def filenames(self, basename=False, absolute=False):
        return self.parser.filenames(basename, absolute)
class AugMixDataset(torch.utils.data.Dataset):
    """Dataset wrapper to perform AugMix or other clean/augmentation mixes"""
    def __init__(self, dataset, num_splits=2):
        self.augmentation = None
        self.normalize = None
        self.dataset = dataset
        if self.dataset.transform is not None:
            self._set_transforms(self.dataset.transform)
        self.num_splits = num_splits
    def _set_transforms(self, x):
        # x is a 3-tuple: (base transform, augmentation, normalize).
        assert isinstance(x, (list, tuple)) and len(x) == 3, 'Expecting a tuple/list of 3 transforms'
        self.dataset.transform = x[0]
        self.augmentation = x[1]
        self.normalize = x[2]
    @property
    def transform(self):
        return self.dataset.transform
    @transform.setter
    def transform(self, x):
        self._set_transforms(x)
    def _normalize(self, x):
        return x if self.normalize is None else self.normalize(x)
    def __getitem__(self, i):
        x, y = self.dataset[i]  # all splits share the same dataset base transform
        x_list = [self._normalize(x)]  # first split only normalizes (this is the 'clean' split)
        # run the full augmentation on the remaining splits
        for _ in range(self.num_splits - 1):
            x_list.append(self._normalize(self.augmentation(x)))
        return tuple(x_list), y
    def __len__(self):
        return len(self.dataset)
# ================================
class DatasetAttributes:
    """Collect every distinct color/gender/article label from an annotation
    CSV and build id<->name mappings (ids follow np.unique's sorted order).
    """

    def __init__(self, annotation_path):
        colors, genders, articles = [], [], []
        # Single pass over the annotation CSV, collecting raw label strings.
        with open(annotation_path) as handle:
            for record in csv.DictReader(handle):
                colors.append(record['color'])
                genders.append(record['gender'])
                articles.append(record['article'])
        # np.unique sorts and deduplicates; label ids are positions in that order.
        self.color_labels = np.unique(colors)
        self.gender_labels = np.unique(genders)
        self.article_labels = np.unique(articles)
        self.num_colors = len(self.color_labels)
        self.num_genders = len(self.gender_labels)
        self.num_articles = len(self.article_labels)
        self.color_id_to_name = dict(enumerate(self.color_labels))
        self.color_name_to_id = {name: idx for idx, name in enumerate(self.color_labels)}
        self.gender_id_to_name = dict(enumerate(self.gender_labels))
        self.gender_name_to_id = {name: idx for idx, name in enumerate(self.gender_labels)}
        self.article_id_to_name = dict(enumerate(self.article_labels))
        self.article_name_to_id = {name: idx for idx, name in enumerate(self.article_labels)}
class DatasetML(data.Dataset):
    """Multi-label dataset: image paths plus color/gender/article label ids.

    Rows are read from an annotation CSV; label strings are converted to
    integer ids through the *attributes* object's name_to_id mappings.
    When a transform is set, __getitem__ returns two independently
    transformed views of the image (SimCLR-style pair).
    """

    def __init__(
            self,
            annotation_path,
            attributes,
            transform=None):
        super().__init__()
        self.transform = transform
        self.attr = attributes
        # Image paths and per-attribute label ids, aligned by index.
        self.data = []
        self.color_labels = []
        self.gender_labels = []
        self.article_labels = []
        with open(annotation_path) as handle:
            for record in csv.DictReader(handle):
                self.data.append(record['image_path'])
                self.color_labels.append(self.attr.color_name_to_id[record['color']])
                self.gender_labels.append(self.attr.gender_name_to_id[record['gender']])
                self.article_labels.append(self.attr.article_name_to_id[record['article']])

    def __getitem__(self, idx):
        # Load the image lazily from disk.
        img = Image.open(self.data[idx])
        if self.transform:
            # Two augmented views of the same source image (for SimCLR).
            img = (self.transform(img), self.transform(img))
        labels = {
            'color_labels': self.color_labels[idx],
            'gender_labels': self.gender_labels[idx],
            'article_labels': self.article_labels[idx]
        }
        return img, labels

    def __len__(self):
        return len(self.data)
# ================================
| 2.40625 | 2 |
game.py | jmp/pyxel-build-example | 0 | 12758570 | #!/usr/bin/env python3
import pyxel
class App:
    """Minimal Pyxel demo: a 160x120 window that draws text and a sprite."""
    def __init__(self):
        # init() must precede load()/run(); run() blocks and drives the loop.
        # NOTE(review): the 'caption' keyword exists only in older pyxel
        # releases (removed around 1.5) — confirm the pinned version.
        pyxel.init(160, 120, caption="test lol")
        pyxel.load("assets/data.pyxres")
        pyxel.run(self.update, self.draw)
    def update(self):
        # Quit when the Q key is pressed.
        if pyxel.btnp(pyxel.KEY_Q):
            pyxel.quit()
    def draw(self):
        # Clear to color 0, then draw the caption text and the sprite
        # (image bank 0, source (0, 0), size 38x16).
        pyxel.cls(0)
        pyxel.text(55, 41, "test lol", 15)
        pyxel.blt(61, 66, 0, 0, 0, 38, 16)
if __name__ == "__main__":
    # Constructing App starts the (blocking) Pyxel game loop.
    App()
| 2.5 | 2 |
section4/video1/union.py | PacktPublishing/Mastering-Python-3.x-3rd-Edition | 6 | 12758571 | <filename>section4/video1/union.py<gh_stars>1-10
from typing import Tuple, Iterable, Callable, Union
# A pair is homogeneous: either two ints or two strings.
Pair = Union[Tuple[int, int], Tuple[str, str]]
Single = Union[int, str]


def add(pair: Pair) -> Single:
    """Combine both members of a homogeneous pair with ``+``."""
    first, second = pair
    return first + second


def even(a: Single) -> bool:
    """True for an even int, or a string of even length."""
    measure = len(a) if isinstance(a, str) else a
    return measure % 2 == 0


def map(func: Callable[[Pair], Single], objects: Iterable[Pair]) -> Iterable[Single]:
    """Apply *func* to every element, returning the results as a list."""
    return [func(element) for element in objects]


def filter(
    func: Callable[[Single], bool], objects: Iterable[Single]
) -> Iterable[Single]:
    """Keep only the elements for which *func* is truthy, as a list."""
    return [element for element in objects if func(element)]
if __name__ == "__main__":
    # Demo: sum each pair, then keep the even results.
    print(filter(even, map(add, [(1, 2), (2, 2), (2, 1), (5, 1)])))
    # Pairs may also be two strings; "even" then means even length.
    print(filter(even, map(add, [(1, 2), (2, 2), ("hello", "there"), (5, 1)])))
| 3.75 | 4 |
biLstm_with_chars.py | katchu11/Robustness-of-MT-DNNs | 3 | 12758572 | <reponame>katchu11/Robustness-of-MT-DNNs<gh_stars>1-10
import dynet as dy
class BiLSTM():
    """Word-level BiLSTM scorer with character-level BiLSTM embeddings (DyNet).

    Each word is represented by its word embedding concatenated with the
    final states of a forward and a backward character LSTM run over the
    word's characters; a word-level BiLSTM then feeds a softmax layer.
    """
    def build_model(self, nwords, nchars, ntags):
        """Allocate all DyNet parameters and return an Adam trainer.

        NOTE(review): the registration order of parameters on the model
        determines the layout used by save()/populate(); do not reorder
        these calls if existing saved models must remain loadable.
        """
        self.model = dy.Model()
        trainer = dy.AdamTrainer(self.model)
        EMB_SIZE = 64
        HID_SIZE = 128
        CHAR_EMB_SIZE = 32
        CHAR_HID_SIZE = 32
        self.W_emb = self.model.add_lookup_parameters((nwords, EMB_SIZE)) # Word embeddings
        self.C_emb = self.model.add_lookup_parameters((nchars, CHAR_EMB_SIZE)) # Char embeddings
        self.char_fwdLSTM = dy.VanillaLSTMBuilder(1, CHAR_EMB_SIZE, CHAR_HID_SIZE, self.model)
        self.char_bwdLSTM = dy.VanillaLSTMBuilder(1, CHAR_EMB_SIZE, CHAR_HID_SIZE, self.model)
        # Word LSTM input is [char-fwd ; char-bwd ; word embedding].
        self.fwdLSTM = dy.VanillaLSTMBuilder(1, 2*CHAR_HID_SIZE + EMB_SIZE, HID_SIZE, self.model) # Forward RNN
        self.bwdLSTM = dy.VanillaLSTMBuilder(1, 2*CHAR_HID_SIZE + EMB_SIZE, HID_SIZE, self.model) # Backward RNN
        self.W_sm = self.model.add_parameters((ntags, 2 * HID_SIZE)) # Softmax weights
        self.b_sm = self.model.add_parameters((ntags)) # Softmax bias
        return trainer
    def get_char_embeddings(self, word):
        """Return the char-BiLSTM summary vector for one word."""
        # word is a list a character indices
        char_embs = [dy.lookup(self.C_emb, x) for x in word]
        char_fwd_init = self.char_fwdLSTM.initial_state()
        char_fwd_embs = char_fwd_init.transduce(char_embs)
        char_bwd_init = self.char_bwdLSTM.initial_state()
        char_bwd_embs = char_bwd_init.transduce(reversed(char_embs))
        # Concatenate the final forward and backward hidden states.
        return dy.concatenate([char_fwd_embs[-1], char_bwd_embs[-1]])
    # A function to calculate scores for one value
    def calc_scores(self, words, chars):
        """Score one sentence: per-tag logits from the word BiLSTM's final states.

        ``words`` is a list of word indices; ``chars`` the matching list of
        per-word character-index lists. Starts a fresh computation graph.
        """
        dy.renew_cg()
        word_embs = [dy.concatenate([dy.lookup(self.W_emb, words[x]), self.get_char_embeddings(chars[x])]) for x in range(len(words))]
        fwd_init = self.fwdLSTM.initial_state()
        fwd_embs = fwd_init.transduce(word_embs)
        bwd_init = self.bwdLSTM.initial_state()
        bwd_embs = bwd_init.transduce(reversed(word_embs))
        W_sm_exp = dy.parameter(self.W_sm)
        b_sm_exp = dy.parameter(self.b_sm)
        return W_sm_exp * dy.concatenate([fwd_embs[-1], bwd_embs[-1]]) + b_sm_exp
    def load(self, model_file):
        """Load parameters from *model_file* into the existing model."""
        self.model.populate(model_file)
        return
    def save(self, model_file):
        """Serialize all model parameters to *model_file*."""
        self.model.save(model_file)
        return
| 2.375 | 2 |
trajminer/datasets/__init__.py | ybj94/trajminer | 37 | 12758573 | <filename>trajminer/datasets/__init__.py
"""This module contains utilities for loading well-known trajectory datasets.
"""
from .base import load_brightkite_checkins
from .base import load_gowalla_checkins
from .base import load_foursquare_checkins
from .base import load_starkey_animals
# Explicit public API of the datasets package.
__all__ = ['load_brightkite_checkins',
           'load_gowalla_checkins',
           'load_foursquare_checkins',
           'load_starkey_animals']
| 1.296875 | 1 |
validator.py | maTORIx/flask_jwt_hcaptcha_sample | 0 | 12758574 | from flask_wtf.recaptcha.validators import Recaptcha, RECAPTCHA_ERROR_CODES
from flask import current_app, request
from wtforms import ValidationError
import urllib.parse
import urllib.request
import json
class Hcaptcha(Recaptcha):
    """WTForms validator that verifies an hCaptcha response token.

    Reuses Flask-WTF's Recaptcha validator machinery but reads the
    ``h-captcha-response`` token the hCaptcha widget submits. Requires
    ``RECAPTCHA_PRIVATE_KEY`` and ``RECAPTCHA_VERIFY_SERVER`` (pointed at
    the hCaptcha siteverify endpoint) in the Flask config.
    """

    def __call__(self, form, field):
        # Skip verification entirely when the app is under test.
        if current_app.testing:
            return True
        # The widget may post the token as JSON or as form data.
        if request.json:
            response = request.json.get("h-captcha-response", "")
        else:
            response = request.form.get("h-captcha-response", "")
        remote_ip = request.remote_addr
        if not response:
            raise ValidationError(field.gettext(self.message))
        if not self._validate_recaptcha(response, remote_ip):
            field.recaptcha_error = "incorrect-captcha-sol"
            raise ValidationError(field.gettext(self.message))

    def _validate_recaptcha(self, response, remote_addr):
        """Performs the actual validation against the verify server.

        Returns True when the server accepts the token, False on a non-200
        reply or an unrecognized failure; raises ValidationError for known
        error codes or missing verify-server configuration.
        """
        try:
            private_key = current_app.config["RECAPTCHA_PRIVATE_KEY"]
        except KeyError:
            raise RuntimeError("No RECAPTCHA_PRIVATE_KEY config set") from None
        verify_server = current_app.config.get("RECAPTCHA_VERIFY_SERVER")
        if not verify_server:
            # Fixed: the message previously named a nonexistent
            # RECAPTCHA_VALIDATION_SERVER key.
            raise ValidationError("No RECAPTCHA_VERIFY_SERVER config set.")
        data = urllib.parse.urlencode(
            {"secret": private_key, "remoteip": remote_addr, "response": response}
        ).encode("utf-8")
        # Close the HTTP response deterministically instead of leaking it.
        with urllib.request.urlopen(verify_server, data) as http_response:
            if http_response.code != 200:
                return False
            json_resp = json.loads(http_response.read())
        if json_resp["success"]:
            return True
        for error in json_resp.get("error-codes", []):
            if error in RECAPTCHA_ERROR_CODES:
                raise ValidationError(RECAPTCHA_ERROR_CODES[error])
        return False
| 2.953125 | 3 |
01-DesenvolvimentoDeSistemas/02-LinguagensDeProgramacao/01-Python/01-ListaDeExercicios/01-Gabarito/087.py | moacirsouza/nadas | 1 | 12758575 | print("""
087) Aprimore o desafio anterior, mostrando no final:
A) A soma de todos os valores somaDosPares digitados.
B) A soma dos valores da terceira coluna.
C) O maior valor da segunda linha.
""")
# Read a 3x3 integer matrix from the user and report: (A) the sum of even
# values, (B) the sum of the third column, (C) the largest value in row two.
tamanhoDaMatriz = 3
matriz = []
saida = ''
somaDosPares = somaDosNumerosDaTerceiraColuna = 0
# The banner string spans two physical lines via a backslash continuation.
titulo = f' Usando números inteiros preencha \
a matriz {tamanhoDaMatriz}x{tamanhoDaMatriz} a seguir '
print('-'*len(titulo))
print(f'{titulo}')
print('-'*len(titulo))
# Fill the matrix cell by cell, accumulating the statistics on the fly.
for linha in range(tamanhoDaMatriz):
    matriz.append([])
    for coluna in range(tamanhoDaMatriz):
        numero = int(input(f'Célula [{linha},{coluna}]: ').strip())
        if numero%2 == 0:
            somaDosPares += numero
        if coluna == 2:
            # Third column (index 2) running total.
            somaDosNumerosDaTerceiraColuna += numero
        matriz[linha].append(numero)
        saida += f'[ {matriz[linha][coluna]:^3} ]'
    saida += '\n'
maiorValorDaSegundaLinha = max(matriz[1])
print('-'*len(titulo))
# saida ends with a trailing newline; [:-1] drops it before printing.
print(saida[:-1])
print('-'*len(titulo))
print(f'A) A soma dos valores pares é {somaDosPares}.')
print(f'B) A soma dos valores da terceira coluna é \
{somaDosNumerosDaTerceiraColuna}.')
print(f'C) O maior valor da segunda linha é {maiorValorDaSegundaLinha}.')
| 4.15625 | 4 |
app/plugins/plugin/plugin.py | proglang/servercodetest | 0 | 12758576 | import typing
from app.util import log as logging
from .executor import Executor
from .settings import Settings
from .request import Request
from .response import Response
from ..info import Info
class Plugin:
    """Base Plugin Class.

    This class defines which Executor, Settings, Request and Response class
    is used. The methods defined here should not be overwritten.
    """

    Settings = Settings
    Executor = Executor
    Request = Request
    Response = Response

    def __init__(self, info: Info, path: str):
        """Store plugin metadata and set up a plugin-scoped logger.

        :param info: plugin descriptor; ``info.uid`` names the logger.
        :param path: filesystem path associated with this plugin.
        """
        with logging.LogCall(__file__, "__init__", self.__class__):
            self.info = info
            self.path = path
            self.logger = logging.PluginLogger(self.info.uid)
            self.logger.debug("%s initialized!", self.__class__.__name__)

    def execute(self, request: Request) -> Response:
        """Run this plugin's Executor for *request* and collect a Response.

        Any exception raised while executing is captured into the response
        via ``set_exception`` instead of propagating to the caller.
        """
        with logging.LogCall(__file__, "execute", self.__class__):
            res = self.Response()
            try:
                # Renamed local from ``exec`` to stop shadowing the builtin.
                executor = self.Executor(self, request)
                executor.execute()
                res.error = executor.get_error()  # pylint: disable=assignment-from-none
                if res.error:
                    res.error_text = executor.get_error_text()
                res.text = executor.get_text()
                res.points = executor.get_points()
            except Exception as e:
                # Deliberate catch-all: errors are reported in the Response.
                res.set_exception(e)
            return res
| 2.359375 | 2 |
users/models.py | SDOS2020/Team_2_Mentoring_Platform | 0 | 12758577 | from django.db import models
from django.core import validators
from django.contrib.auth.models import AbstractUser
from django.utils.translation import gettext_lazy as _
from django.utils import timezone
class User(AbstractUser):
    """
    Top most - for authentication purpose only
    """
    # Marks platform administrators; regular accounts default to False.
    is_admin = models.BooleanField(default=False)
class Admin(models.Model):
    """One admin record per auth user; deleted together with the user."""
    user = models.OneToOneField(User, on_delete=models.CASCADE)
class Gender(models.IntegerChoices):
    """Integer-backed gender choices used by Account.gender."""
    male = 1, _('Male')
    female = 2, _('Female')
    prefer_not_to_say = 3, _('Prefer not to say')
class Account(models.Model):
    """
    The main class that stores all the common information for a mentor and a mentee
    """
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    # Optional, constrained to 16..100; new accounts default to 20.
    age = models.IntegerField(
        null=True,
        validators=[
            validators.MinValueValidator(16),
            validators.MaxValueValidator(100),
        ],
        default=20
    )
    # Defaults to the last Gender choice ("Prefer not to say").
    gender = models.IntegerField(choices=Gender.choices, default=Gender.choices[-1][0])
    # Phone number stored as text; max_length and MinLengthValidator are
    # both 10, so a non-null value must be exactly 10 characters.
    mobile = models.CharField(
        max_length=10,
        null=True,
        validators=[
            validators.MinLengthValidator(10),
        ]
    )
    introduction = models.TextField(max_length=512, null=True)
    # education = models.TextField(max_length=512, null=True)
    # research_experience = models.TextField(max_length=512, null=True)
    expertise = models.TextField(max_length=512, null=True)
    social_handle = models.URLField(null=True, help_text="Link to your personal website/LinkedIn profile")
    # Rating on a 0.0-5.0 scale with one decimal place.
    rating = models.DecimalField(
        null=True,
        max_digits=3,
        decimal_places=1,
        validators=[
            validators.MinValueValidator(0.0),
            validators.MaxValueValidator(5.0),
        ]
    )
    # An account may act as mentor, mentee, or both.
    is_mentor = models.BooleanField(default=False)
    is_mentee = models.BooleanField(default=False)
    def __str__(self):
        return self.user.username
class AccountEducation(models.Model):
    """
    Stores the education fields of accounts
    """
    # Many education entries per account (ForeignKey, not OneToOne).
    account = models.ForeignKey(Account, on_delete=models.CASCADE)
    qualification = models.CharField(max_length=128)
    start_date = models.DateField()
    end_date = models.DateField()
    organization = models.CharField(max_length=128)
    detail = models.TextField(max_length=512, null=True)
    def __str__(self):
        return self.account.user.username
class AccountResearchExperience(models.Model):
    """
    Stores the research experience of accounts
    """
    # Many research-experience entries per account.
    account = models.ForeignKey(Account, on_delete=models.CASCADE)
    position = models.CharField(max_length=128)
    start_date = models.DateField()
    end_date = models.DateField()
    organization = models.CharField(max_length=128)
    detail = models.TextField(max_length=512, null=True)
    def __str__(self):
        return self.account.user.username
class Mentor(models.Model):
    """
    The mentor class, stores attributes specific to a mentor
    """
    account = models.OneToOneField(Account, on_delete=models.CASCADE)
    # Duration of a mentorship, 1..24 — presumably months; TODO confirm.
    mentorship_duration = models.IntegerField(
        default=6,
        validators=[
            validators.MinValueValidator(1),
            validators.MaxValueValidator(24),
        ]
    )
    # How many mentees the mentor takes at once (at least 1).
    mentee_group_size = models.IntegerField(
        default=1,
        validators=[
            validators.MinValueValidator(1),
        ]
    )
    verified = models.BooleanField(default=False)
    is_open_to_mentorship = models.BooleanField(default=True)
    # Which mentee categories this mentor is willing to take.
    will_mentor_faculty = models.BooleanField(default=False)
    will_mentor_phd = models.BooleanField(default=False)
    will_mentor_mtech = models.BooleanField(default=False)
    will_mentor_btech = models.BooleanField(default=False)
    # Responsibilities
    # NOTE(review): these flags appear to correspond 1:1 with the
    # MentorResponsibility choices defined later in this module — verify.
    responsibility1 = models.BooleanField(default=False)
    responsibility2 = models.BooleanField(default=False)
    responsibility3 = models.BooleanField(default=False)
    responsibility4 = models.BooleanField(default=False)
    responsibility5 = models.BooleanField(default=False)
    responsibility6 = models.BooleanField(default=False)
    responsibility7 = models.BooleanField(default=False)
    responsibility8 = models.BooleanField(default=False)
    other_responsibility = models.TextField(null=True, blank=True, max_length=512)
    def __str__(self):
        return self.account.user.username
class Mentee(models.Model):
    """
    The mentee class, stores attributes specific to a mentee
    """
    account = models.OneToOneField(Account, on_delete=models.CASCADE)
    needs_mentoring = models.BooleanField(default=True)
    needs_urgent_mentoring = models.BooleanField(default=False)
    # Free-text topics the mentee wants help with.
    topics = models.TextField(max_length=512, null=True)
    def __str__(self):
        return self.account.user.username
class MyMentee(models.Model):
    """
    Stores the mentees assigned to a mentor, you can get the mentees assigned to a mentor by
    querying, MyMentee.objects.filter(mentor='current-mentor')
    """
    mentor = models.ForeignKey(Mentor, on_delete=models.CASCADE)
    mentee = models.ForeignKey(Mentee, on_delete=models.CASCADE)
    def __str__(self):
        return self.mentor.account.user.username + ' -> ' + self.mentee.account.user.username
class MyMentor(models.Model):
    """
    For performance gains
    Stores the mentors assigned to a mentee, you can get the mentors assigned to a mentee by
    querying, MyMentor.objects.filter(mentee='current-mentee')
    """
    # Reverse direction of MyMentee, kept as a separate table.
    mentee = models.ForeignKey(Mentee, on_delete=models.CASCADE)
    mentor = models.ForeignKey(Mentor, on_delete=models.CASCADE)
    def __str__(self):
        return self.mentee.account.user.username + ' -> ' + self.mentor.account.user.username
class MenteeSentRequest(models.Model):
    """
    For a mentee to view mentorship requests
    """
    mentee = models.ForeignKey(Mentee, on_delete=models.CASCADE)
    mentor = models.ForeignKey(Mentor, on_delete=models.CASCADE)
    def __str__(self):
        return self.mentee.account.user.username + ' -> ' + self.mentor.account.user.username
class MenteeRoles(models.IntegerChoices):
    """
    The different type of users that can exist. These types are accessed in the
    types of mentee a mentor needs, and also the types of mentor a mentee needs.
    """
    faculty = 1, _('Faculty')
    developer = 2, _('Industry Researcher')
    undergraduate = 3, _('BTech')
    graduate = 4, _('MTech')
    post_graduate = 5, _('PhD')
class MentorRoles(models.IntegerChoices):
    """Roles a mentor can hold (subset of MenteeRoles values 1-2)."""
    faculty = 1, _('Faculty')
    developer = 2, _('Industry Researcher')
class Fields(models.IntegerChoices):
    """
    The different fields of users that can exist
    """
    computer_science = 1, _('Computer Science and Engineering')
    electronics_and_communication = 2, _('Electronics and Communication Engineering')
    computer_science_and_design = 3, _('Computer Science and Design')
    computer_science_and_mathematics = 4, _('Computer Science and Mathematics')
    computer_science_and_social_sciences = 5, _('Computer Science and Social Sciences')
    computer_science_and_artificial_intelligence = 6, _('Computer Science and Artificial Intelligence')
class MentorRoleField(models.Model):
    """
    Stores the mentors qualifications, their role (current / past), their fields(current / past)
    """
    # OneToOne: at most one role/field record per mentor.
    mentor = models.OneToOneField(Mentor, on_delete=models.CASCADE)
    role = models.IntegerField(choices=MentorRoles.choices, null=True)
    field = models.IntegerField(choices=Fields.choices, null=True)
    def __str__(self):
        return "{} -> {} -> {}".format(self.mentor.account.user.username, self.get_role_display(), self.get_field_display())
class MenteeRoleField(models.Model):
    """
    Stores the mentees qualifications, their role (current / past), their fields (current / past)
    """
    mentee = models.OneToOneField(Mentee, on_delete=models.CASCADE)
    role = models.IntegerField(choices=MenteeRoles.choices, null=True)
    field = models.IntegerField(choices=Fields.choices, null=True)
    def __str__(self):
        return "{} -> {} -> {}".format(self.mentee.account.user.username, self.get_role_display(), self.get_field_display())
class MentorExpectedRoleField(models.Model):
    """
    Stores what the mentors expect from mentees in terms of their
    qualifications, their role (current / past), their fields(current / past)
    """
    # NOTE(review): docstring says this describes expected *mentees*, yet
    # the choices come from MentorRoles (Faculty/Industry only), while
    # MenteeExpectedRoleField below uses MenteeRoles — the two choice sets
    # look swapped. Confirm intent before relying on these choices.
    mentor = models.ForeignKey(Mentor, on_delete=models.CASCADE)
    role = models.IntegerField(choices=MentorRoles.choices, null=True)
    field = models.IntegerField(choices=Fields.choices, null=True)
    def __str__(self):
        return self.mentor.account.user.username + ' -> ' + self.get_role_display() + ' -> ' + self.get_field_display()
class MenteeExpectedRoleField(models.Model):
    """
    Stores what the mentees expect from mentors in terms of their
    qualifications, their role (current / past), their fields (current / past)
    NOTE: this might be deleted later on...
    """
    mentee = models.ForeignKey(Mentee, on_delete=models.CASCADE)
    role = models.IntegerField(choices=MenteeRoles.choices, null=True)
    field = models.IntegerField(choices=Fields.choices, null=True)
    def __str__(self):
        return self.mentee.account.user.username + ' -> ' + self.get_role_display() + ' -> ' + self.get_field_display()
class Message(models.Model):
    """
    Table to store the chat messages among users.
    """
    # Distinct related_names so an Account can reverse both directions.
    sender = models.ForeignKey(Account, on_delete=models.CASCADE, related_name='message_sender')
    receiver = models.ForeignKey(Account, on_delete=models.CASCADE, related_name='message_receiver')
    content = models.TextField(max_length=512, null=True)
    time_posted = models.DateTimeField(auto_now_add=True)
    def __str__(self):
        return self.sender.user.username + ' messaged ' + self.receiver.user.username
class Meeting(models.Model):
    """A scheduled meeting between two accounts, with an external URL."""
    creator = models.ForeignKey(Account, on_delete=models.CASCADE, related_name='meeting_creator')
    guest = models.ForeignKey(Account, on_delete=models.CASCADE, related_name='meeting_guest')
    title = models.CharField(max_length=64, default="Untitled Meeting")
    agenda = models.CharField(max_length=128, default="")
    # Scheduled time is user-supplied (auto_now_add disabled explicitly).
    time = models.DateTimeField(auto_now_add=False)
    meeting_url = models.CharField(max_length=128, default="https://www.meet.google.com")
    def __str__(self):
        return self.creator.user.username + ' created a meeting with ' + self.guest.user.username
class MentorResponsibility(models.IntegerChoices):
    """
    Reference: Mail/Github Issue
    """
    # Labels for the responsibility1..responsibility8 flags on Mentor.
    responsibility1 = 1, _('Listen to research proposals/initial research and give suggestions for improvement')
    responsibility2 = 2, _('Read papers written (the final version which the author wants to submit) and give inputs')
    responsibility3 = 3, _('Guide in literature reading')
    responsibility4 = 4, _('Help in understanding difficult concepts, discussing some papers/results')
    responsibility5 = 5, _('Guidance on where to submit a research paper')
    responsibility6 = 6, _('Guidance on the proper conduct of research and literature review')
    responsibility7 = 7, _('Review and comment on the resume')
    responsibility8 = 8, _('Guide on postdoc and other research job possibilities')
class Areas(models.IntegerChoices):
    '''
    Reference: http://csrankings.org/
    '''
    algorithms_and_complexity = 1, _('Algorithms and Complexity')
    artificial_intelligence = 2, _('Artificial Intelligence')
    computational_bio_and_bioinformatics = 3, _('Computational Bio and Bioinformatics')
    computer_architecture = 4, _('Computer Architecture')
    computer_graphics = 5, _('Computer Graphics')
    computer_networks = 6, _('Computer Networks')
    computer_security = 7, _('Computer Security')
    computer_vision = 8, _('Computer Vision')
    cryptography = 9, _('Cryptography')
    databases = 10, _('Databases')
    design_automation = 11, _('Design Automation')
    economics_and_computation = 12, _('Economics and Computation')
    embedded_and_real_time_systems = 13, _('Embedded and Real-Time Systems')
    high_performance_computing = 14, _('High-Performance Computing')
    human_computer_interaction = 15, _('Human-Computer Interaction')
    logic_and_verification = 16, _('Logic and Verification')
    machine_learning_and_data_mining = 17, _('Machine Learning and Data Mining')
    measurement_and_performance_analysis = 18, _('Measurement and Performance Analysis')
    mobile_computing = 19, _('Mobile Computing')
    natural_language_processing = 20, _('Natural Language Processing')
    operating_systems = 21, _('Operating Systems')
    programming_languages = 22, _('Programming Languages')
    robotics = 23, _('Robotics')
    software_engineering = 24, _('Software Engineering')
    the_web_and_information_retrieval = 25, _('The Web and Information Retrieval')
    visualization = 26, _('Visualization')
class MentorArea(models.Model):
    """Research area (and free-text subarea) a mentor works in."""
    # NOTE(review): OneToOneField allows at most one area row per mentor —
    # confirm that a single area per mentor is intended.
    mentor = models.OneToOneField(Mentor, on_delete=models.CASCADE)
    area = models.IntegerField(choices=Areas.choices, null=True)
    subarea = models.CharField(max_length=64, null=True)
    def __str__(self):
        return "{} of area {}".format(self.mentor.account.user.username, self.get_area_display())
class MentorshipRequestMessage(models.Model):
    """
    Store the SOP, commitment, expectations of the mentee which is sent to the mentor at the time of requesting for
    mentorship
    """
    mentor = models.ForeignKey(Mentor, on_delete=models.CASCADE)
    mentee = models.ForeignKey(Mentee, on_delete=models.CASCADE)
    sop = models.TextField(max_length=512, null=True)
    expectations = models.TextField(max_length=256, null=True)
    commitment = models.TextField(max_length=256, null=True)
    def __str__(self):
        return "{} sent a request to {}".format(
            self.mentee.account.user.username, self.mentor.account.user.username)
class MeetingSummary(models.Model):
    """
    Store:
    1. Meeting date
    2. Meeting length (in hours)
    3. Meeting agenda
    4. Meeting todos (action items)
    5. Next meeting date (tentative)
    6. Next meeting agenda
    """
    mentor = models.ForeignKey(Mentor, on_delete=models.CASCADE)
    mentee = models.ForeignKey(Mentee, on_delete=models.CASCADE)
    meeting_date = models.DateTimeField(auto_now_add=False)
    # Length in hours (see __str__ and the class docstring).
    meeting_length = models.FloatField()
    meeting_details = models.TextField(max_length=512)
    meeting_todos = models.TextField(max_length=512, null=True)
    next_meeting_date = models.DateTimeField(auto_now_add=False)
    next_meeting_agenda = models.TextField(max_length=512)
    def __str__(self):
        return "Meeting held at {} of length {} hours".format(
            self.meeting_date, self.meeting_length)
class Milestone(models.Model):
    """A dated milestone recorded for a mentor/mentee pair."""
    mentor = models.ForeignKey(Mentor, on_delete=models.CASCADE)
    mentee = models.ForeignKey(Mentee, on_delete=models.CASCADE)
    content = models.TextField(max_length=512)
    timestamp = models.DateField(null=True, blank=True)
    def __str__(self):
        return f'Mentor: {self.mentor}, Mentee: {self.mentee}'
class DeletedMentorMenteeRelation(models.Model):
    """Audit record kept when a mentorship is ended, with the reason."""
    mentor = models.ForeignKey(Mentor, on_delete=models.CASCADE)
    mentee = models.ForeignKey(Mentee, on_delete=models.CASCADE)
    end_reason = models.TextField(max_length=512)
    date_ended = models.DateTimeField(default=timezone.now)
    def __str__(self):
        return f'[ENDED] Mentor: {self.mentor}, Mentee: {self.mentee}'
class RejectedMentorshipRequest(models.Model):
    """Audit record kept when a mentorship request is rejected."""
    mentor = models.ForeignKey(Mentor, on_delete=models.CASCADE)
    mentee = models.ForeignKey(Mentee, on_delete=models.CASCADE)
    reject_reason = models.TextField(max_length=512)
    date_rejected = models.DateTimeField(default=timezone.now)
    def __str__(self):
        return f'[REJECTED] Mentor: {self.mentor}, Mentee: {self.mentee}'
| 2.421875 | 2 |
update/ldap/update-ldap-dovecot-2.3.py | Igortorrente/iRedMail | 1 | 12758578 | #!/usr/bin/env python3
# Author: <NAME> <zhb _at_ iredmail.org>
# Purpose: Add missing attribute/value pairs required by Dovecot-2.3.
# Date: Apr 12, 2018.
import ldap
# Note:
# * bind_dn must have write privilege on LDAP server.
uri = 'ldap://127.0.0.1:389'
basedn = 'o=domains,dc=example,dc=com'
bind_dn = 'cn=Manager,dc=example,dc=com'
bind_pw = 'password'
# Initialize LDAP connection.
print("* Connecting to LDAP server: {}".format(uri))
conn = ldap.initialize(uri=uri, trace_level=0,)
conn.bind_s(bind_dn, bind_pw)
# Get all mail users.
print("* Get mail accounts ...")
# Only users with at least one of the legacy "*secured" services enabled.
allUsers = conn.search_s(
    basedn,
    ldap.SCOPE_SUBTREE,
    "(&(objectClass=mailUser)(|(enabledService=imapsecured)(enabledService=pop3secured)(enabledService=smtpsecured)(enabledService=sievesecured)(enabledService=managesievesecured)))",
    ['mail', 'enabledService'],
)
total = len(allUsers)
print("* Updating {} user(s).".format(total))
# Counter.
count = 1
for (dn, entry) in allUsers:
    mail = entry['mail'][0]
    if 'enabledService' not in entry:
        continue
    enabledService = entry['enabledService']
    _update = False
    # If old service is disabled for the user, then no need to add the new one.
    # Bytes literals: python-ldap returns attribute values as bytes.
    for old, new in [(b'imapsecured', b'imaptls'),
                     (b'pop3secured', b'pop3tls'),
                     (b'smtpsecured', b'smtptls'),
                     (b'sievesecured', b'sievetls')]:
        if (old in enabledService) and (new not in enabledService):
            enabledService.append(new)
            _update = True
    if _update:
        print("* ({} of {}) Updating user: {}".format(count, total, mail))
        # Replace the whole multi-valued attribute with the augmented list.
        mod_attr = [(ldap.MOD_REPLACE, 'enabledService', enabledService)]
        try:
            conn.modify_s(dn, mod_attr)
        except Exception as e:
            # Best effort: report the failure and continue with other users.
            print("Error while updating user {}: {}".format(mail, repr(e)))
    else:
        print("* [SKIP] No update required for user: {}".format(mail))
    count += 1
# Unbind connection.
print("* Unbind LDAP server.")
conn.unbind()
print("* Update completed.")
| 1.757813 | 2 |
app/api/v1/others_workload.py | run-nerver/performance-management-backend | 20 | 12758579 | <filename>app/api/v1/others_workload.py<gh_stars>10-100
import os, time
import xlrd
from flask import Blueprint, request, current_app
from app.models.others_workload import OthersWorkload
from app import db
from app.models.setting import Settings
from app.models.user import User
from app.utils.others import others_to_dict, others_to_dict_year
from app.utils.utils import float_to_decimal
from app.utils.token import auth
others_workload_bp = Blueprint('others_workload_bp', __name__)
# NOTE(review): evaluated once at import time, so a long-running server keeps
# using the start-up date in uploaded-file names — consider computing this per
# request instead.
today = time.strftime("%Y-%m-%d", time.localtime())
# Fetch users together with their "other workload" records.
@others_workload_bp.route('/others_workload/fetchUserInfo', methods=['GET'])
@auth.login_required
def fetch_UserInfo():
    """Paginated user listing with per-year workload data.

    Query params: ``page``, ``limit`` and ``year`` (required); ``name``
    (fuzzy match), ``jobCatecory`` and ``teacherTitle`` (optional filters).
    """
    param = []
    data = request.args
    if ('name' in data) and data['name']:
        # Fuzzy match on user name.
        param.append(User.name.like('%' + data['name'] + '%'))
    if ('jobCatecory' in data) and data['jobCatecory']:
        param.append(User.job_catecory == data['jobCatecory'])
    if ('teacherTitle' in data) and data['teacherTitle']:
        param.append(User.teacher_title == data['teacherTitle'])
    infos = User.query.filter(*param) \
        .paginate(int(data['page']), int(data['limit']))
    # Pass the year; returns the workload for that year.
    res = others_to_dict_year(infos.items, data['year'])
    return {
        "code": 20000,
        "data": {
            "total": infos.total,
            "items": res
        }
    }
# 批量添加其他工作量 -- batch-import per-user "other workload" scores from Excel.
@others_workload_bp.route('/others_workload/UploadExcelOtherWorkload', methods=['POST'])
@auth.login_required
def upload_excel_create_user():
    """Import workload scores for a year from an uploaded Excel file.

    The sheet "Sheet1" is expected to have the work number in column 0 and
    the twenty score columns in the fixed order listed in ``fields`` below.
    Existing OthersWorkload rows are updated in place; missing ones created.

    Bug fixes vs. the original:
    * the update branch used trailing commas (``x = value,``), which stored
      1-tuples instead of plain values in every column except total_money;
    * the update branch never wrote the ``mentor`` column at all;
    * the saved-file path was computed twice (once with a redundant nested
      os.path.join).
    """
    year = request.args.get('year')
    upload_excel = request.files.get('file')
    saved_path = os.path.join(current_app.config['CONFIG_UPLOAD_PATH'],
                              today + '-' + upload_excel.filename)
    upload_excel.save(saved_path)
    workbook = xlrd.open_workbook(saved_path)
    tem_money = Settings.query.filter_by(name='工作量金额').first()
    sheet = workbook.sheet_by_name("Sheet1")

    # Column layout of the sheet: column 0 is the work number, then these
    # twenty score columns in order (columns 1..20).
    fields = ('attendances', 'union_activities', 'ideological', 'news',
              'counselors', 'characteristics_activities', 'mini_professional',
              'information', 'undergraduatecolleges', 'graduation_design_manage',
              'course_quality', 'organization', 'graduation_design_personal',
              'professional_tab', 'mentor', 'discipline_competition',
              'teaching_watch', 'competition_judges', 'union_work', 'extra_score')

    for r in range(1, sheet.nrows):
        work_number = int(sheet.cell(r, 0).value)
        # Empty cells come back falsy -> treated as 0, like the original code.
        scores = {name: (sheet.cell(r, col).value or 0)
                  for col, name in enumerate(fields, start=1)}
        total_score = sum(scores.values())
        total_money = float_to_decimal(total_score) * tem_money.coefficient

        info = User.query.filter_by(work_number=work_number).first()
        if info:
            if info.others_workload_user:
                # Update the existing record column by column.
                record = info.others_workload_user
                for name, value in scores.items():
                    setattr(record, name, value)
                record.year = year
                record.total_score = total_score
                record.total_money = total_money
            else:
                db.session.add(OthersWorkload(year=year,
                                              user_id=info.id,
                                              total_score=total_score,
                                              total_money=total_money,
                                              **scores))
        db.session.commit()

    infos = OthersWorkload.query.paginate(1, 10)
    res = others_to_dict(infos.items)
    return {
        "code": 20000,
        "data": {
            "total": infos.total,
            "items": res
        }
    }
# 更新用户其他工作量 -- manually adjust one workload record's payout and notes.
@others_workload_bp.route('/others_workload/updateOthersWorkload', methods=['POST'])
@auth.login_required
def update_others_workload():
    """Update total_money and notes of a single OthersWorkload record.

    Expects a JSON body with 'oId', 'totalMoney' and 'notes'.
    """
    data = request.get_json()
    record_id = data['oId']  # renamed: the original shadowed the builtin `id`
    info = OthersWorkload.query.filter_by(id=record_id).first()
    if info is None:
        # Robustness fix: previously a missing record crashed with an
        # AttributeError (HTTP 500). 50000 is assumed to be the frontend's
        # generic error code -- verify against the client convention.
        return {"code": 50000, "message": "OthersWorkload record not found"}
    info.total_money = data['totalMoney']
    info.notes = data['notes']
    db.session.commit()
    res = info.to_json()
    return {
        "code": 20000,
        "data": res
    }
explorer/mqtt_demos/1_mqtt_receiver_demo.py | JBerny/astroplant_explorer | 0 | 12758580 | """
Demo/test program for the MQTT utilities.
See https://github.com/sensemakersamsterdam/astroplant_explorer
"""
# (c) Sensemakersams.org and others. See https://github.com/sensemakersamsterdam/astroplant_explorer
# Author: <NAME>
#
##
# H O W T O U S E
#
# Edit configuration.json and pick a nice 'ae_id' for yourself.
#
# Now start a terminal window #1 on your Pi and run:
# python 1_mqtt_receiver_demo.py
# To monitor MQTT traffic open a second terminal window #2 and run:
# mosquitto_sub -v -t "#"
# Then open a terminal window #3 and run:
# python 1_mqtt_sender_demo.py
# This should get things starting. You can run rhe 1_mqtt_sender_demo.py
# repeatedly. The 1_mqtt_receiver_demo and mosquitto_sub will show the
# messages each time you run it.
# And if you want to send the stop-request to the 1_mqtt_receiver_demo.py, run
# python 1_mqtt_stop_demo.py
# in terminal window #3.
# The mosquitto_sub in terminal #2 you can abort with control-c.
###
# Warning: if import of ae_* module(s) fails, then you need to set up PYTHONPATH.
# To test start python, import sys and type sys.path. The ae 'lib' directory
# should be included in the printed path
# From the standard time library we now import the function sleep()
from time import sleep
# From the mqtt library we import the AE_Local_MQTT class which contains a bunch
# of functions we will use in this script
from ae_util.mqtt import AE_Local_MQTT
# Here we initialize our local MQTT agent.
# It imports your MQTT settings automatically from the configuration.json file.
loc_mqtt = AE_Local_MQTT()
# And now we activate the MQTT connection.
loc_mqtt.setup()
# Further down this program loops, doing the same code over and over again,
# until we set the following global variable to 'True'.
stop = False
# Now we define a so-called call-back function. This fuction is automatically
# executed when 'something' happens. What 'something' is in this case comes
# further down. Here we just define that we do a print when 'something' happens.
def cb1(sub_topic, payload, rec_time):
    """Demo callback #1: echo every message that arrives on 'aap/#'."""
    message = (sub_topic, payload, rec_time)
    print('call_back 1:', *message)
# And here we have more of the same. It will be executed when 'another something'
# happens. And the print out is also a wee bit different.
def cb2(sub_topic, payload, rec_time):
    """Demo callback #2: echo every message that arrives on 'aap/noot/#'."""
    message = (sub_topic, payload, rec_time)
    print('call_back 2:', *message)
# And here in number three. It will be called when 'something #3' happens.
# But it is different than the ones before. It actually does something.
# It sets the variable 'stop' to 'True'. Look again at the explanation
# a couple of lines higher when we initialized the 'stop' variable.
# what do you think that will happen when this function runs?
def cb_stop(sub_topic, payload, rec_time):
    """Callback for 'control/stop': announce and raise the module-level
    stop flag so the main loop below terminates."""
    global stop
    print('Received stop request. 1_mqtt_receiver_demo bailing out!')
    stop = True
# In this script we want to receive MQTT messages. So we need to tell MQTT what
# we want it to send to us. We do this by subscribing to so-called 'topics'.
# The topic '#' is special. It just means everything. Aren't we greedy?
# We also tell MQTT to stash the incoming messages for us for later pick-up.
loc_mqtt.subscribe('#', None)  # all messages queued, without a callback
# And here we do another subscription. This time the topic needs to start
# with 'aap/'. Remember that '#' means anything, so we subscribe to 'aap/one'
# and 'aap/two' and indefinitely more.
# This time we also tell MQTT to run the function 'cb1' when we actually
# get a message with a topic that starts with 'aap/'.
# So (please read the comment back where cb1() was defined), the 'something'
# for 'cb1()' is nothing other than receiving a message with a topic that
# starts with 'aap/'.
loc_mqtt.subscribe('aap/#', cb1)
# And the 'another something' we need to happen for 'cb2()' to run is nothing
# more than receiving a message with a topic starting with 'aap/noot/'.
# But hey, 'aap/noot' also starts with 'aap/'. And so it will trigger the
# 'cb1()' call-back too. So if I send 'aap/noot/yes', then both cb1() and
# cb2() will run. But if I send 'aap/hello', then only cb1() will run.
loc_mqtt.subscribe('aap/noot/#', cb2)
# And now the 3rd one for the 'control/stop' topic. When we get exactly this
# one, we will run the 'cb_stop()' call-back. Which will .....
loc_mqtt.subscribe('control/stop', cb_stop)
# Finally our main loop. It runs until 'stop' is set to True (by cb_stop),
# or alternatively when we do a manual abort with control-c.
print('Abort with control-c to end prematurely.')
try:
    while not stop:
        # Remember that we also did a subscription to '#', meaning
        # everything, and without a call-back -- which means that MQTT
        # stashes those incoming messages for later pick-up.
        # The line below fetches the oldest message in the stash,
        # and -- if one was found -- prints its content.
        sub_topic, payload, rec_time = loc_mqtt.get_message()
        if sub_topic is not None:
            print('Dequeued:', sub_topic, payload, rec_time)
        sleep(0.1)
except KeyboardInterrupt:
    print('\nManually aborted....\nBye bye')
rb/processings/pipeline/svr.py | readerbench/ReaderBench | 1 | 12758581 | from rb.processings.pipeline.estimator import Regressor
from rb.processings.pipeline.dataset import Dataset, Task
from typing import List, Dict
from sklearn import svm
class SVR(Regressor):
    """Support-vector regression estimator backed by sklearn's svm.SVR."""

    def __init__(self, dataset: Dataset, tasks: List[Task], params: Dict[str, str]):
        super().__init__(dataset, tasks, params)
        self.kernel = params["kernel"]
        self.degree = params["degree"]
        self.model = svm.SVR(gamma='scale', kernel=self.kernel, degree=self.degree)

    @classmethod
    def parameters(cls):
        """Hyper-parameter grid explored for this estimator."""
        return {"kernel": ["rbf", "poly", "sigmoid"], "degree": [2, 3, 4, 5]}

    @classmethod
    def valid_config(cls, config):
        """Degree only matters for the polynomial kernel; for other kernels
        keep just the degree-3 configuration to avoid duplicates."""
        if config["kernel"] == "poly":
            return True
        return config["degree"] == 3

    def __str__(self):
        suffix = f"({self.degree})" if self.kernel == "poly" else ""
        return f"SVR - {self.kernel}" + suffix
tests/test_node_wrapping.py | StevenCostiou/reflectivipy | 10 | 12758582 | import pytest
import ast
from .ReflectivityExample import *
import reflectivipy
from reflectivipy import MetaLink
@pytest.fixture(autouse=True)
def setup():
    """Remove any installed metalinks before each test runs."""
    reflectivipy.uninstall_all()
def test_wrap_expr():
    """An Expr node flat-wraps to itself, with and without a metalink."""
    node = expr_sample_node()
    assert type(node) is ast.Expr
    transformation = node.wrapper.flat_wrap()
    assert len(transformation) == 1
    assert transformation[0] is node
    link = MetaLink(ReflectivityExample(), 'tag_exec_', 'before', [])
    node.links.append(link)
    assert type(node) is ast.Expr
    transformation = node.wrapper.flat_wrap()
    assert len(transformation) == 1
    assert transformation[0] is node
def test_wrap_call():
    """A linked Call is flattened into temp assignments; the rewritten call
    reads its argument and receiver from the generated temp names."""
    node = call_sample_node().value
    assert type(node) is ast.Call
    transformation = node.wrapper.flat_wrap()
    assert len(transformation) == 1
    assert transformation[0] is node
    link = MetaLink(ReflectivityExample(), 'tag_exec_', 'before', [])
    node.links.append(link)
    assert type(node) is ast.Call
    transformation = node.wrapper.flat_wrap()
    assert len(transformation) == 4
    assert type(transformation[0]) is ast.Assign
    assert transformation[0].value is node.args[0]
    assert transformation[0] is not node
    assert len(transformation[3].value.args) == 1
    assert type(transformation[3].value.args[0]) is ast.Name
    assert transformation[3].value.args[0].id is node.args[0].temp_name
    assert transformation[3].value.func.value.id is node.func.value.temp_name
def test_wrap_call_in_assign():
    """An Assign whose value is a linked Call is flattened into five
    statements, including a temp binding for the 'self' receiver."""
    node = method_with_args_sample_node().body[0].body[0]
    link = MetaLink(ReflectivityExample(), 'tag_exec_', 'before', [])
    node.value.links.append(link)
    assert type(node) is ast.Assign
    assert type(node.value) is ast.Call
    transformation = node.wrapper.flat_wrap()
    assert len(transformation) == 5
    assert type(transformation[0]) == ast.Assign
    assert transformation[0].value is node.value.args[0]
    assert type(transformation[1]) is ast.Assign
    assert transformation[1].value.id == 'self'
    assert transformation[0] is not node
    assert len(transformation[3].value.args) == 1
    assert type(transformation[3].value.args[0]) is ast.Name
    assert transformation[3].value.args[0].id is node.value.args[0].temp_name
    assert transformation[3].value.func.value.id is node.value.func.value.temp_name
def test_wrap_complex_expr_call():
    """Linking a call argument flattens the whole expression; the linked
    sub-node keeps its rf_id in the generated temp assignment."""
    node = complex_expr_call_sample_node()
    link = MetaLink(ReflectivityExample(), 'tag_exec_', 'before', [])
    node.value.args[0].links.append(link)
    transformation = node.wrapper.flat_wrap()
    assert len(transformation) == 6
    assert type(transformation[3]) is ast.Assign
    assert transformation[3].value.rf_id is node.value.args[0].rf_id
    assert transformation[3] is not node
def test_call_receiver_flattening():
    """The complex receiver of a linked call is hoisted into a temp, and
    the rewritten call reads the receiver from that temp."""
    node = call_with_complex_receiver_sample_node()
    link = MetaLink(ReflectivityExample(), 'tag_exec_', 'before', [])
    node.value.links.append(link)
    transformation = node.wrapper.flat_wrap()
    assert len(transformation) == 4
    assert transformation[1].value.func.value.id == 'self'
    assert transformation[3].value.func.value.id == transformation[1].targets[0].id
def test_call_flattening():
    """Linking the receiver itself flattens it in two steps; each rewritten
    call chains through the previously generated temp name."""
    node = call_with_complex_receiver_sample_node()
    link = MetaLink(ReflectivityExample(), 'tag_exec_', 'before', [])
    node.value.func.value.links.append(link)
    transformation = node.wrapper.flat_wrap()
    assert len(transformation) == 5
    assert transformation[1].value.id == 'self'
    assert transformation[3].value.func.value.id == transformation[1].targets[0].id
    assert transformation[4].value.func.value.id == transformation[3].targets[0].id
def test_wrap_assign():
    """An Assign flat-wraps to itself without links; once linked, it is
    rewritten as three statements starting with a temp for its value."""
    node = sample_node()
    assert type(node) is ast.Assign
    transformation = node.wrapper.flat_wrap()
    assert len(transformation) == 1
    assert transformation[0] is node
    link = MetaLink(ReflectivityExample(), 'tag_exec_', 'before', [])
    node.links.append(link)
    assert type(node) is ast.Assign
    transformation = node.wrapper.flat_wrap()
    assert len(transformation) == 3
    assert type(transformation[0]) is ast.Assign
    assert transformation[0].value is node.value
    assert transformation[0] is not node
def test_flatten_children():
    # TODO: placeholder -- no assertions implemented yet.
    pass
| 2.171875 | 2 |
ivadomed/__init__.py | AshkanTaghipour/ivadomed | 87 | 12758583 | from .utils import __version__, __ivadomed_dir__
| 1.101563 | 1 |
observations/r/bmw.py | hajime9652/observations | 199 | 12758584 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def bmw(path):
  """Daily Log Returns on BMW Share Price

  These data are the daily log returns on BMW share price from Tuesday 2nd
  January 1973 until Tuesday 23rd July 1996. The data are contained in a
  numeric vector. The dates of each observation are contained in a
  `times` attribute (the CSV's first column, parsed as the index). Note
  that these data form an irregular time series because no trading takes
  place at the weekend.

  Args:
    path: str.
      Path to directory which either stores file or otherwise file will
      be downloaded and extracted there.
      Filename is `bmw.csv`.

  Returns:
    Tuple of np.ndarray `x_train` with 6146 rows and 1 columns and
    dictionary `metadata` of column headers (feature names).
  """
  import pandas as pd
  path = os.path.expanduser(path)
  filename = 'bmw.csv'
  # Compute the full path once instead of re-joining it for every use.
  filepath = os.path.join(path, filename)
  if not os.path.exists(filepath):
    url = 'http://dustintran.com/data/r/evir/bmw.csv'
    maybe_download_and_extract(path, url,
                               save_file_name='bmw.csv',
                               resume=False)

  # First column is the date index; parse it so the returns keep their dates.
  data = pd.read_csv(filepath, index_col=0, parse_dates=True)
  x_train = data.values
  metadata = {'columns': data.columns}
  return x_train, metadata
| 3.265625 | 3 |
src/commands/ChangeModeCommand.py | UCHH57/BufferDestroyer69 | 0 | 12758585 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from Command import Command
from Option import Mode
class ChangeModeCommand(Command):
    """Command 'cm': switch how the payload is delivered to the target
    binary (piped through stdin, or passed as a program argument)."""

    command = "cm"
    usage = "[piped | arg]"
    help = "\tChange binary execution mode.\n\targ:\t./vulnBin payload\n\tpiped:\techo payload | ./vulnBin"

    def set_option(self, option, cmd):
        """Set option.execMode according to cmd[1]; unknown values are ignored."""
        requested = cmd[1]
        if requested == "arg":
            option.execMode = Mode.ARG
        elif requested == "piped":
            option.execMode = Mode.PIPED
| 2.78125 | 3 |
general/add-watermark-pdf/pdf_watermarker.py | caesarcc/python-code-tutorials | 1,059 | 12758586 | from PyPDF4 import PdfFileReader, PdfFileWriter
from PyPDF4.pdf import ContentStream
from PyPDF4.generic import TextStringObject, NameObject
from PyPDF4.utils import b_
import os
import argparse
from io import BytesIO
from typing import Tuple
# Import the reportlab library
from reportlab.pdfgen import canvas
# The size of the page supposedly A4
from reportlab.lib.pagesizes import A4
# The color of the watermark
from reportlab.lib import colors
# Watermark appearance configuration.
PAGESIZE = A4
FONTNAME = 'Helvetica-Bold'
FONTSIZE = 40
# The watermark color: either a reportlab color object...
# COLOR = colors.lightgrey
# ...or simply an RGB tuple (handled by create_watermark below)
# COLOR = (190, 190, 190)
COLOR = colors.red
# The position attributes of the watermark
X = 250
Y = 10
# The rotation angle in order to display the watermark diagonally if needed
ROTATION_ANGLE = 45
def get_info(input_file: str):
    """
    Extract and print the PDF file's metadata.

    Returns (True, info_dict). Author/title/etc. are only included when the
    PDF is not encrypted.
    """
    # If PDF is encrypted the file metadata cannot be extracted
    with open(input_file, 'rb') as pdf_file:
        pdf_reader = PdfFileReader(pdf_file, strict=False)
        output = {
            "File": input_file, "Encrypted": ("True" if pdf_reader.isEncrypted else "False")
        }
        if not pdf_reader.isEncrypted:
            info = pdf_reader.getDocumentInfo()
            num_pages = pdf_reader.getNumPages()
            output["Author"] = info.author
            output["Creator"] = info.creator
            output["Producer"] = info.producer
            output["Subject"] = info.subject
            output["Title"] = info.title
            output["Number of pages"] = num_pages
        # Display the collected metadata on stdout.
        print("## File Information ##################################################")
        print("\n".join("{}:{}".format(i, j) for i, j in output.items()))
        print("######################################################################")
    return True, output
def get_output_file(input_file: str, output_file: str):
    """Decide where the processed PDF should be written.

    Returns (is_temporary, path): a temporary 'tmp_<name>' sibling of the
    input when no distinct output file was given (empty, or equal to the
    input), otherwise the requested output file unchanged.
    """
    needs_temp = (not output_file) or (output_file == input_file)
    if not needs_temp:
        return False, output_file
    folder, name = os.path.split(input_file)
    return True, os.path.join(folder, 'tmp_' + name)
def create_watermark(wm_text: str):
    """
    Creates a one-page watermark template as an in-memory PDF.

    Returns (True, BytesIO) on success, or (False, None) when wm_text is empty.
    """
    if wm_text:
        # Generate the output to a memory buffer
        output_buffer = BytesIO()
        # Default Page Size = A4
        c = canvas.Canvas(output_buffer, pagesize=PAGESIZE)
        # you can also add image instead of text
        # c.drawImage("logo.png", X, Y, 160, 160)
        # Set the size and type of the font
        c.setFont(FONTNAME, FONTSIZE)
        # Set the color: RGB tuples are normalised to 0..1 floats
        if isinstance(COLOR, tuple):
            # (the genexp's 'c' is its own loop variable; the canvas 'c' is untouched)
            color = (c/255 for c in COLOR)
            c.setFillColorRGB(*color)
        else:
            c.setFillColor(COLOR)
        # Rotate according to the configured parameter
        c.rotate(ROTATION_ANGLE)
        # Position according to the configured parameter
        c.drawString(X, Y, wm_text)
        c.save()
        return True, output_buffer
    return False, None
def save_watermark(wm_buffer, output_file):
    """
    Saves the generated watermark template to disk.

    wm_buffer: in-memory buffer (e.g. BytesIO) exposing getbuffer().
    output_file: destination path.
    Returns True on success.
    """
    # The with-statement closes the file; the original's explicit close()
    # inside the block was redundant.
    with open(output_file, mode='wb') as f:
        f.write(wm_buffer.getbuffer())
    return True
def watermark_pdf(input_file: str, wm_text: str, pages: Tuple = None):
    """
    Adds a watermark to a pdf file.

    input_file: path of the PDF to watermark.
    wm_text: watermark text; an empty string yields a failure result.
    pages: optional collection of page-number strings to restrict watermarking.
    Returns (ok, pdf_reader, pdf_writer); reader/writer are None on failure.
    """
    result, wm_buffer = create_watermark(wm_text)
    if not result:
        # Bug fix: the original fell through to the final return with
        # pdf_reader/pdf_writer unbound, raising NameError for empty wm_text.
        return False, None, None
    wm_reader = PdfFileReader(wm_buffer)
    # NOTE(review): this file handle is never explicitly closed on the
    # exception path; the caller closes pdf_reader.stream on success.
    pdf_reader = PdfFileReader(open(input_file, 'rb'), strict=False)
    pdf_writer = PdfFileWriter()
    try:
        for page_number in range(pdf_reader.getNumPages()):
            # Optionally watermark only the requested pages.
            if pages and str(page_number) not in pages:
                continue
            page = pdf_reader.getPage(page_number)
            page.mergePage(wm_reader.getPage(0))
            pdf_writer.addPage(page)
    except Exception as e:
        print("Exception = ", e)
        return False, None, None
    return True, pdf_reader, pdf_writer
def unwatermark_pdf(input_file: str, wm_text: str, pages: Tuple = None):
    """
    Removes a text watermark from the pdf file by blanking matching Tj
    string operands in each page's content stream.

    Returns (True, pdf_reader, pdf_writer).
    """
    pdf_reader = PdfFileReader(open(input_file, 'rb'), strict=False)
    pdf_writer = PdfFileWriter()
    for page in range(pdf_reader.getNumPages()):
        # Optionally restrict to specific pages
        if pages:
            if str(page) not in pages:
                continue
        page = pdf_reader.getPage(page)
        # Get the page content
        content_object = page["/Contents"].getObject()
        content = ContentStream(content_object, pdf_reader)
        # Loop through all the page's content-stream operations
        for operands, operator in content.operations:
            # For each Tj (show text) operator, blank the operand when it
            # starts with the watermark text
            if operator == b_("Tj"):
                text = operands[0]
                if isinstance(text, str) and text.startswith(wm_text):
                    operands[0] = TextStringObject('')
        page.__setitem__(NameObject('/Contents'), content)
        pdf_writer.addPage(page)
    return True, pdf_reader, pdf_writer
def watermark_unwatermark_file(**kwargs):
    """
    Watermark or unwatermark a single PDF file.

    Keyword args: input_file, output_file, wm_text, action ('watermark' or
    'unwatermark'), mode ('RAM' or 'HDD'), pages.
    NOTE(review): if action is neither recognised value, 'result' below is
    unbound and this function raises NameError.
    """
    input_file = kwargs.get('input_file')
    wm_text = kwargs.get('wm_text')
    # watermark -> Watermark
    # unwatermark -> Unwatermark
    action = kwargs.get('action')
    # HDD -> Temporary files are saved on the Hard Disk Drive and then deleted
    # RAM -> Temporary files are saved in memory and then deleted.
    mode = kwargs.get('mode')
    pages = kwargs.get('pages')
    temporary, output_file = get_output_file(
        input_file, kwargs.get('output_file'))
    if action == "watermark":
        result, pdf_reader, pdf_writer = watermark_pdf(
            input_file=input_file, wm_text=wm_text, pages=pages)
    elif action == "unwatermark":
        result, pdf_reader, pdf_writer = unwatermark_pdf(
            input_file=input_file, wm_text=wm_text, pages=pages)
    # Completed successfully
    if result:
        # Generate to memory
        if mode == "RAM":
            output_buffer = BytesIO()
            pdf_writer.write(output_buffer)
            pdf_reader.stream.close()
            # No need to create a temporary file in RAM Mode
            if temporary:
                output_file = input_file
            with open(output_file, mode='wb') as f:
                f.write(output_buffer.getbuffer())
                f.close()
        elif mode == "HDD":
            # Generate to a new file on the hard disk
            with open(output_file, 'wb') as pdf_output_file:
                pdf_writer.write(pdf_output_file)
                pdf_output_file.close()
            pdf_reader.stream.close()
            if temporary:
                if os.path.isfile(input_file):
                    os.replace(output_file, input_file)
                output_file = input_file
def watermark_unwatermark_folder(**kwargs):
    """
    Watermarks (or unwatermarks) every PDF file within a specified path,
    optionally recursing into sub-directories.
    """
    input_folder = kwargs.get('input_folder')
    wm_text = kwargs.get('wm_text')
    # Run in recursive mode
    recursive = kwargs.get('recursive')
    # watermark -> Watermark
    # unwatermark -> Unwatermark
    action = kwargs.get('action')
    # HDD -> Temporary files are saved on the Hard Disk Drive and then deleted
    # RAM -> Temporary files are saved in memory and then deleted.
    mode = kwargs.get('mode')
    pages = kwargs.get('pages')
    # Loop through the files within the input folder.
    for foldername, dirs, filenames in os.walk(input_folder):
        for filename in filenames:
            # Only process PDF files
            if not filename.endswith('.pdf'):
                continue
            # PDF File found
            inp_pdf_file = os.path.join(foldername, filename)
            print("Processing file:", inp_pdf_file)
            watermark_unwatermark_file(input_file=inp_pdf_file, output_file=None,
                                       wm_text=wm_text, action=action, mode=mode, pages=pages)
        # Non-recursive mode: stop after the top-level folder.
        if not recursive:
            break
def is_valid_path(path):
    """
    Validates the inputted path: returns it if it names an existing file or
    directory, otherwise raises ValueError.
    """
    if not path:
        raise ValueError("Invalid Path")
    # One combined check replaces the original's duplicated file/dir branches.
    if os.path.isfile(path) or os.path.isdir(path):
        return path
    raise ValueError(f"Invalid Path {path}")
def parse_args():
    """
    Parse and display the user's command line parameters; returns them
    as a plain dict.
    """
    parser = argparse.ArgumentParser(description="Available Options")
    parser.add_argument('-i', '--input_path', dest='input_path', type=is_valid_path,
                        required=True, help="Enter the path of the file or the folder to process")
    parser.add_argument('-a', '--action', dest='action', choices=[
                        'watermark', 'unwatermark'], type=str, default='watermark',
                        help="Choose whether to watermark or to unwatermark")
    parser.add_argument('-m', '--mode', dest='mode', choices=['RAM', 'HDD'], type=str,
                        default='RAM', help="Choose whether to process on the hard disk drive or in memory")
    parser.add_argument('-w', '--watermark_text', dest='watermark_text',
                        type=str, required=True, help="Enter a valid watermark text")
    # NOTE(review): type=tuple converts the raw string into a tuple of
    # characters (e.g. "[2,4]" -> ('[','2',',','4',']')); single-digit page
    # numbers happen to match downstream str(page) membership checks, but
    # multi-digit pages will not. Consider a proper parser.
    parser.add_argument('-p', '--pages', dest='pages', type=tuple,
                        help="Enter the pages to consider e.g.: [2,4]")
    # Peek at the input path to decide which extra options apply.
    path = parser.parse_known_args()[0].input_path
    if os.path.isfile(path):
        parser.add_argument('-o', '--output_file', dest='output_file',
                            type=str, help="Enter a valid output file")
    if os.path.isdir(path):
        parser.add_argument('-r', '--recursive', dest='recursive', default=False, type=lambda x: (
            str(x).lower() in ['true', '1', 'yes']), help="Process Recursively or Non-Recursively")
    # Parse the command line arguments
    args = vars(parser.parse_args())
    # Display the command line arguments
    print("## Command Arguments #################################################")
    print("\n".join("{}:{}".format(i, j) for i, j in args.items()))
    print("######################################################################")
    return args
if __name__ == '__main__':
    # Parsing command line arguments entered by user
    args = parse_args()
    # If the input path is a single file
    if os.path.isfile(args['input_path']):
        # Extracting File Info
        get_info(input_file=args['input_path'])
        # Watermarking or unwatermarking the file
        watermark_unwatermark_file(
            input_file=args['input_path'], wm_text=args['watermark_text'], action=args[
                'action'], mode=args['mode'], output_file=args['output_file'], pages=args['pages']
        )
    # If the input path is a folder
    elif os.path.isdir(args['input_path']):
        # Watermarking or unwatermarking every PDF in the folder
        watermark_unwatermark_folder(
            input_folder=args['input_path'], wm_text=args['watermark_text'],
            action=args['action'], mode=args['mode'], recursive=args['recursive'], pages=args['pages']
        )
| 3.015625 | 3 |
stubs.min/System/Security/AccessControl_parts/DirectoryObjectSecurity.py | ricardyn/ironpython-stubs | 1 | 12758587 | class DirectoryObjectSecurity(ObjectSecurity):
""" Provides the ability to control access to directory objects without direct manipulation of Access Control Lists (ACLs). """
def AccessRuleFactory(self,identityReference,accessMask,isInherited,inheritanceFlags,propagationFlags,type,objectType=None,inheritedObjectType=None):
"""
AccessRuleFactory(self: DirectoryObjectSecurity,identityReference: IdentityReference,accessMask: int,isInherited: bool,inheritanceFlags: InheritanceFlags,propagationFlags: PropagationFlags,type: AccessControlType,objectType: Guid,inheritedObjectType: Guid) -> AccessRule
Initializes a new instance of the System.Security.AccessControl.AccessRule
class with the specified values.
identityReference: The identity to which the access rule applies. It must be an object that can
be cast as a System.Security.Principal.SecurityIdentifier.
accessMask: The access mask of this rule. The access mask is a 32-bit collection of
anonymous bits,the meaning of which is defined by the individual integrators.
isInherited: true if this rule is inherited from a parent container.
inheritanceFlags: Specifies the inheritance properties of the access rule.
propagationFlags: Specifies whether inherited access rules are automatically propagated. The
propagation flags are ignored if inheritanceFlags is set to
System.Security.AccessControl.InheritanceFlags.None.
type: Specifies the valid access control type.
objectType: The identity of the class of objects to which the new access rule applies.
inheritedObjectType: The identity of the class of child objects which can inherit the new access
rule.
Returns: The System.Security.AccessControl.AccessRule object that this method creates.
"""
pass
def AddAccessRule(self,*args):
"""
AddAccessRule(self: DirectoryObjectSecurity,rule: ObjectAccessRule)
Adds the specified access rule to the Discretionary Access Control List (DACL)
associated with this System.Security.AccessControl.DirectoryObjectSecurity
object.
rule: The access rule to add.
"""
pass
def AddAuditRule(self,*args):
"""
AddAuditRule(self: DirectoryObjectSecurity,rule: ObjectAuditRule)
Adds the specified audit rule to the System Access Control List (SACL)
associated with this System.Security.AccessControl.DirectoryObjectSecurity
object.
rule: The audit rule to add.
"""
pass
def AuditRuleFactory(self,identityReference,accessMask,isInherited,inheritanceFlags,propagationFlags,flags,objectType=None,inheritedObjectType=None):
"""
AuditRuleFactory(self: DirectoryObjectSecurity,identityReference: IdentityReference,accessMask: int,isInherited: bool,inheritanceFlags: InheritanceFlags,propagationFlags: PropagationFlags,flags: AuditFlags,objectType: Guid,inheritedObjectType: Guid) -> AuditRule
Initializes a new instance of the System.Security.AccessControl.AuditRule class
with the specified values.
identityReference: The identity to which the audit rule applies. It must be an object that can be
cast as a System.Security.Principal.SecurityIdentifier.
accessMask: The access mask of this rule. The access mask is a 32-bit collection of
anonymous bits,the meaning of which is defined by the individual integrators.
isInherited: true if this rule is inherited from a parent container.
inheritanceFlags: Specifies the inheritance properties of the audit rule.
propagationFlags: Specifies whether inherited audit rules are automatically propagated. The
propagation flags are ignored if inheritanceFlags is set to
System.Security.AccessControl.InheritanceFlags.None.
flags: Specifies the conditions for which the rule is audited.
objectType: The identity of the class of objects to which the new audit rule applies.
inheritedObjectType: The identity of the class of child objects which can inherit the new audit rule.
Returns: The System.Security.AccessControl.AuditRule object that this method creates.
"""
pass
def GetAccessRules(self,includeExplicit,includeInherited,targetType):
"""
GetAccessRules(self: DirectoryObjectSecurity,includeExplicit: bool,includeInherited: bool,targetType: Type) -> AuthorizationRuleCollection
Gets a collection of the access rules associated with the specified security
identifier.
includeExplicit: true to include access rules explicitly set for the object.
includeInherited: true to include inherited access rules.
targetType: The security identifier for which to retrieve access rules. This must be an
object that can be cast as a System.Security.Principal.SecurityIdentifier
object.
Returns: The collection of access rules associated with the specified
System.Security.Principal.SecurityIdentifier object.
"""
pass
def GetAuditRules(self, includeExplicit, includeInherited, targetType):
    # Generated stub: the real implementation is supplied by the CLR at runtime.
    """
    GetAuditRules(self: DirectoryObjectSecurity,includeExplicit: bool,includeInherited: bool,targetType: Type) -> AuthorizationRuleCollection

    Gets a collection of the audit rules associated with the specified security
    identifier.

    includeExplicit: true to include audit rules explicitly set for the object.
    includeInherited: true to include inherited audit rules.
    targetType: The security identifier for which to retrieve audit rules. This must be an
        object that can be cast as a System.Security.Principal.SecurityIdentifier
        object.

    Returns: The collection of audit rules associated with the specified
        System.Security.Principal.SecurityIdentifier object.
    """
    pass
def RemoveAccessRule(self, *args):
    # Generated stub: the real implementation is supplied by the CLR at runtime.
    """
    RemoveAccessRule(self: DirectoryObjectSecurity,rule: ObjectAccessRule) -> bool

    Removes access rules that contain the same security identifier and access mask
    as the specified access rule from the Discretionary Access Control List (DACL)
    associated with this System.Security.AccessControl.DirectoryObjectSecurity
    object.

    rule: The access rule to remove.
    Returns: true if the access rule was successfully removed; otherwise,false.
    """
    pass
def RemoveAccessRuleAll(self, *args):
    # Generated stub: the real implementation is supplied by the CLR at runtime.
    """
    RemoveAccessRuleAll(self: DirectoryObjectSecurity,rule: ObjectAccessRule)

    Removes all access rules that have the same security identifier as the
    specified access rule from the Discretionary Access Control List (DACL)
    associated with this System.Security.AccessControl.DirectoryObjectSecurity
    object.

    rule: The access rule to remove.
    """
    pass
def RemoveAccessRuleSpecific(self, *args):
    # Generated stub: the real implementation is supplied by the CLR at runtime.
    """
    RemoveAccessRuleSpecific(self: DirectoryObjectSecurity,rule: ObjectAccessRule)

    Removes all access rules that exactly match the specified access rule from the
    Discretionary Access Control List (DACL) associated with this
    System.Security.AccessControl.DirectoryObjectSecurity object.

    rule: The access rule to remove.
    """
    pass
def RemoveAuditRule(self, *args):
    # Generated stub: the real implementation is supplied by the CLR at runtime.
    """
    RemoveAuditRule(self: DirectoryObjectSecurity,rule: ObjectAuditRule) -> bool

    Removes audit rules that contain the same security identifier and access mask
    as the specified audit rule from the System Access Control List (SACL)
    associated with this System.Security.AccessControl.CommonObjectSecurity object.

    rule: The audit rule to remove.
    Returns: true if the audit rule was successfully removed; otherwise,false.
    """
    pass
def RemoveAuditRuleAll(self, *args):
    # Generated stub: the real implementation is supplied by the CLR at runtime.
    """
    RemoveAuditRuleAll(self: DirectoryObjectSecurity,rule: ObjectAuditRule)

    Removes all audit rules that have the same security identifier as the specified
    audit rule from the System Access Control List (SACL) associated with this
    System.Security.AccessControl.DirectoryObjectSecurity object.

    rule: The audit rule to remove.
    """
    pass
def RemoveAuditRuleSpecific(self, *args):
    # Generated stub: the real implementation is supplied by the CLR at runtime.
    """
    RemoveAuditRuleSpecific(self: DirectoryObjectSecurity,rule: ObjectAuditRule)

    Removes all audit rules that exactly match the specified audit rule from the
    System Access Control List (SACL) associated with this
    System.Security.AccessControl.DirectoryObjectSecurity object.

    rule: The audit rule to remove.
    """
    pass
def ResetAccessRule(self, *args):
    # Generated stub: the real implementation is supplied by the CLR at runtime.
    """
    ResetAccessRule(self: DirectoryObjectSecurity,rule: ObjectAccessRule)

    Removes all access rules in the Discretionary Access Control List (DACL)
    associated with this System.Security.AccessControl.DirectoryObjectSecurity
    object and then adds the specified access rule.

    rule: The access rule to reset.
    """
    pass
def SetAccessRule(self, *args):
    # Generated stub: the real implementation is supplied by the CLR at runtime.
    """
    SetAccessRule(self: DirectoryObjectSecurity,rule: ObjectAccessRule)

    Removes all access rules that contain the same security identifier and
    qualifier as the specified access rule in the Discretionary Access Control List
    (DACL) associated with this
    System.Security.AccessControl.DirectoryObjectSecurity object and then adds the
    specified access rule.

    rule: The access rule to set.
    """
    pass
def SetAuditRule(self, *args):
    # Generated stub: the real implementation is supplied by the CLR at runtime.
    """
    SetAuditRule(self: DirectoryObjectSecurity,rule: ObjectAuditRule)

    Removes all audit rules that contain the same security identifier and qualifier
    as the specified audit rule in the System Access Control List (SACL) associated
    with this System.Security.AccessControl.DirectoryObjectSecurity object and then
    adds the specified audit rule.

    rule: The audit rule to set.
    """
    pass
@staticmethod
def __new__(self, *args):  # cannot find CLR constructor
    # Generated stub; the docstring lists the CLR constructor overloads.
    """
    __new__(cls: type)
    __new__(cls: type,securityDescriptor: CommonSecurityDescriptor)
    """
    pass
# Generated property stubs: the lambda triples stand in for the real CLR
# get/set/delete accessors; the string after each property documents it.
AccessRulesModified = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets or sets a Boolean value that specifies whether the access rules associated with this System.Security.AccessControl.ObjectSecurity object have been modified.
"""

AuditRulesModified = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets or sets a Boolean value that specifies whether the audit rules associated with this System.Security.AccessControl.ObjectSecurity object have been modified.
"""

GroupModified = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets or sets a Boolean value that specifies whether the group associated with the securable object has been modified.
"""

IsContainer = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets a Boolean value that specifies whether this System.Security.AccessControl.ObjectSecurity object is a container object.
"""

IsDS = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets a Boolean value that specifies whether this System.Security.AccessControl.ObjectSecurity object is a directory object.
"""

OwnerModified = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets or sets a Boolean value that specifies whether the owner of the securable object has been modified.
"""
| 2.40625 | 2 |
vega/algorithms/nas/modnas/trainer/torch/image_cls.py | zjzh/vega | 0 | 12758588 | # -*- coding:utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image classification Trainer."""
import torch
import torch.nn as nn
from modnas import backend
from modnas.registry.trainer import register
from ..base import TrainerBase
def accuracy(output, target, topk=(1, )):
    """Compute the precision@k of `output` logits against `target`.

    Returns one fraction-correct tensor per k in `topk`.  `target` may be
    given either as class indices or as one-hot rows.
    """
    kmax = max(topk)
    num_samples = target.size(0)

    # Indices of the kmax highest-scoring classes, one column per sample.
    _, top_indices = output.topk(kmax, 1, True, True)
    top_indices = top_indices.t()

    # Collapse one-hot labels to class indices.
    if target.ndimension() > 1:
        target = target.max(1)[1]

    hits = top_indices.eq(target.view(1, -1).expand_as(top_indices))

    results = []
    for k in topk:
        num_hits = hits[:k].reshape(-1).float().sum(0)
        results.append(num_hits.mul_(1.0 / num_samples))
    return results
@register
class ImageClsTrainer(TrainerBase):
    """Image classification Trainer class.

    Components (optimizer, LR scheduler, data provider, criterion) are given
    as configs at construction time and instantiated through the backend in
    :meth:`init` once the model is available.
    """

    def __init__(self,
                 writer=None,
                 expman=None,
                 data_provider=None,
                 optimizer=None,
                 lr_scheduler=None,
                 criterion='CrossEntropyLoss',
                 w_grad_clip=0):
        super().__init__(writer)
        self.w_grad_clip = w_grad_clip
        self.expman = expman
        # Built lazily in init(); stay None until then.
        self.optimizer = None
        self.lr_scheduler = None
        self.data_provider = None
        self.criterion = None
        config = {
            'optimizer': optimizer,
            'lr_scheduler': lr_scheduler,
            'data_provider': data_provider,
            'criterion': criterion,
        }
        self.config = config

    def init(self, model, config=None):
        """Initialize trainer states (instantiate configured components)."""
        self.config.update(config or {})
        if self.config['optimizer']:
            self.optimizer = backend.get_optimizer(model.parameters(), self.config['optimizer'], config)
        if self.config['lr_scheduler']:
            self.lr_scheduler = backend.get_lr_scheduler(self.optimizer, self.config['lr_scheduler'], config)
        if self.config['data_provider']:
            self.data_provider = backend.get_data_provider(self.config['data_provider'])
        if self.config['criterion']:
            self.criterion = backend.get_criterion(self.config['criterion'], getattr(model, 'device_ids', None))
        self.device = self.config.get('device', backend.get_device())

    def get_num_train_batch(self, epoch):
        """Return number of train batches."""
        return 0 if self.data_provider is None else self.data_provider.get_num_train_batch(epoch=epoch)

    def get_num_valid_batch(self, epoch):
        """Return number of valid batches."""
        return 0 if self.data_provider is None else self.data_provider.get_num_valid_batch(epoch=epoch)

    def get_next_train_batch(self):
        """Return next train batch, moved to the trainer's device."""
        return self.proc_batch(self.data_provider.get_next_train_batch())

    def get_next_valid_batch(self):
        """Return next valid batch, moved to the trainer's device."""
        return self.proc_batch(self.data_provider.get_next_valid_batch())

    def proc_batch(self, batch):
        """Return processed data batch (each tensor moved to self.device)."""
        return tuple(v.to(device=self.device, non_blocking=True) for v in batch)

    def state_dict(self):
        """Return current states."""
        return {
            'optimizer': self.optimizer.state_dict(),
            'lr_scheduler': self.lr_scheduler.state_dict(),
        }

    def load_state_dict(self, sd):
        """Resume states."""
        if self.optimizer is not None:
            self.optimizer.load_state_dict(sd['optimizer'])
        if self.lr_scheduler is not None:
            self.lr_scheduler.load_state_dict(sd['lr_scheduler'])

    def get_lr(self):
        """Return current learning rate.

        Falls back to the optimizer's param group when no scheduler is set.
        """
        if self.lr_scheduler:
            if hasattr(self.lr_scheduler, 'get_last_lr'):
                return self.lr_scheduler.get_last_lr()[0]
            return self.lr_scheduler.get_lr()[0]
        return self.optimizer.param_groups[0]['lr']

    def get_optimizer(self):
        """Return optimizer."""
        return self.optimizer

    def loss(self, output=None, data=None, model=None):
        """Return loss."""
        return None if self.criterion is None else self.criterion(None, None, output, *data)

    def train_epoch(self, estim, model, tot_steps, epoch, tot_epochs):
        """Train for one epoch."""
        for step in range(tot_steps):
            self.train_step(estim, model, epoch, tot_epochs, step, tot_steps)

    def train_step(self, estim, model, epoch, tot_epochs, step, tot_steps):
        """Train for one step; returns a metrics dict for the batch."""
        optimizer = self.optimizer
        lr_scheduler = self.lr_scheduler
        lr = self.get_lr()
        if step == 0:
            # New epoch: rewind the training iterator.
            self.data_provider.reset_train_iter()
        model.train()
        batch = self.get_next_train_batch()
        trn_X, trn_y = batch
        optimizer.zero_grad()
        loss, logits = estim.loss_output(batch, model=model, mode='train')
        loss.backward()
        # gradient clipping
        if self.w_grad_clip > 0:
            nn.utils.clip_grad_norm_(model.parameters(), self.w_grad_clip)
        optimizer.step()
        prec1, prec5 = accuracy(logits, trn_y, topk=(1, 5))
        # Fix: only step the schedule when one was configured; get_lr()
        # already supports running without a scheduler, but the original
        # code crashed here with lr_scheduler=None on the last step.
        if lr_scheduler is not None and step == tot_steps - 1:
            lr_scheduler.step()
        return {
            'acc_top1': prec1.item(),
            'acc_top5': prec5.item(),
            'loss': loss.item(),
            'LR': lr,
            'N': len(trn_y),
        }

    def valid_epoch(self, estim, model, tot_steps, epoch=0, tot_epochs=1):
        """Validate for one epoch."""
        if not tot_steps:
            return None
        for step in range(tot_steps):
            self.valid_step(estim, model, epoch, tot_epochs, step, tot_steps)

    def valid_step(self, estim, model, epoch, tot_epochs, step, tot_steps):
        """Validate for one step; returns a metrics dict for the batch."""
        if step == 0:
            self.data_provider.reset_valid_iter()
        model.eval()
        with torch.no_grad():
            batch = self.get_next_valid_batch()
            val_X, val_y = batch
            loss, logits = estim.loss_output(batch, model=model, mode='eval')
        prec1, prec5 = accuracy(logits, val_y, topk=(1, 5))
        return {
            'acc_top1': prec1.item(),
            'acc_top5': prec5.item(),
            'loss': loss.item(),
            'N': len(val_y),
        }
| 2.0625 | 2 |
data/base_dataset.py | ArlenCHEN/SNE-RoadSeg | 213 | 12758589 | import torch.utils.data as data
class BaseDataset(data.Dataset):
    """Abstract base class for datasets.

    Subclasses override these hooks; the defaults give an empty dataset.
    """

    def __init__(self):
        super().__init__()

    def name(self):
        """Human-readable dataset name."""
        return 'BaseDataset'

    @staticmethod
    def modify_commandline_options(parser, is_train):
        """Hook to add dataset-specific CLI options; default leaves parser as-is."""
        return parser

    def initialize(self, opt):
        """Hook to load data according to ``opt``; default does nothing."""
        pass

    def __len__(self):
        return 0
| 2.671875 | 3 |
src/view/gtkview/dccManager.py | iivvoo-abandoned/most | 0 | 12758590 | #!/usr/bin/python
"""
Graphical representation of all dcc sessions
"""
from gtk import *
from GDK import *
from libglade import *
class session:
    # Placeholder for a single DCC session; currently unused.
    def __init__(self):
        pass
class dccManager:
    """Window listing all DCC sessions, built from the Glade definition."""

    def __init__(self):
        self.tree = GladeXML("../../glade/most.glade", "dccwindow")
        self.table = self.tree.get_widget('table')
        # Next free table row; row 0 is assumed to hold the column headers.
        self.next_row = 1

    def addTransfer(self, nick, file, size):
        """Append a file-transfer row for `nick` transferring `file`.

        NOTE(review): `size` is currently unused; presumably it should seed
        the progress-bar total -- confirm against the GtkProgressBar API.
        """
        typelabel = GtkLabel('File Transfer')
        flags = GtkLabel('Dunno')
        state = GtkLabel('In Progress')  # fixed typo: was 'In Progres'
        nick = GtkLabel(nick)
        statistics = GtkProgressBar()
        arg = GtkLabel(file)

        # Fix: use (and advance) the per-instance row counter so successive
        # transfers occupy their own rows instead of all overwriting row 1.
        row = self.next_row
        self.next_row += 1

        self.table.attach(typelabel, 0, 1, row, row+1)
        self.table.attach(flags, 1, 2, row, row+1)
        self.table.attach(state, 2, 3, row, row+1)
        self.table.attach(nick, 3, 4, row, row+1)
        self.table.attach(statistics, 4, 5, row, row+1)
        self.table.attach(arg, 5, 6, row, row+1)

        typelabel.show()
        flags.show()
        state.show()
        nick.show()
        statistics.show()
        arg.show()
# Manual smoke test: build the window, add one dummy transfer, run GTK loop.
if __name__ == '__main__':
    d = dccManager()
    d.addTransfer('VladDrac', '/etc/passwd', 1234)
    mainloop()
| 2.375 | 2 |
tests/test_async_httpio.py | samdbmg/httpio | 22 | 12758591 | <gh_stars>10-100
import asyncio
from unittest import TestCase
from httpio import HTTPIOFile
import mock
import random
import re
import warnings
from io import SEEK_CUR, SEEK_END
def async_func(f):
    """Wrap a synchronous callable so that calling it yields an awaitable."""
    async def _wrapper(*args, **kwargs):
        return f(*args, **kwargs)
    return _wrapper
# 8 MB of random data for the HTTP requests to return
DATA = bytes(random.randint(0, 0xFF)
             for _ in range(0, 8*1024*1024))

# A second, distinct 8 MB buffer, used to verify cache invalidation.
OTHER_DATA = bytes(random.randint(0, 0xFF)
                   for _ in range(0, 8*1024*1024))

# Small ASCII fixture for the line-oriented (readline/readlines) tests.
ASCII_LINES = ["Line0\n",
               "Line the first\n",
               "Line Returns\n",
               "Line goes forth"]
ASCII_DATA = b''.join(line.encode('ascii') for line in ASCII_LINES)

# Errors raised on use-after-close derive from OSError (IOBase behaviour).
IOBaseError = OSError
class HTTPException(Exception):
    """Raised by the mocked HTTP layer to simulate an error response."""
class AsyncContextManagerMock(mock.MagicMock):
def __init__(self, *args, **kwargs):
super(AsyncContextManagerMock, self).__init__(*args, **kwargs)
async def __aenter__(self):
return self.async_context_object
async def __aexit__(self, *args, **kwargs):
pass
def async_test(f):
    """Decorator: run coroutine test method `f` to completion on the loop.

    Warnings emitted while the coroutine runs are captured; if the test body
    itself passed, the wrapper then asserts that no warnings were produced
    (un-awaited coroutines surface as warnings).  A failure from the body is
    re-raised after the captured warnings have been re-emitted.
    """
    def __inner(*args, **kwargs):
        loop = asyncio.get_event_loop()
        loop.set_debug(True)
        E = None
        warns = []
        try:
            with warnings.catch_warnings(record=True) as warns:
                loop.run_until_complete(f(*args, **kwargs))
        except AssertionError as e:
            E = e
        except Exception as e:
            E = e
        # Re-emit captured warnings so they still appear in the test output.
        for w in warns:
            warnings.showwarning(w.message,
                                 w.category,
                                 w.filename,
                                 w.lineno)
        if E is None:
            # args[0] is the TestCase instance (self) of the wrapped method.
            args[0].assertEqual(len(warns), 0,
                                msg="asyncio subsystem generated warnings due to unawaited coroutines")
        else:
            raise E
    return __inner
class TestAsyncHTTPIOFile(TestCase):
    """Tests for HTTPIOFile's async interface against a mocked aiohttp session."""

    def setUp(self):
        # Patch aiohttp.ClientSession so no real network traffic happens.
        self.patchers = {}
        self.mocks = {}
        self.patchers['ClientSession'] = mock.patch("aiohttp.ClientSession")
        for key in self.patchers:
            self.mocks[key] = self.patchers[key].start()
        self.session = AsyncContextManagerMock()
        self.mocks['ClientSession'].return_value = self.session
        self.session.async_context_object = self.session

        # Bytes served by the fake server; tests may swap it (cache checks)
        # or set error_code to force HTTP failures.
        self.data_source = DATA
        self.error_code = None

        def _head(url, **kwargs):
            # Fake HEAD: advertises content length and byte-range support,
            # or a failing status when self.error_code is set.
            m = AsyncContextManagerMock()
            if self.error_code is None:
                m.async_context_object.status_code = 204
                m.async_context_object.headers = {'content-length':
                                                  len(self.data_source),
                                                  'Accept-Ranges':
                                                  'bytes'}
            else:
                m.async_context_object.status_code = self.error_code
                m.async_context_object.raise_for_status = mock.MagicMock(side_effect=HTTPException)
            return m
        self.session.head.side_effect = _head

        def _get(*args, **kwargs):
            # Fake GET: honours the Range header against data_source.
            (start, end) = (None, None)
            if 'headers' in kwargs:
                if 'Range' in kwargs['headers']:
                    m = re.match(r'bytes=(\d+)-(\d+)',
                                 kwargs['headers']['Range'])
                    if m:
                        start = int(m.group(1))
                        end = int(m.group(2)) + 1

            if self.error_code is None:
                return AsyncContextManagerMock(
                    async_context_object=mock.MagicMock(status_code=200,
                                                        read=mock.MagicMock(
                                                            side_effect=async_func(
                                                                lambda: self.data_source[start:end]))))
            else:
                return AsyncContextManagerMock(
                    async_context_object=mock.MagicMock(
                        status_code=self.error_code,
                        raise_for_status=mock.MagicMock(side_effect=HTTPException)))
        self.session.get.side_effect = _get

    def tearDown(self):
        for key in self.patchers:
            self.mocks[key] = self.patchers[key].stop()

    @async_test
    async def test_throws_exception_when_head_returns_error(self):
        self.error_code = 404
        with self.assertRaises(HTTPException):
            async with HTTPIOFile('http://www.example.com/test/', 1024):
                pass

    @async_test
    async def test_read_after_close_fails(self):
        async with HTTPIOFile('http://www.example.com/test/', 1024) as io:
            pass
        with self.assertRaises(IOBaseError):
            await io.read()

    @async_test
    async def test_closed(self):
        async with HTTPIOFile('http://www.example.com/test/', 1024) as io:
            self.assertTrue(hasattr(io, 'closed'))
            self.assertFalse(io.closed)
        # Leaving the context must mark the file closed.
        self.assertTrue(io.closed)

    @async_test
    async def test_flush_dumps_cache(self):
        async with HTTPIOFile('http://www.example.com/test/', 1024) as io:
            self.assertEqual(await io.read(1024), DATA[:1024])

            # Swap the server data: the cached bytes must still be returned
            # until flush() discards them.
            self.data_source = OTHER_DATA
            await io.seek(0)
            self.assertEqual(await io.read(1024), DATA[:1024])

            await io.flush()
            await io.seek(0)
            self.assertEqual(await io.read(1024), OTHER_DATA[:1024])

    @async_test
    async def test_peek(self):
        async with HTTPIOFile('http://www.example.com/test/', 1024) as io:
            await io.seek(1500)
            data = await io.peek(1024)
            self.assertEqual(data, DATA[1500:1500 + 1024])
            # peek() must not advance the file position.
            self.assertEqual(await io.tell(), 1500)

    @async_test
    async def test_read_gets_data(self):
        async with HTTPIOFile('http://www.example.com/test/', 1024) as io:
            data = await io.read(1024)
            self.assertEqual(data, DATA[0:1024])

    @async_test
    async def test_read_gets_data_without_buffering(self):
        async with HTTPIOFile('http://www.example.com/test/') as io:
            data = await io.read(1024)
            self.assertEqual(data, DATA[0:1024])

    @async_test
    async def test_throws_exception_when_get_returns_error(self):
        async with HTTPIOFile('http://www.example.com/test/', 1024) as io:
            self.error_code = 404
            with self.assertRaises(HTTPException):
                await io.read(1024)
            # A failed read must not move the position.
            self.assertEqual(await io.tell(), 0)

    @async_test
    async def test_read1(self):
        async with HTTPIOFile('http://www.example.com/test/', 1024) as io:
            # Prime the cache with the second block, then read1 from 0:
            # exactly one additional GET should be issued.
            await io.seek(1024)
            await io.read(1024)
            await io.seek(0)

            self.session.reset_mock()
            data = await io.read1()
            self.session.get.assert_called_once()
            self.assertEqual(data, DATA[:2048])
            await io.seek(1536)

    @async_test
    async def test_readable(self):
        async with HTTPIOFile('http://www.example.com/test/', 1024) as io:
            self.assertTrue(await io.readable())

    @async_test
    async def test_readinto(self):
        b = bytearray(1536)
        async with HTTPIOFile('http://www.example.com/test/', 1024) as io:
            self.assertEqual(await io.readinto(b), len(b))
            self.assertEqual(bytes(b), DATA[:1536])

    @async_test
    async def test_readinto1(self):
        b = bytearray(len(DATA))
        async with HTTPIOFile('http://www.example.com/test/', 1024) as io:
            await io.seek(1024)
            await io.read(1024)
            await io.seek(0)

            self.session.reset_mock()
            self.assertEqual(await io.readinto1(b), 2048)
            self.session.get.assert_called_once()
            self.assertEqual(b[:2048], DATA[:2048])

            await io.seek(1536)

            self.session.reset_mock()
            self.assertEqual(await io.readinto1(b), len(DATA) - 1536)
            self.session.get.assert_called_once()
            self.assertEqual(b[:len(DATA) - 1536], DATA[1536:])

    @async_test
    async def test_readline(self):
        self.data_source = ASCII_DATA
        async with HTTPIOFile('http://www.example.com/test/', 1024) as io:
            self.assertEqual((await io.readline()).decode('ascii'),
                             ASCII_LINES[0])

    @async_test
    async def test_readlines(self):
        self.data_source = ASCII_DATA
        async with HTTPIOFile('http://www.example.com/test/', 1024) as io:
            self.assertEqual([line.decode('ascii') async for line in io.readlines()],
                             [line for line in ASCII_LINES])

    @async_test
    async def test_aiter(self):
        self.data_source = ASCII_DATA
        async with HTTPIOFile('http://www.example.com/test/', 1024) as io:
            self.assertEqual([line.decode('ascii') async for line in io],
                             [line for line in ASCII_LINES])

    @async_test
    async def test_tell_starts_at_zero(self):
        async with HTTPIOFile('http://www.example.com/test/', 1024) as io:
            self.assertEqual(await io.tell(), 0)

    @async_test
    async def test_seek_and_tell_match(self):
        async with HTTPIOFile('http://www.example.com/test/', 1024) as io:
            self.assertEqual(await io.seek(1536), 1536)
            self.assertEqual(await io.tell(), 1536)

            self.assertEqual(await io.seek(10, whence=SEEK_CUR), 1546)
            self.assertEqual(await io.tell(), 1546)

            self.assertEqual(await io.seek(-20, whence=SEEK_CUR), 1526)
            self.assertEqual(await io.tell(), 1526)

            self.assertEqual(await io.seek(-20, whence=SEEK_END), len(DATA) - 20)
            self.assertEqual(await io.tell(), len(DATA) - 20)

    @async_test
    async def test_random_access(self):
        async with HTTPIOFile('http://www.example.com/test/', 1024) as io:
            await io.seek(1536)
            self.assertEqual(await io.read(1024), DATA[1536:2560])

            await io.seek(10, whence=SEEK_CUR)
            self.assertEqual(await io.read(1024), DATA[2570:3594])

            await io.seek(-20, whence=SEEK_CUR)
            self.assertEqual(await io.read(1024), DATA[3574:4598])

            await io.seek(-1044, whence=SEEK_END)
            self.assertEqual(await io.read(1024), DATA[-1044:-20])

    @async_test
    async def test_seekable(self):
        async with HTTPIOFile('http://www.example.com/test/', 1024) as io:
            self.assertTrue(await io.seekable())
| 2.375 | 2 |
cas/vault.py | tencentyun/cas_python_sdk | 4 | 12758592 | <gh_stars>1-10
# -*- coding: utf-8 -*-
import sys
from cas.utils.file_utils import *
from cas.archive import Archive
from cas.conf.common_conf import DEFAULT_NORMAL_UPLOAD_THRESHOLD, CAS_PREFIX
from job import Job
from multipart_upload import MultipartUpload
class Vault(object):
    """A CAS vault: container of archives supporting upload, retrieval jobs
    and multipart uploads.  Python 2 code."""

    # Uploads larger than this threshold go through multipart upload.
    NormalUploadThreshold = DEFAULT_NORMAL_UPLOAD_THRESHOLD

    # (response field, attribute name, default) triples mapping the service
    # response onto Vault attributes in __init__.
    ResponseDataParser = (('CreationDate', 'creation_date', None),
                          ('LastInventoryDate', 'last_inventory_date', None),
                          ('NumberOfArchives', 'number_of_archives', 0),
                          ('SizeInBytes', 'size', 0),
                          ('VaultQCS', 'qcs', None),
                          ('VaultName', 'name', None))

    def __init__(self, cas_api, vault_props):
        """Vault constructor.

        :param cas_api: CasAPI object holding the client's access details and
            the SDK's basic call interface
        :param vault_props: dict of vault properties from the service
        """
        self.api = cas_api
        # Copy service response fields onto attributes, with defaults.
        for response_name, attr_name, default in self.ResponseDataParser:
            value = vault_props.get(response_name)
            setattr(self, attr_name, value or default)

    def __repr__(self):
        return 'Vault: %s' % self.name

    @classmethod
    def create(cls, cas_api, name):
        # Create the vault, then describe it to get its full properties.
        api = cas_api
        response = api.create_vault(name)
        response = api.describe_vault(name)
        return Vault(api, response)

    @classmethod
    def get_vault_by_name(cls, cas_api, vault_name):
        # Linear search over all vaults; raises when not found.
        vaults = cls.list_all_vaults(cas_api)
        for vault in vaults:
            if vault_name == vault.name:
                return vault
        raise ValueError('Vault not exists: %s' % vault_name)

    @classmethod
    def delete_vault_by_name(cls, cas_api, vault_name):
        vaults = cls.list_all_vaults(cas_api)
        for vault in vaults:
            if vault_name == vault.name:
                return vault.delete()
        raise ValueError('Vault not exists: %s' % vault_name)

    @classmethod
    def list_all_vaults(cls, cas_api):
        api = cas_api
        result = api.list_all_vaults()
        return [Vault(api, data) for data in result['VaultList']]

    def list_all_multipart_uploads(self):
        result = self.api.list_all_multipart_uploads(self.name)
        return [MultipartUpload(self, data) for data in result['UploadsList']]

    def get_archive(self, archive_id):
        return Archive(self, archive_id)

    def retrieve_archive(self, archive_id, desc=None, byte_range=None, tier=None):
        """Start an archive-retrieval job; byte_range is a (start, end) tuple."""
        byte_range_str = None
        if byte_range is not None:
            byte_range_str = '%d-%d' % byte_range
        else:
            byte_range_str = None
        response = self.api.initiate_job(self.name, 'archive-retrieval',
                                         archive_id=archive_id, desc=desc,
                                         byte_range=byte_range_str, tier=tier)
        return self.get_job(response['x-cas-job-id'])

    def initiate_retrieve_inventory(self, desc=None):
        """Start an inventory-retrieval job for this vault."""
        response = self.api.initiate_job(self.name, 'inventory-retrieval', desc=desc)
        return self.get_job(response['x-cas-job-id'])

    def upload_archive(self, file_path, desc=None):
        """Upload a file; returns the new archive id.

        Large files go through multipart upload; small ones are uploaded in
        one request via a read-only mmap of the file.
        """
        length = os.path.getsize(file_path)
        if length > self.NormalUploadThreshold:
            uploader = self.initiate_multipart_upload(file_path, desc=desc)
            print "====== start the multipart upload: ", uploader
            archive_id = uploader.start()
            return archive_id
        elif length > 0:
            with open_file(file_path=file_path) as content:
                mmaped_file = mmap.mmap(content.fileno(), length=length, offset=0, access=mmap.ACCESS_READ)
                try:
                    cas_response = self.api.upload_archive(self.name, mmaped_file,
                                                           etag=compute_etag_from_file(file_path),
                                                           tree_tag=compute_tree_etag_from_file(file_path),
                                                           size=content_length(content), desc=desc)
                finally:
                    # Always release the mapping, even on upload failure.
                    mmaped_file.close()
            return cas_response['x-cas-archive-id']
        else:
            raise ValueError('CAS does not support zero byte archive.')

    def delete_archive(self, archive_id):
        return self.api.delete_archive(self.name, archive_id)

    def initiate_multipart_upload(self, file_path, desc=None):
        """Create a multipart upload sized for file_path; returns the uploader."""
        f = open_file(file_path=file_path)
        with f:
            size_total = content_length(f)
        part_size = MultipartUpload.calc_part_size(size_total)
        cas_response = self.api.initiate_multipart_upload(self.name, part_size, desc=desc)
        upload_id = cas_response['x-cas-multipart-upload-id']
        cas_response = self.api.describe_multipart(self.name, upload_id)
        return MultipartUpload(self, cas_response, file_path=file_path)

    def get_multipart_uploader(self, upload_id):
        cas_response = self.api.describe_multipart(self.name, upload_id)
        return MultipartUpload(self, cas_response)

    def delete(self):
        return self.api.delete_vault(self.name)

    def get_job(self, job_id):
        cas_response = self.api.describe_job(self.name, job_id)
        return Job(self, cas_response)

    def push_archive_to_cos(self, archive_id, bucket_endpoint, object_name, desc=None, byte_range=None, tier=None):
        """Start a push-to-cos job copying an archive into a COS bucket;
        returns the job id."""
        response = self.api.initiate_job(self.name, 'push-to-cos', archive_id=archive_id, byte_range=byte_range, desc=desc, tier=tier, bucket_endpoint=bucket_endpoint, object_name=object_name)
        return response['x-cas-job-id']
def parse_vault_name(path):
    """Extract the vault name from a ``cas://`` style path.

    Exits the process with an error message when the prefix is missing.
    """
    if not path.lower().startswith(CAS_PREFIX):
        sys.stderr.write('cas vault path must start with %s\n' % CAS_PREFIX)
        sys.exit(1)
    remainder = path[len(CAS_PREFIX):]
    return remainder.split('/')[0]
| 2.125 | 2 |
demo_custom.py | ankit007im/audio-to-text-converter | 2 | 12758593 | <filename>demo_custom.py
import os
import cmu_sphinx4
def from_cwd(relative_path):
    """Return `relative_path` resolved against the current working directory.

    Leading slashes/backslashes are stripped so the path always joins
    underneath the cwd.
    """
    return os.path.join(os.getcwd(), relative_path.lstrip('/\\'))
# path parameters passed into Transcriber must be full paths, not relative
lang_model = from_cwd('lib/models/language_model.arpaformat.DMP')
accoustic_model = from_cwd('lib/models/hub4opensrc.cd_continuous_8gau')
dictionary = from_cwd('lib/models/cmudict.0.7a_SPHINX_40')
filler = from_cwd('lib/models/wsj_noisedict')

# Sphinx4 decoder configuration.
# note that numbers must be expressed as strings
parameters = {
    'absoluteBeamWidth': '500',
    'absoluteWordBeamWidth': '100',
    'relativeBeamWidth': '1E-80',
    'relativeWordBeamWidth': '1E-60',
    'wordInsertionProbability': '0.2',
    'silenceInsertionProbability': '.1',
    'languageWeight': '10.5',
    'languageModelLocation': lang_model,
    'acousticModelLocation': accoustic_model,
    'dictionaryPath': dictionary,
    'fillerPath': filler
}

# WARNING: the audio file specified MUST be 16 kHz 16 bit mono files in
# MS WAV format.
audio_URL = 'file://localhost' + from_cwd('audio/npr_short.wav')

transcriber = cmu_sphinx4.Transcriber(audio_URL, parameters)

# due to some initialization in Sphinx4, it may take 30 - 60 seconds before you
# start seeing any transcriptions
for line in transcriber.transcript_stream():
    print (line)

transcriber.close()
| 2.28125 | 2 |
daseki/test/coverageDS.py | cuthbertLab/daseki | 0 | 12758594 | # -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
# Name: coverageM21.py
# Purpose: Starts Coverage w/ default arguments
#
# Authors: <NAME>
# <NAME>
#
# Copyright: Copyright © 2014-22 <NAME>
# License: LGPL or BSD, see license.txt
# ------------------------------------------------------------------------------
# Paths excluded from coverage measurement (bundled third-party code).
omit_modules = [
    'daseki/ext/*',
]

# Regexes for lines that should not count against coverage totals.
exclude_lines = [
    r'\s*import daseki\s*',
    r'\s*daseki.mainTest\(\)\s*',
]
def getCoverage():
    """Start coverage measurement with the module defaults.

    Returns the running coverage object, or None when the ``coverage``
    package is not installed.
    """
    try:
        import coverage
        cov = coverage.coverage(omit=omit_modules)
        for pattern in exclude_lines:
            cov.exclude(pattern, which='exclude')
        cov.start()
    except ImportError:
        cov = None
    return cov
def stopCoverage(cov):
    """Stop the given coverage object and persist its data; no-op for None."""
    if cov is None:
        return
    cov.stop()
    cov.save()
| 2.21875 | 2 |
tests/bgp/test_bgp_speaker.py | mykolaf/sonic-mgmt | 0 | 12758595 | <gh_stars>0
import pytest
from netaddr import *
import time
import logging
import requests
from common.fixtures.ptfhost_utils import copy_ptftests_directory # lgtm[py/unused-import]
from common.fixtures.ptfhost_utils import change_mac_addresses # lgtm[py/unused-import]
from ptf_runner import ptf_runner
from common.utilities import wait_tcp_connection
# Restrict this test module to T0 topologies.
pytestmark = [
    pytest.mark.topology('t0')
]
def generate_ips(num, prefix, exclude_ips):
    """Generate `num` IP networks inside `prefix`, skipping `exclude_ips`.

    The network and broadcast addresses of `prefix` are always excluded.
    Returns IPNetwork objects carrying the prefix length of `prefix`.
    Raises Exception when there are not enough usable addresses.

    Fix: the caller's `exclude_ips` list is no longer mutated (the previous
    implementation appended the network/broadcast addresses to it in place).
    """
    prefix = IPNetwork(prefix)
    # Work on a copy so the caller's list stays untouched.
    excluded = list(exclude_ips) + [prefix.broadcast, prefix.network]

    available_ips = list(prefix)
    if len(available_ips) - len(excluded) < num:
        raise Exception("Not enough available IPs")

    generated_ips = []
    for available_ip in available_ips:
        if available_ip not in excluded:
            generated_ips.append(IPNetwork(str(available_ip) + '/' + str(prefix.prefixlen)))
        if len(generated_ips) == num:
            break
    return generated_ips
def announce_route(ptfip, neighbor, route, nexthop, port):
    """Announce `route` via the exabgp HTTP API listening on ptfip:port."""
    command = "neighbor %s announce route %s next-hop %s" % (neighbor, route, nexthop)
    response = requests.post("http://%s:%d" % (ptfip, port), data={"command": command})
    assert response.status_code == 200
@pytest.fixture(scope="module")
def common_setup_teardown(duthost, ptfhost, localhost):
    """Module fixture: configure the PTF host as a set of BGP speakers.

    Yields (ptfip, mg_facts, interface_facts, vlan_ips, speaker_ips,
    port_num, http_ready), then tears the speakers and routes back down.
    """
    logging.info("########### Setup for bgp speaker testing ###########")

    ptfip = ptfhost.host.options['inventory_manager'].get_host(ptfhost.hostname).vars['ansible_host']
    logging.info("ptfip=%s" % ptfip)

    # Start from a clean PTF interface state.
    ptfhost.script("./scripts/remove_ip.sh")

    mg_facts = duthost.minigraph_facts(host=duthost.hostname)['ansible_facts']
    interface_facts = duthost.interface_facts()['ansible_facts']

    # ASN assigned to the speakers for this deployment id.
    res = duthost.shell("sonic-cfggen -m -d -y /etc/sonic/constants.yml -v \"constants.deployment_id_asn_map[DEVICE_METADATA['localhost']['deployment_id']]\"")
    bgp_speaker_asn = res['stdout']

    # Three addresses inside the VLAN subnet (excluding the DUT's own).
    vlan_ips = generate_ips(3, "%s/%s" % (mg_facts['minigraph_vlan_interfaces'][0]['addr'],
                                          mg_facts['minigraph_vlan_interfaces'][0]['prefixlen']),
                            [IPAddress(mg_facts['minigraph_vlan_interfaces'][0]['addr'])])
    logging.info("Generated vlan_ips: %s" % str(vlan_ips))

    # Two speaker addresses from the configured BGP peer range, plus one
    # address inside the VLAN subnet.
    speaker_ips = generate_ips(2, mg_facts['minigraph_bgp_peers_with_range'][0]['ip_range'][0], [])
    speaker_ips.append(vlan_ips[0])
    logging.info("speaker_ips: %s" % str(speaker_ips))

    # Host routes on the DUT pointing the generated addresses at the VLAN.
    for ip in vlan_ips:
        duthost.command("ip route flush %s/32" % ip.ip)
        duthost.command("ip route add %s/32 dev %s" % (ip.ip, mg_facts['minigraph_vlan_interfaces'][0]['attachto']))

    # One exabgp HTTP API port per speaker.
    port_num = [7000, 8000, 9000]

    lo_addr = mg_facts['minigraph_lo_interfaces'][0]['addr']
    lo_addr_prefixlen = int(mg_facts['minigraph_lo_interfaces'][0]['prefixlen'])
    vlan_addr = mg_facts['minigraph_vlan_interfaces'][0]['addr']

    # PTF port indices of the first three VLAN members.
    vlan_ports = []
    for i in range(0, 3):
        vlan_ports.append(mg_facts['minigraph_port_indices'][mg_facts['minigraph_vlans'][mg_facts['minigraph_vlan_interfaces'][0]['attachto']]['members'][i]])
    logging.info("vlan_ports: %s" % str(vlan_ports))

    logging.info("setup ip/routes in ptf")
    ptfhost.shell("ifconfig eth%d %s" % (vlan_ports[0], vlan_ips[0]))
    ptfhost.shell("ifconfig eth%d:0 %s" % (vlan_ports[0], speaker_ips[0]))
    ptfhost.shell("ifconfig eth%d:1 %s" % (vlan_ports[0], speaker_ips[1]))
    ptfhost.shell("ifconfig eth%d %s" % (vlan_ports[1], vlan_ips[1]))
    ptfhost.shell("ifconfig eth%d %s" % (vlan_ports[2], vlan_ips[2]))
    ptfhost.shell("ip route flush %s/%d" % (lo_addr, lo_addr_prefixlen))
    ptfhost.shell("ip route add %s/%d via %s" % (lo_addr, lo_addr_prefixlen, vlan_addr))

    logging.info("Start exabgp on ptf")
    for i in range(0, 3):
        local_ip = str(speaker_ips[i].ip)
        ptfhost.exabgp(name="bgps%d" % i,
                       state="started",
                       local_ip=local_ip,
                       router_id=local_ip,
                       peer_ip=lo_addr,
                       local_asn=bgp_speaker_asn,
                       peer_asn=mg_facts['minigraph_bgp_asn'],
                       port=str(port_num[i]))

    # check exabgp http_api port is ready
    http_ready = True
    for i in range(0, 3):
        http_ready = wait_tcp_connection(localhost, ptfip, port_num[i])
        if not http_ready:
            break

    logging.info("########### Done setup for bgp speaker testing ###########")

    yield ptfip, mg_facts, interface_facts, vlan_ips, speaker_ips, port_num, http_ready

    logging.info("########### Teardown for bgp speaker testing ###########")

    for i in range(0, 3):
        ptfhost.exabgp(name="bgps%d" % i, state="absent")

    for ip in vlan_ips:
        duthost.command("ip route flush %s/32" % ip.ip, module_ignore_errors=True)

    ptfhost.script("./scripts/remove_ip.sh")

    logging.info("########### Done teardown for bgp speaker testing ###########")
def test_bgp_speaker_bgp_sessions(common_setup_teardown, duthost, ptfhost, collect_techsupport):
    """Set up the BGP speaker on a T0 topology and verify that every BGP
    session, including the dynamic session with the PTF speaker, reaches
    the 'established' state.
    """
    _, _, _, _, speaker_ips, _, http_ready = common_setup_teardown
    assert http_ready
    logging.info("Wait some time to verify that bgp sessions are established")
    time.sleep(20)
    neighbors = duthost.bgp_facts()['ansible_facts']["bgp_neighbors"]
    # Collect any neighbor that has not converged so the failure is explicit.
    stuck = [peer for peer, facts in neighbors.items()
             if facts["state"] != "established"]
    assert not stuck, "Not all bgp sessions are established"
    assert str(speaker_ips[2].ip) in neighbors, "No bgp session with PTF"
@pytest.mark.parametrize("ipv4, ipv6, mtu", [pytest.param(True, False, 1514)])
def test_bgp_speaker_announce_routes(common_setup_teardown, testbed, duthost, ptfhost, ipv4, ipv6, mtu, collect_techsupport):
    """Setup bgp speaker on T0 topology and verify routes advertised by bgp speaker is received by T0 TOR

    Announces one prefix from two exabgp instances (two next hops) plus the
    peer range from a third, then runs the PTF FIB test to verify forwarding.
    """
    ptfip, mg_facts, interface_facts, vlan_ips, speaker_ips, port_num, http_ready = common_setup_teardown
    assert http_ready
    logging.info("announce route")
    # First configured dynamic-peer range from the minigraph.
    peer_range = mg_facts['minigraph_bgp_peers_with_range'][0]['ip_range'][0]
    lo_addr = mg_facts['minigraph_lo_interfaces'][0]['addr']
    lo_addr_prefixlen = int(mg_facts['minigraph_lo_interfaces'][0]['prefixlen'])
    prefix = '10.10.10.0/26'
    # Announce the test prefix via two different next hops, and the peer
    # range itself via the first speaker, over the exabgp HTTP APIs.
    announce_route(ptfip, lo_addr, prefix, vlan_ips[1].ip, port_num[0])
    announce_route(ptfip, lo_addr, prefix, vlan_ips[2].ip, port_num[1])
    announce_route(ptfip, lo_addr, peer_range, vlan_ips[0].ip, port_num[2])
    logging.info("Wait some time to make sure routes announced to dynamic bgp neighbors")
    time.sleep(30)
    # The ping here is workaround for known issue:
    # https://github.com/Azure/SONiC/issues/387 Pre-ARP support for static route config
    # When there is no arp entry for next hop, routes learnt from exabgp will not be set down to ASIC
    # Traffic to prefix 10.10.10.0 will be routed to vEOS VMs via default gateway.
    duthost.shell("ping %s -c 3" % vlan_ips[1].ip)
    duthost.shell("ping %s -c 3" % vlan_ips[2].ip)
    time.sleep(5)
    logging.info("Verify accepted prefixes of the dynamic neighbors are correct")
    bgp_facts = duthost.bgp_facts()['ansible_facts']
    for ip in speaker_ips:
        # Each speaker announced exactly one prefix above.
        assert bgp_facts['bgp_neighbors'][str(ip.ip)]['accepted prefixes'] == 1
    logging.info("Generate route-port map information")
    # Render the expected route->port map on the PTF host for the FIB test.
    extra_vars = {'announce_prefix': '10.10.10.0/26',
                  'minigraph_portchannels': mg_facts['minigraph_portchannels'],
                  'minigraph_vlans': mg_facts['minigraph_vlans'],
                  'minigraph_port_indices': mg_facts['minigraph_port_indices']}
    ptfhost.host.options['variable_manager'].extra_vars.update(extra_vars)
    logging.info("extra_vars: %s" % str(ptfhost.host.options['variable_manager'].extra_vars))
    ptfhost.template(src="bgp_speaker/bgp_speaker_route.j2", dest="/root/bgp_speaker_route.txt")
    logging.info("run ptf test")
    ptf_runner(ptfhost,
               "ptftests",
               "fib_test.FibTest",
               platform_dir="ptftests",
               params={"testbed_type": testbed['topo']['name'],
                       "router_mac": interface_facts['ansible_interface_facts']['Ethernet0']['macaddress'],
                       "fib_info": "/root/bgp_speaker_route.txt",
                       "ipv4": ipv4,
                       "ipv6": ipv6,
                       "testbed_mtu": mtu },
               log_file="/tmp/bgp_speaker_test.FibTest.log",
               socket_recv_size=16384)
| 2.03125 | 2 |
taurex/cia/hitrancia.py | ucl-exoplanets/TauREx3_public | 10 | 12758596 | <filename>taurex/cia/hitrancia.py
"""Module contains classes that handle loading of HITRAN cia files"""
from .cia import CIA
import numpy as np
from taurex.util.math import interp_lin_only
class EndOfHitranCIAException(Exception):
    """Raised when the end of a HITRAN CIA file is reached."""
def hashwn(start_wn, end_wn):
    """
    Return a string key uniquely identifying a wavenumber range.

    A separator is placed between the two bounds so that distinct ranges
    can never collide: the previous plain concatenation mapped e.g.
    (1, 23) and (12, 3) to the same key '123'.
    """
    return '{}_{}'.format(start_wn, end_wn)
class HitranCiaGrid(object):
    """
    Class that handles a particular HITRAN CIA wavenumber grid.

    Since temperatures for CIA sometimes have different wavenumber grids this
    class helps to simplify managing them by only dealing with one at a time.
    These will help us unify into a single grid eventually.

    Parameters
    ----------
    wn_min : float
        The minimum wavenumber for this grid
    wn_max : float
        The maximum wavenumber for this grid
    """

    def __init__(self, wn_min, wn_max):
        # Bounds were previously accepted but discarded; keep them available.
        self.wn_min = wn_min
        self.wn_max = wn_max
        self.wn = None      # wavenumber grid (np.ndarray), set by the loader
        self.Tsigma = []    # list of (temperature, cross-section) tuples

    def add_temperature(self, T, sigma):
        """
        Add a temperature and cross-section to this wavenumber grid.

        Parameters
        ----------
        T : float
            Temperature in Kelvin
        sigma : :obj:`array`
            cross-sections for this grid
        """
        self.Tsigma.append((T, sigma))

    @property
    def temperature(self):
        """
        Temperature grid (Kelvin) currently stored, in list order.

        Returns
        -------
        :obj:`list`
            Temperature values in Kelvin
        """
        return [t for t, s in self.Tsigma]

    @property
    def sigma(self):
        """
        Currently loaded cross-sections for this wavenumber grid.

        Returns
        -------
        :obj:`list`
            Cross-section arrays, one per stored temperature
        """
        return [s for t, s in self.Tsigma]

    def find_closest_temperature_index(self, temperature):
        """
        Find the nearest indices bracketing a particular temperature.

        Parameters
        ----------
        temperature : float
            Temperature in Kelvin

        Returns
        -------
        t_min : int
            index on temperature grid to the left of ``temperature``
        t_max : int
            index on temperature grid to the right of ``temperature``
        """
        t_min = np.array(self.temperature).searchsorted(temperature,
                                                        side='right')-1
        t_max = t_min+1
        return t_min, t_max

    def interp_linear_grid(self, T, t_idx_min, t_idx_max):
        """
        Linearly interpolate the cross-sections between two stored
        temperatures to temperature ``T``.

        Parameters
        ----------
        T : float
            Temperature in Kelvin
        t_idx_min : int
            index on temperature grid to the left of ``T``
        t_idx_max : int
            index on temperature grid to the right of ``T``

        Returns
        -------
        out : :obj:`array`
            Interpolated cross-section
        """
        temp_grid = np.array(self.temperature)
        Tmax = temp_grid[t_idx_max]
        Tmin = temp_grid[t_idx_min]
        fx0 = self.sigma[t_idx_min]
        fx1 = self.sigma[t_idx_max]
        return interp_lin_only(fx0, fx1, T, Tmin, Tmax)

    def sortTempSigma(self):
        """Sort the (temperature, sigma) list by ascending temperature."""
        import operator
        self.Tsigma.sort(key=operator.itemgetter(0))

    def fill_temperature(self, temperatures):
        """
        Fill this grid against a 'master' temperature grid.

        Temperatures outside our own range are filled with zero
        cross-sections; temperatures inside the range but missing from
        our grid are produced by linear interpolation. Afterwards the
        (temperature, sigma) list is sorted.

        Parameters
        ----------
        temperatures : array_like
            Master temperature grid
        """
        for t in temperatures:
            if t in self.temperature:
                continue
            # BUGFIX: the original called self.debug(...) here, but this
            # class has no logger, so every fill raised AttributeError.
            if t < min(self.temperature) or t > max(self.temperature):
                self.add_temperature(t, np.zeros_like(self.wn))
            else:
                indicies = self.find_closest_temperature_index(t)
                self.add_temperature(t, self.interp_linear_grid(t, *indicies))
        self.sortTempSigma()
class HitranCIA(CIA):
    """
    A class that directly deals with HITRAN `cia <https://hitran.org/cia/>`_
    files and turns them into generic CIA objects that nicely produce
    cross sections for us.

    This will handle CIAs that have wavenumber grids split
    across temperatures by unifying
    them into single grids.

    To use it simply do:

    >>> h2h2 = HitranCIA('path/to/H2-He.cia')

    And now you can painlessly compute cross-sections like this:

    >>> h2h2.cia(400)

    Or if you have a wavenumber grid, we can also interpolate it:

    >>> h2h2.cia(400,mywngrid)

    And all it cost was buying me a beer!

    Parameters
    ----------
    filename : str
        Path to HITRAN cia file

    """

    def __init__(self, filename):
        super().__init__(self.__class__.__name__, 'None')
        self._filename = filename
        self._molecule_name = None
        self._wavenumber_grid = None     # unified wavenumber grid (set later)
        self._temperature_grid = None    # unified temperature grid (set later)
        self._xsec_grid = None           # (n_temps, n_wn) cross-section grid
        self._wn_dict = {}               # per-range HitranCiaGrid objects
        self.load_hitran_file(filename)

    def load_hitran_file(self, filename):
        """
        Handle loading of the HITRAN file by reading and figuring
        out the wavenumber and temperature grids and matching them up.

        Parameters
        ----------
        filename : str
            Path to HITRAN cia file
        """
        temp_list = []
        with open(filename, 'r') as f:
            # Read one (header, data) section at a time until EOF.
            while True:
                try:
                    start_wn, end_wn, total_points, T, \
                        max_cia = self.read_header(f)
                except EndOfHitranCIAException:
                    break
                if T not in temp_list:
                    temp_list.append(T)
                # Sections sharing a wavenumber range share a grid object.
                wn_hash = hashwn(start_wn, end_wn)
                wn_obj = None
                if wn_hash not in self._wn_dict:
                    self._wn_dict[wn_hash] = HitranCiaGrid(start_wn, end_wn)
                wn_obj = self._wn_dict[wn_hash]
                # Clear the temporary list
                sigma_temp = []
                wn_temp = []
                for x in range(total_points):
                    line = f.readline()
                    self.debug('Line %s', line)
                    splits = line.split()
                    _wn = splits[0]
                    _sigma = splits[1]
                    wn_temp.append(float(_wn))
                    # File stores cm^5/molecule^2; scale and clamp negatives.
                    _sig = float(_sigma)*1e-10
                    if _sig < 0:
                        _sig = 0
                    sigma_temp.append(_sig)
                # Ok we're done lets add the sigma
                wn_obj.add_temperature(T, np.array(sigma_temp))
                # set the wavenumber grid
                wn_obj.wn = np.array(wn_temp)
        temp_list.sort()
        self._temperature_grid = np.array(temp_list)
        self.fill_gaps(temp_list)
        self.compute_final_grid()

    def fill_gaps(self, temperature):
        """
        Fill gaps in the temperature grid for all wavenumber grid objects
        we've created.

        Parameters
        ----------
        temperature : array_like
            Master temperature grid
        """
        for wn_obj in self._wn_dict.values():
            wn_obj.sortTempSigma()
            wn_obj.fill_temperature(temperature)

    def compute_final_grid(self):
        """
        Collect all :class:`HitranCiaGrid` objects we've created
        and unify them into a single temperature, cross-section and
        wavenumber grid for us to FINALLY interpolate and produce
        collisionally induced cross-sections.
        """
        _wngrid = []
        for w in self._wn_dict.values():
            _wngrid.append(w.wn)
        # Concatenate all sub-grids and sort once; reuse the permutation
        # to keep each temperature's cross-sections aligned with it.
        self._wavenumber_grid = np.concatenate(_wngrid)
        sorted_idx = np.argsort(self._wavenumber_grid)
        self._wavenumber_grid = self._wavenumber_grid[sorted_idx]
        _sigma_array = []
        for idx, t in enumerate(self._temperature_grid):
            _temp_sigma = []
            for w in self._wn_dict.values():
                _temp_sigma.append(w.Tsigma[idx][1])
            _sigma_array.append(np.concatenate(_temp_sigma)[sorted_idx])
        self._xsec_grid = np.array(_sigma_array)

    def find_closest_temperature_index(self, temperature):
        """
        Find the nearest indices bracketing a particular temperature.

        Parameters
        ----------
        temperature : float
            Temperature in Kelvin

        Returns
        -------
        t_min : int
            index on temperature grid to the left of ``temperature``
        t_max : int
            index on temperature grid to the right of ``temperature``
        """
        from taurex.util.util import find_closest_pair
        t_min, t_max = find_closest_pair(self.temperatureGrid, temperature)
        return t_min, t_max

    def interp_linear_grid(self, T, t_idx_min, t_idx_max):
        """
        Linearly interpolate the cross-sections between two grid
        temperatures to temperature ``T``. Values outside the grid are
        clamped to the nearest edge rather than extrapolated.

        Parameters
        ----------
        T : float
            Temperature in Kelvin
        t_idx_min : int
            index on temperature grid to the left of ``T``
        t_idx_max : int
            index on temperature grid to the right of ``T``

        Returns
        -------
        out : :obj:`array`
            Interpolated cross-section
        """
        if T > self._temperature_grid.max():
            return self._xsec_grid[-1]
        elif T < self._temperature_grid.min():
            return self._xsec_grid[0]
        Tmax = self._temperature_grid[t_idx_max]
        Tmin = self._temperature_grid[t_idx_min]
        fx0 = self._xsec_grid[t_idx_min]
        fx1 = self._xsec_grid[t_idx_max]
        return interp_lin_only(fx0, fx1, T, Tmin, Tmax)

    def read_header(self, f):
        """
        Read a single section header from the file.

        Parameters
        ----------
        f : file object

        Returns
        -------
        start_wn : float
            Start wavenumber for temperature
        end_wn : float
            End wavenumber for temperature
        total_points : int
            total number of points in temperature
        T : float
            Temperature in Kelvin
        max_cia : float
            Maximum CIA value in temperature

        Raises
        ------
        EndOfHitranCIAException
            When the end of the file has been reached.
        """
        line = f.readline()
        if line is None or line == '':
            raise EndOfHitranCIAException
        split = line.split()
        self._pair_name = split[0]
        start_wn = float(split[1])
        end_wn = float(split[2])
        total_points = int(split[3])
        T = float(split[4])
        max_cia = float(split[5])
        return start_wn, end_wn, total_points, T, max_cia

    @property
    def wavenumberGrid(self):
        """
        Unified wavenumber grid.

        Returns
        -------
        :obj:`array`
            Native wavenumber grid
        """
        return self._wavenumber_grid

    @property
    def temperatureGrid(self):
        """
        Unified temperature grid.

        Returns
        -------
        :obj:`array`
            Native temperature grid in Kelvin
        """
        return self._temperature_grid

    def compute_cia(self, temperature):
        """
        Compute the collisionally induced absorption cross-section
        using our final native temperature and cross-section grids.

        Parameters
        ----------
        temperature : float
            Temperature in Kelvin

        Returns
        -------
        out : :obj:`array`
            Temperature interpolated cross-section
        """
        indicies = self.find_closest_temperature_index(temperature)
        return self.interp_linear_grid(temperature, *indicies)
| 2.71875 | 3 |
app/demo_data/users.py | PythonVinkit/oma-sanakirja | 0 | 12758597 | <gh_stars>0
from app.models.schemas import User
# Single demo user used to seed the application with sample data.
# NOTE(review): the e-mail value is an anonymization placeholder — confirm.
opa = User(id=1, name='opa', email='<EMAIL>')

# All demo users exposed to the rest of the application.
demo_users = [opa]
| 1.554688 | 2 |
flowcorder/flowcorder/daemons/dnstap/instrumentation.py | oliviertilmans/flowcorder | 4 | 12758598 | <gh_stars>1-10
"""
This modules defines the instrumentation of the DNS stack.
We initiate a network tap such that all DNS traffic is sniffed.
A BPF shared map then contains the entries for such packets. This user-space
daemon then walks through the table and processes the intercepted messages.
"""
import bcc
import time
import os
import socket
import select
import logging
from enum import IntEnum
from hashlib import sha1
from .template import DNSTemplate
from .compute_timeout import get_timeout
from ..utils import ThreadedComponent
from ..transition import FLOW_STATES
from ...utils import embedded_ipv4, hash_seq_to_u64
# Module-level logger for this daemon.
LOG = logging.getLogger(__name__)

# Name of the eBPF C source compiled and attached by the BPF manager.
BPF_SRC_NAME = 'dnstap.c'
def int_in_range(v, default, name, minv=0, maxv=65535):
    """
    Coerce *v* to an integer within [minv, maxv], falling back to *default*.

    :param v: The value to check/convert.
    :param default: Value returned when *v* is not an integer or is out
        of range.
    :param name: Human-readable parameter name used in log messages.
    :param minv: The minimal value (included).
    :param maxv: The maximal value (included).
    :return: ``int(v)`` when valid, otherwise ``default``.
    """
    log = logging.getLogger(__name__)
    try:
        val = int(v)
    except (ValueError, TypeError) as e:
        # BUGFIX: log the raw input with %s — the original logged 'val'
        # with %d, but 'val' is unbound here when int(v) raised, which
        # crashed with UnboundLocalError.
        log.warning('%s (%s) is not an integer: %s', v, name, e)
        return default
    if val < minv or val > maxv:
        log.warning('%s %d is not in the range [%d, %d], defaulting to %d'
                    % (name, val, minv, maxv, default))
        return default
    return val
class DNSInstrumentation(ThreadedComponent):
    """Component instrumenting the DNS stack.

    Attaches an eBPF socket filter that records DNS exchanges in a shared
    map, then periodically walks that map to export flow records.
    """

    def __init__(self, Exporter=None, BPFManager=None,
                 dns_port=53, ipv6_depth=5, support_dot1q=False):
        """
        Initialize a new DNS stack instrumentation.

        :Exporter: Flow-record exporter; configured with the DNS template.
        :BPFManager: Manager used to compile/load the eBPF program.
        :dns_port: The UDP destination port to match on to contact the DNS
                   server.
        :ipv6_depth: The maximal head chain depth to accept for IPv6 packets
        :support_dot1q: Whether the BPF filter should parse 802.1Q tags.
        """
        super(DNSInstrumentation, self).__init__()
        # Sanitize user-supplied values; fall back to sane defaults.
        self.dns_port = int_in_range(dns_port, 53, 'dns_port', minv=1)
        self.ipv6_depth = int_in_range(ipv6_depth, 5, 'ipv6_depth', maxv=10)
        self.connection_table = None  # BPF shared map, set in _attach_filter
        self.ebpf = BPFManager
        self.ebpf.configure(BPF_SRC_NAME, DNS_DST_PORT=self.dns_port,
                            MAX_IPV6_DEPTH=self.ipv6_depth,
                            SUPPORT_DOT1Q='' if support_dot1q else None)
        self.exporter = Exporter
        self.exporter.configure(DNSTemplate)

    def start(self):
        """Start instrumenting DNS requests."""
        sk, poll = self._attach_filter()
        timeout = get_timeout() * 1e6  # in us
        super(DNSInstrumentation, self).start(
            timeout, poll=poll, sk=sk, timeout_ns=timeout)

    def _attach_filter(self):
        """
        Attach the BPF filter to a raw socket and prepare to poll it.

        :return: sock_fd, poll_object
        """
        func = self.ebpf.load_func('forward_dns', bcc.BPF.SOCKET_FILTER)
        # Empty device name attaches to all interfaces.
        bcc.BPF.attach_raw_socket(func, "")
        self.connection_table = self.ebpf["connection_map"]
        poll = select.epoll()
        poll.register(func.sock, select.EPOLLIN | select.POLLPRI)
        return func.sock, poll

    def do_work(self, timeout, poll=None, sk=None, timeout_ns=None):
        """Either wait for a new packet or for a query to expire.

        :return: the next poll timeout (us) computed by the table walk,
                 or None when nothing was received.
        """
        events = poll.poll(timeout=float(timeout) / 1e6)
        if events:
            # Discard packet data
            os.read(sk, 1024)
            # Walk down connection table to find new connections,
            # timeouts, ...
            return self._walk_bpf_table(timeout_ns)

    def _walk_bpf_table(self, timeout_val):
        """Walk down the BPF table to update flow stats.

        Exports finished/failed/timed-out exchanges and returns the
        smallest remaining time (us) before the next pending query expires.
        """
        now = _now_in_us()
        to_remove = []
        timeout = timeout_val
        for connection, info in self.connection_table.iteritems():
            if info.status == DNS_STATUS.STATUS_QUERY:
                tleft = timeout_val - (now - info.sent_ts)
                # Did the query time out ?
                if tleft <= 0:
                    # NOTE(review): 'rtt' here is an absolute timestamp plus
                    # a duration, unlike the duration used below — confirm
                    # intended units.
                    self._export_event(connection, info,
                                       flow_state=FLOW_STATES.UNREACHABLE,
                                       rtt=info.sent_ts + int(timeout_val))
                    to_remove.append(connection)
                else:
                    # Update the max timeout value
                    timeout = min(timeout, tleft)
            else:
                self._export_event(connection, info,
                                   flow_state=DNS_STATUS_TO_FLOW[info.status],
                                   rtt=info.reply_ts - info.sent_ts)
                to_remove.append(connection)
        # Removal and export done in 2-phase to avoid ctype pointer corruption
        for k in to_remove:
            try:
                del self.connection_table[k]
            except KeyError:
                pass
        return timeout

    def _export_event(self, connection, info, flow_state, rtt):
        """Export one DNS exchange as a flow record via the exporter."""
        # NOTE(review): bit 0x8 (IPv6 flag?) overlaps the 0x7F retry mask
        # used below — confirm the flag is not meant to be 0x80.
        saddr, daddr = _extract_addr(connection,
                                     bool(info.version_retries & 0x8))
        self.exporter.export({
            'flowStartReason': FLOW_STATES.NEW,
            'flowEndReason': flow_state,
            'flowStartMicroseconds': info.first_ts,
            'flowEndMicroseconds': info.reply_ts,
            'saddr': saddr,
            'daddr': daddr,
            # Stable per-exchange identifier derived from the 4-tuple + id.
            'selectorId': hash_seq_to_u64(connection.saddr,
                                          connection.daddr,
                                          connection.sport.to_bytes(16, 'big'),
                                          connection.id.to_bytes(16, 'big')),
            'applicationName': 'dnstap',
            'sourceTransportPort': connection.sport,
            'destinationTransportPort': self.dns_port,
            'transferredOctetTotalCount': info.query_size,
            'receivedOctetTotalCount': info.reply_size,
            'retransmittedPacketTotalCount': info.version_retries & 0x7F,
            'meanLatencyMilliseconds': int(rtt // 1e3)
        })
MAX_UINT64 = (2 ** 64) - 1
def _extract_addr(connection, is_ipv6):
    """Return the (source, destination) addresses of *connection* as
    printable strings, decoding them as native IPv6 or embedded IPv4."""
    if is_ipv6:
        family, decode = socket.AF_INET6, _mirror
    else:
        family, decode = socket.AF_INET, embedded_ipv4
    source = socket.inet_ntop(family, decode(connection.saddr))
    destination = socket.inet_ntop(family, decode(connection.daddr))
    return source, destination
def _mirror(x):
    """Identity helper: return *x* unchanged (IPv6 addresses need no
    extraction step)."""
    return x
# Monotonic clock resolution, expressed in microseconds. Kept for
# backward compatibility with any external importer.
CLK_RES = time.clock_getres(time.CLOCK_MONOTONIC) * 1e6  # in us


def _now_in_us():
    """
    Return the current monotonic time in microseconds.

    BUGFIX: the original multiplied the clock value (seconds) by a factor
    derived from the clock *resolution*, which is dimensionally wrong;
    seconds are converted to microseconds with a fixed 1e6 factor.
    NOTE(review): confirm the BPF timestamps compared against this value
    are also in microseconds.
    """
    return time.clock_gettime(time.CLOCK_MONOTONIC) * 1e6
class DNS_STATUS(IntEnum):
    """Lifecycle states that the BPF filter can report for a DNS exchange."""

    STATUS_QUERY = 1   # query seen, still awaiting an answer
    STATUS_ANSWER = 2  # a matching answer was observed
    STATUS_FAIL = 3    # the exchange failed
# Map each BPF-reported DNS status onto the generic flow-state taxonomy
# used when exporting flow records.
DNS_STATUS_TO_FLOW = {
    DNS_STATUS.STATUS_QUERY: FLOW_STATES.NEW,
    DNS_STATUS.STATUS_ANSWER: FLOW_STATES.FINISHED,
    DNS_STATUS.STATUS_FAIL: FLOW_STATES.BROKEN,
}
| 2.328125 | 2 |
lib/bno08x.py | ifurusato/ros | 9 | 12758599 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020-2021 by <NAME>. All rights reserved. This file is part
# of the Robot Operating System project, released under the MIT License. Please
# see the LICENSE file included as part of this package.
#
# author: <NAME>
# created: 2020-03-27
# modified: 2020-03-27
#
# Support for the Adafruit 9-DOF Orientation IMU Fusion Breakout - BNO085 (BNO080).
#
# See:
# https://www.adafruit.com/product/4754
# https://learn.adafruit.com/adafruit-9-dof-orientation-imu-fusion-breakout-bno085
# https://learn.adafruit.com/adafruit-9-dof-orientation-imu-fusion-breakout-bno085/report-types
# https://www.ceva-dsp.com/wp-content/uploads/2019/10/BNO080_085-Datasheet.pdf
# https://cdn-learn.adafruit.com/downloads/pdf/adafruit-9-dof-orientation-imu-fusion-breakout-bno085.pdf
# https://circuitpython.readthedocs.io/projects/bno08x/en/latest/
#
import math, sys, time, traceback
import board, busio
from enum import Enum
from colorama import init, Fore, Style
init()
# if not busio.I2C, then use this:
#try:
# from adafruit_extended_bus import ExtendedI2C as I2C
#except ImportError as ie:
# sys.exit("This script requires the adafruit_extended_bus module.\n"\
# + "Install with: pip3 install --user adafruit_extended_bus")
try:
import adafruit_bno08x
from adafruit_bno08x.i2c import BNO08X_I2C
# from adafruit_bno08x.i2c import BNO08X
from adafruit_bno08x import (
PacketError,
BNO_REPORT_ACCELEROMETER,
BNO_REPORT_GYROSCOPE,
BNO_REPORT_MAGNETOMETER,
REPORT_ACCURACY_STATUS,
BNO_REPORT_ACTIVITY_CLASSIFIER,
BNO_REPORT_STABILITY_CLASSIFIER,
# BNO_REPORT_ROTATION_VECTOR,
# BNO_REPORT_GEOMAGNETIC_ROTATION_VECTOR,
# BNO_REPORT_GAME_ROTATION_VECTOR,
)
except ImportError as ie:
sys.exit("This script requires the adafruit_bno08x module.\n"\
+ "Install with: pip3 install --user adafruit-circuitpython-bno08x")
try:
from pyquaternion import Quaternion
except ImportError:
sys.exit("This script requires the pyquaternion module.\nInstall with: pip3 install --user pyquaternion")
from lib.logger import Level, Logger
from lib.config_loader import ConfigLoader
from lib.message import Message
from lib.queue import MessageQueue
from lib.message_factory import MessageFactory
from lib.convert import Convert
# ..............................................................................
class BNO08x:
    '''
    Reads from a BNO08x 9DoF sensor over I2C.

    Provides heading/pitch/roll with configurable trims, derived from the
    magnetometer and the rotation-vector quaternions.
    '''
    def __init__(self, config, queue, level):
        '''
        :param config: application configuration; the 'ros'/'bno085'
            section supplies loop delay, I2C device and default trims.
        :param queue: message queue (stored, not used in the visible code).
        :param level: log level.
        :raises ValueError: if no configuration is provided.
        '''
        self._log = Logger("bno085", level)
        if config is None:
            raise ValueError("no configuration provided.")
        self._queue = queue
        self._config = config
        # config
        _config = self._config['ros'].get('bno085')
        self._loop_delay_sec = _config.get('loop_delay_sec')
        _i2c_device = _config.get('i2c_device')
        # default trim, can be overridden by methods
        self.set_heading_trim(_config.get('heading_trim'))
        self.set_pitch_trim(_config.get('pitch_trim'))
        self.set_roll_trim(_config.get('roll_trim'))
        i2c = busio.I2C(board.SCL, board.SDA, frequency=800000)
        self._bno = BNO08X_I2C(i2c, debug=False)
#       self._bno = BNO08X()
        self._error_range = 5.0 # permitted error between Euler and Quaternion (in degrees) to allow setting value, was 3.0
        self._min_calib_status = 1
        self._settle_sec = 0.0
        self._calibrated = False
        self._verbose = False # True for stack traces
        self._configure()
        self._log.info('ready.')

    # ..........................................................................
    def set_heading_trim(self, trim):
        '''
        Set the heading trim in degrees.
        '''
        self._heading_trim = trim
        self._log.info('heading trim:\t{:>6.2f}°'.format(self._heading_trim))

    # ..........................................................................
    @property
    def heading_trim(self):
        '''
        Return the heading trim in degrees.
        '''
        return self._heading_trim

    # ..........................................................................
    def set_pitch_trim(self, trim):
        '''
        Set the pitch trim in degrees.
        '''
        self._pitch_trim = trim
        self._log.info('pitch trim:  \t{:>6.2f}°'.format(self._pitch_trim))

    # ..........................................................................
    @property
    def pitch_trim(self):
        '''
        Return the pitch trim in degrees.
        '''
        return self._pitch_trim

    # ..........................................................................
    def set_roll_trim(self, trim):
        '''
        Set the roll trim in degrees.
        '''
        self._roll_trim = trim
        self._log.info('roll trim:   \t{:>6.2f}°'.format(self._roll_trim))

    # ..........................................................................
    @property
    def roll_trim(self):
        '''
        Return the roll trim in degrees.
        '''
        return self._roll_trim

    # ..........................................................................
    def _configure(self):
        '''
        Enable the sensor features and run the calibration loop.

        NOTE(review): the 'finally' clause inside the while loop always
        sets self._calibrated = True, so the loop effectively exits after
        its first iteration regardless of the reported status — confirm
        this is intentional.
        '''
        self._log.info('settle time... ({:1.0f}s)'.format(self._settle_sec))
        time.sleep(self._settle_sec) # settle before calibration
        self._log.info(Fore.YELLOW + 'begin configuration/calibration...')
        self._bno.begin_calibration()
        time.sleep(0.1)
        try:
            self._log.info(Fore.YELLOW + 'setting features...')
            _features = [
                BNO_REPORT_ACCELEROMETER,
                BNO_REPORT_GYROSCOPE,
                BNO_REPORT_MAGNETOMETER,
                BNO_REPORT_ACTIVITY_CLASSIFIER,
                BNO_REPORT_STABILITY_CLASSIFIER,
#               BNO_REPORT_ROTATION_VECTOR,
#               BNO_REPORT_GEOMAGNETIC_ROTATION_VECTOR,
#               BNO_REPORT_GAME_ROTATION_VECTOR,
#               BNO_REPORT_LINEAR_ACCELERATION,
#               BNO_REPORT_STEP_COUNTER,
#               BNO_REPORT_SHAKE_DETECTOR,
#               BNO_REPORT_RAW_ACCELEROMETER,
#               BNO_REPORT_RAW_GYROSCOPE,
#               BNO_REPORT_RAW_MAGNETOMETER,
            ]
            for feature in _features:
                self._log.debug('feature {}'.format(feature))
                self._bno.enable_feature(feature)
                time.sleep(0.01)
            self._log.info(Fore.YELLOW + 'features set. ------------------- ')
            # now calibrate...
            time.sleep(2.0)
            self._log.info(Fore.YELLOW + 'calibrating...')
            start_time = time.monotonic()
            _fail_count = 0
            _confidence = 0
            while not self._calibrated \
                    and _confidence < 50 \
                    and _fail_count < 30:
                _fail_count += 1
                try:
                    _confidence = self._activity_report()
                    self._stability_report()
                    _calibration_status = self._calibration_report()
                    self._log.info("Magnetometer:")
                    mag_x, mag_y, mag_z = self._bno.magnetic # pylint:disable=no-member
                    self._log.info("X: {:0.6f}  Y: {:0.6f} Z: {:0.6f} uT".format(mag_x, mag_y, mag_z))
#                   self._log.info("Game Rotation Vector Quaternion:")
#                   ( game_quat_i, game_quat_j, game_quat_k, game_quat_real,) = self._bno.game_quaternion # pylint:disable=no-member
#                   self._log.info("I: {:0.6f}  J: {:0.6f} K: {:0.6f}  Real: {:0.6f}".format(game_quat_i, game_quat_j, game_quat_k, game_quat_real))
                    if _calibration_status >= self._min_calib_status: # we'll settle for Low Accuracy to start
                        self._calibrated = True
                        self._log.info(Fore.GREEN + "Calibrated.")
                        if _calibration_status > 1: # but save it if better than Low.
                            self._log.info(Fore.GREEN + Style.BRIGHT + "better than low quality calibration, saving status...")
                            self._bno.save_calibration_data()
                        break
                    time.sleep(0.2)
                except Exception as e:
                    self._log.error("calibration error: {}".format(e))
                finally:
                    self._min_calib_status = 1
                    self._calibrated = True
                    self._log.info(Fore.BLACK + "fail count: {:d}".format(_fail_count))
        except Exception as e:
            self._log.error('error setting features: {}'.format(e))
        self._log.info("calibration complete.")

    # ..........................................................................
    @property
    def calibrated(self):
        # True once the calibration loop has completed.
        return self._calibrated

    # ..........................................................................
    def _stability_report(self):
        '''
        Logs a report indicating the current stability classification.
        '''
        _stability_classification = self._bno.stability_classification
        self._log.info(Fore.BLUE + "Stability:  \t{}".format(_stability_classification))
        pass

    # ..........................................................................
    def _activity_report(self):
        '''
        Logs a report indicating the current activity, returning an int
        indicating a confidence level from 0-100%.
        '''
        _activity_classification = self._bno.activity_classification
        _most_likely = _activity_classification["most_likely"]
        _confidence = _activity_classification[_most_likely]
        self._log.info(Fore.BLUE + "Activity:   \t{}".format(_most_likely))
        self._log.info(Fore.BLUE + "Confidence: \t{}%".format(_confidence))
        return _confidence

    # ..........................................................................
    def _calibration_report(self):
        '''
        Logs a report indicating the current calibration status,
        returning an int value from 0-3, as follows: "Accuracy Unreliable",
        "Low Accuracy", "Medium Accuracy", or "High Accuracy".
        '''
        _calibration_status = self._bno.calibration_status
        self._log.info(Fore.BLUE + "Calibration:\t{} ({:d})".format(REPORT_ACCURACY_STATUS[_calibration_status], _calibration_status))
        return _calibration_status

    # ..........................................................................
    def _process_quaternion(self, color, title, quaternion):
        '''
        Convert an (i, j, k, real) quaternion into heading/yaw/pitch/roll
        and log the result with the given color and label.
        '''
#       ( quat_i, quat_j, quat_k, quat_real ) = self._bno.quaternion
        ( quat_i, quat_j, quat_k, quat_real ) = quaternion
        _q = Quaternion(real=quat_real, imaginary=[quat_i, quat_j, quat_k])
        _q_heading        = _q.degrees
        _q_yaw_pitch_roll = _q.yaw_pitch_roll
        _q_yaw            = _q_yaw_pitch_roll[0]
        _q_pitch          = _q_yaw_pitch_roll[1]
        _q_roll           = _q_yaw_pitch_roll[2]
        self._log.info(color + 'heading: {:>6.2f}°\t({})\t'.format(_q_heading, title) + Fore.BLACK + 'p={:>5.4f}\t r={:>5.4f}\t y={:>5.4f}'.format(_q_pitch, _q_roll, _q_yaw))

    # ..........................................................................
    def magneto(self):
        '''
        Returns the current x, y, z reading of the magnetometer,
        or None if the read failed.
        '''
        try:
            self._calibration_report()
#           _calibration_status = self._calibration_report()
            return self._bno.magnetic
        except Exception as e:
            # NOTE(review): the exception is silently swallowed here.
            return None

    # ..........................................................................
    def read(self):
        '''
        The function that reads sensor values in a loop. This checks to see
        if the 'sys' calibration is at least 3 (True), and if the Euler and
        Quaternion values are within an error range of each other, this sets
        the class variable for heading, pitch and roll. If they aren't within
        range for more than n polls, the values revert to None.

        Returns a (magnetometer-heading, quat-i, geomagnetic-quat-i) tuple,
        or None if an exception occurred.

        NOTE(review): the calibration gate is currently bypassed with
        'if True:', the 'else' branch and the trailing debug line are
        unreachable, and the rotation-vector quaternion is read twice.
        '''
#       self._log.info('starting sensor read...')
        try:
            # reports ......................................
#           _confidence = self._activity_report()
#           self._stability_report()
#           _calibration_status = self._calibration_report()
#           if _calibration_status >= self._min_calib_status:
            if True:
                # Accelerometer ..................................................................
#               gyro_x, gyro_y, gyro_z = self._bno.gyro # pylint:disable=no-member
#               self._log.info(Fore.RED + 'Gyroscope:\tX: {:0.6f}  Y: {:0.6f} Z: {:0.6f} rads/s'.format(gyro_x, gyro_y, gyro_z))

                # Magnetometer ...................................................................
#               self._log.info(Fore.BLACK + 'self._bno.magnetic...')
                mag_x, mag_y, mag_z = self._bno.magnetic # pylint:disable=no-member
                _mag_degrees = Convert.convert_to_degrees(mag_x, mag_y, mag_z)
                if self._heading_trim != 0.0:
                    _mag_degrees = Convert.offset_in_degrees(_mag_degrees, self._heading_trim)
                self._log.info(Fore.YELLOW + 'heading: {:>6.2f}°\t(magneto)'.format(_mag_degrees))

                # Rotation Vector Quaternion .....................................................
#               self._log.info(Fore.BLACK + 'self._bno.quaternion...')
                ( quat_i, quat_j, quat_k, quat_real ) = self._bno.quaternion # pylint:disable=no-member
                _quaternion = self._bno.quaternion
                self._process_quaternion(Fore.GREEN, 'rot-quat', self._bno.quaternion)

                # Geomagnetic Rotation Vector Quatnernion ........................................
#               self._log.info(Fore.BLACK + 'self._bno.geometric_quaternion...')
                _geomagnetic_quaternion = self._bno.geomagnetic_quaternion
                ( geo_quat_i, geo_quat_j, geo_quat_k, geo_quat_real, ) = _geomagnetic_quaternion # pylint:disable=no-member
                self._process_quaternion(Fore.GREEN + Style.BRIGHT, 'geo-quat', _geomagnetic_quaternion)

                return _mag_degrees, _quaternion[0], _geomagnetic_quaternion[0]
            else:
                self._log.warning('uncalibrated...')
                return 0.0, 0.0, 0.0
            self._log.debug('read ended.')
        except KeyError as ke:
            if self._verbose:
                self._log.error('bno08x key error: {} {}'.format(ke, traceback.format_exc()))
            else:
                self._log.error('bno08x key error: {}'.format(ke))
        except RuntimeError as re:
            if self._verbose:
                self._log.error('bno08x runtime error: {} {}'.format(re, traceback.format_exc()))
            else:
                self._log.error('bno08x runtime error: {}'.format(re))
        except IndexError as ie:
            if self._verbose:
                self._log.error('bno08x index error: {} {}'.format(ie, traceback.format_exc()))
            else:
                self._log.error('bno08x index error: {}'.format(ie))
        except OSError as oe:
            if self._verbose:
                self._log.error('bno08x os error: {} {}'.format(oe, traceback.format_exc()))
            else:
                self._log.error('bno08x OS error: {}'.format(oe))
        except PacketError as pe:
            if self._verbose:
                self._log.error('bno08x packet error: {} {}'.format(pe, traceback.format_exc()))
            else:
                self._log.error('bno08x packet error: {}'.format(pe))
        except Exception as e:
            if self._verbose:
                self._log.error('bno08x error: {} {}'.format(e, traceback.format_exc()))
            else:
                self._log.error('bno08x error: {}'.format(e))
## ..............................................................................
class Calibration(Enum):
    # Stability classifications as (num, name, description, calibrated).
    # NOTE(review): no custom __new__ is defined despite the comment below —
    # with tuple values the whole tuple is passed to __init__; confirm.
    # NOTE(review): the 'name' property below shadows Enum's built-in
    # 'name' attribute — verify this is intentional.
    UNKNOWN    = ( 0, "Unknown", "The sensor is unable to classify the current stability.", False)
    ON_TABLE   = ( 1, "On Table", "The sensor is at rest on a stable surface with very little vibration.", False)
    STATIONARY = ( 2, "Stationary", "The sensor’s motion is below the stable threshold but the stable duration requirement has not been met. This output is only available when gyro calibration is enabled.", False)
    STABLE     = ( 3, "Stable", "The sensor’s motion has met the stable threshold and duration requirements.", True)
    IN_MOTION  = ( 4, "In motion", "The sensor is moving.", False)

    # ignore the first param since it's already set by __new__
    def __init__(self, num, name, description, calibrated):
        self._num = num
        self._name = name
        self._description = description
        self._calibrated = calibrated

    @property
    def name(self):
        # Human-readable label for the classification.
        return self._name

    @property
    def description(self):
        # Longer explanation of what the classification means.
        return self._description

    @property
    def calibrated(self):
        # True only for classifications considered calibrated (STABLE).
        return self._calibrated
#EOF
| 1.898438 | 2 |
src/main.py | martasls/pythonic-learning-machine | 0 | 12758600 | <reponame>martasls/pythonic-learning-machine
import argparse
import os
from threading import Thread
from sys import argv
from time import sleep
from benchmark.benchmarker import Benchmarker, continue_benchmark, pickup_benchmark, SLM_MODELS, MLP_MODELS, ENSEMBLES
from data.io_plm import get_benchmark_folder, read_pickle, get_resampled_folder, get_formatted_folder, read_csv_, \
get_standardized_folder, remove_extension
from data.extract import is_classification
from benchmark.formatter import format_benchmark, merge_best_results
from benchmark.results_extractor import extract_results
def start_b(data_set_name, file_name=None, models_to_run=None, models_to_run_2=MLP_MODELS, ensembles=ENSEMBLES):  # change to None to choose from console
    """Start a fresh benchmark for `data_set_name`.

    Args:
        data_set_name: name of the data set to benchmark (e.g. 'r_ppb').
        file_name: unused; kept for interface compatibility with continue_b.
        models_to_run: SLM model set — either the list itself or the string
            'SLM_MODELS'/'MLP_MODELS'; None skips the SLM benchmark.
        models_to_run_2: MLP model set, same conventions as `models_to_run`.
        ensembles: ensemble configurations passed through to Benchmarker.
    """
    models_to_run = _resolve_models(models_to_run)
    models_to_run_2 = _resolve_models(models_to_run_2)

    # SLM MODELS
    if models_to_run is not None:
        benchmarker = Benchmarker(data_set_name, models=models_to_run, ensembles=ensembles, benchmark_id='slm')
        benchmarker.run_nested_cv()

    # MLP MODELS
    if models_to_run_2 is not None:
        benchmarker = Benchmarker(data_set_name, models=models_to_run_2, ensembles=ensembles, benchmark_id='mlp')
        benchmarker.run_nested_cv()


def _resolve_models(models):
    """Map the string aliases 'SLM_MODELS'/'MLP_MODELS' to the corresponding
    model lists; any other value (including None or an explicit list) is
    returned unchanged."""
    if models == 'SLM_MODELS':
        return SLM_MODELS
    if models == 'MLP_MODELS':
        return MLP_MODELS
    return models
def continue_b(data_set_name, file_name):
    """Resume a previously saved benchmark run.

    Thin wrapper around `continue_benchmark` so it can be used as a
    `Thread` target with keyword arguments.
    """
    continue_benchmark(data_set_name, file_name)
def pickup_b(data_set_name, file_name):
    """Resume a benchmark after its parameter-tuning phase has completed.

    Thin wrapper around `pickup_benchmark`.
    """
    pickup_benchmark(data_set_name, file_name)
if __name__ == '__main__':
    # NOTE(review): only the `start_b("r_ppb", ...)` call below is active.
    # Everything else in this block is a preserved log of past runs and a
    # disabled CLI entry point.
    """
    Para o r_bio, as runs com problemas são: 2, 16, 24 e 27.
    Para o r_ppb, as runs com problemas são: 16.
    Para o r_student, as runs com problemas são: 0 e 16 (para o Boosting Mean + FLR, pelo menos) e 8 e 24.
    #####
    marta_c_credit_slm__2019_01_16__20_11_08
    marta_c_credit_mlp__2019_01_16__20_11_08
    marta_c_diabetes_slm__2019_01_15__22_30_24
    marta_c_diabetes_mlp__2019_01_15__22_30_24
    marta_r_bio_slm__2019_01_15__22_33_01
    marta_r_bio_mlp__2019_01_15__22_33_01
    marta_r_ppb_slm__2019_01_16__20_11_04
    marta_r_ppb_mlp__2019_01_16__20_11_04
    marta_r_student_slm__2019_01_15__22_30_14
    marta_r_student_mlp__2019_01_15__22_30_14
    """
    # pickup_b("r_bio", "marta_r_bio_slm__2019_01_15__22_33_01.pkl")
    # pickup_b("r_bio", "marta_r_bio_mlp__2019_01_15__22_33_01.pkl")
    # pickup_b("r_ppb", "marta_r_ppb_mlp__2019_01_16__20_11_04.pkl")
    # pickup_b("r_student", "marta_r_student_mlp__2019_01_15__22_30_14.pkl")
    #start_b("r_student", models_to_run='SLM_MODELS', models_to_run_2=None, ensembles=None)
    #start_b("r_ppb", models_to_run='SLM_MODELS', models_to_run_2=None, ensembles=None)
    #start_b("r_bio", models_to_run='SLM_MODELS', models_to_run_2=None, ensembles=None)
    #start_b("c_diabetes", models_to_run='SLM_MODELS', models_to_run_2=None, ensembles=None)
    #start_b("c_credit", models_to_run='SLM_MODELS', models_to_run_2=None, ensembles=None)
    #start_b("r_student", models_to_run_2='MLP_MODELS', models_to_run=None)
    #start_b("r_ppb", models_to_run_2='MLP_MODELS', models_to_run=None)
    #start_b("r_bio", models_to_run_2='MLP_MODELS', models_to_run=None)
    #start_b("c_diabetes", models_to_run_2='MLP_MODELS', models_to_run=None)
    #start_b("c_credit", models_to_run_2='MLP_MODELS', models_to_run=None)
    # start_b("r_bio", models_to_run_2='MLP_MODELS', models_to_run=None)
    #pickup_b("r_bio", "marta_r_bio_slm__2019_01_15__22_33_01.pkl")
    #pickup_b("r_ppb", "marta_r_ppb_slm__2019_01_16__20_11_04.pkl")
    #pickup_b("r_student", "marta_r_student_slm__2019_01_15__22_30_14.pkl")
    #pickup_b("c_credit", "marta_c_credit_slm__2019_01_16__20_11_08.pkl")
    #pickup_b("c_diabetes", "marta_c_diabetes_slm__2019_01_15__22_30_24.pkl")
    #pickup_b("r_bio", "marta_r_bio_mlp__2019_01_15__22_33_01.pkl")
    #pickup_b("r_ppb", "marta_r_ppb_mlp__2019_01_16__20_11_04.pkl")
    #pickup_b("r_student", "marta_r_student_mlp__2019_01_15__22_30_14.pkl")
    #pickup_b("c_credit", "marta_c_credit_mlp__2019_01_16__20_11_08.pkl")
    #pickup_b("c_diabetes", "marta_c_diabetes_mlp__2019_01_15__22_30_24.pkl")
    """
    data_set_name = argv[1]
    if len(argv) > 2:
    models_to_run = argv[2]
    else:
    models_to_run='SLM_MODELS'
    print('Dataset:', data_set_name, ', models:', models_to_run)
    start_b(data_set_name, models_to_run=models_to_run, models_to_run_2=None)
    """
    # for data_set in os.listdir(get_standardized_folder()):
    # start_b(remove_extension(data_set))
    # start_b("c_credit", models_to_run='SLM_MODELS', models_to_run_2=None)
    # start_b("c_diabetes", models_to_run='SLM_MODELS', models_to_run_2=None)
    # start_b("r_bio", models_to_run='SLM_MODELS', models_to_run_2=None)
    start_b("r_ppb", models_to_run='SLM_MODELS', models_to_run_2=None)
    # start_b("r_student", models_to_run='SLM_MODELS', models_to_run_2=None)
    # start_b("r_ld50")
    # -IG- probably the following code can be removed <start>
    # parser = argparse.ArgumentParser(description='Runs benchmark for data set.')
    # parser.add_argument('-d', metavar='data_set_name', type=str, dest='data_set_name',
    # help='a name of a data set')
    # parser.add_argument('-f', metavar='file_name', type=str, dest='file_name',
    # help='a file name of an existing benchmark')
    # parser.add_argument('-m1', metavar='models_to_run', dest='models_to_run',
    # help='MLP_MODELS or SLM_MODELS')
    # parser.add_argument('-m2', metavar='models_to_run_2', dest='models_to_run_2',
    # help='MLP_MODELS or SLM_MODELS but different than the previous one')
    # args = parser.parse_args()
    # if args.file_name:
    # thread = Thread(target=continue_b, kwargs=vars(args))
    # else:
    # thread = Thread(target=start_b, kwargs=vars(args))
    # try:
    # thread.daemon = True
    # thread.start()
    # while True: sleep(100)
    # except (KeyboardInterrupt, SystemExit):
    # print('\n! Received keyboard interrupt, quitting threads.\n')
    # -IG- probably the following code can be removed <end>
| 2.484375 | 2 |
setup.py | qkudev/edtw | 0 | 12758601 |
# Packaging configuration for the `edtw` distribution (src/ layout).
from setuptools import find_packages, setup

setup(
    # Identity and metadata.
    name='edtw',
    version='0.0.1',
    license='MIT',
    author="<NAME>",
    author_email='<EMAIL>',
    url='https://github.com/qkudev/edtw',
    keywords='python, dwt, entropy, mutual information',
    # Packages live under src/.
    packages=find_packages('src'),
    package_dir={'': 'src'},
    # Runtime dependencies.
    install_requires=[
        'scikit-learn',
        'numpy',
    ],
)
| 1.15625 | 1 |
lib/models/common/resnet_encoder.py | aligholami/kepler | 0 | 12758602 | import torch.nn as nn
import pytorch_lightning as pl
import torchvision.models as models
class ResNet101Encoder(pl.LightningModule):
    """ResNet-101 backbone used as an image (optionally RGB-D) feature encoder.

    The torchvision classification head (fc) is kept inside `self.core` but is
    never applied; features come from the convolutional backbone followed by
    global average pooling.
    """

    def __init__(
        self,
        pretrained,
        show_progress,
        depth_adapted
    ):
        """
        Args:
            pretrained: load ImageNet-pretrained weights.
            show_progress: show torchvision's weight-download progress bar.
            depth_adapted: widen the first conv to accept a 4th (depth) channel.
        """
        super().__init__()
        self.depth_adapted = depth_adapted
        self.image_modules = list(models.resnet101(pretrained=pretrained, progress=show_progress).children())
        if self.depth_adapted:
            self.depth_adapt_first_layer()
        self.core = nn.Sequential(*self.image_modules)
        # Split the pipeline: everything up to (excluding) avgpool+fc produces
        # spatial feature maps; `last_layer` is the global average pool alone.
        self.before_last_layer = self.core[:-2]
        self.last_layer = self.core[-2:-1]

    def forward(self, image):
        # BUG FIX: previously this ran `self.core(image)`, which also applies
        # the ResNet fc head inside nn.Sequential. torchvision's ResNet
        # flattens between avgpool and fc in its own forward(), so the
        # Sequential version crashed on the un-flattened pooled tensor.
        # Encode via backbone + avgpool instead, consistent with
        # get_spatial_features / get_flattened_features.
        features = self.last_layer(self.before_last_layer(image))
        return features.squeeze()

    def get_spatial_features(self, image):
        """Spatial feature maps from the conv backbone (avgpool/fc excluded)."""
        return self.before_last_layer(image)

    def get_flattened_features(self, spatial_features):
        """Globally average-pooled features, squeezed of singleton dims."""
        return self.last_layer(spatial_features).squeeze()

    def depth_adapt_first_layer(self):
        """Replace the first conv with a 4-channel version: RGB filters are
        copied and the depth channel is initialized from the red channel."""
        l1_weights = self.image_modules[0].weight.data.clone()
        depth_adapted_conv = nn.Conv2d(4, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
        depth_adapted_conv.weight.data[:, :3] = l1_weights
        depth_adapted_conv.weight.data[:, 3] = depth_adapted_conv.weight.data[:, 0]
        self.image_modules[0] = depth_adapted_conv
        print("[INFO.RESNET_ENCODER.DEPTH_ADAPTATION_COMPLETED]")
official/cv/ADNet/src/utils/get_action_history_onehot.py | leelige/mindspore | 77 | 12758603 | <reponame>leelige/mindspore
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# returns action history as one-hot form
# https://github.com/hellbell/ADNet/blob/3a7955587b5d395401ebc94a5ab067759340680d/utils/get_action_history_onehot.m
import mindspore.numpy as nps
def get_action_history_onehot(action_history, opts):
    """Encode the action history as a concatenation of one-hot vectors.

    Each history slot occupies `opts['num_actions']` positions; out-of-range
    action ids (e.g. -1 placeholders) leave their slot all-zero.
    """
    num_actions = opts['num_actions']
    onehot = nps.zeros((num_actions * len(action_history),))
    for slot, action in enumerate(action_history):
        if 0 <= action < num_actions:
            onehot[slot * num_actions + action] = 1.
    return onehot
| 2.140625 | 2 |
trainer.py | sduxzh/Single-Player-MCTS | 24 | 12758604 | <filename>trainer.py
import torch
import torch.nn as nn
class Trainer:
    """
    Trainer for an MCTS policy network. Trains the network to minimize
    the difference between the value estimate and the actual returns and
    the difference between the policy estimate and the refined policy
    estimates derived via the tree search.
    """

    def __init__(self, Policy, learning_rate=0.1, policy_loss_weight=5.0):
        """
        Args:
            Policy: zero-argument callable (e.g. an nn.Module subclass) whose
                instances map an observation batch to (logits, policy, value).
            learning_rate: SGD learning rate.
            policy_loss_weight: multiplier on the policy loss term; default
                5.0 preserves the original loss weighting.
        """
        self.step_model = Policy()
        self.policy_loss_weight = policy_loss_weight
        # Held as attributes (instead of closure locals, as before) so they
        # are inspectable and reused across calls.
        self._value_criterion = nn.MSELoss()
        self._optimizer = torch.optim.SGD(self.step_model.parameters(),
                                          lr=learning_rate)
        self._logsoftmax = nn.LogSoftmax(dim=1)

    def train(self, obs, search_pis, returns):
        """Run one SGD step on a batch.

        Args:
            obs: numpy array of observations, shape (batch, ...).
            search_pis: numpy array of tree-search policy targets,
                shape (batch, num_actions).
            returns: numpy array of observed returns, shape (batch,).

        Returns:
            (value_loss, policy_loss) as 0-d numpy arrays, evaluated before
            the parameter update.
        """
        obs = torch.from_numpy(obs)
        search_pis = torch.from_numpy(search_pis)
        returns = torch.from_numpy(returns)

        self._optimizer.zero_grad()
        logits, _policy, value = self.step_model(obs)

        # Cross-entropy between the search-refined policy target and the
        # network policy (log-softmax of the logits), weighted against the
        # mean-squared value error.
        policy_loss = self.policy_loss_weight * torch.mean(
            torch.sum(-search_pis * self._logsoftmax(logits), dim=1))
        value_loss = self._value_criterion(value, returns)

        (policy_loss + value_loss).backward()
        self._optimizer.step()
        return value_loss.data.numpy(), policy_loss.data.numpy()
| 3.140625 | 3 |
Evaluation/mnist_eva.py | wangaxe/kmeans_defense | 0 | 12758605 | import argparse
import logging
import os
import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import datasets, transforms
from torch.utils.data import DataLoader, Dataset, TensorDataset
import torchattacks
from advertorch.defenses import MedianSmoothing2D, BitSqueezing, JPEGFilter
from mnist_net import Le_Net, classifier_A, classifier_B, classifier_C
from cluster import Kmeans_cluster
def get_args(argv=None):
    """Parse command-line options for the MNIST robustness evaluation.

    Args:
        argv: optional list of argument strings; None (the default, and the
            behavior of the original zero-argument signature) parses
            sys.argv[1:].

    Returns:
        argparse.Namespace with the evaluation settings.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--batch_size', type=int, default=128, help='batch_size')
    parser.add_argument('--fname', type=str, default='test')
    parser.add_argument('--model', default='LeNet', type=str,
                        choices=['LeNet', 'A', 'B', 'C'], help='models type')
    parser.add_argument('--attack-type', type=str, default='fgsm',
                        choices=['fgsm', 'pgd', 'rfgsm', 'deepfool'])
    parser.add_argument('--iter', type=int, default=50,
                        help='The number of iterations for iterative attacks')
    parser.add_argument('--eps', type=float, default=0.3)
    parser.add_argument('--alpha', type=float, default=0.01)
    # NOTE: the default 'none' is intentionally outside `choices`: it can only
    # arise when the flag is omitted, and `main` treats it as "no defense".
    parser.add_argument('--defense', type=str, default='none',
                        choices=['km', 'bs', 'ms', 'jf'])
    parser.add_argument('--k', type=int, default=2)
    parser.add_argument('--data-dir', type=str, default='../../datasets/')
    return parser.parse_args(argv)
def _evaluate_accuracy(model, loader, transform=None):
    """Return top-1 accuracy of `model` over `loader` (inputs moved to GPU).

    transform: optional callable applied to the GPU input batch before
        inference — used to run an input-transformation defense.
    """
    model.eval()
    correct = 0
    total = 0
    with torch.no_grad():
        for images, labels in loader:
            images = images.cuda()
            if transform is not None:
                images = transform(images)
            outputs = model(images)
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels.cuda()).sum()
    return float(correct) / total


def main():
    """Evaluate a MNIST classifier on clean data, adversarial data, and
    adversarial data preprocessed by the selected defense."""
    args = get_args()

    logfile = './mnist/' + args.fname + '.log'
    logger = logging.getLogger(__name__)
    logging.basicConfig(
        filename=logfile,
        format='[%(asctime)s] - %(message)s',
        datefmt='%Y/%m/%d %H:%M:%S',
        level=logging.INFO)
    logger.info(args)

    if not os.path.exists('../advdata'):
        os.mkdir('../advdata')

    # Build the requested classifier and load its pretrained weights.
    if args.model == 'A':
        model = classifier_A().cuda()
        checkpoint = torch.load('../models/MNIST_A.pth')
    elif args.model == 'B':
        model = classifier_B().cuda()
        checkpoint = torch.load('../models/MNIST_B.pth')
    elif args.model == 'C':
        model = classifier_C().cuda()
        checkpoint = torch.load('../models/MNIST_C.pth')
    elif args.model == 'LeNet':
        model = Le_Net().cuda()
        checkpoint = torch.load('../models/MNIST_LeNet.pth')
    model.load_state_dict(checkpoint)
    model.eval()

    mnist_test = datasets.MNIST(args.data_dir, train=False, download=True, transform=transforms.ToTensor())
    # BUG FIX: honor the --batch_size argument (it was parsed but the loaders
    # hard-coded 128, the same value as the default).
    test_loader = torch.utils.data.DataLoader(mnist_test, batch_size=args.batch_size, shuffle=False)

    # Generate adversarial examples for the chosen attack, or reuse a cached
    # file from a previous run with the same settings.
    if args.attack_type == 'pgd':
        data_dir = "../advdata/MNIST_{}_pgd_{}-{}.pt".format(args.model, args.eps, args.iter)
        if not os.path.exists(data_dir):
            pgd_attack = torchattacks.PGD(model, eps=args.eps, alpha=args.alpha, iters=args.iter, random_start=False)
            pgd_attack.set_mode('int')
            pgd_attack.save(data_loader=test_loader, file_name=data_dir, accuracy=True)
    elif args.attack_type == 'fgsm':
        data_dir = "../advdata/MNIST_{}_fgsm_{}.pt".format(args.model, args.eps)
        if not os.path.exists(data_dir):
            fgsm_attack = torchattacks.FGSM(model, eps=args.eps)
            fgsm_attack.set_mode('int')
            fgsm_attack.save(data_loader=test_loader, file_name=data_dir, accuracy=True)
    elif args.attack_type == 'deepfool':
        data_dir = "../advdata/MNIST_{}_df_{}.pt".format(args.model, args.iter)
        if not os.path.exists(data_dir):
            df_attack = torchattacks.DeepFool(model, iters=args.iter)
            df_attack.set_mode('int')
            df_attack.save(data_loader=test_loader, file_name=data_dir, accuracy=True)
    else:
        # BUG FIX: 'rfgsm' is accepted by argparse but was never implemented;
        # previously execution fell through and crashed on undefined
        # `adv_images`. Fail with a clear message instead.
        raise NotImplementedError('attack type {} is not supported'.format(args.attack_type))
    adv_images, adv_labels = torch.load(data_dir)

    # Cached images are stored as uint8 in [0, 255]; rescale to [0, 1].
    adv_data = TensorDataset(adv_images.float() / 255, adv_labels)
    adv_loader = DataLoader(adv_data, batch_size=args.batch_size, shuffle=False)

    logger.info('Accuracy with Clean images:%.4f', _evaluate_accuracy(model, test_loader))
    logger.info('Accuracy with Adversarial images: %.4f', _evaluate_accuracy(model, adv_loader))

    # Build the requested input-transformation defense.
    if args.defense == 'km':
        def cluster_def(in_tensor, k=args.k):
            return Kmeans_cluster(in_tensor, k)
        defense = cluster_def
    elif args.defense == 'bs':
        defense = nn.Sequential(BitSqueezing(bit_depth=2))
    elif args.defense == 'ms':
        defense = nn.Sequential(MedianSmoothing2D(kernel_size=3))
    elif args.defense == 'jf':
        defense = nn.Sequential(JPEGFilter(10))
    else:
        # BUG FIX: the default --defense value is 'none', which previously
        # left `defense` unassigned and raised NameError below. Evaluate
        # without any input transformation in that case.
        defense = None

    logger.info('Accuracy with Defenced images: %.4f', _evaluate_accuracy(model, adv_loader, defense))


if __name__ == "__main__":
    main()
orders/__init__.py | Zadigo/mycommerce | 0 | 12758606 | <filename>orders/__init__.py
from typing import List, Tuple, Union
import stripe
from django.conf import settings
from django.core.exceptions import ValidationError
def get_stripe_tokens() -> List[Tuple[str, str]]:
    """Return the STRIPE_TOKENS setting, a list of (name, key) tuples.

    Raises:
        ValidationError: if the setting is not defined.
    """
    try:
        stripe_tokens = settings.STRIPE_TOKENS
    # BUG FIX: was a bare `except:`, which also swallowed unrelated errors
    # (including SystemExit/KeyboardInterrupt). A missing setting surfaces
    # as AttributeError on the settings object.
    except AttributeError:
        raise ValidationError(
            'In order to use the cart application, please implement the STRIPE_TOKENS in the settings file')
    return stripe_tokens
def initialize_stripe():
    """Configure the global `stripe` module with the API key for the current
    environment — the 'sk_test' entry when DEBUG, 'sk_live' otherwise — and
    return the configured module.

    Raises:
        TypeError: if STRIPE_TOKENS is not a list.
        ValidationError: if no entry matches the required key name.
    """

    def parse_key(key_to_get: str, stripe_tokens: list) -> str:
        # Each entry is a (name, value) tuple; `in` matches the name element.
        matches = [keys for keys in stripe_tokens if key_to_get in keys]
        # BUG FIX: previously a missing key silently returned an empty tuple,
        # leaving stripe.api_key set to () and failing much later with a
        # cryptic Stripe authentication error. Fail loudly here instead.
        if not matches:
            raise ValidationError(
                'No Stripe key matching "%s" found in STRIPE_TOKENS' % key_to_get)
        return matches[0][-1]

    stripe_tokens = get_stripe_tokens()
    if not isinstance(stripe_tokens, list):
        raise TypeError('The STRIPE_TOKENS should be a list')

    key_to_get = 'sk_test'
    if not settings.DEBUG:
        key_to_get = 'sk_live'

    stripe.api_key = parse_key(key_to_get, stripe_tokens)
    return stripe


# Configure Stripe once at import time (module-level side effect, as before).
stripe_initialized = initialize_stripe()
| 2.203125 | 2 |
polling_stations/apps/feedback/admin.py | mtravis/UK-Polling-Stations | 0 | 12758607 | import csv
import datetime
from django.conf.urls import url
from django.contrib import admin
from django.http import HttpResponse, HttpResponseForbidden
from .models import Feedback
class FeedbackAdmin(admin.ModelAdmin):
    """Read-only admin for Feedback with two superuser-only CSV exports."""

    list_filter = ("found_useful",)
    list_display = ("id", "found_useful", "comments", "created")
    # Every model field is read-only: feedback is immutable via the admin.
    readonly_fields = [field.name for field in Feedback._meta.get_fields()]
    ordering = ("-created", "id")

    def has_delete_permission(self, request, obj=None):
        return False

    def has_add_permission(self, request):
        return False

    def get_urls(self):
        # Custom export routes are placed before the default admin URLs so
        # they take precedence during resolution.
        extra_urls = [
            url("export_all/", self.export_all_feedback),
            url("export_comments/", self.export_feedback_with_comments),
        ]
        return extra_urls + super().get_urls()

    def export_all_feedback(self, request):
        if not request.user.is_superuser:
            return HttpResponseForbidden("Access Denied")
        queryset = Feedback.objects.all().order_by("-created", "id")
        return self.export(queryset)

    def export_feedback_with_comments(self, request):
        if not request.user.is_superuser:
            return HttpResponseForbidden("Access Denied")
        queryset = (
            Feedback.objects.all()
            .exclude(comments="")
            .order_by("found_useful", "-created", "id")
        )
        return self.export(queryset)

    def export(self, qs):
        # Stream the queryset as a timestamped CSV attachment.
        timestamp = datetime.datetime.now().strftime("%Y-%m-%dT%H-%M-%S")
        response = HttpResponse(content_type="text/csv")
        response["Content-Disposition"] = 'attachment; filename="feedback-%s.csv"' % (timestamp,)
        fields = ["id", "created", "comments", "found_useful", "source_url"]
        writer = csv.writer(response)
        writer.writerow(fields)
        for feedback in qs:
            writer.writerow([getattr(feedback, name) for name in fields])
        return response


admin.site.register(Feedback, FeedbackAdmin)
| 2.234375 | 2 |
umu-python/app/service/tosca/vo/tgif/service.py | suomitek/cubeai | 0 | 12758608 | <gh_stars>0
class Service(dict):
    """Plain-dict TGIF service descriptor with 'calls' and 'provides' keys."""

    def __init__(self, calls, provides):
        super().__init__(calls=calls, provides=provides)
| 2.546875 | 3 |
fledge/der_models.py | sonercandas/fledge | 2 | 12758609 | <filename>fledge/der_models.py
"""Distributed energy resource (DER) models."""
from multimethod import multimethod
import numpy as np
import pandas as pd
import pyomo.core
import pyomo.environ as pyo
import typing
import fledge.config
import fledge.database_interface
import fledge.electric_grid_models
import fledge.power_flow_solvers
import fledge.thermal_grid_models
import fledge.utils
logger = fledge.config.get_logger(__name__)
class DERModel(object):
    """DER model object.

    Base class for all DER models; declares the attributes every subclass
    must populate.
    """

    # Identifier of this DER within the grid model.
    der_name: str
    # Time index shared by the timeseries attributes below.
    timesteps: pd.Index
    # Nominal active power schedule, indexed by timestep.
    active_power_nominal_timeseries: pd.Series
    # Nominal reactive power schedule, indexed by timestep.
    reactive_power_nominal_timeseries: pd.Series
class FixedDERModel(DERModel):
    """Fixed DER model object.

    Fixed DERs follow their nominal timeseries exactly, so they contribute
    no decision variables or internal constraints — only equality
    constraints tying their grid power-change variables to the deviation of
    the nominal timeseries from a reference power flow solution.
    """

    def define_optimization_variables(
            self,
            optimization_problem: pyomo.core.base.PyomoModel.ConcreteModel,
    ):
        # Fixed DERs have no decision variables.
        pass

    def define_optimization_constraints(
            self,
            optimization_problem: pyomo.core.base.PyomoModel.ConcreteModel
    ):
        # Fixed DERs have no internal constraints.
        pass

    def define_optimization_connection_grid(
            self,
            optimization_problem: pyomo.core.base.PyomoModel.ConcreteModel,
            power_flow_solution: fledge.power_flow_solvers.PowerFlowSolution,
            electric_grid_model: fledge.electric_grid_models.ElectricGridModel
    ):
        """Fix the DER's active/reactive power-change variables to the
        difference between its nominal timeseries and the reference
        power flow solution."""

        # Obtain DER index.
        der_index = int(fledge.utils.get_index(electric_grid_model.ders, der_name=self.der_name))
        der = electric_grid_model.ders[der_index]

        # Define connection constraints.
        # The constraint list is shared across DERs; create it only once.
        if optimization_problem.find_component('der_connection_constraints') is None:
            optimization_problem.der_connection_constraints = pyo.ConstraintList()
        for timestep in self.timesteps:
            # Active power change = nominal - reference (real part of solution).
            optimization_problem.der_connection_constraints.add(
                optimization_problem.der_active_power_vector_change[timestep, der]
                ==
                self.active_power_nominal_timeseries.at[timestep]
                - np.real(
                    power_flow_solution.der_power_vector[der_index]
                )
            )
            # Reactive power change = nominal - reference (imaginary part).
            optimization_problem.der_connection_constraints.add(
                optimization_problem.der_reactive_power_vector_change[timestep, der]
                ==
                self.reactive_power_nominal_timeseries.at[timestep]
                - np.imag(
                    power_flow_solution.der_power_vector[der_index]
                )
            )

    def get_optimization_results(
            self,
            optimization_problem: pyomo.core.base.PyomoModel.ConcreteModel
    ):
        # No variables were defined, hence no results to extract.
        return None
class FixedLoadModel(FixedDERModel):
    """Fixed load model object."""

    def __init__(
            self,
            der_data: fledge.database_interface.DERData,
            der_name: str
    ):
        """Construct fixed load model object by `der_data` and `der_name`."""

        self.der_name = der_name

        # Look up the fixed load definition and its per-unit timeseries.
        fixed_load = der_data.fixed_loads.loc[self.der_name, :]
        timeseries = der_data.fixed_load_timeseries_dict[fixed_load['timeseries_name']]
        per_unit_timeseries = timeseries['apparent_power_per_unit']

        self.timesteps = timeseries.index

        # Scale the per-unit apparent power into nominal active / reactive
        # power schedules.
        scaling = fixed_load['scaling_factor']
        self.active_power_nominal_timeseries = (
            per_unit_timeseries.rename('active_power')
            * scaling
            * fixed_load['active_power']
        )
        self.reactive_power_nominal_timeseries = (
            per_unit_timeseries.rename('reactive_power')
            * scaling
            * fixed_load['reactive_power']
        )
class EVChargerModel(FixedDERModel):
    """EV charger model object."""

    def __init__(
            self,
            der_data: fledge.database_interface.DERData,
            der_name: str
    ):
        """Construct EV charger model object by `der_data` and `der_name`."""

        self.der_name = der_name

        # Look up the EV charger definition and its per-unit timeseries.
        ev_charger = der_data.ev_chargers.loc[self.der_name, :]
        timeseries = der_data.ev_charger_timeseries_dict[ev_charger['timeseries_name']]
        per_unit_timeseries = timeseries['apparent_power_per_unit']

        self.timesteps = timeseries.index

        # Scale the per-unit apparent power into nominal active / reactive
        # power schedules.
        scaling = ev_charger['scaling_factor']
        self.active_power_nominal_timeseries = (
            per_unit_timeseries.rename('active_power')
            * scaling
            * ev_charger['active_power']
        )
        self.reactive_power_nominal_timeseries = (
            per_unit_timeseries.rename('reactive_power')
            * scaling
            * ev_charger['reactive_power']
        )
class FlexibleDERModel(DERModel):
    """Flexible DER model, e.g., flexible load, object.

    Wraps a discrete-time linear state-space model
        x(t+1) = A x(t) + B u(t) + D d(t)
        y(t)   = C x(t) + E u(t) + F d(t)
    with per-timestep bounds on the outputs y(t), and translates it into
    Pyomo variables, constraints and an objective contribution.
    """

    # Dimension labels of the state-space model.
    state_names: pd.Index
    control_names: pd.Index
    disturbance_names: pd.Index
    output_names: pd.Index
    # Initial condition x(0).
    state_vector_initial: pd.Series
    # State equation matrices: A, B, D.
    state_matrix: pd.DataFrame
    control_matrix: pd.DataFrame
    disturbance_matrix: pd.DataFrame
    # Output equation matrices: C, E, F.
    state_output_matrix: pd.DataFrame
    control_output_matrix: pd.DataFrame
    disturbance_output_matrix: pd.DataFrame
    # Exogenous disturbance d(t) and output bounds, indexed by timestep.
    disturbance_timeseries: pd.DataFrame
    output_maximum_timeseries: pd.DataFrame
    output_minimum_timeseries: pd.DataFrame

    def define_optimization_variables(
            self,
            optimization_problem: pyomo.core.base.PyomoModel.ConcreteModel,
    ):
        """Declare state/control/output variables, indexed by
        (timestep, der_name, dimension name)."""

        # Define variables.
        optimization_problem.state_vector = pyo.Var(self.timesteps, [self.der_name], self.state_names)
        optimization_problem.control_vector = pyo.Var(self.timesteps, [self.der_name], self.control_names)
        optimization_problem.output_vector = pyo.Var(self.timesteps, [self.der_name], self.output_names)

    def define_optimization_constraints(
            self,
            optimization_problem: pyomo.core.base.PyomoModel.ConcreteModel
    ):
        """Add the initial condition, state dynamics, output equations and
        output bounds as constraints on the variables declared above."""

        # Define shorthand for indexing 't+1'.
        # TODO: Is inferring timestep_interval from timesteps guaranteed to work?
        timestep_interval = self.timesteps[1] - self.timesteps[0]

        # Define constraints.
        # The constraint list is shared across flexible DERs; create it once.
        if optimization_problem.find_component('flexible_der_model_constraints') is None:
            optimization_problem.flexible_der_model_constraints = pyo.ConstraintList()

        # Initial state: x(0) fixed to the given initial condition.
        for state_name in self.state_names:
            optimization_problem.flexible_der_model_constraints.add(
                optimization_problem.state_vector[self.timesteps[0], self.der_name, state_name]
                ==
                self.state_vector_initial.at[state_name]
            )

        for timestep in self.timesteps[:-1]:

            # State equation: x(t+1) = A x(t) + B u(t) + D d(t).
            for state_name in self.state_names:
                optimization_problem.flexible_der_model_constraints.add(
                    optimization_problem.state_vector[timestep + timestep_interval, self.der_name, state_name]
                    ==
                    sum(
                        self.state_matrix.at[state_name, state_name_other]
                        * optimization_problem.state_vector[timestep, self.der_name, state_name_other]
                        for state_name_other in self.state_names
                    )
                    + sum(
                        self.control_matrix.at[state_name, control_name]
                        * optimization_problem.control_vector[timestep, self.der_name, control_name]
                        for control_name in self.control_names
                    )
                    + sum(
                        self.disturbance_matrix.at[state_name, disturbance_name]
                        * self.disturbance_timeseries.at[timestep, disturbance_name]
                        for disturbance_name in self.disturbance_names
                    )
                )

        for timestep in self.timesteps:

            # Output equation: y(t) = C x(t) + E u(t) + F d(t).
            for output_name in self.output_names:
                optimization_problem.flexible_der_model_constraints.add(
                    optimization_problem.output_vector[timestep, self.der_name, output_name]
                    ==
                    sum(
                        self.state_output_matrix.at[output_name, state_name]
                        * optimization_problem.state_vector[timestep, self.der_name, state_name]
                        for state_name in self.state_names
                    )
                    + sum(
                        self.control_output_matrix.at[output_name, control_name]
                        * optimization_problem.control_vector[timestep, self.der_name, control_name]
                        for control_name in self.control_names
                    )
                    + sum(
                        self.disturbance_output_matrix.at[output_name, disturbance_name]
                        * self.disturbance_timeseries.at[timestep, disturbance_name]
                        for disturbance_name in self.disturbance_names
                    )
                )

            # Output limits: minimum <= y(t) <= maximum, per timestep.
            for output_name in self.output_names:
                optimization_problem.flexible_der_model_constraints.add(
                    optimization_problem.output_vector[timestep, self.der_name, output_name]
                    >=
                    self.output_minimum_timeseries.at[timestep, output_name]
                )
                optimization_problem.flexible_der_model_constraints.add(
                    optimization_problem.output_vector[timestep, self.der_name, output_name]
                    <=
                    self.output_maximum_timeseries.at[timestep, output_name]
                )

    @multimethod
    def define_optimization_connection_grid(
            self,
            optimization_problem: pyomo.core.base.PyomoModel.ConcreteModel,
            power_flow_solution: fledge.power_flow_solvers.PowerFlowSolution,
            electric_grid_model: fledge.electric_grid_models.ElectricGridModel,
            thermal_power_flow_solution: fledge.thermal_grid_models.ThermalPowerFlowSolution,
            thermal_grid_model: fledge.thermal_grid_models.ThermalGridModel,
    ):
        """Overload: connect to both the electric and the thermal grid by
        delegating to the single-grid overloads below."""

        # Connect electric grid.
        self.define_optimization_connection_grid(
            optimization_problem,
            power_flow_solution,
            electric_grid_model,
            disconnect_thermal_grid=False
        )

        # Connect thermal grid.
        self.define_optimization_connection_grid(
            optimization_problem,
            thermal_power_flow_solution,
            thermal_grid_model,
            disconnect_electric_grid=False
        )

    @multimethod
    def define_optimization_connection_grid(
            self,
            optimization_problem: pyomo.core.base.PyomoModel.ConcreteModel,
            power_flow_solution: fledge.power_flow_solvers.PowerFlowSolution,
            electric_grid_model: fledge.electric_grid_models.ElectricGridModel,
            disconnect_thermal_grid=True
    ):
        """Overload: tie the 'active_power'/'reactive_power' outputs of the
        state-space model to the grid power-change variables, relative to the
        reference power flow solution."""

        # Obtain DER index.
        der_index = int(fledge.utils.get_index(electric_grid_model.ders, der_name=self.der_name))
        der = electric_grid_model.ders[der_index]

        # Define connection constraints.
        # The constraint list is shared across DERs; create it only once.
        if optimization_problem.find_component('der_connection_constraints') is None:
            optimization_problem.der_connection_constraints = pyo.ConstraintList()
        for timestep in self.timesteps:
            # Active power change = 'active_power' output - reference value.
            optimization_problem.der_connection_constraints.add(
                optimization_problem.der_active_power_vector_change[timestep, der]
                ==
                optimization_problem.output_vector[timestep, self.der_name, 'active_power']
                - np.real(
                    power_flow_solution.der_power_vector[der_index]
                )
            )
            # Reactive power change = 'reactive_power' output - reference value.
            optimization_problem.der_connection_constraints.add(
                optimization_problem.der_reactive_power_vector_change[timestep, der]
                ==
                optimization_problem.output_vector[timestep, self.der_name, 'reactive_power']
                - np.imag(
                    power_flow_solution.der_power_vector[der_index]
                )
            )

    @multimethod
    def define_optimization_connection_grid(
            self,
            optimization_problem: pyomo.core.base.PyomoModel.ConcreteModel,
            thermal_power_flow_solution: fledge.thermal_grid_models.ThermalPowerFlowSolution,
            thermal_grid_model: fledge.thermal_grid_models.ThermalGridModel,
            disconnect_electric_grid=True
    ):
        # Thermal grid coupling is not implemented for generic flexible DERs.
        pass

    def define_optimization_objective(
            self,
            optimization_problem: pyomo.core.base.PyomoModel.ConcreteModel,
            price_timeseries: pd.DataFrame
    ):
        """Add this DER's energy-cost term (price times 'active_power'
        output, summed over timesteps) to the minimization objective."""

        # Define objective.
        if optimization_problem.find_component('objective') is None:
            optimization_problem.objective = pyo.Objective(expr=0.0, sense=pyo.minimize)
        # NOTE(review): the -1.0 factor presumably reflects the sign
        # convention for load power in this project — confirm against the
        # electric grid model's DER power conventions.
        optimization_problem.objective.expr += (
            sum(
                -1.0
                * price_timeseries.at[timestep, 'price_value']
                * optimization_problem.output_vector[timestep, self.der_name, 'active_power']
                for timestep in self.timesteps
            )
        )

    def get_optimization_results(
            self,
            optimization_problem: pyomo.core.base.PyomoModel.ConcreteModel
    ):
        """Extract the solved state/control/output trajectories as DataFrames
        (timesteps x dimension names)."""

        # Instantiate results variables.
        state_vector = pd.DataFrame(0.0, index=self.timesteps, columns=self.state_names)
        control_vector = pd.DataFrame(0.0, index=self.timesteps, columns=self.control_names)
        output_vector = pd.DataFrame(0.0, index=self.timesteps, columns=self.output_names)

        # Obtain results.
        for timestep in self.timesteps:
            for state_name in self.state_names:
                state_vector.at[timestep, state_name] = (
                    optimization_problem.state_vector[timestep, self.der_name, state_name].value
                )
            for control_name in self.control_names:
                control_vector.at[timestep, control_name] = (
                    optimization_problem.control_vector[timestep, self.der_name, control_name].value
                )
            for output_name in self.output_names:
                output_vector.at[timestep, output_name] = (
                    optimization_problem.output_vector[timestep, self.der_name, output_name].value
                )

        return (
            state_vector,
            control_vector,
            output_vector
        )
class FlexibleLoadModel(FlexibleDERModel):
    """Flexible load model object.

    State-space formulation of a load that may shift power in time:
    single state 'accumulated_energy' (integral of active power), controls
    'active_power'/'reactive_power', and a 'power_factor_constant' output
    that is bounded to zero to enforce a fixed reactive/active power ratio.
    """

    def __init__(
            self,
            der_data: fledge.database_interface.DERData,
            der_name: str
    ):
        """Construct flexible load model object by `der_data` and `der_name`."""

        # Store DER name.
        self.der_name = der_name

        # Get flexible load data by `der_name`.
        flexible_load = der_data.flexible_loads.loc[der_name, :]

        # Store timesteps index.
        self.timesteps = der_data.flexible_load_timeseries_dict[flexible_load['timeseries_name']].index

        # Construct active and reactive power timeseries.
        self.active_power_nominal_timeseries = (
            der_data.flexible_load_timeseries_dict[
                flexible_load['timeseries_name']
            ]['apparent_power_per_unit'].rename('active_power')
            * flexible_load['scaling_factor']
            * flexible_load['active_power']
        )
        self.reactive_power_nominal_timeseries = (
            der_data.flexible_load_timeseries_dict[
                flexible_load['timeseries_name']
            ]['apparent_power_per_unit'].rename('reactive_power')
            * flexible_load['scaling_factor']
            * flexible_load['reactive_power']
        )

        # Calculate nominal accumulated energy timeseries.
        # TODO: Consider reactive power in accumulated energy.
        accumulated_energy_nominal_timeseries = (
            self.active_power_nominal_timeseries.cumsum().rename('accumulated_energy')
        )

        # Instantiate indexes.
        self.state_names = pd.Index(['accumulated_energy'])
        self.control_names = pd.Index(['active_power', 'reactive_power'])
        self.disturbance_names = pd.Index([])
        self.output_names = pd.Index(['accumulated_energy', 'active_power', 'reactive_power', 'power_factor_constant'])

        # Instantiate initial state.
        self.state_vector_initial = (
            pd.Series(0.0, index=self.state_names)
        )

        # Instantiate state space matrices.
        # TODO: Consolidate indexing approach with electric grid model.
        # State equation: accumulated_energy(t+1) = accumulated_energy(t) + active_power(t).
        self.state_matrix = (
            pd.DataFrame(0.0, index=self.state_names, columns=self.state_names)
        )
        self.state_matrix.at['accumulated_energy', 'accumulated_energy'] = 1.0
        self.control_matrix = (
            pd.DataFrame(0.0, index=self.state_names, columns=self.control_names)
        )
        self.control_matrix.at['accumulated_energy', 'active_power'] = 1.0
        self.disturbance_matrix = (
            pd.DataFrame(0.0, index=self.state_names, columns=self.disturbance_names)
        )
        # Output equation: expose the state and both controls directly;
        # 'power_factor_constant' = -P/P_nom + Q/Q_nom, which its zero
        # upper/lower bounds below force to equal zero (constant power factor).
        self.state_output_matrix = (
            pd.DataFrame(0.0, index=self.output_names, columns=self.state_names)
        )
        self.state_output_matrix.at['accumulated_energy', 'accumulated_energy'] = 1.0
        self.control_output_matrix = (
            pd.DataFrame(0.0, index=self.output_names, columns=self.control_names)
        )
        self.control_output_matrix.at['active_power', 'active_power'] = 1.0
        self.control_output_matrix.at['reactive_power', 'reactive_power'] = 1.0
        self.control_output_matrix.at['power_factor_constant', 'active_power'] = -1.0 / flexible_load['active_power']
        self.control_output_matrix.at['power_factor_constant', 'reactive_power'] = 1.0 / flexible_load['reactive_power']
        self.disturbance_output_matrix = (
            pd.DataFrame(0.0, index=self.output_names, columns=self.disturbance_names)
        )

        # Instantiate disturbance timeseries.
        # No disturbances for this model; kept as an empty-column frame.
        self.disturbance_timeseries = (
            pd.DataFrame(0.0, index=self.active_power_nominal_timeseries.index, columns=self.disturbance_names)
        )

        # Construct output constraint timeseries
        # TODO: Fix offset of accumulated energy constraints.
        # NOTE(review): 'maximum' subtracts the energy-shift offset and
        # 'minimum' adds it, and the (1 - decrease)/(1 + increase) factors
        # only bound correctly if active power carries a load-negative sign
        # convention — confirm the sign convention before relying on these
        # bounds; the TODO above acknowledges the offsets need fixing.
        self.output_maximum_timeseries = (
            pd.concat([
                (
                    accumulated_energy_nominal_timeseries
                    - accumulated_energy_nominal_timeseries[int(flexible_load['time_period_power_shift_maximum'])]
                ),
                (
                    (1.0 - flexible_load['power_decrease_percentage_maximum'])
                    * self.active_power_nominal_timeseries
                ),
                (
                    (1.0 - flexible_load['power_decrease_percentage_maximum'])
                    * self.reactive_power_nominal_timeseries
                ),
                pd.Series(0.0, index=self.active_power_nominal_timeseries.index, name='power_factor_constant')
            ], axis='columns')
        )
        self.output_minimum_timeseries = (
            pd.concat([
                (
                    accumulated_energy_nominal_timeseries
                    + accumulated_energy_nominal_timeseries[int(flexible_load['time_period_power_shift_maximum'])]
                ),
                (
                    (1.0 + flexible_load['power_increase_percentage_maximum'])
                    * self.active_power_nominal_timeseries
                ),
                (
                    (1.0 + flexible_load['power_increase_percentage_maximum'])
                    * self.reactive_power_nominal_timeseries
                ),
                pd.Series(0.0, index=self.active_power_nominal_timeseries.index, name='power_factor_constant')
            ], axis='columns')
        )
class FlexibleBuildingModel(FlexibleDERModel):
    """Flexible building model object.

    Wraps a pre-computed state-space building model (looked up in
    `der_data.flexible_building_model_dict` via the building's `model_name`)
    in the `FlexibleDERModel` interface and defines its grid-connection
    constraints and optimization objective.
    """

    # Nominal power factor cos(arctan(Q/P)) of the building's nominal
    # operating point. (Annotation uses the builtin `float`; `np.float` is a
    # deprecated alias removed in newer numpy versions.)
    power_factor_nominal: float

    def __init__(
            self,
            der_data: fledge.database_interface.DERData,
            der_name: str
    ):
        """Construct flexible building model object by `der_data` and `der_name`."""

        # Store DER name.
        self.der_name = der_name

        # Obtain shorthands for flexible building data and model by `der_name`.
        # NOTE(review): assumes `flexible_buildings` is indexed by DER name —
        # confirm against `DERData`.
        flexible_building = der_data.flexible_buildings.loc[der_name, :]
        flexible_building_model = der_data.flexible_building_model_dict[flexible_building['model_name']]

        # Store timesteps (taken from the building model, not the scenario).
        self.timesteps = flexible_building_model.set_timesteps

        # Obtain nominal power factor: cos(arctan(Q / P)).
        self.power_factor_nominal = (
            np.cos(np.arctan(
                flexible_building['reactive_power']
                / flexible_building['active_power']
            ))
        )

        # Construct nominal active and reactive power timeseries
        # (constant at the nominal power values over all timesteps).
        self.active_power_nominal_timeseries = (
            pd.Series(
                1.0,
                index=self.timesteps
            )
            * flexible_building['active_power']
        )
        self.reactive_power_nominal_timeseries = (
            pd.Series(
                1.0,
                index=self.timesteps
            )
            * flexible_building['reactive_power']
        )

        # Obtain indexes (state / control / disturbance / output names)
        # directly from the building state-space model.
        self.state_names = flexible_building_model.set_states
        self.control_names = flexible_building_model.set_controls
        self.disturbance_names = flexible_building_model.set_disturbances
        self.output_names = flexible_building_model.set_outputs

        # Obtain initial state.
        self.state_vector_initial = flexible_building_model.set_state_initial

        # Obtain state space matrices.
        self.state_matrix = flexible_building_model.state_matrix
        self.control_matrix = flexible_building_model.control_matrix
        self.disturbance_matrix = flexible_building_model.disturbance_matrix
        self.state_output_matrix = flexible_building_model.state_output_matrix
        self.control_output_matrix = flexible_building_model.control_output_matrix
        self.disturbance_output_matrix = flexible_building_model.disturbance_output_matrix

        # Instantiate disturbance timeseries.
        self.disturbance_timeseries = flexible_building_model.disturbance_timeseries

        # Obtain output constraint timeseries.
        self.output_maximum_timeseries = flexible_building_model.output_constraint_timeseries_maximum
        self.output_minimum_timeseries = flexible_building_model.output_constraint_timeseries_minimum

    @multimethod
    def define_optimization_connection_grid(
            self,
            optimization_problem: pyomo.core.base.PyomoModel.ConcreteModel,
            power_flow_solution: fledge.power_flow_solvers.PowerFlowSolution,
            electric_grid_model: fledge.electric_grid_models.ElectricGridModel,
            thermal_power_flow_solution: fledge.thermal_grid_models.ThermalPowerFlowSolution,
            thermal_grid_model: fledge.thermal_grid_models.ThermalGridModel,
    ):
        """Define grid-connection constraints for combined electric and thermal grids.

        Delegates to the electric-only and thermal-only overloads, disabling
        each overload's "disconnect the other grid" default.
        """

        # Connect electric grid.
        self.define_optimization_connection_grid(
            optimization_problem,
            power_flow_solution,
            electric_grid_model,
            disconnect_thermal_grid=False
        )

        # Connect thermal grid.
        self.define_optimization_connection_grid(
            optimization_problem,
            thermal_power_flow_solution,
            thermal_grid_model,
            disconnect_electric_grid=False
        )

    @multimethod
    def define_optimization_connection_grid(
            self,
            optimization_problem: pyomo.core.base.PyomoModel.ConcreteModel,
            power_flow_solution: fledge.power_flow_solvers.PowerFlowSolution,
            electric_grid_model: fledge.electric_grid_models.ElectricGridModel,
            disconnect_thermal_grid=True
    ):
        """Define electric-grid connection constraints for this building.

        Ties the grid-side active / reactive power change variables to the
        building output `grid_electric_power`, with reactive power derived
        from the nominal power factor. When `disconnect_thermal_grid` is
        true, the thermal output is constrained to zero.
        """

        # Obtain DER index.
        der_index = int(fledge.utils.get_index(electric_grid_model.ders, der_name=self.der_name))
        der = electric_grid_model.ders[der_index]

        # Define connection constraints.
        if optimization_problem.find_component('der_connection_constraints') is None:
            optimization_problem.der_connection_constraints = pyo.ConstraintList()
        for timestep in self.timesteps:
            # Active power change relative to the power flow solution.
            # NOTE(review): the -1.0 factor presumably reflects the load sign
            # convention (consumption as negative injection) — confirm.
            optimization_problem.der_connection_constraints.add(
                optimization_problem.der_active_power_vector_change[timestep, der]
                ==
                -1.0 * optimization_problem.output_vector[timestep, self.der_name, 'grid_electric_power']
                - np.real(
                    power_flow_solution.der_power_vector[der_index]
                )
            )
            # Reactive power follows active power at the nominal power factor:
            # Q = P * tan(arccos(pf)).
            optimization_problem.der_connection_constraints.add(
                optimization_problem.der_reactive_power_vector_change[timestep, der]
                ==
                -1.0 * (
                    optimization_problem.output_vector[timestep, self.der_name, 'grid_electric_power']
                    * np.tan(np.arccos(self.power_factor_nominal))
                )
                - np.imag(
                    power_flow_solution.der_power_vector[der_index]
                )
            )

            # Disable thermal grid connection.
            if disconnect_thermal_grid:
                optimization_problem.der_connection_constraints.add(
                    0.0
                    ==
                    optimization_problem.output_vector[timestep, self.der_name, 'grid_thermal_power_cooling']
                )

    @multimethod
    def define_optimization_connection_grid(
            self,
            optimization_problem: pyomo.core.base.PyomoModel.ConcreteModel,
            thermal_power_flow_solution: fledge.thermal_grid_models.ThermalPowerFlowSolution,
            thermal_grid_model: fledge.thermal_grid_models.ThermalGridModel,
            disconnect_electric_grid=True
    ):
        """Define thermal-grid connection constraints for this building.

        Ties the grid-side thermal power variable to the building output
        `grid_thermal_power_cooling`. When `disconnect_electric_grid` is
        true, the electric output is constrained to zero.
        """

        # Obtain DER index.
        der_index = int(fledge.utils.get_index(thermal_grid_model.ders, der_name=self.der_name))
        der = thermal_grid_model.ders[der_index]

        # Define connection constraints.
        if optimization_problem.find_component('der_connection_constraints') is None:
            optimization_problem.der_connection_constraints = pyo.ConstraintList()
        for timestep in self.timesteps:
            optimization_problem.der_connection_constraints.add(
                optimization_problem.der_thermal_power_vector[timestep, der]
                ==
                -1.0 * optimization_problem.output_vector[timestep, self.der_name, 'grid_thermal_power_cooling']
            )

            # Disable electric grid connection.
            if disconnect_electric_grid:
                optimization_problem.der_connection_constraints.add(
                    0.0
                    ==
                    optimization_problem.output_vector[timestep, self.der_name, 'grid_electric_power']
                )

    def define_optimization_objective(
            self,
            optimization_problem: pyomo.core.base.PyomoModel.ConcreteModel,
            price_timeseries: pd.DataFrame
    ):
        """Add this building's electricity cost to the optimization objective.

        The cost term is sum over timesteps of price * grid_electric_power.
        Creates the (minimization) objective component if it does not exist yet.
        """

        # Define objective.
        if optimization_problem.find_component('objective') is None:
            optimization_problem.objective = pyo.Objective(expr=0.0, sense=pyo.minimize)
        optimization_problem.objective.expr += (
            sum(
                price_timeseries.at[timestep, 'price_value']
                * optimization_problem.output_vector[timestep, self.der_name, 'grid_electric_power']
                for timestep in self.timesteps
            )
        )
class DERModelSet(object):
    """DER model set object.

    Builds one DER model per DER of the scenario and partitions them into
    fixed and flexible subsets. Optimization-related methods fan out to the
    individual models (variables / constraints / objective only apply to
    flexible DERs; grid connection applies to all).
    """

    # Scenario timesteps shared by all models in the set.
    timesteps: pd.Index
    # All DER names of the scenario.
    der_names: pd.Index
    # Names of fixed (non-dispatchable) DERs.
    fixed_der_names: pd.Index
    # Names of flexible (dispatchable) DERs.
    flexible_der_names: pd.Index
    # Model objects keyed by DER name; fixed/flexible dicts hold the same
    # objects as `der_models`, split by type.
    der_models: typing.Dict[str, DERModel]
    fixed_der_models: typing.Dict[str, FixedDERModel]
    flexible_der_models: typing.Dict[str, FlexibleDERModel]

    def __init__(
            self,
            scenario_name: str
    ):
        """Construct the DER model set for the given scenario."""

        # Obtain data.
        scenario_data = fledge.database_interface.ScenarioData(scenario_name)
        der_data = fledge.database_interface.DERData(scenario_name)

        # Obtain timesteps.
        self.timesteps = scenario_data.timesteps

        # Obtain DER names by concatenating the `der_name` columns of all
        # DER type tables.
        self.der_names = (
            pd.Index(pd.concat([
                der_data.fixed_loads['der_name'],
                der_data.ev_chargers['der_name'],
                der_data.flexible_loads['der_name'],
                der_data.flexible_buildings['der_name']
            ]))
        )
        self.fixed_der_names = (
            pd.Index(pd.concat([
                der_data.fixed_loads['der_name'],
                der_data.ev_chargers['der_name'],
            ]))
        )
        self.flexible_der_names = (
            pd.Index(pd.concat([
                der_data.flexible_loads['der_name'],
                der_data.flexible_buildings['der_name']
            ]))
        )

        # Obtain models. Each DER is dispatched to its model class based on
        # which DER type table it appears in.
        self.der_models = dict.fromkeys(self.der_names)
        self.fixed_der_models = dict.fromkeys(self.fixed_der_names)
        self.flexible_der_models = dict.fromkeys(self.flexible_der_names)
        for der_name in self.der_names:
            # NOTE(review): `der_name in series` tests membership against the
            # Series *index*, not its values — this relies on the DER tables
            # being indexed by DER name; confirm against `DERData`.
            if der_name in der_data.fixed_loads['der_name']:
                self.der_models[der_name] = self.fixed_der_models[der_name] = (
                    fledge.der_models.FixedLoadModel(
                        der_data,
                        der_name
                    )
                )
            elif der_name in der_data.ev_chargers['der_name']:
                self.der_models[der_name] = self.fixed_der_models[der_name] = (
                    fledge.der_models.EVChargerModel(
                        der_data,
                        der_name
                    )
                )
            elif der_name in der_data.flexible_loads['der_name']:
                self.der_models[der_name] = self.flexible_der_models[der_name] = (
                    fledge.der_models.FlexibleLoadModel(
                        der_data,
                        der_name
                    )
                )
            elif der_name in der_data.flexible_buildings['der_name']:
                self.der_models[der_name] = self.flexible_der_models[der_name] = (
                    fledge.der_models.FlexibleBuildingModel(
                        der_data,
                        der_name
                    )
                )
            else:
                logger.error(f"Cannot determine type of DER: {der_name}")
                raise ValueError

    def define_optimization_variables(
            self,
            optimization_problem: pyomo.core.base.PyomoModel.ConcreteModel
    ):
        """Declare state / control / output variables for all flexible DERs.

        Variables are indexed by (timestep, (der_name, name)) so that all
        flexible DERs share the same pyomo variable components.
        """

        # Define flexible DER variables.
        der_state_names = [
            (der_name, state_name)
            for der_name in self.flexible_der_names
            for state_name in self.flexible_der_models[der_name].state_names
        ]
        der_control_names = [
            (der_name, control_name)
            for der_name in self.flexible_der_names
            for control_name in self.flexible_der_models[der_name].control_names
        ]
        der_output_names = [
            (der_name, output_name)
            for der_name in self.flexible_der_names
            for output_name in self.flexible_der_models[der_name].output_names
        ]
        optimization_problem.state_vector = pyo.Var(self.timesteps, der_state_names)
        optimization_problem.control_vector = pyo.Var(self.timesteps, der_control_names)
        optimization_problem.output_vector = pyo.Var(self.timesteps, der_output_names)

    def define_optimization_constraints(
            self,
            optimization_problem: pyomo.core.base.PyomoModel.ConcreteModel
    ):
        """Define DER constraints; only flexible DERs contribute constraints."""

        # Define DER constraints, only for flexible DERs.
        for der_name in self.flexible_der_names:
            self.flexible_der_models[der_name].define_optimization_constraints(
                optimization_problem
            )

    @multimethod
    def define_optimization_connection_grid(
            self,
            optimization_problem: pyomo.core.base.PyomoModel.ConcreteModel,
            power_flow_solution: fledge.power_flow_solvers.PowerFlowSolution,
            electric_grid_model: fledge.electric_grid_models.ElectricGridModel,
            thermal_power_flow_solution: fledge.thermal_grid_models.ThermalPowerFlowSolution,
            thermal_grid_model: fledge.thermal_grid_models.ThermalGridModel,
    ):
        """Connect all DERs to both the electric and the thermal grid."""

        # Define constraints for the connection with the DER power vector of the grid.
        for der_name in self.der_names:
            self.der_models[der_name].define_optimization_connection_grid(
                optimization_problem,
                power_flow_solution,
                electric_grid_model,
                thermal_power_flow_solution,
                thermal_grid_model
            )

    @multimethod
    def define_optimization_connection_grid(
            self,
            optimization_problem: pyomo.core.base.PyomoModel.ConcreteModel,
            power_flow_solution: fledge.power_flow_solvers.PowerFlowSolution,
            electric_grid_model: fledge.electric_grid_models.ElectricGridModel,
            **kwargs
    ):
        """Connect all DERs to the electric grid only (kwargs forwarded)."""

        # Define constraints for the connection with the DER power vector of the grid.
        for der_name in self.der_names:
            self.der_models[der_name].define_optimization_connection_grid(
                optimization_problem,
                power_flow_solution,
                electric_grid_model,
                **kwargs
            )

    @multimethod
    def define_optimization_connection_grid(
            self,
            optimization_problem: pyomo.core.base.PyomoModel.ConcreteModel,
            thermal_power_flow_solution: fledge.thermal_grid_models.ThermalPowerFlowSolution,
            thermal_grid_model: fledge.thermal_grid_models.ThermalGridModel,
            **kwargs
    ):
        """Connect all DERs to the thermal grid only (kwargs forwarded)."""

        # Define constraints for the connection with the DER power vector of the grid.
        for der_name in self.der_names:
            self.der_models[der_name].define_optimization_connection_grid(
                optimization_problem,
                thermal_power_flow_solution,
                thermal_grid_model,
                **kwargs
            )

    def define_optimization_objective(
            self,
            optimization_problem: pyomo.core.base.PyomoModel.ConcreteModel,
            price_timeseries: pd.DataFrame
    ):
        """Add objective terms; only flexible DERs contribute to the objective."""

        # Define objective, only for flexible DERs.
        for der_name in self.flexible_der_names:
            self.flexible_der_models[der_name].define_optimization_objective(
                optimization_problem,
                price_timeseries
            )
| 2.421875 | 2 |
tests/make_def_multi.py | shlomimatichin/Pymake3 | 2 | 12758610 | <reponame>shlomimatichin/Pymake3
#!/usr/bin/python3
#---------------------------------------
# IMPORTS
#---------------------------------------
import test
from pymake3 import *
#---------------------------------------
# FUNCTIONS
#---------------------------------------
@default_target
def my_target_1(conf):
    """First default target; must be superseded by the later one."""
    test.fail("only the last default target should be called")
@default_target
def my_target_2(conf):
    """Second default target; pymake3 should call this one (last wins)."""
    test.success()
#---------------------------------------
# SCRIPT
#---------------------------------------

# Run pymake3 with no config and no explicit targets, so the default target
# is made. test.success() inside my_target_2 exits the process; reaching the
# line below therefore means the default target was never invoked.
pymake3({}, [])
test.fail("'my_target_2' was not made")
| 2.140625 | 2 |
NMLA/NMLA.py | dinrker/Ray_Learning_Methods | 0 | 12758611 | import numpy as np
import scipy.special as sp
import matplotlib.pyplot as plt
# radius of the observation circle
def NMLA_radius(omega, Rest=1):
    """Compute the radius of the NMLA observation circle.

    Solves x^3 + x - C = 0 with C = 2.5 + 0.775*sqrt(omega*Rest) for its
    unique real root x, and returns r = x^3 / omega.

    Input: omega--frequency; Rest--estimate of the distance from source to
    observation point.

    Output: the radius of the observation circle.
    """
    poly = [1, 0, 1, -2.5 - 0.775 * (omega * Rest) ** 0.5]
    rt = np.roots(poly)
    # np.roots does not guarantee any ordering of the returned roots, so pick
    # the real root explicitly instead of assuming it sits at index 2. The
    # cubic x^3 + x - C (C > 0) has exactly one real root.
    real_roots = rt[np.isclose(np.imag(rt), 0.0)]
    rs = np.real(real_roots[0]) ** 3 / omega
    return rs
# NMLA filtering in the fourier space
def BGFiltrage(fu, kr, imp, L, gau, M):
    """NMLA filtering in Fourier space.

    INPUT: fu: FFT of impedance quantity U
           kr: k*r
           imp: parameter in impedance quantity
           L: truncation level
           gau: parameter in gaussian kernel
           M: number of sample points on the observation circle

    OUTPUT: filtered quantity BU (complex array of length M; only Fourier
            modes -L..L are non-zero).
    """
    # Bessel functions J_l(kr), l = 0..LP-1, and their derivatives.
    LP = max(L + 2, 3)
    order = np.arange(LP)
    Bj = sp.jv(order, kr)  # bessel J_l(kr)
    DBj = np.zeros(LP - 1)
    DBj[0] = -Bj[1]  # J0'(x) = -J1(x)
    DBj[1:] = 0.5 * (Bj[:LP - 2] - Bj[2:LP])  # Jl'(x) = (J_{l-1} - J_{l+1}) / 2

    # Gaussian kernel weights G_l, normalized so the sum over modes -L..L is 1.
    A = gau / L
    # Bug fix: the original used Python-2-only `xrange` here, which raises
    # NameError under Python 3 (the rest of the file uses `range`).
    idx = np.arange(1, L + 1)
    G = np.zeros(L + 1)
    G[0] = 1.0
    G[1:] = np.exp(-0.5 * (A * idx) ** 2)
    G /= 2 * np.sum(G) - 1

    # Mode-wise filtering operator.
    Fltr = np.zeros(L + 1, dtype=complex)
    Fltr[0] = Bj[0] - 1j * DBj[0] * imp
    Fltr[1:] = (Bj[1:L + 1] - 1j * DBj[1:L + 1] * imp) * (1j ** idx)
    Fltr = G / Fltr

    # Apply the filter to positive and negative frequency modes; all other
    # modes stay zero.
    fb = np.zeros(M, dtype=complex)
    fb[0] = Fltr[0] * fu[0]                 # FU_0
    fb[idx] = Fltr[idx] * fu[idx]           # FU_{1,...,L}
    fb[M - idx] = Fltr[idx] * fu[M - idx]   # FU_{-1,...,-L}
    return fb
# NMLA to estimate the ray direction
def NMLA(x0, y0, c0, omega, Rest, u, ux, uy):
    """Estimate the dominant ray direction at (x0, y0) via NMLA.

    u, ux, uy are callables (X, Y, omega) -> field values giving the wave
    field and its x/y derivatives. Returns the estimated ray angle in
    radians, taken from the angular sample where the filtered impedance
    quantity |beta| peaks.
    """
    imp = 0.5   # parameter in impedance quantity
    gau = 3.5   # Parameter in Gaussian function

    r = NMLA_radius(omega, Rest)  # radius of the observation circle
    kr = r * omega / c0           # k*r
    L = int(round(kr + (kr) ** (1.0 / 3) - 2.5))  # truncation level to obtain needed precision
    L = max(1, L)
    M = 2 * (4 * L) + 1  # number of samples on the observation circle

    # Angle discretization on the circle (endpoint 2*pi dropped to avoid
    # duplicating the 0-angle sample).
    angl = np.linspace(0, 2 * np.pi, M + 1)
    ang = angl[:M]
    X = x0 + r * np.cos(ang)
    Y = y0 + r * np.sin(ang)

    # Compute the impedance quantity U = imp * dU/dr / (i k) + U.
    Field = u(X, Y, omega)
    DUx = ux(X, Y, omega)
    DUy = uy(X, Y, omega)
    DField = DUx * np.cos(ang) + DUy * np.sin(ang)  # radial derivative
    U = imp * DField / (1j * omega / c0) + Field

    # Filtering in Fourier space.
    fu = np.fft.fft(U)
    fbeta = BGFiltrage(fu, kr, imp, L, gau, M)
    beta = np.fft.ifft(fbeta)

    # Estimate the ray angle: angle of the largest |beta| sample.
    sorted_index = sorted(range(len(beta)), key=lambda x: abs(beta[x]), reverse=True)
    est_ang = ang[sorted_index[0]]

    # Plot |beta| over the circle.
    # NOTE(review): plt.show() blocks in interactive matplotlib backends,
    # which makes this estimation routine unsuitable for batch use as-is.
    plt.plot(ang / np.pi, np.abs(beta))
    plt.xlabel(r'$\theta/\pi$')
    plt.show()
    return est_ang
Modules/Biophotonics/python/iMC/regression/preprocessing.py | SVRTK/MITK | 5 | 12758612 | '''
Created on Oct 26, 2015
@author: wirkert
'''
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
def preprocess2(df, nr_samples=None, snr=None, movement_noise_sigma=None,
magnification=None, bands_to_sortout=None):
# first set 0 reflectances to nan
df["reflectances"] = df["reflectances"].replace(to_replace=0.,
value=np.nan)
# remove nan
df.dropna(inplace=True)
# extract nr_samples samples from data
if nr_samples is not None:
df = df.sample(nr_samples)
# get reflectance and oxygenation
X = df.reflectances
if bands_to_sortout is not None and bands_to_sortout.size > 0:
X.drop(X.columns[bands_to_sortout], axis=1, inplace=True)
snr = np.delete(snr, bands_to_sortout)
X = X.values
y = df.layer0[["sao2", "vhb"]]
# do data magnification
if magnification is not None:
X_temp = X
y_temp = y
for i in range(magnification - 1):
X = np.vstack((X, X_temp))
y = pd.concat([y, y_temp])
# add noise to reflectances
camera_noise = 0.
if snr is not None:
sigmas = X / snr
noises = np.random.normal(loc=0., scale=1, size=X.shape)
camera_noise = sigmas*noises
movement_noise = 0.
if movement_noise_sigma is not None:
nr_bands = X.shape[1]
nr_samples = X.shape[0]
# we assume no correlation between neighboring bands
CORRELATION_COEFFICIENT = 0.0
movement_variance = movement_noise_sigma ** 2
movement_variances = np.ones(nr_bands) * movement_variance
movement_covariances = np.ones(nr_bands-1) * CORRELATION_COEFFICIENT * \
movement_variance
movement_covariance_matrix = np.diag(movement_variances) + \
np.diag(movement_covariances, -1) + \
np.diag(movement_covariances, 1)
# percentual sample errors
sample_errors_p = np.random.multivariate_normal(mean=np.zeros(nr_bands),
cov=movement_covariance_matrix,
size=nr_samples)
# errors w.r.t. the curve height.
movement_noise = X * sample_errors_p
X += camera_noise + movement_noise
X = np.clip(X, 0.00001, 1.)
# do normalizations
X = normalize(X)
return X, y
def preprocess(batch, nr_samples=None, snr=None, movement_noise_sigma=None,
               magnification=None, bands_to_sortout=None):
    """Convenience wrapper around preprocess2 returning only the sao2 target."""
    features, targets = preprocess2(batch, nr_samples, snr, movement_noise_sigma,
                                    magnification, bands_to_sortout)
    return features, targets["sao2"]
def normalize(X):
    """Normalize reflectance spectra and convert them to absorption features.

    Steps: row-wise l1 normalization, conversion to absorption via -log, then
    row-wise l2 normalization. Implemented in plain numpy (equivalent to
    sklearn.preprocessing.Normalizer with norm='l1'/'l2', including leaving
    all-zero rows untouched) to avoid the extra sklearn dependency for these
    trivial operations.

    X: 2-D array, one reflectance spectrum per row; values expected in (0, 1].
    Returns the transformed feature array of the same shape.
    """
    # normalize reflectances (row-wise l1)
    l1 = np.sum(np.abs(X), axis=1, keepdims=True)
    l1[l1 == 0] = 1.  # leave all-zero rows unchanged, like sklearn's Normalizer
    X = X / l1

    # reflectances to absorption
    X = -np.log(X)

    # row-wise l2 normalization of the absorption features
    l2 = np.sqrt(np.sum(X ** 2, axis=1, keepdims=True))
    l2[l2 == 0] = 1.
    return X / l2
| 2.3125 | 2 |
gamepie-ios/__init__.py | JadedTuna/gamepie | 4 | 12758613 | from gamepie import *
| 1.070313 | 1 |
th2_common_utils/message_fields_access.py | th2-net/th2-common-utils-py | 1 | 12758614 | <reponame>th2-net/th2-common-utils-py<filename>th2_common_utils/message_fields_access.py
# Copyright 2022-2022 Exactpro (Exactpro Systems Limited)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pprint import pformat
from typing import Any, Dict, List, Union
from th2_common_utils.converters import _dict_to_message_convert_value, message_to_dict, TypeName
from th2_common_utils.util.common import SimpleType
from th2_grpc_common.common_pb2 import ListValue, Message, Value
# =========================
# Value
# =========================


def value_get(self: Value, item: SimpleType) -> Union[str, ListValue, Message]:
    """Return the payload of a th2 ``Value`` (whichever oneof field is set).

    The ``item`` argument is unused; it exists only to satisfy the
    descriptor-style ``__get__`` signature this function is installed as.
    """
    return getattr(self, self.WhichOneof('kind'))  # type: ignore


# Monkey-patch ``Value`` so value.__get__(...) unwraps the oneof payload.
setattr(Value, '__get__', value_get)
# =========================
# ListValue
# =========================


def listvalue_getitem(self: ListValue, index: int) -> Union[str, List, Dict]:
    """Return the unwrapped element at ``index`` of a th2 ``ListValue``."""
    value = self.values[index]
    return value.__get__(index)  # type: ignore


def listvalue_len(self: ListValue) -> int:
    """Return the number of elements in a th2 ``ListValue``."""
    return len(self.values)


# Monkey-patch ``ListValue`` to support indexing and len().
setattr(ListValue, '__getitem__', listvalue_getitem)
setattr(ListValue, '__len__', listvalue_len)
# =========================
# Message
# =========================


def message_setitem(self: Message, key: str, value: Any) -> None:
    """Set a field of a th2 ``Message``, converting the value by its type.

    Simple Python values are stored as ``simple_value``; lists/dicts (and
    their th2 counterparts) are converted and copied into ``list_value`` /
    ``message_value``. Raises TypeError for unsupported value types.
    """
    value_type = type(value).__name__
    if value_type in {TypeName.STR, TypeName.INT, TypeName.FLOAT}:
        self.fields[key].simple_value = str(value)
    elif value_type == TypeName.VALUE:
        # NOTE(review): only the simple_value of a Value is copied here — a
        # Value carrying list_value/message_value would be stored as an empty
        # simple_value; confirm whether that case can occur.
        self.fields[key].simple_value = value.simple_value
    elif value_type in {TypeName.LIST, TypeName.LIST_VALUE}:
        th2_value = _dict_to_message_convert_value(value)
        self.fields[key].list_value.CopyFrom(th2_value.list_value)
    elif value_type in {TypeName.DICT, TypeName.MESSAGE}:
        th2_value = _dict_to_message_convert_value(value)
        self.fields[key].message_value.CopyFrom(th2_value.message_value)
    else:
        raise TypeError('Cannot set %s object as field value.' % value_type)


def message_getitem(self: Message, item: str) -> Union[str, List, Dict]:
    """Return the unwrapped value of field ``item``; raise KeyError if absent."""
    if item in self.fields:
        value = self.fields[item]
        return value.__get__(item)  # type: ignore
    else:
        raise KeyError(item)


def message_contains(self: Message, item: str) -> bool:
    """Return whether the message has a field named ``item``."""
    return item in self.fields


def message_repr(self: Message) -> str:
    """Pretty-print the message as its dict representation."""
    return pformat(message_to_dict(self))


# Monkey-patch ``Message`` with dict-like access and a readable repr.
setattr(Message, '__setitem__', message_setitem)
setattr(Message, '__getitem__', message_getitem)
setattr(Message, '__contains__', message_contains)
setattr(Message, '__repr__', message_repr)
| 1.96875 | 2 |
dl4j-examples/tutorials/docker/json-folder-ids.py | marshzg/https-github.com-deeplearning4j-deeplearning4j | 0 | 12758615 | <reponame>marshzg/https-github.com-deeplearning4j-deeplearning4j
import simplejson
import shutil
import os, sys

# Read every Zeppelin notebook JSON from notebook_json/ and write it to
# notebook/<id>/note.json, recreating the output directory from scratch.
input_path_prefix = 'notebook_json/'
output_path_prefix = 'notebook/'

files = [input_path_prefix + file for file in os.listdir("notebook_json")]

# Recreate the output directory (rmtree raises if it does not exist yet,
# matching the original behavior).
shutil.rmtree(output_path_prefix, ignore_errors=False, onerror=None)
os.mkdir(output_path_prefix)

for file in files:
    with open(file) as data_file:
        data = simplejson.load(data_file)
        folder_name = data['id']
        notebook_name = data['name']
        print(folder_name, notebook_name)
        os.mkdir(output_path_prefix + folder_name)
        output_path = output_path_prefix + folder_name + '/note.json'
        # Copy the raw file contents (rewind after simplejson consumed it).
        data_file.seek(0)
        # Bug fix: the output file was previously opened without a context
        # manager, leaking the handle if the write raised.
        with open(output_path, 'w') as out:
            out.write(data_file.read())
jorldy/core/env/gym_env.py | zenoengine/JORLDY | 300 | 12758616 | <filename>jorldy/core/env/gym_env.py
import gym
import numpy as np
from .base import BaseEnv
class _Gym(BaseEnv):
    """Gym environment.

    Args:
        name (str): name of environment in Gym.
        render (bool): whether to render the environment each step.
        custom_action (bool): whether the subclass defines its own action size
            (skips deriving ``action_size`` from the Gym action space).
    """

    def __init__(
        self,
        name,
        render=False,
        custom_action=False,
        **kwargs,
    ):
        # NOTE(review): relies on the subclass setting ``self.action_type``
        # *before* calling super().__init__ — it is read below.
        self.env = gym.make(name)
        self.state_size = self.env.observation_space.shape[0]
        if not custom_action:
            self.action_size = (
                self.env.action_space.shape[0]
                if self.action_type == "continuous"
                else self.env.action_space.n
            )
        self.render = render

    def reset(self):
        """Reset the environment and return the initial state as (1, state_size)."""
        self.score = 0
        state = self.env.reset()
        state = np.expand_dims(state, 0)  # for (1, state_size)
        return state

    def step(self, action):
        """Apply ``action`` and return (next_state, reward, done), each batched to (1, ?).

        Continuous actions are rescaled from [-1, 1] to the env's action
        range; discrete actions are converted to a plain Python scalar.
        """
        if self.render:
            self.env.render()
        if self.action_type == "continuous":
            # map [-1, 1] -> [low, high]
            action = ((action + 1.0) / 2.0) * (
                self.env.action_space.high - self.env.action_space.low
            ) + self.env.action_space.low
            action = np.reshape(action, self.env.action_space.shape)
        else:
            action = action.item()
        next_state, reward, done, info = self.env.step(action)
        self.score += reward

        next_state, reward, done = map(
            lambda x: np.expand_dims(x, 0), [next_state, [reward], [done]]
        )  # for (1, ?)
        return (next_state, reward, done)

    def close(self):
        """Close the underlying Gym environment."""
        self.env.close()
class Cartpole(_Gym):
    """CartPole-v1 with optional continuous action mode and shaped rewards."""

    def __init__(self, action_type="discrete", **kwargs):
        # action_type must be set before super().__init__ (read there).
        self.action_type = action_type
        if action_type == "continuous":
            # CartPole is natively discrete; declare a custom 1-D action and
            # threshold its sign to a discrete action in step().
            super(Cartpole, self).__init__("CartPole-v1", custom_action=True, **kwargs)
            self.action_size = 1
        else:
            super(Cartpole, self).__init__("CartPole-v1", **kwargs)

    def step(self, action):
        """Apply ``action`` with shaped reward: -1 on termination, else 0.1.

        Note: ``self.score`` accumulates the env's raw reward, while the
        returned reward is the shaped one.
        """
        if self.render:
            self.env.render()
        action = action.item()
        if self.action_type == "continuous":
            # negative -> push left (0), non-negative -> push right (1)
            action = 0 if action < 0 else 1
        next_state, reward, done, info = self.env.step(action)
        self.score += reward

        reward = -1 if done else 0.1
        next_state, reward, done = map(
            lambda x: np.expand_dims(x, 0), [next_state, [reward], [done]]
        )  # for (1, ?)
        return (next_state, reward, done)
class Pendulum(_Gym):
    """Pendulum-v1 environment (continuous actions)."""

    def __init__(self, **kwargs):
        # action_type must be set before super().__init__ (read there).
        self.action_type = "continuous"
        super(Pendulum, self).__init__("Pendulum-v1", **kwargs)
class MountainCar(_Gym):
    """MountainCar-v0 environment (discrete actions)."""

    def __init__(self, **kwargs):
        # action_type must be set before super().__init__ (read there).
        self.action_type = "discrete"
        super(MountainCar, self).__init__("MountainCar-v0", **kwargs)
| 2.71875 | 3 |
cpdb/data/migrations/0048_investigator.py | invinst/CPDBv2_backend | 25 | 12758617 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2018-05-30 08:43
from __future__ import unicode_literals
import data.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add investigator demographics and investigator_type to allegations."""

    dependencies = [
        ('data', '0047_officerallegation_outcome'),
    ]

    operations = [
        migrations.AddField(
            model_name='investigator',
            name='gender',
            field=models.CharField(blank=True, max_length=1),
        ),
        migrations.AddField(
            model_name='investigator',
            name='race',
            # NOTE(review): bytes default (b'Unknown') on a CharField is a
            # Python-2 artifact — confirm it stores as intended under Python 3.
            field=models.CharField(default=b'Unknown', max_length=50, validators=[data.validators.validate_race]),
        ),
        migrations.AddField(
            model_name='investigatorallegation',
            name='investigator_type',
            field=models.CharField(max_length=32, null=True),
        ),
    ]
| 1.789063 | 2 |
kernel/scripts/generate_interrupts_idt.py | losfair/FlatMk-v0 | 5 | 12758618 | <filename>kernel/scripts/generate_interrupts_idt.py
# Emit a Rust block that registers handlers for interrupt vectors 32..255.
print("{")
for vector in range(32, 256):
    statement = "IDT[{}].set_handler_fn(core::mem::transmute(intr_{} as usize));".format(vector, vector)
    print(statement)
print("}")
plugin/tooling.py | kaste/LSP | 0 | 12758619 | <filename>plugin/tooling.py<gh_stars>0
from .core.css import css
from .core.registry import windows
from .core.transports import create_transport
from .core.transports import Transport
from .core.transports import TransportCallbacks
from .core.types import Capabilities
from .core.types import ClientConfig
from .core.typing import Any, Callable, Dict, List, Optional
from .core.version import __version__
from .core.views import extract_variables
from .core.views import make_command_link
from base64 import b64decode
from base64 import b64encode
from subprocess import list2cmdline
import json
import mdpopups
import os
import sublime
import sublime_plugin
import textwrap
class LspParseVscodePackageJson(sublime_plugin.ApplicationCommand):
    """Convert a VSCode extension package.json (from the clipboard) into a
    commented Sublime settings skeleton, opened in a new scratch view."""

    def __init__(self) -> None:
        # Scratch view that receives the generated settings; created in run().
        self.view = None  # type: Optional[sublime.View]

    def writeline(self, contents: str, indent: int = 0) -> None:
        """Append one line (with the given indent) to the output view."""
        if self.view is not None:
            self.view.run_command("append", {"characters": " " * indent + contents + "\n"})

    def writeline4(self, contents: str) -> None:
        """Append one line indented by four spaces."""
        self.writeline(contents, indent=4)

    def run(self) -> None:
        """Parse the clipboard's package.json and emit a settings skeleton.

        NOTE(review): json.loads raises on a non-JSON clipboard — presumably
        acceptable for this developer tool; confirm.
        """
        package = json.loads(sublime.get_clipboard())
        contributes = package.get("contributes")
        if not isinstance(contributes, dict):
            sublime.error_message('No "contributes" key found!')
            return
        configuration = contributes.get("configuration")
        if not isinstance(configuration, dict):
            sublime.error_message('No "contributes.configuration" key found!')
            return
        properties = configuration.get("properties")
        if not isinstance(properties, dict):
            sublime.error_message('No "contributes.configuration.properties" key found!')
            return
        self.view = sublime.active_window().new_file()
        self.view.set_scratch(True)
        self.view.set_name("--- PARSED SETTINGS ---")
        self.view.assign_syntax("Packages/JSON/JSON.sublime-syntax")
        self.writeline("{")
        # schema = {} TODO: Also generate a schema. Sublime settings are not rigid.
        for k, v in sorted(properties.items()):
            typ = v["type"]
            description = v.get("description")
            if isinstance(description, str):
                # Emit the setting's description as wrapped // comments.
                for line in description.splitlines():
                    for wrapped_line in textwrap.wrap(line, width=73):
                        self.writeline4('// {}'.format(wrapped_line))
            else:
                self.writeline4('// unknown setting')
            enum = v.get("enum")
            has_default = "default" in v
            default = v.get("default")
            if isinstance(enum, list):
                self.writeline4('// possible values: {}'.format(", ".join(enum)))
            if has_default:
                value = default
            else:
                # No declared default: warn and synthesize a placeholder
                # value from the declared type.
                self.writeline4('// NO DEFAULT VALUE <-- NEEDS ATTENTION')
                if typ == "string":
                    value = ""
                elif typ == "boolean":
                    value = False
                elif typ == "array":
                    value = []
                elif typ == "object":
                    value = {}
                elif typ == "number":
                    value = 0
                else:
                    self.writeline4('// UNKNOWN TYPE: {} <-- NEEDS ATTENTION'.format(typ))
                    value = ""
            # Emit the key/value pair; multi-line JSON values keep their
            # continuation lines aligned, with a trailing comma on the last.
            value_lines = json.dumps(value, ensure_ascii=False, indent=4).splitlines()
            for index, line in enumerate(value_lines, 1):
                is_last_line = index == len(value_lines)
                terminator = ',' if is_last_line else ''
                if index == 1:
                    self.writeline4('"{}": {}{}'.format(k, line, terminator))
                else:
                    self.writeline4('{}{}'.format(line, terminator))
        self.writeline("}")
class LspTroubleshootServerCommand(sublime_plugin.WindowCommand, TransportCallbacks):
def run(self) -> None:
window = self.window
active_view = window.active_view()
configs = [c for c in windows.lookup(window).get_config_manager().get_configs() if c.enabled]
config_names = [config.name for config in configs]
if config_names:
window.show_quick_panel(config_names, lambda index: self.on_selected(index, configs, active_view),
placeholder='Select server to troubleshoot')
def on_selected(self, selected_index: int, configs: List[ClientConfig],
active_view: Optional[sublime.View]) -> None:
if selected_index == -1:
return
config = configs[selected_index]
output_sheet = mdpopups.new_html_sheet(
self.window, 'Server: {}'.format(config.name), '# Running server test...',
css=css().sheets, wrapper_class=css().sheets_classname)
sublime.set_timeout_async(lambda: self.test_run_server_async(config, self.window, active_view, output_sheet))
def test_run_server_async(self, config: ClientConfig, window: sublime.Window,
active_view: Optional[sublime.View], output_sheet: sublime.HtmlSheet) -> None:
server = ServerTestRunner(
config, window,
lambda output, exit_code: self.update_sheet(config, active_view, output_sheet, output, exit_code))
# Store the instance so that it's not GC'ed before it's finished.
self.test_runner = server # type: Optional[ServerTestRunner]
def update_sheet(self, config: ClientConfig, active_view: Optional[sublime.View], output_sheet: sublime.HtmlSheet,
server_output: str, exit_code: int) -> None:
self.test_runner = None
frontmatter = mdpopups.format_frontmatter({'allow_code_wrap': True})
contents = self.get_contents(config, active_view, server_output, exit_code)
# The href needs to be encoded to avoid having markdown parser ruin it.
copy_link = make_command_link('lsp_copy_to_clipboard_from_base64', '<kbd>Copy to clipboard</kbd>',
{'contents': b64encode(contents.encode()).decode()})
formatted = '{}{}\n{}'.format(frontmatter, copy_link, contents)
mdpopups.update_html_sheet(output_sheet, formatted, css=css().sheets, wrapper_class=css().sheets_classname)
def get_contents(self, config: ClientConfig, active_view: Optional[sublime.View],
server_output: str, exit_code: int) -> str:
lines = []
def line(s: str) -> None:
lines.append(s)
line('# Troubleshooting: {}'.format(config.name))
line('## Version')
line(' - LSP: {}'.format('.'.join([str(n) for n in __version__])))
line(' - Sublime Text: {}'.format(sublime.version()))
line('## Server Test Run')
line(' - exit code: {}\n - output\n{}'.format(exit_code, self.code_block(server_output)))
line('## Server Configuration')
line(' - command\n{}'.format(self.json_dump(config.command)))
line(' - shell command\n{}'.format(self.code_block(list2cmdline(config.command), 'sh')))
line(' - languages')
languages = [
{
'language_id': lang.id,
'document_selector': lang.document_selector,
'feature_selector': lang.feature_selector,
} for lang in config.languages
]
line(self.json_dump(languages))
line(' - init_options')
line(self.json_dump(config.init_options.get()))
line(' - settings')
line(self.json_dump(config.settings.get()))
line(' - env')
line(self.json_dump(config.env))
line('\n## Active view')
if active_view:
line(' - File name\n{}'.format(self.code_block(active_view.file_name() or 'None')))
line(' - Settings')
keys = ['auto_complete_selector', 'lsp_active', 'syntax']
settings = {}
view_settings = active_view.settings()
for key in keys:
settings[key] = view_settings.get(key)
line(self.json_dump(settings))
if isinstance(settings['syntax'], str):
syntax = sublime.syntax_from_path(settings['syntax'])
if syntax:
line(' - base scope\n{}'.format(self.code_block(syntax.scope)))
else:
line('no active view found!')
window = self.window
line('\n## Project / Workspace')
line(' - folders')
line(self.json_dump(window.folders()))
is_project = bool(window.project_file_name())
line(' - is project: {}'.format(is_project))
if is_project:
line(' - project data:\n{}'.format(self.json_dump(window.project_data())))
line('\n## LSP configuration\n')
lsp_settings_contents = self.read_resource('Packages/User/LSP.sublime-settings')
if lsp_settings_contents is not None:
line(self.json_dump(sublime.decode_value(lsp_settings_contents)))
else:
line('<not found>')
line('## System PATH')
lines += [' - {}'.format(p) for p in os.environ['PATH'].split(os.pathsep)]
return '\n'.join(lines)
def json_dump(self, contents: Any) -> str:
return self.code_block(json.dumps(contents, indent=2, sort_keys=True, ensure_ascii=False), 'json')
def code_block(self, contents: str, lang: str = '') -> str:
    """Wrap *contents* in a markdown fenced code block tagged with *lang*."""
    opening_fence = '```' + lang
    return '\n'.join((opening_fence, contents, '```'))
def read_resource(self, path: str) -> Optional[str]:
    """Return the contents of a Sublime packaged resource, or None when it
    cannot be loaded for any reason."""
    resource = None
    try:
        resource = sublime.load_resource(path)
    except Exception:
        pass  # missing/unreadable resource -> report as None
    return resource
class LspCopyToClipboardFromBase64Command(sublime_plugin.ApplicationCommand):
    """Decode a base64 payload and place the resulting text on the clipboard."""

    def run(self, contents: str = '') -> None:
        decoded_text = b64decode(contents).decode()
        sublime.set_clipboard(decoded_text)
class LspDumpWindowConfigs(sublime_plugin.WindowCommand):
    """
    Very basic command to dump all of the window's resolved configurations.

    Opens a scratch view with Python syntax highlighting and appends the
    string form of every resolved client configuration, one per line.
    """

    def run(self) -> None:
        view = self.window.new_file()
        view.set_scratch(True)
        view.set_name("Window {} configs".format(self.window.id()))
        view.settings().set("word_wrap", False)
        # assign_syntax replaces the deprecated set_syntax_file and matches
        # the sibling dump command's usage
        view.assign_syntax("Packages/Python/Python.sublime-syntax")
        for config in windows.lookup(self.window).get_config_manager().get_configs():
            view.run_command("append", {"characters": str(config) + "\n"})
class LspDumpBufferCapabilities(sublime_plugin.TextCommand):
    """
    Very basic command to dump the current view's static and dynamically registered capabilities.

    Renders one markdown section per attached session view, with the global
    (session-wide) and view-specific capabilities as JSON code blocks.
    """

    def run(self, edit: sublime.Edit) -> None:
        window = self.view.window()
        if not window:
            return
        file_name = self.view.file_name()
        if not file_name:
            return
        manager = windows.lookup(window)
        listener = manager.listener_for_view(self.view)
        if not listener or not any(listener.session_views_async()):
            sublime.error_message("There is no language server running for this view.")
            return
        v = window.new_file()
        v.set_scratch(True)
        # bugfix: assign_syntax expects a *.sublime-syntax path, not a
        # *.sublime-settings file
        v.assign_syntax("Packages/Markdown/Markdown.sublime-syntax")
        v.set_name("{} (capabilities)".format(os.path.basename(file_name)))

        def p(s: str) -> None:
            # append one line to the output view
            v.run_command("append", {"characters": s + "\n"})

        def print_capabilities(capabilities: Capabilities) -> str:
            # render a capabilities object as a fenced JSON block
            return "```json\n{}\n```".format(json.dumps(capabilities.get(), indent=4, sort_keys=True))

        for sv in listener.session_views_async():
            p("# {}\n".format(sv.session.config.name))
            p("## Global capabilities\n")
            p(print_capabilities(sv.session.capabilities) + "\n")
            p("## View-specific capabilities\n")
            p(print_capabilities(sv.session_buffer.capabilities) + "\n")
class ServerTestRunner(TransportCallbacks):
    """
    Used to start the server and collect any potential stderr output and the exit code.
    Server is automatically closed after defined timeout.
    """

    # Seconds the server is allowed to run before being force-closed.
    CLOSE_TIMEOUT_SEC = 2

    def __init__(self, config: ClientConfig, window: sublime.Window, on_close: Callable[[str, int], None]) -> None:
        # Callback invoked once with (collected output, exit code).
        self._on_close = on_close  # type: Callable[[str, int], None]
        self._transport = None  # type: Optional[Transport]
        self._stderr_lines = []  # type: List[str]
        try:
            cwd = window.folders()[0] if window.folders() else None
            variables = extract_variables(window)
            self._transport = create_transport(config, cwd, window, self, variables)
            # Schedule the forced shutdown; results are delivered via on_transport_close.
            sublime.set_timeout_async(self.force_close_transport, self.CLOSE_TIMEOUT_SEC * 1000)
        except Exception as ex:
            # The server could not be started at all -- report immediately
            # with a sentinel exit code of -1.
            self.on_transport_close(-1, ex)

    def force_close_transport(self) -> None:
        # Close the transport if the server has not exited on its own yet.
        if self._transport:
            self._transport.close()

    def on_payload(self, payload: Dict[str, Any]) -> None:
        # Server messages are irrelevant here; only stderr and exit code matter.
        pass

    def on_stderr_message(self, message: str) -> None:
        # Accumulate stderr lines for the final report.
        self._stderr_lines.append(message)

    def on_transport_close(self, exit_code: int, exception: Optional[Exception]) -> None:
        self._transport = None
        # Prefer the startup exception text; otherwise the accumulated stderr.
        output = str(exception) if exception else '\n'.join(self._stderr_lines).rstrip()
        # Hop back to the main thread before invoking the user callback.
        sublime.set_timeout(lambda: self._on_close(output, exit_code))
| 2.046875 | 2 |
application/pages/awesome_panel_express_tests/test_code.py | slamer59/awesome-panel | 0 | 12758620 | <reponame>slamer59/awesome-panel
"""Panel does not have a pane for code. I've created a `Code` pane in `awesome_panel.express`"""
import awesome_panel.express as pnx
import panel as pn
from awesome_panel.express.testing import TestApp
def test_code():
    """A manual test of the Code pane.
    We expect to see nicely formatted Python code inside a gray box."""
    code = """\
def my_add(a,b):
    return a+b
"""
    code_pane = pnx.Code(code, language="python")
    return TestApp(test_code, code_pane, sizing_mode="stretch_width")
def view() -> pn.Column:
    """Wraps all tests in a Column that can be included in the Gallery or served independently

    Returns:
        pn.Column -- A Column containing all the tests
    """
    intro = pn.pane.Markdown(__doc__)
    return pn.Column(intro, test_code())
# Serve the test app when the module is loaded by a Bokeh/Panel server
# (in which case __name__ is prefixed with "bokeh").
if __name__.startswith("bokeh"):
    view().servable("test_code")
| 2.40625 | 2 |
create_page_from_json.py | wunanpty/Canvas-tools | 26 | 12758621 | <reponame>wunanpty/Canvas-tools<gh_stars>10-100
#!/usr/bin/python3
#
# ./create_page_from_json.py course_id input.json
#
# it outputs a file that could be put into a Canvas page with index related data for a Canvas course
#
# Examples:
# ./create_page_from_json.py -s 17234 keywords_and_phrases_testdik1552.json
#
# to split on stop words rather than use the NLTK
# ./create_page_from_json.py -s 17234 keywords_and_phrases_testdik1552.json
#
# To minimize the output by eliimnating some pages (i.e., URLs):
# ./create_page_from_json.py -m 17234 keywords_and_phrases_testdik1552.json
#
# <NAME>: <NAME>.
#
# 2020.03.31
#
# 2020.08.01 extended to include the front page as a place that index items can point to
#
import csv, requests, time
from pprint import pprint
import optparse
import sys
import os
import pathlib # to get each of the files
import json
from lxml import html
# Use Python Pandas to create XLSX files
import pandas as pd
# to handle regular expressions
import re
import nltk
# Display names (as English and Swedish HTML spans) for each language code
# that may appear in lang attributes on the course pages.
language_info={
    "en": {'en': '<span lang="en_us">English</span>', 'sv': '<span lang="sv_se">engelska</span>'},
    "en_us": {'en': '<span lang="en_us">English</span>', 'sv': '<span lang="sv_se">engelska</span>'},
    "de_de": {'en': '<span lang="en_us">German</span>', 'sv': '<span lang="sv_se">tyska</span>'},
    "no_nb": {'en': '<span lang="en_us">Norwegian</span>', 'sv': '<span lang="sv_se">norska</span>'},
    "sv": {'en': '<span lang="en_us">Swedish</span>', 'sv': '<span lang="sv_se">svenska</span>'},
    "sv_se": {'en': '<span lang="en_us">Swedish</span>', 'sv': '<span lang="sv_se">svenska</span>'},
    "fr_fr": {'en': '<span lang="en_us">French</span>', 'sv': '<span lang="sv_se">franska</span>'},
}
# Words that carry no index value on their own: the extracted text is split
# on these words to obtain candidate index phrases, and they are excluded
# from the index itself.  main() appends a capitalized variant of each word,
# so this must stay a mutable list (not a set).  The deliberate oddities
# ('also:', 'eigth', 'verison', duplicate 'then') are preserved from the
# original data.
StopWords = u"""
a à able about above additional additionally after against all allows along
almost already also also: although an and another any anyone are as at
average be been because before being below between both but by can could
course currently decrease decreasing did do doing does done down due during
each early earlier easy e.g eigth either else end especially etc even every
far few five first follow following for formerly four from further general
generally get going good had has have having he hence her here hers herself
high higher him himself his how however i i.e if in include includes
including increase increasing into is it it's its itself just know known
knows last later large least like long longer low made many make makes me
might much more most must my myself near need needs needed next new no nor
not now of off often on once one only or other others otherwise our ours
ourselves out over own pass per pg pp provides rather require s same see
several she should simply since six small so some such take takes th than
that the then their theirs them themselves then there therefore these three
they this those through thus time to too try two under unit until up used
verison very vs want was we were what when where which while who whom why
wide will with within would you your yourself yourselves
""".split()
# Characters treated as phrase separators when splitting extracted text
# (used to build the regex in split_into_sentences), including some
# typographic symbols (arrow, ellipsis) seen in the course pages.
punctuation_list=[
    u'.', # add some punctuation to this list
    u',',
    u';',
    u'?',
    u'!',
    u'\t',
    u'\n',
    u'⇒',
    u'…',
    u'(',
    u')',
]
def get_text_for_tag(document, tag, dir):
    """Collect the text of every <tag> element in document into dir[tag + '_text'].

    Entries that are None or a bare newline are dropped; when no usable text
    remains, nothing is stored in dir.
    """
    found = document.xpath('.//' + tag)
    if not found:
        return
    texts = [element.text for element in found]
    texts = [text for text in texts if text is not None and text != "\n"]
    if texts:
        dir[tag + '_text'] = texts
def remove_tag(document, tag):
    """Delete every <tag> element (and its subtree) from the parsed document."""
    for element in document.xpath('//' + tag):
        element.getparent().remove(element)
def split_into_sentences(txt):
    """Split txt on every separator in the module-level punctuation_list."""
    pattern = '|'.join(map(re.escape, punctuation_list))
    return re.split(pattern, txt)
def split_into_words(txt):
    """Tokenize txt into identifier-like runs (letters, digits, _ : /) plus
    the sentence-ending punctuation marks . ! ? ; — everything else is dropped."""
    return re.findall(r"[a-zA-Z0-9_:/]+|[.!?;]", txt)
def split_on_stop_words(s1):
    """Break s1 into runs of consecutive non-stop-words.

    Tokenization depends on the module-level Stop_flag: a regular-expression
    split (split_into_words) when set, otherwise the NLTK tokenizers.
    Returns a list of word lists; stop words and punctuation are discarded
    and act as run separators.  Empty runs are removed.
    """
    global Verbose_Flag
    global Stop_flag
    output_list=list()
    working_list=list()
    lower_case_next_word=True  # NOTE(review): never read -- apparent leftover
    if Stop_flag:
        #lwords=split_into_words(s1)
        #words=[w[0] for w in lwords]
        words=split_into_words(s1)
    else:
        #words=nltk.word_tokenize(s1)
        # The method below does fine grain tokenization that the method above
        # NOTE(review): this keeps only w[0] (the first token) of each
        # sentence's token list -- confirm that is intended
        lwords=[nltk.word_tokenize(t) for t in nltk.sent_tokenize(s1)]
        words=[w[0] for w in lwords]
    for w in words:
        if (w not in StopWords) and (w not in punctuation_list):
            working_list.append(w)
        else:
            # a stop word or punctuation mark ends the current run
            output_list.append(working_list)
            working_list=list()
    # handle remainder - if necessary
    if len(working_list) > 0:
        output_list.append(working_list)
    # remove empty list from the list
    output_list = [x for x in output_list if x != []]
    return output_list
def combine_sublists_into_strings(l1):
    """Join each sub-list of words into a single space-separated string.

    An empty sub-list becomes an empty string.
    """
    return [' '.join(sublist).strip() for sublist in l1]
def process_page(page, remove):
    """Extract the indexable text from one Canvas page's HTML.

    Returns a dict mapping element categories (e.g. 'h1_text',
    'paragraph_text', 'img_alt_text', 'lang_specific') to lists of the
    text found for that category; categories with no usable text are
    absent.  <code>, <iframe>, <img> (after taking its alt text) and
    <pre> (after taking its text) are removed before further processing.

    page   -- the HTML string of the page (may be empty)
    remove -- when true, only the material after the last "<hr>" is processed
    """
    global Verbose_Flag
    d = dict()
    # handle the case of an empty document
    if not page or len(page) == 0:
        return d
    if remove:
        # keep only the material after the last <hr>
        # NOTE(review): the +1 leaves the "hr>" remnant in the kept text --
        # presumably harmless to the parser, but confirm +4 was not intended
        page = page[page.rfind("<hr>")+1:]
    document = html.document_fromstring(page)

    def collect(tag, key):
        # gather the non-empty text of every <tag> element into d[key]
        found = document.xpath('.//' + tag)
        if found:
            texts = [item.text for item in found]
            texts = [t for t in texts if t is not None and t != "\n"]
            if texts:
                d[key] = texts

    # exclude regions that must not be indexed
    for bad in document.xpath("//code"):
        bad.getparent().remove(bad)
    for bad in document.xpath("//iframe"):
        bad.getparent().remove(bad)
    # the alt text of an image describes it -- keep that, then drop the <img>
    img_items = document.xpath('.//img')
    if img_items:
        alts = [item.get('alt') for item in img_items]
        alts = [t for t in alts if t is not None and t != "\n"]
        if alts:
            d['img_alt_text'] = alts
    for bad in document.xpath("//img"):
        bad.getparent().remove(bad)
    collect('figcaption', 'figcaption_text')
    # take the <pre> text, then remove it so it is not processed again
    collect('pre', 'pre_text')
    remove_tag(document, 'pre')
    # headings at levels 1..4
    for level in ('h1', 'h2', 'h3', 'h4'):
        collect(level, level + '_text')
    # list items - note that ul and ol wrappers are ignored
    collect('li', 'list_item_text')
    # tables: caption, cells, headings (tr, thead, tbody, table are ignored)
    collect('caption', 'caption_text')
    collect('td', 'table_cell_text')
    collect('th', 'table_heading_text')
    # body text
    collect('p', 'paragraph_text')
    collect('blockquote', 'blockquote_text')
    collect('q', 'q_text')
    collect('span', 'span_text')
    # the different kinds of emphasized text
    collect('strong', 'strong_text')
    collect('b', 'b_text')
    collect('em', 'em_text')
    collect('u', 'u_text')
    collect('i', 'i_text')
    # superscripts and subscripts
    collect('sup', 'sup_text')
    collect('sub', 'sub_text')
    # for anchors - if there is a title, remember it
    titled_anchors = document.xpath('.//a[@title]')
    if titled_anchors:
        titles = [item.get('title') for item in titled_anchors]
        titles = [t for t in titles if t is not None and t != "\n"]
        if titles:
            d['anchor_title_text'] = titles
    # collect text of elements explicitly tagged with a lang attribute
    tagged = document.xpath('//*[@lang]')
    if tagged:
        language_specific_tagged_material = list()
        for item in tagged:
            language_specific_tagged_material.append(
                {'tag': item.tag, 'lang': item.get('lang'), 'text': item.text})
        d['lang_specific'] = language_specific_tagged_material
    if Verbose_Flag:
        print("page is now {}".format(html.tostring(document)))
    return d
def html_url_from_page_url(course_info, page_url):
    """Map a Canvas page_url to [html_url, title] using the course module data.

    A trailing '.html' is stripped from page_url first.  The front page (kept
    outside the modules) is checked before the module items.  URLs containing
    '/modules/' are returned relative ('..' + tail).  Returns None when the
    page cannot be found.
    """
    if page_url.endswith('.html'):
        page_url = page_url[:-5]
    # check for the front page, which is stored separately
    front_page = course_info.get('front_page', None)
    if front_page and front_page['url'] == page_url:
        print("found front_page html_url_from_page_url:: {0}".format(page_url))
        return [front_page['html_url'], front_page['title']]
    modules = course_info['modules']
    for module_name in modules:
        if module_name == 'front_page':  # already handled above
            continue
        module_items = modules[module_name]['module_items']
        for item_key in module_items:
            item = module_items[item_key]
            if item.get('page_url', []) != page_url:
                continue
            html_url = item['html_url']
            modules_offset = html_url.find('/modules/')
            if modules_offset > 1:
                return ['..' + html_url[modules_offset:], item['title']]
            return [html_url, item['title']]
    return None
def add_words_to_dict(lang, words, url):
    """Record that `words` (in language `lang`) occurs on the page at `url`.

    Updates the module-level page_entries structure, where
    page_entries[lang][words] is a set of page URLs (sets keep them unique).
    """
    global Verbose_Flag
    global page_entries
    if Verbose_Flag:
        print("(lang={0}, words={1}, url={2})".format(lang, words, url))
    # create the per-language dict on first use
    per_language = page_entries.get(lang, False)
    if not per_language:
        page_entries[lang] = dict()
    if Verbose_Flag:
        print("dict_for_target_lang={}".format(per_language))
    urls = page_entries[lang].get(words, set())
    urls.add(url)
    if Verbose_Flag:
        print("url_list_for_words={}".format(urls))
    page_entries[lang][words] = urls
def add_words_to_default_dict(words, url):
    """Record that `words` occurs on the page `url`, in the course's default language.

    The words are stripped of surrounding whitespace first; stop words, bare
    separator characters, and numbers are not indexed.  Sets are used so that
    only unique URLs are kept per word.  Returns the cleaned words on
    success, otherwise None.

    Bugfix: the original looked up the URL set with the *unstripped* key but
    stored it under the *stripped* key, which could clobber an existing
    entry's URLs; the words are now cleaned before any dict access.  A
    leftover debug print (for the word 'yourself') has been removed.
    """
    global Verbose_Flag
    global page_entries_in_language_of_course
    # clean first, so lookup and store use the same key
    words = words.strip()
    if len(words) == 0:
        return None
    # do not index stop words, starting characters to be removed, or numbers
    if (words in StopWords) or (words in starting_characters_to_remove) or is_number(words):
        return None
    urls_for_words = page_entries_in_language_of_course.get(words, set())
    urls_for_words.add(url)
    page_entries_in_language_of_course[words] = urls_for_words
    return words
def compute_page_for_tag(tag, heading, json_data, course_info):
    """Build an HTML fragment indexing, under `heading`, every entry stored
    under `tag` in the per-page extraction data json_data.

    Side effect: resets the module-level page_entries_in_language_of_course.
    """
    global Verbose_Flag
    global page_entries_in_language_of_course
    page_entries_in_language_of_course = dict()
    for page_url in json_data:
        for entry in json_data[page_url].get(tag, []):
            add_words_to_default_dict(entry, page_url)
    if Verbose_Flag:
        print("tag={0}, page_entries_in_language_of_course is {1}".format(tag, page_entries_in_language_of_course))
    # render the collected entries, sorted, in the course's default language
    parts = ['<h3><a id="' + heading + '">' + heading + '</h3><ul>']
    for words in sorted(page_entries_in_language_of_course.keys()):
        parts.append('<li>' + words + '<ul>')
        for page_url in page_entries_in_language_of_course[words]:
            url = html_url_from_page_url(course_info, page_url)
            if not url:
                print("could not find URL and title for {}".format(page_url))
            else:
                parts.append('<li><a href="' + url[0] + '">' + url[1] + '</a></li>')
        parts.append('</ul></li>')
    parts.append('</ul>')
    return ''.join(parts)
def cleanup_list(l1):
    """Return a new list holding cleanup_string() of each element of l1,
    dropping any entry that cleans up to nothing."""
    global Verbose_Flag
    cleaned_list = list()
    for entry in l1:
        if Verbose_Flag:
            print("e: {}".format(entry))
        cleaned = cleanup_string(entry)
        if Verbose_Flag:
            print("cs is {}".format(cleaned))
        if cleaned:
            cleaned_list.append(cleaned)
    if Verbose_Flag:
        print("new_list is {}".format(cleaned_list))
    return cleaned_list
# Example: [['Internet', 'Corporation'], ['Assigned', 'Names'], ['Numbers']]
# becomes: ['Internet Corporation', 'Assigned Names', 'Numbers']
def cleanup_two_layer_list(l1):
    """Flatten a list of word lists into a list of cleaned, space-joined strings.

    Each inner list becomes one string; words that clean up to nothing are
    skipped within their group.
    """
    global Verbose_Flag
    flattened = list()
    for word_group in l1:
        joined = ''
        for word in word_group:
            if Verbose_Flag:
                print("e: {}".format(word))
            cleaned = cleanup_string(word)
            if Verbose_Flag:
                print("cs is {}".format(cleaned))
            if cleaned:
                joined = joined + ' ' + cleaned
        flattened.append(joined.strip())
    if Verbose_Flag:
        print("new_list is {}".format(flattened))
    return flattened
def is_number(n):
    """Return True when the string n parses as a float, otherwise False."""
    try:
        float(n)
    except ValueError:
        # not a valid numeric literal
        return False
    return True
# Characters/strings stripped from the *start* of candidate index terms by
# cleanup_string, and skipped outright by add_words_to_default_dict.
# Bugfix: the original was missing a comma after u'⇨ ', so Python
# concatenated it with the following u'∴' into the single entry '⇨ ∴' and
# '∴' on its own was never matched.  Exact duplicate entries have also been
# removed (they were redundant for both membership tests and stripping).
starting_characters_to_remove =[
    u' ',
    u',',
    u':',
    u';',
    u'&',
    u'"',
    u'(',
    u')',
    u'[',
    u']',
    u'{',
    u'}',
    u'+',
    u'-',  # 0x2d
    u'‒',  # 0x2012
    u'–',  # 0x2013
    u'―',  # 0x2015
    u'--',
    # u'.', # note that we cannot remove a leading period as this might be an example of a domain name
    u'..',
    u'...',
    u'…',
    u'*',
    u'< < <',
    u'†',
    u'‡',
    u'``',
    u"`",
    u"’",
    u'“',
    u"=",
    u'<',
    u'≤',
    u'>',
    u'¨',
    u'®',
    u'→',
    u'⇒',
    u'⇨',
    u'⇨ ',
    u'∴',
    u'≡',
    u'✔️',
    u'✔',
    u'✝',
    u'❌ ',
    u'❌',
    u'#',
]
# Characters/strings stripped from the *end* of candidate index terms by
# cleanup_string.  Exact duplicate entries from the original ('†', '⇒', '≤')
# have been removed; duplicates were redundant since the list is only used
# for endswith tests.
ending_characters_to_remove =[
    u',',
    u'.',
    u'!',
    u'?',
    u':',
    u';',
    u'&',
    u'%',
    u"''",
    u'"',  # a double quote mark
    u"‘",
    u'(',
    u')',
    u'[',
    u']',
    u'{',
    u'}',
    u'-',
    u'[ online',
    u'*',
    u'†',
    u'✝',
    u'‡',
    u" ’",
    #u' (see',
    u' e.g',
    u"`",
    u"=",
    u'<',
    u'≤',
    u'>',
    u'…',
    u'®',
    u'→',
    u'⇒',
    u'⇨',
    u'∴',
    u'≡',
    u'✔️',
    u'✔',
    u'❌',
]
def cleanup_string(s):
    """Recursively strip whitespace, balanced bracket pairs, and the
    configured leading/trailing separator characters from s.

    Returns "" when the remainder is a number (numbers are not indexed).

    Bugfix: the three bracket-pair checks originally tested
    `s.endswith(')') and s.endswith('(')` (etc.) -- both conditions on the
    *end* of the string -- so they could never match a normal "(...)"
    string; the opening delimiter is now tested with startswith.
    """
    s = s.strip()  # first remove any trailing or leading white space
    if s.startswith('(') and s.endswith(')'):  # remove initial and trailing parentheses
        s = cleanup_string(s[1:-1])
    if s.startswith('[') and s.endswith(']'):  # remove initial and trailing brackets
        s = cleanup_string(s[1:-1])
    if s.startswith('{') and s.endswith('}'):  # remove initial and trailing braces
        s = cleanup_string(s[1:-1])
    for c in ending_characters_to_remove:
        if s.endswith(c):
            s = cleanup_string(s[:-(len(c))])
    for c in starting_characters_to_remove:
        if s.startswith(c):
            s = cleanup_string(s[len(c):])
    if is_number(s):
        return ""
    return s.strip()
# The letters under which the index is partitioned: A-Z plus the Swedish
# Å, Ä, Ö (in Swedish alphabetical order at the end).
Letter_in_Index = list(u'ABCDEFGHIJKLMNOPQRSTUVWXYZÅÄÖ')
def id_in_Index(s):
    """HTML anchor id used for an index letter, e.g. 'A' -> '*A*'."""
    return '*{}*'.format(s)
def label_in_Index(s):
    """Visible heading text for an index letter, e.g. 'A' -> '- A -'."""
    return '- {} -'.format(s)
def main():
global Verbose_Flag
global Stop_flag
global page_entries
global page_entries_in_language_of_course
parser = optparse.OptionParser()
parser.add_option('-v', '--verbose',
dest="verbose",
default=False,
action="store_true",
help="Print lots of output to stdout"
)
parser.add_option('-s', '--stop',
dest="stop",
default=False,
action="store_true",
help="split on stopwords with regular expression and not NLTK tokenizer"
)
parser.add_option('-m', '--minimize',
dest="minimize",
default=False,
action="store_true",
help="minimize by leaving out some pages"
)
options, remainder = parser.parse_args()
Verbose_Flag=options.verbose
Stop_flag=options.stop
if Verbose_Flag:
print("ARGV : {}".format(sys.argv[1:]))
print("VERBOSE : {}".format(options.verbose))
print("REMAINING : {}".format(remainder))
if (len(remainder) < 2):
print("Inusffient arguments\n must provide the course number and name of file to process\n")
sys.exit()
course_id=remainder[0]
file_name=remainder[1]
course_info_file="modules-in-course-{}.json".format(course_id)
if Verbose_Flag:
print("processing course_info JSON from {}".format(course_info_file))
try:
with open(course_info_file) as json_file:
course_info=json.load(json_file)
except:
print("Unable to open file named {}".format(course_info_file))
sys.exit()
if Verbose_Flag:
print("processing JSON from {}".format(file_name))
try:
with open(file_name) as json_file:
json_data=json.load(json_file)
except:
print("Unable to open file named {}".format(file_name))
sys.exit()
# load words for the course, if the file exists
course_words_file="words-for-course-{}.json".format(course_id)
if Verbose_Flag:
print("loading course words from {}".format(course_words_file))
try:
with open(course_words_file) as json_file:
course_words=json.load(json_file)
except:
#print("Unable to open file named {}".format(course_words_file))
print("No file {} - so no course words to specially process".format(course_words_file))
course_words=dict()
course_words['words_to_ignore']=[] # empty list
course_words['words_to_merge']=[]
if Verbose_Flag:
print("course_words is {}".format(course_words))
# for each of the stop words add a version with an initial capital letter - so that these can also be removed
oldStopWords=StopWords.copy()
for w in oldStopWords:
if len(w) > 0:
capitalized_word=w[0].upper()+w[1:]
StopWords.append(capitalized_word)
if Verbose_Flag:
print("Extended StopWords are {}".format(StopWords))
print("Processing language specific elements")
# page_entries will have the structure
# {"sv_se": {
# "words1": (url1, url2, ...),
# "words2": (url1, url2, ...),
# },
# "no_nb": {
# "words3": (url1, url2, ...),
# "words4": (url1, url2, ...),
# },
# ...
# }
page_entries=dict()
for p in json_data:
# [{"tag": "span", "lang": "sv_se", "text": "
lang_specific_data=json_data[p].get("lang_specific", [])
if lang_specific_data and len(lang_specific_data) > 0:
if Verbose_Flag:
print("lang_specific_data is {0}, p={1}".format(lang_specific_data, p))
for i in lang_specific_data:
add_words_to_dict(i['lang'], i['text'], p)
if Verbose_Flag:
print("page_entries is {}".format(page_entries))
# create page
page='<h3><a id="Foreign_words_and_phrases">Foreign words and phrases</h3>'
for lang in sorted(page_entries.keys(), key=lambda v: (v.casefold(), v)):
print("lang={}".format(lang))
page=page+'<h3>'+lang+': '+language_info[lang]['en']+': '+language_info[lang]['sv']+'</h3><ul>'
for words in sorted(page_entries[lang].keys(), key=lambda v: (v.casefold(), v)):
page=page+'<li>'+words+'<ul>'
for p in page_entries[lang][words]:
url=html_url_from_page_url(course_info, p)
if not url:
print("could not find URL and title for {}".format(p))
else:
page=page+'<li><a href="'+url[0]+'"><span lang="'+lang+'">'+url[1]+'</span></a></li>'
page=page+'</ul></li>'
page=page+'</ul>'
if Verbose_Flag:
print("page is {}".format(page))
print("Processing figcaption text")
page_figcaption=compute_page_for_tag('figcaption_text', "Figure captions", json_data, course_info)
if Verbose_Flag:
print("page_figcaption is {}".format(page_figcaption))
page=page+page_figcaption
print("Processing caption text")
page_caption=compute_page_for_tag('caption_text', "Table captions", json_data, course_info)
if Verbose_Flag:
print("page_caption is {}".format(page_caption))
page=page+page_caption
if Verbose_Flag:
print("page is {}".format(page))
save_page=page # save current page contents
print("Processing all of the word groups")
# process all of the things that were extracted and index them
page_entries=dict()
for p in json_data:
format("p={}".format(p))
d1=json_data[p]
list_of_strings=list()
for de in d1:
if de == 'pre_text': # do not index <pre> tagged content
continue
l=json_data[p][de]
if Verbose_Flag:
print("de is {0}, l is {1}".format(de, l))
if de == 'span_text':
if Verbose_Flag:
print("special case of span l={}".format(l))
if len(l) == 0:
continue
elif len(l) == 1:
# check for single characters to skip
if l[0].strip() in starting_characters_to_remove:
if Verbose_Flag:
print("special case of span with single element in l[0]={0}, len={1}".format(l[0], len(l[0])))
continue
else:
add_words_to_default_dict(l[0], p)
else:
for s in l:
add_words_to_default_dict(s, p)
continue
# do not index superscripts or subscripts
if de == 'sup_text' or de == 'sub_text':
continue
# other cases
if Verbose_Flag:
print("de is {0}, l is {1}".format(de, l))
for s in l:
if Verbose_Flag:
print("s={}".format(s))
if isinstance(s, dict):
s_text=s.get('text', '')
s_lang=s.get('lang', [])
if s_text:
if s_lang:
add_words_to_dict(s_lang, s_text, p)
else:
add_words_to_default_dict(s_text, p)
continue
if isinstance(s, str):
l1=split_into_sentences(s)
if Verbose_Flag:
print("l1 is {}".format(l1))
if len(l1) >= 1:
if Verbose_Flag:
print("l1 is longer than 1")
for s1 in l1:
if Verbose_Flag:
print("s1 is {}".format(s1))
l2=split_on_stop_words(s1)
if Verbose_Flag:
print("l2 is {}".format(l2))
l3=cleanup_two_layer_list(l2)
for s3 in l3:
if Verbose_Flag:
print("s3 is {}".format(s3))
add_words_to_default_dict(s3, p)
else:
l2=split_on_stop_words(s)
l3=cleanup_two_layer_list(l2)
for words in l3:
w2=cleanup_string(words)
if Verbose_Flag:
print("w2 is {}".format(w2))
add_words_to_default_dict(w2, p)
continue
else:
print("not a dict or str - s is {}".format(s))
else:
if Verbose_Flag:
print("There is no content to index on page: {}".format(p))
continue
if Verbose_Flag:
print("page_entries is {}".format(page_entries))
# create index page
index_page=""
# index_page_offset will contain information about the offset to each new letter in the index
index_page_offset=dict()
# added quick references to the different parts of the index
#<ul>
# <li><a href="#*A*"><strong>*A*</strong></a></li>
# <li><a href="#*B*"><strong>*B*</strong></a></li>
# </ul>
#
# Use id_in_Index(s) to name the IDs
# At each letter one needs to add:
# </ul><a id="*A*" name="*A*"></a><h3>- A -</h3><ul>
#
# Use label_in_Index(s) to name the visible heading
index_page_heading='<h3><a id="Quick_Index">Quick Index</h3><ul>'
for l in Letter_in_Index:
index_page_heading=index_page_heading+'<li><a href="#'+id_in_Index(l)+'"><strong>'+label_in_Index(l)+'</strong></a></li>'
index_page_heading=index_page_heading+'</ul>'
index_page=index_page+'<h3>groups of words</h3><ul>'
current_index_letter=""
previous_word=""
url_entry=""
url_dict=dict()
global page_entries_in_language_of_course
#merge entries
sorted_page_entries=sorted(page_entries_in_language_of_course.keys(), key=lambda v: (v.casefold(), v))
for words in sorted_page_entries:
#merge entries
for w in course_words['words_to_merge']:
if words in course_words['words_to_merge'][w]:
if Verbose_Flag:
print("words is {0} and w is {1}".format(words, w))
print("merging for {}".format(words))
urls_for_words=page_entries_in_language_of_course.get(words, set())
if Verbose_Flag:
print("page_entries_in_language_of_course[words] is {}".format(urls_for_words))
if len(urls_for_words) > 0:
if Verbose_Flag:
print("clearing page entry for {}".format(words))
page_entries_in_language_of_course[words]=set()
unified_url_entries=page_entries_in_language_of_course.get(w, set())
page_entries_in_language_of_course[w]=unified_url_entries.union(urls_for_words)
if Verbose_Flag:
print("unified_url_entries is {}".format(unified_url_entries))
# the casefold sorts upper and lower case together, but gives a stable result
# see <NAME>, Sep 13 '19 at 12:15, https://stackoverflow.com/questions/13954841/sort-list-of-strings-ignoring-upper-lower-case
for words in sorted(page_entries_in_language_of_course.keys(), key=lambda v: (v.casefold(), v)):
# ignore words in the course's 'words_to_ignore' list
if words in course_words['words_to_ignore']:
print("ignoring {}".format(words))
continue
# if the previous word was an acronym or the new word is different, output the record
if previous_word.isupper() or words.casefold() != previous_word:
previous_word=words.casefold()
#if len(url_entry) > 0: # only add an entry for this word if there is atleast one URL
if len(url_dict)> 0:
for d in sorted(url_dict, key=url_dict.get, reverse=False):
if options.minimize:
if d == 'Learning outcomes' or d == 'Learning Outcomes' or d == 'Acronyms and Abbreviations':
continue
url_entry=url_entry+'<li><a href="'+url_dict[d]+'">'+d+'</a></li>'
index_page=index_page+word_entry+url_entry+'</ul></li>'
url_entry=""
url_dict=dict()
print("new words={}".format(words))
if len(words) == 0:
print("words={0} and len(words)={1}".format(words, len(words)))
first_letter=words[0].upper()
if (first_letter in Letter_in_Index) and (first_letter != current_index_letter):
if Verbose_Flag:
print("first_letter={0} current_index_letter={1}".format(first_letter,current_index_letter))
current_index_letter=first_letter
# store the current offset to the start of this letter in the index_page
index_page_offset[current_index_letter]=len(index_page)+5
index_page=index_page+'</ul><a id="'+id_in_Index(current_index_letter)+'" name="'+id_in_Index(current_index_letter)+'"></a><h3>'+label_in_Index(current_index_letter)+'</h3><ul>'
word_entry='<li>'+words+'<ul>'
for p in page_entries_in_language_of_course[words]:
url=html_url_from_page_url(course_info, p)
if not url:
print("for words '{0}' could not find URL and title for page {1}".format(words, p))
else:
url_dict[url[1]]=url[0]
index_page=index_page+'</ul>'
if Verbose_Flag:
print("index_page is {}".format(index_page))
page_heading='<h3>Automatically extracted index information</h3><ul>'
page_heading=page_heading+'<li><a href="#Foreign_words_and_phrases">Foreign_words_and_phrases</li>'
page_heading=page_heading+'<li><a href="#Figure captions">Figure captions</li>'
page_heading=page_heading+'<li><a href="#Table captions">Table captions</li>'
page_heading=page_heading+'<li><a href="#Quick_Index">Quick Index</li></ul>'
page=page_heading+save_page+index_page_heading+index_page
print("sizes index_page={0} ({3} MB), page_caption={1}, save_page={2}".format(len(index_page),
len(page_caption),
len(save_page),
(len(index_page)/(1024*1024))))
# write out body of response as a .html page
new_file_name="stats_for_course-{}.html".format(course_id)
with open(new_file_name, 'wb') as f:
encoded_output = bytes(page, 'UTF-8')
f.write(encoded_output)
#page=index_page_heading+index_page
letters_on_index_page=dict()
number_of_index_pages=0
maximum_page_size=(400*1000)-1
#
base_offset_of_page=0
#
for index, letter in enumerate(Letter_in_Index):
if Verbose_Flag:
print("index={0}, letter is {1}".format(index, letter))
offset_for_letter=index_page_offset.get(letter, False)
letters_on_index_page[number_of_index_pages]=letters_on_index_page.get(number_of_index_pages, [])
if Verbose_Flag:
print("offset_for_letter={0}, letters_on_index_page={1}".format(offset_for_letter, letters_on_index_page))
if offset_for_letter:
# check if the current letter's contents will fit, i.e., if next offset is too large
next_index=index+1
if next_index < len(Letter_in_Index):
offset_for_next_letter=index_page_offset.get(Letter_in_Index[next_index], False)
if offset_for_next_letter:
delta=(offset_for_next_letter - base_offset_of_page)
else:
delta=0
if Verbose_Flag:
print("delta={}".format(delta))
if delta > maximum_page_size:
base_offset_of_page=index_page_offset[Letter_in_Index[index]]
number_of_index_pages=number_of_index_pages+1
letters_on_index_page[number_of_index_pages]=letters_on_index_page.get(number_of_index_pages, [])
letters_on_index_page[number_of_index_pages].append(letter)
if Verbose_Flag:
print("base_offset_of_page={0}, number_of_index_pages={1}".format(base_offset_of_page, number_of_index_pages))
else:
letters_on_index_page[number_of_index_pages].append(letter)
else:
letters_on_index_page[number_of_index_pages].append(letter)
else:
letters_on_index_page[number_of_index_pages].append(letter)
print("letters_on_index_page={}".format(letters_on_index_page))
for i in range(number_of_index_pages+1):
print("i is {}".format(i))
if i == 0:
start_offset=0
else:
start_offset=index_page_offset[letters_on_index_page[i][0]]
if i == number_of_index_pages:
end_offset=len(index_page)
else:
end_offset=(index_page_offset[letters_on_index_page[i+1][0]])-1
print("start_offset={0}, end_offset={1}".format(start_offset, end_offset))
index_page_heading='<h3><a id="Quick_Index">Quick Index</h3><ul>'
for j in range(0,len(letters_on_index_page[i])):
l=letters_on_index_page[i][j]
print("i={0}, j={1}, l={2}".format(i, j, l))
index_page_heading=index_page_heading+'<li><a href="#'+id_in_Index(l)+'"><strong>'+label_in_Index(l)+'</strong></a></li>'
index_page_heading=index_page_heading+'</ul>'
if i == 0:
index_page_header=index_page_heading
else:
index_page_header=index_page_heading+'<h3>groups of words</h3>'
page=index_page_header+index_page[start_offset:end_offset]
# write out body of response as a .html page
new_file_name="stats_for_course-{0}-index-{1}.html".format(course_id, i)
with open(new_file_name, 'wb') as f:
encoded_output = bytes(page, 'UTF-8')
f.write(encoded_output)
print("index_page_offset={}".format(index_page_offset))
if __name__ == "__main__":
    main()
| 3.046875 | 3 |
tests/simulate/test_half_filling_2.py | PROMNY/pymc_pp | 2 | 12758622 | <reponame>PROMNY/pymc_pp
import pymc
import numpy as np
import pytest
class TestHalfFilling2():
    """Regression test for a half-filling Monte-Carlo simulation.

    Runs a short Falicov-Kimball-style simulation on a graphene lattice,
    cooling through a range of temperatures, and compares the recorded
    observables against reference values stored in
    ``tests/simulate/half_filling_2.csv``.
    """

    @pytest.fixture(scope='class')
    def get_results(self):
        """Run the simulation once per test class and return
        ``(expected, res)`` — the reference array loaded from CSV and the
        freshly computed observable table (rows = temperatures)."""
        # Build the model: a size-6 graphene lattice with 18 randomly
        # placed adatoms, at an initial temperature of 0.2.
        lattice = pymc.GrapheneLattice(6)
        FK = pymc.Hamiltonian(lattice, t=-1, U=2, cp=-1, T=0.2)
        FK.put_adatoms(18, "random")
        # Observables recorded at each temperature step.
        obs = pymc.ObsList([pymc.DeltaObs(FK),
                            pymc.EnergyObs(FK), pymc.CVObs(FK),
                            pymc.CorrelationObs(FK), pymc.NeObs(FK)])
        series = pymc.ObsSeries(obs, ["T"])
        sym = pymc.Simulator(FK, pymc.metropolis_numpy, obs)
        # Cooling schedule: measurements are taken at each of these T values.
        T_range = [0.2, 0.18, 0.16, 0.14, 0.12,
                   0.10, 0.08, 0.06, 0.04, 0.03, 0.02, 0.01]
        for T in T_range:
            FK.T = T
            # Short runs (100 sweeps each) keep the test quick; the
            # comparisons below use correspondingly coarse tolerances.
            sym.run_termalization(10**2)
            res = sym.run_measurements(10**2)
            series.add(res, [T])
        # Reference values produced by a previous (trusted) run.
        expected = np.loadtxt(
            "tests/simulate/half_filling_2.csv", delimiter=",")
        res = series.get_df().values
        return (expected, res)

    @pytest.mark.long
    def test_half_filling_1(self, get_results):
        # temperature column: must match the reference exactly
        expected, res = get_results
        np.testing.assert_array_equal(res[:, 0], expected[:, 0])

    @pytest.mark.long
    def test_half_filling_2(self, get_results):
        # delta (order parameter): stochastic, compared to 1 decimal place
        expected, res = get_results
        np.testing.assert_array_almost_equal(
            res[:, 1], expected[:, 1], verbose=True, decimal=1)

    @pytest.mark.long
    def test_half_filling_3(self, get_results):
        # free energy: stochastic, compared to 0 decimal places
        expected, res = get_results
        np.testing.assert_array_almost_equal(
            res[:, 2], expected[:, 2], verbose=True, decimal=0)

    @pytest.mark.long
    def test_half_filling_5(self, get_results):
        # Ne (electron number): stochastic, compared to 0 decimal places
        expected, res = get_results
        np.testing.assert_array_almost_equal(
            res[:, 5], expected[:, 5], verbose=True, decimal=0)
| 2.078125 | 2 |
cli/psym/graphql/enum/cellular_network_type.py | danielrh135568/symphony-1 | 0 | 12758623 | #!/usr/bin/env python3
# @generated AUTOGENERATED file. Do not Change!
from enum import Enum
class CellularNetworkType(Enum):
    """Cellular radio network technologies recognised by the GraphQL schema.

    Generated code: each member's value matches its GraphQL enum name.
    """

    CDMA = "CDMA"
    GSM = "GSM"
    LTE = "LTE"
    WCDMA = "WCDMA"
    # Fallback member used when the server returns an unrecognised value.
    MISSING_ENUM = ""

    @classmethod
    def _missing_(cls, value: object) -> "CellularNetworkType":
        # Map any unknown value onto MISSING_ENUM instead of raising
        # ValueError (the standard Enum behaviour), so lookups never fail.
        return cls.MISSING_ENUM
| 2.890625 | 3 |
setup.py | almirjgomes/DE_LibUtil | 0 | 12758624 | from setuptools import setup
# Package metadata / installation configuration for the DE_LibUtil
# utility library (setuptools).
setup(
    name='DE_LibUtil',
    version='0.0.19',
    # NOTE(review): packages=[''] registers the repository root as an unnamed
    # package — looks unusual; confirm this is the intended layout.
    packages=[''],
    url='https://github.com/almirjgomes/DE_LibUtil.git',
    license='MIT',
    author='<NAME>',
    author_email='<EMAIL>',
    description='LibUtil - Biblioteca de Utilidades'
)
| 1.007813 | 1 |
models/relation_classifier/train.py | MarcoFavorito/knowledge-based-telegram-bot | 0 | 12758625 | from models.ModelManager import ModelManager
from models.relation_classifier import DATA_DIR, split_line, MODEL_PATH, read_file, DATA_DEV, DATA_TRAIN
from models.relation_classifier.RelationClassifierRNNBased import RelationClassifierRNNBased
def main():
    """Train the RNN-based relation classifier and persist it.

    Loads the training and development splits, builds the vocabulary from
    the training sentences, fits the model (validating on the dev split),
    and saves the trained classifier to ``MODEL_PATH``.
    """
    train_x, train_y = read_file(DATA_TRAIN)
    dev_x, dev_y = read_file(DATA_DEV)

    classifier = RelationClassifierRNNBased()
    classifier.make_vocab(train_x)
    classifier.train(train_x, train_y, dev=(dev_x, dev_y))
    classifier.save(MODEL_PATH)


if __name__ == '__main__':
    main()
scribbles/datasets/decorators.py | ltiao/scribbles | 1 | 12758626 | <gh_stars>1-10
import numpy as np
def binarize(positive_label=3, negative_label=5):
    """
    Decorator factory that turns a multi-class ``load_data``-style function
    into a binary-classification loader.

    The wrapped function must return ``(X_train, Y_train), (X_test, Y_test)``.
    Samples labelled ``positive_label`` are kept and relabelled 1, samples
    labelled ``negative_label`` are kept and relabelled 0, and all other
    samples are dropped. Positive samples are stacked before negative ones
    in the returned arrays.

    Parameters
    ----------
    positive_label : int
        Original label mapped to class 1.
    negative_label : int
        Original label mapped to class 0.

    Examples
    --------
    .. plot::
        :context: close-figs

        import tensorflow as tf

        from scribbles.datasets import binarize
        from scribbles.plotting import plot_image_grid

        @binarize(positive_label=2, negative_label=7)
        def binary_mnist_load_data():
            return tf.keras.datasets.mnist.load_data()

        (X_train, Y_train), (X_test, Y_test) = binary_mnist_load_data()

        num_train, img_rows, img_cols = X_train.shape
        num_test, img_rows, img_cols = X_test.shape

        fig, (ax1, ax2) = plt.subplots(ncols=2)

        plot_image_grid(ax1, X_train[Y_train == 0],
                        shape=(img_rows, img_cols), nrows=10, cmap="cividis")
        plot_image_grid(ax2, X_train[Y_train == 1],
                        shape=(img_rows, img_cols), nrows=10, cmap="cividis")

        plt.show()
    """
    from functools import wraps  # local import: keeps module deps unchanged

    def _relabel(X, y, label, new_label):
        # Select the samples carrying `label` and assign them all `new_label`.
        X_sel = X[y == label]
        y_sel = np.full(len(X_sel), new_label)
        return X_sel, y_sel

    def binarize_decorator(load_data_fn):
        @wraps(load_data_fn)  # preserve the wrapped loader's name/docstring
        def new_load_data_fn():
            (X_train, Y_train), (X_test, Y_test) = load_data_fn()

            # Training split: positives (label 1) stacked before negatives (0).
            X_train_pos, Y_train_pos = _relabel(X_train, Y_train,
                                                positive_label, new_label=1)
            X_train_neg, Y_train_neg = _relabel(X_train, Y_train,
                                                negative_label, new_label=0)
            X_train_new = np.vstack([X_train_pos, X_train_neg])
            Y_train_new = np.hstack([Y_train_pos, Y_train_neg])

            # Test split: same ordering convention.
            X_test_pos, Y_test_pos = _relabel(X_test, Y_test,
                                              positive_label, new_label=1)
            X_test_neg, Y_test_neg = _relabel(X_test, Y_test,
                                              negative_label, new_label=0)
            X_test_new = np.vstack([X_test_pos, X_test_neg])
            Y_test_new = np.hstack([Y_test_pos, Y_test_neg])

            return (X_train_new, Y_train_new), (X_test_new, Y_test_new)

        return new_load_data_fn

    return binarize_decorator
| 3.078125 | 3 |
utils/bot.py | drx/archfinch | 1 | 12758627 | <reponame>drx/archfinch<filename>utils/bot.py<gh_stars>1-10
import socket
class Bot():
    """Tiny client that forwards messages to a locally-running bot daemon.

    The daemon listens on a plain TCP socket; each message is sent as a
    single ``"<channel> <message>"`` payload. Delivery is best-effort:
    if the daemon is unreachable the message is silently dropped.
    """

    def __init__(self):
        # Address of the bot's local control socket.
        self.host = 'localhost'
        self.port = 40037

    def send_message(self, channel, message):
        """Send ``message`` to ``channel``; network failures are ignored.

        The payload is encoded to bytes (required for Python 3 sockets —
        previously a str was passed, whose TypeError was silently swallowed
        so nothing was ever sent) and written with ``sendall`` so partial
        writes are not truncated.
        """
        payload = (channel + ' ' + message).encode('utf-8')
        try:
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            try:
                s.connect((self.host, self.port))
                s.sendall(payload)
            finally:
                # Always release the socket, even if connect/send fails.
                s.close()
        except socket.error:
            # Best-effort delivery: the bot being down is not an error
            # for callers. Only network errors are swallowed; programming
            # errors (e.g. non-string arguments) now propagate.
            pass


# Shared module-level instance used by the rest of the application.
bot = Bot()
| 2.390625 | 2 |
krispy/nustardo.py | KriSun95/kripsy | 2 | 12758628 | '''
Functions to go in here (I think!?):
KC: 01/12/2018, ideas-
KC: 19/12/2018, added-
~NuSTAR class
'''
from . import data_handling
import sys
#from os.path import *
import os
from os.path import isfile
import astropy
from astropy.io import fits
import astropy.units as u
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from matplotlib.colors import LogNorm
from pylab import figure, cm
from astropy.coordinates import SkyCoord
import numpy as np
import nustar_pysolar as nustar
from . import filter_with_tmrng ######Kris
from . import custom_map ######Kris
import sunpy.map
from scipy import ndimage
from scipy.optimize import curve_fit
from scipy.ndimage import rotate
import re #for regular expressions
import warnings #suppress astropy warnings
import datetime
from datetime import timedelta
from astropy.io.fits.verify import VerifyWarning
import matplotlib.dates as mdates
import pickle
import subprocess
import pytz
from skimage import restoration
# from . import interp
from scipy import interpolate
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters() # was told to do this by the machine
'''
Alterations:
KC: 22/01/2019 - .
'''
#NuSTAR class for Python
class NustarDo:
    """Load, filter, map and analyse a NuSTAR solar-pointed ("sunpos")
    event (.evt) file.

    Construct with no arguments to get a bare object whose static helper
    methods (coordinate conversions, shifting, etc.) can be used alone.
    """

    # NOTE(review): these run once at class-definition time and mutate
    # *global* numpy/warnings state, not just this class's behaviour.
    np.seterr(divide='ignore', invalid='ignore') #ignore warnings resulting from missing header info
    warnings.simplefilter('ignore', VerifyWarning)   # astropy FITS verification chatter
    warnings.simplefilter('ignore', RuntimeWarning)
    warnings.simplefilter('ignore', UserWarning)
    def __init__(self, evt_filename='', energy_range=[2.5,79], time_range = None): #set-up parameters
        """Parse a NuSTAR 'sunpos' .evt file and event-filter it.

        Parameters
        ----------
        evt_filename : str
            Path to a *sunpos* .evt file (observation converted to solar
            coordinates). An empty string returns a bare object so the
            static helper methods can still be used.
        energy_range : list of two floats
            [low, high] energy bounds in keV used when filtering events.
            NOTE(review): mutable default list — safe here as it is only
            read, never mutated.
        time_range : list of two str, or None
            Start/end times as '%Y/%m/%d, %H:%M:%S' strings; None keeps
            the whole observation (the range is then derived from the data).

        Raises
        ------
        ValueError : if the file is not a 'sunpos' file, or no counts
            survive the filtering.
        TypeError : if ``time_range`` is malformed.
        """
        #if a filename is not given then the static functions can still be used
        if evt_filename == '':
            return

        #directory of the file
        directory_regex = re.compile(r'\w+/')
        directory = directory_regex.findall(evt_filename)
        self.evt_directory = '/'+''.join(directory)

        #search of the form of stuff (no slashes included), dot, then more stuff
        evt_filename_regex = re.compile(r'\w+\.\w+')
        name_of_file = evt_filename_regex.findall(evt_filename)[0]

        #for a sunpy map object to be made then the file has to be positioned on the Sun
        sunpos_regex = re.compile(r'sunpos')
        sunpos = sunpos_regex.findall(name_of_file)
        if sunpos == []:
            raise ValueError('\nThe file must be a \'sunpos\' file, i.e. the observation is converted to appropriate solar coordinates.')

        #search for 2 digits, a non-digit, then 2 digits again
        fpm_regex = re.compile(r'\d{2}\D\d{2}')
        focal_plane_module = fpm_regex.findall(name_of_file)[0][2]

        #search for chu followed by however many consecutive digits
        chu_regex = re.compile(r'chu\d+')
        chu = chu_regex.findall(name_of_file)
        if chu != []:
            chu_state = chu[0]
        else:
            chu_state = 'not_split'

        # search for a underscore, a non-digit, and an underscore (for the mode the pipeline was run if a chu file is given)
        mode_regex = re.compile(r"_\D_")
        mode = mode_regex.findall(name_of_file)
        self.pipeline_mode = mode[0] if len(mode)>0 else ""

        #search for all seperate sub-strings composed of digits, first one in evt_filename is observation id
        obs_id_regex = re.compile(r'\d+')
        obs_id = obs_id_regex.findall(name_of_file)[0]
        self.obs_id = obs_id

        #set attributes of the file and parameters used in other functions on the class
        self.evt_filename = name_of_file
        self.fpm = focal_plane_module
        self.time_range = time_range
        self.energy_range = energy_range
        self.chu_state = chu_state
        self.rectangles = None #set so that you don't have to plot a map to get a light curve

        # for plot title: "2.5-10" style, or ">2.5" when the top is open-ended
        self.e_range_str = str(self.energy_range[0])+'-'+str(self.energy_range[1]) if self.energy_range[1]<79 else ">"+str(self.energy_range[0])

        self.rel_t = data_handling.getTimeFromFormat("2010/01/01, 00:00:00") # nustar times are measured in seconds from this date

        #extract the data within the provided parameters
        hdulist = fits.open(evt_filename) #not self.evt_filename as fits.open needs to know the full path to the file
        self.evt_data = hdulist[1].data
        self.evt_header = hdulist[1].header
        hdulist.close()

        ############*********** this is a hacky fix but will do for now ***********############
        # if Python code is used for the sunpos file creation the re-written header keywords aren't saved properly, so...
        # NOTE(review): the second clause compares TCDLT14 with `==` (not `!=`),
        # so this branch also fires when the header is already correct; the
        # rewrite is then a harmless no-op, but confirm the condition is intended.
        if (round(self.evt_header['TCDLT13'], 1)!=2.5) or (round(self.evt_header['TCDLT14'], 1)==2.5):
            self.evt_header['TCDLT13'] = 2.45810736 # x
            self.evt_header['TCDLT14'] = 2.45810736 # y

        #check evt_filename matches evt_header info
        assert obs_id == self.evt_header['OBS_ID'], 'Observation ID in the .evt filename does not match ID in the .evt header info. {} =/= {}'.format(obs_id, self.evt_header['OBS_ID'])
        assert focal_plane_module == self.evt_header['INSTRUME'][-1], 'Focal Plane Module (FPM) in the .evt filename does not match FPM in the .evt header info. {} =/= {}'.format(focal_plane_module, self.evt_header['INSTRUME'][-1])

        if self.time_range == None:
            #filter away the non grade zero counts and bad pixels
            self.cleanevt = filter_with_tmrng.event_filter(self.evt_data, fpm=focal_plane_module,
                                                           energy_low=self.energy_range[0],
                                                           energy_high=self.energy_range[1])
            #start and end time of the NuSTAR observation as datetime objects
            self.time_range = [(self.rel_t+ timedelta(seconds=np.min(self.cleanevt['TIME']))).strftime('%Y/%m/%d, %H:%M:%S'),
                               (self.rel_t + timedelta(seconds=np.max(self.cleanevt['TIME']))).strftime('%Y/%m/%d, %H:%M:%S')]
        elif len(self.time_range) == 2:
            try:
                self.cleanevt = filter_with_tmrng.event_filter(self.evt_data, fpm=focal_plane_module,
                                                               energy_low=self.energy_range[0],
                                                               energy_high=self.energy_range[1],
                                                               tmrng=self.time_range) ######Kris
            except TypeError as error:
                raise TypeError('\nTimes need to be a string in the form \'%y/%m/%d, %H:%M:%S\', '
                                'e.g.\'2018/12/25, 12:30:52\'')
        else:
            raise TypeError('\nCheck that it is only a start time and end time you are giving.')

        #if there are no counts in cleanevt
        if len(self.cleanevt) == 0:
            raise ValueError('\nThere there are no counts within these paramenters. '
                             '\nThis may be because no counts were recorded or that the paramenters are outwith the '
                             'scope of NuSTAR and/or the observation.')

        # now for the time tick marks... choose a locator density suited to the duration
        clevt_duration = np.max(self.cleanevt['TIME'])-np.min(self.cleanevt['TIME'])
        if clevt_duration > 3600*0.5:
            self.xlocator = mdates.MinuteLocator(byminute=[0, 10, 20, 30, 40, 50], interval = 1)
        elif 600 < clevt_duration <= 3600*0.5:
            self.xlocator = mdates.MinuteLocator(byminute=[0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55], interval = 1)
        elif 240 < clevt_duration <= 600:
            self.xlocator = mdates.MinuteLocator(interval = 2)
        else:
            self.xlocator = mdates.MinuteLocator(interval = 1)
@staticmethod
def shift(evt_data, pix_xshift=None, pix_yshift=None):
if pix_xshift != None:
for X in evt_data:
X['X'] = X['X'] + pix_xshift
if pix_yshift != None:
for Y in evt_data:
Y['Y'] = Y['Y'] + pix_yshift
return evt_data
@staticmethod
def arcsec_to_pixel(*args, **kwargs):
#NuSTAR values: ['crpix1'+0.5,'crpix2','cdelt1']
meta = {'centre_pix_val': [1499.5+0.5, 1500], 'arc_per_pix':[2.45810736], 'length':False}
#change list with kwargs
for key, kwarg in kwargs.items():
meta[key] = kwarg
#convert numbers so that they are easier to work with
indices_for_centre = {'x':meta['centre_pix_val'][0], 'y':meta['centre_pix_val'][1]}
assert 1 <= len(meta['arc_per_pix']) <= 2, '\'arc_per_pix\' needs to have one or two arguments only.'
if len(meta['arc_per_pix']) == 2:
delta_x = meta['arc_per_pix'][0]
delta_y = meta['arc_per_pix'][1]
elif len(meta['arc_per_pix']) == 1:
delta_x = meta['arc_per_pix'][0]
delta_y = meta['arc_per_pix'][0]
# if have an arcsec length and want the length in pixels
pixel_lengths = []
if meta['length'] == True:
for arg in args:
x_length = (arg[0] / delta_x)
y_length = (arg[1] / delta_y)
pixel_lengths.append([int(round(x_length,0)), int(round(y_length,0))])
return pixel_lengths
#input coordinates as [x,y] in arcseconds
pixel_coords = []
for arg in args:
x_index = indices_for_centre['x'] + (arg[0] / delta_x)
y_index = indices_for_centre['y'] + (arg[1] / delta_y)
pixel_coords.append([int(round(x_index,0)), int(round(y_index,0))])
return pixel_coords
@staticmethod
def pixel_to_arcsec(*args, **kwargs):
#NuSTAR values: ['crpix1'+0.5,'crpix2','cdelt1']
meta = {'centre_pix_val': [1499.5+0.5, 1500], 'arc_per_pix':[2.45810736], 'length':False}
#change list with kwargs
for key, kwarg in kwargs.items():
meta[key] = kwarg
#convert numbers so that they are easier to work with
indices_for_centre = {'x':meta['centre_pix_val'][0], 'y':meta['centre_pix_val'][1]}
assert 1 <= len(meta['arc_per_pix']) <= 2, '\'arc_per_pix\' needs to have one or two arguments only.'
if len(meta['arc_per_pix']) == 2:
delta_x = meta['arc_per_pix'][0]
delta_y = meta['arc_per_pix'][1]
elif len(meta['arc_per_pix']) == 1:
delta_x = meta['arc_per_pix'][0]
delta_y = meta['arc_per_pix'][0]
# if have a pixel length and want the length in arcsec
arcsec_lengths = []
if meta['length'] == True:
for arg in args:
x_length = arg[0] * delta_x
y_length = arg[1] * delta_y
arcsec_lengths.append([x_length, y_length])
return arcsec_lengths
#input coordinates as [col,row] in pixels
arcsec_coords = []
for arg in args:
# arg[0] is x pixel position, so column
x_arcsec = (arg[0] - indices_for_centre['x']) * delta_x
# arg[1] is y pixel position, so row
y_arcsec = (arg[1] - indices_for_centre['y']) * delta_y
arcsec_coords.append([x_arcsec, y_arcsec])
return arcsec_coords
def nustar_shift_map(self, x_shift_arc, y_shift_arc):
#find shift in pix
shift_pix = self.arcsec_to_pixel([x_shift_arc, y_shift_arc], length=True)
#shift data now
shift_cleanevt = self.shift(self.cleanevt, pix_xshift=shift_pix[0][0], pix_yshift=shift_pix[0][1])
self.cleanevt = shift_cleanevt
    @staticmethod
    def fov_rotation(evt_data):
        """ Returns the average rotation of the NuSTAR FoV from the gradient of the edges between
        det0&3 and 1&2.

        Parameters
        ----------
        evt_data : event list (e.g. the filtered sunpos .evt data)
                Must provide the fields "det_id" (0-3), "RAWX"/"RAWY" (raw detector
                pixel coordinates) and "X"/"Y" (solar coordinates).

        Returns
        -------
        A float of the average rotation from "North" in degrees where anticlockwise is positive.
        This assumes the rotation is between 90 and -90 degrees.

        Notes
        -----
        Relies on the module-level helpers ``collectSameXs``, ``maxRowInCol``,
        ``getXandY``, ``straightLine`` and ``getDegrees`` defined elsewhere in
        this file.
        """
        ## split the detectors
        d0_counts = evt_data[evt_data["det_id"]==0]
        d1_counts = evt_data[evt_data["det_id"]==1]
        d2_counts = evt_data[evt_data["det_id"]==2]
        d3_counts = evt_data[evt_data["det_id"]==3]

        ## now split up for the coordinates
        rawx0, rawy0, solx0, soly0 = d0_counts["RAWX"], d0_counts["RAWY"], d0_counts["X"], d0_counts["Y"]
        rawx1, rawy1, solx1, soly1 = d1_counts["RAWX"], d1_counts["RAWY"], d1_counts["X"], d1_counts["Y"]
        rawx2, rawy2, solx2, soly2 = d2_counts["RAWX"], d2_counts["RAWY"], d2_counts["X"], d2_counts["Y"]
        rawx3, rawy3, solx3, soly3 = d3_counts["RAWX"], d3_counts["RAWY"], d3_counts["X"], d3_counts["Y"]

        args = [[rawx0, rawy0, solx0, soly0, 0],
                [rawx1, rawy1, solx1, soly1, 1],
                [rawx2, rawy2, solx2, soly2, 2],
                [rawx3, rawy3, solx3, soly3, 3]]

        # accumulate the edge angle (in degrees) measured on each detector
        gradients = 0
        for a in args:
            rawx, rawy, solx, soly, det = a

            # use the pixel edges between det 0&3 and 1&2, use the raw pixel coordinates for this
            # orientation from the nustar_swguide.pdf, Figure 3
            # (dets 0 and 2 have raw x/y swapped relative to dets 1 and 3)
            if det==0:
                cols = collectSameXs(rawy, rawx, solx, soly)
                m_row_per_col = maxRowInCol(cols)
            elif det==1:
                cols = collectSameXs(rawx, rawy, solx, soly)
                m_row_per_col = maxRowInCol(cols)
            elif det==2:
                cols = collectSameXs(rawy, rawx, solx, soly)
                m_row_per_col = maxRowInCol(cols)
            elif det==3:
                cols = collectSameXs(rawx, rawy, solx, soly)
                m_row_per_col = maxRowInCol(cols)

            # working with rawx and y to make sure using correct edge then find the
            # corresponding entries in solar coords
            aAndY = getXandY(m_row_per_col)
            x, y = aAndY[0], aAndY[1]

            ## do I want to filter some out?
            ## leave for now
            #if det in [0, 1]:
            #    x = x[y>np.median(y)]
            #    y = y[y>np.median(y)]
            #elif det in [2, 3]:
            #    x = x[y<np.median(y)]
            #    y = y[y<np.median(y)]

            # fit a straight line to the edge
            popt, pcov = curve_fit(straightLine, x, y, p0=[0, np.mean(y)])

            gradients += getDegrees(popt[0])

        # mean of the four per-detector edge angles
        return gradients/len(args)
def nustar_deconv(self, map_array=None, psf_array=None, it=10, OA2source_offset=None, hor2SourceAngle=None, clip=False):
"""Class mathod to take a map (map_array) and a point spread function (psf_array) and deconvolve using
the Richardson-Lucy method with a number of iterations (it).
Parameters
----------
map_array : 2d array
The map of the data. Should be over the field of view. If "None" then the self.nustar_map class
attribute is used.
Default: None
psf_array : file string or 2d array
The PSF you want to use. This can be a string of the fits file for the PSF or a 2d numpy array.
If "None" then several common paths for nu'+self.fpm+'2dpsfen1_20100101v001.fits' are check and
if the file cannot be found the original map is returned. Currently this won't be rescaled if
it is a different resolution to the map data, it will just crash instead.
Default: None
it : int
Number of iterations for the deconvolution.
Default: 10
OA2source_offset : float
Angle subtended between the optical axis (OA), observer, and the X-ray source in arcminutes
(0<=OA2source_angle<8.5 arcminutes), i.e. radial distance to the source from the OA. Chooses
the correct PSF data to use.
Default: None
hor2SourceAngle : float
Angle subtended between horizontal through the optical axis (OA), and the line through the X-ray source and OA in degrees.
Clockwise is positive and anticlockwise is negative. Symmetric reflected in the origin so -90<=hor2SourceAngle<=90.
Default: None
clip : bool
Set values >1 and <-1 to 1 and -1 respectively after each iteration. Unless working with a
normalised image this should be "False" otherwise it's a mess.
Default: False
Returns
-------
A 2d numpy array of the deconvolved map.
Examples
--------
*Use within the class:
NU_SUNPOS_FILE, ITERATIONS = "nustar_filename", 10
nu = NustarDo(NU_SUNPOS_FILE)
nu.deconvolve['apply'] = True
nu.deconvolve['iterations'] = ITERATIONS
nu.nustar_setmap(submap='FoV')
deconv_map = nu.nustar_map.data
*Use without class:
STRING, FPM = "psf_filename", "A" or "B"
nu = NustarDo()
nu.fpm = FPM
nu.nustar_map = Sunpy NuSTAR map
deconv_map = nu.nustar_deconv(psf_array=STRING)
-or-
MAP, ARRAY, FPM = nustar data 2d numpy array, psf 2d numpy array, "A" or "B"
nu = NustarDo()
nu.fpm = FPM
deconv_map = nu.nustar_deconv(map_array=MAP, psf_array=ARRAY)
"""
## for defaults
if type(map_array) == type(None):
map_array = self.nustar_map.data
if type(psf_array) == type(None):
# defualt is to check for the nu'+self.fpm+'2dpsfen1_20100101v001.fits' PSF file (the one used in Glesener code)
trials = ['/opt/caldb/data/nustar/fpm/bcf/psf/nu'+self.fpm+'2dpsfen1_20100101v001.fits',
'/usr/local/caldb/data/nustar/fpm/bcf/psf/nu'+self.fpm+'2dpsfen1_20100101v001.fits',
'/home/kris/Desktop/link_to_kris_ganymede/old_scratch_kris/data_and_coding_folder/nustar_psfs/nu'+self.fpm+'2dpsfen1_20100101v001.fits',
'/home/kris/Desktop/nustar_psfs/nu'+self.fpm+'2dpsfen1_20100101v001.fits']
if type(OA2source_offset) != type(None):
psf_OA_angles = np.arange(0,9,0.5) # angles of 0 to 8.5 arcmin in 0.5 arcmin increments
index = np.argmin([abs(psfoaangles - OA2source_offset) for psfoaangles in psf_OA_angles]) # find the closest arcmin array
hdr_unit = index+1 # header units 1 to 18 (one for each of the arcmin entries) and 0 arcmin would be hdr_unit=1, hence the +1
# print("using angle: ", hdr_unit)
else:
hdr_unit = 1
#assume we can't find the file
found_psf = False
for t in trials:
# try the files, if one exists use it
if os.path.exists(t):
psfhdu = fits.open(t)
psf_h = psfhdu[hdr_unit].header['CDELT1'] # increment in degrees/pix
psf_array = psfhdu[hdr_unit].data
psfhdu.close()
psf_used = t
found_psf = True
# if we still couldn't find a defualt PSF then print this, set self.deconvole to False, and just return the original map
if found_psf == False:
print('Could not find PSF file. Please provide the PSF filename or array.')
print('Returning original map.')
self.deconvolve['apply'] = False
self.deconv_settings_info = {'map':None, 'psf_file':None, 'psf_array':None, 'iterations':None}
return map_array
# check same res, at least in 1-D
assert psf_h*3600 == self.nustar_map.meta['CDELT1'], "The resolution in the PSF and the current map are different."
# if you have provided your own psf file use that instead
elif type(psf_array) == str:
psf_used = psf_array
psfhdu = fits.open(psf_array)
psf_h = psfhdu[1].header['CDELT1'] # increment in degrees/pix
psf_array = psfhdu[1].data
psfhdu.close()
# check same res, at least in 1-D
assert psf_h*3600 == self.nustar_map.meta['CDELT1'], "The resolution in the PSF and the current map are different."
else:
psf_used = 'Custom Array. Hopefully some numbers though.'
if type(hor2SourceAngle)!=type(None):
assert -90<=hor2SourceAngle<=90, "Please give \"hor2SourceAngle\" as an angle from horzontal to the source -90<=hor2SourceAngle<=90 where clockwise is positive and anticlockwise is negative"
psf_array = rotate(psf_array, hor2SourceAngle, reshape=True)
# deconvolve
deconvolved_RL = restoration.richardson_lucy(map_array, psf_array, iterations=it, clip=False)
# deconvolution info for later use
self.deconv_settings_info = {'map':map_array, 'psf_file':psf_used, 'psf_array':psf_array, 'iterations':it}
return deconvolved_RL
@staticmethod
def find_boxOfData(array):
'''If there is an array with loads of 0s or nans and a region of numbers then this returns the rows
and columns the block of numbers is encased between'''
array = np.array(array)
array[np.isnan(array)] = 0
# first and last row
dataRows = []
for i,row in enumerate(array):
rSum = np.sum(row)
if rSum > 0:
dataRows.append(i)
between_rows = [dataRows[0], dataRows[-1]]
# first and last column
dataCols = []
for j,col in enumerate(array.T):
cSum = np.sum(col)
if cSum > 0:
dataCols.append(j)
between_cols = [dataCols[0], dataCols[-1]]
return {'rowIndices':between_rows, 'columnIndices':between_cols}
@staticmethod
def create_submap(sunpy_map_obj, lose_off_limb, submap):
if (lose_off_limb == True) and (len(submap) == 0):
#fix really large plot, instead of going from -3600 to 3600 in x and y
bl = SkyCoord(-1200*u.arcsec, -1200*u.arcsec, frame=sunpy_map_obj.coordinate_frame)
tr = SkyCoord(1200*u.arcsec, 1200*u.arcsec, frame=sunpy_map_obj.coordinate_frame)
return sunpy_map_obj.submap(bl,top_right=tr)
elif len(submap) == 4: #Submap to plot?
bottom_left = {'x':submap[0], 'y':submap[1]}
top_right = {'x':submap[2], 'y':submap[3]}
bl = SkyCoord(bottom_left['x']*u.arcsec, bottom_left['y']*u.arcsec, frame=sunpy_map_obj.coordinate_frame)
tr = SkyCoord(top_right['x']*u.arcsec, top_right['y']*u.arcsec, frame=sunpy_map_obj.coordinate_frame)
return sunpy_map_obj.submap(bl,top_right=tr)
if (lose_off_limb == False):
return sunpy_map_obj
else:
raise TypeError('\nCheck the submap coordinates that were given please. It should be a list with four '
'float/int entries in arcseconds in the form [bottom left x, bottom left y, top right x, '
'top right y].')
if (self.deconvolve['apply'] == True) and (self.gaussian_filter['apply'] == True):
print('Caution! Did you mean to set deconvolve AND gaussian blurr to True? If so, then the'
'deconvolution will happen first then the Gaussian filter is applied.')
# might be best to only allow one of these at a time, either deconvolve OR gaussian filter
deconvolve = {'apply':False, 'iterations':10, 'OA2source_offset':None, 'hor2SourceAngle':None, 'clip':False} # set before nustar_setmap to run deconvolution on map
gaussian_filter = {'apply':False, 'sigma':2, 'mode':'nearest'}
sub_lt_zero = np.nan # replace less than zeroes with this value for plotting in a linear scale
own_map = None # if you already have a map that you want a submap of then set this, be careful not to time normalize again though
def nustar_setmap(self, time_norm=True, lose_off_limb=True, limits=None,
                  submap=None, rebin_factor=1, norm='linear', house_keeping_file=None):
    """Build and return self.rsn_map: a sunpy map of the filtered events,
    optionally livetime-normalised (counts/s), deconvolved, Gaussian
    smoothed, rebinned and colour-scaled ready for nustar_plot().

    Parameters
    ----------
    time_norm : bool
        Divide counts by (average livetime * duration) so units are counts/s.
    lose_off_limb : bool
        Forwarded to self.create_submap().
    limits : list of two floats or None
        Colour-scale [vmin, vmax]; None/[] means derive from the data.
    submap : list, str ('FOV' etc.) or None
        Region to cut the map down to, in arcsec.
    rebin_factor : float
        Pixel rebinning factor (1 = no rebinning).
    norm : str
        'linear' or 'lognorm' colour normalisation.
    house_keeping_file : str or None
        .hk file passed to self.livetime() for the livetime correction.

    Returns
    -------
    sunpy.map.Map (also stored as self.rsn_map).
    """
    # adapted from Iain's python code
    # Map the filtered evt, into one corrected for livetime (so units count/s)
    if type(self.own_map) == type(None):
        self.nustar_map = custom_map.make_sunpy(self.cleanevt, self.evt_header, norm_map=False)
    else:
        self.nustar_map = self.own_map
        if time_norm == True:
            # NOTE(review): input() returns a *string* ('True'/'False'), not a bool,
            # so the later `self.time_norm == True` test is False either way and the
            # livetime correction is silently skipped -- confirm this is intended.
            time_norm = input('Caution! Do you mean to time normalize your \'own_map\'? True or False: ')
    # field of view in arcseconds
    FoVlimits = self.find_boxOfData(self.nustar_map.data)
    bottom_left = self.pixel_to_arcsec([FoVlimits['columnIndices'][0], FoVlimits['rowIndices'][0]])[0]
    top_right = self.pixel_to_arcsec([FoVlimits['columnIndices'][1]+1, FoVlimits['rowIndices'][1]+1])[0] # plus one as index stops one short
    self.FoV = [*bottom_left, *top_right]
    if limits == None:
        limits = []
    if submap == None:
        submap = []
    elif type(submap) == str:
        if submap.upper() == 'FOV':
            submap = self.FoV
        else:
            print('The only string input to submap that is supported at the moment is FOV, fov, FoV, etc.')
    self.submap = submap
    self.time_norm = time_norm
    if self.time_norm == True:
        self.livetime(hk_filename=house_keeping_file, set_up_plot=False, show_fig=False)
        #livetime correction
        time_range = [(data_handling.getTimeFromFormat(tm) - self.rel_t).total_seconds() for tm in self.time_range]
        indices = ((self.hk_times>=time_range[0]) & (self.hk_times<time_range[1]))
        ltimes_in_range = self.hk_livetimes[indices]
        livetime = np.average(ltimes_in_range)
        # counts -> counts per live second over the selected time range
        lc_cor_nustar_map = self.nustar_map.data / (livetime * (time_range[1] - time_range[0]))
        self.nustar_map = sunpy.map.Map(lc_cor_nustar_map, self.nustar_map.meta)
    if (self.deconvolve['apply'] == False):
        self.nustar_map = self.create_submap(self.nustar_map, lose_off_limb, self.submap)
    elif (self.deconvolve['apply'] == True):
        # make sure it's over the FoV
        self.nustar_map = self.create_submap(self.nustar_map, lose_off_limb, self.FoV)
        dconv = self.nustar_deconv(it=self.deconvolve['iterations'], OA2source_offset=self.deconvolve['OA2source_offset'],
                                   hor2SourceAngle=self.deconvolve['hor2SourceAngle'], clip=self.deconvolve['clip'])
        # make new map
        self.nustar_map = sunpy.map.Map(dconv, self.nustar_map.meta)
        # now cut to the shape you want
        self.nustar_map = self.create_submap(self.nustar_map, lose_off_limb, self.submap)
    if self.gaussian_filter['apply'] == True:
        gaussian_width = self.gaussian_filter['sigma']
        m = self.gaussian_filter['mode']
        #Apply a guassian blur to the data to bring out the faint feature
        dd = ndimage.gaussian_filter(self.nustar_map.data, gaussian_width, mode=m)
        # colour-scale limits: auto from the data, or as supplied by the caller
        if limits == []:
            dmin = np.min(dd[np.nonzero(self.nustar_map.data)])#*1e6 factor was here as the lowest value will come (came from dd) from the gaussian
            #filter and not the actual lowest count rate hence the factor
            dmax = np.max(dd[np.isfinite(self.nustar_map.data)])
        elif len(limits) == 2:
            if norm == 'lognorm':
                if limits[0] <= 0:
                    # a log scale cannot show values <= 0, so clip the lower limit
                    dmin = 0.1
                    dmax=limits[1]
                else:
                    dmin=limits[0]
                    dmax=limits[1]
            elif norm == 'linear':
                dmin=limits[0]
                dmax=limits[1]
        else:
            raise TypeError('\nCheck the limits that were given please.')
    else:
        dd = self.nustar_map.data
        if limits == []:
            finite_vals = dd[np.isfinite(dd)]
            dmin = np.min(finite_vals[np.nonzero(finite_vals)])
            dmax = np.max(finite_vals)
        elif len(limits) == 2:
            if norm == 'lognorm':
                if limits[0] <= 0:
                    # a log scale cannot show values <= 0, so clip the lower limit
                    dmin = 0.1
                    dmax=limits[1]
                else:
                    dmin=limits[0]
                    dmax=limits[1]
            elif norm == 'linear':
                dmin=limits[0]
                dmax=limits[1]
        else:
            raise TypeError('\nCheck the limits that were given please. It should be a list with two float/int '
                            'entries')
    self.dmin = dmin # make it possible to get min and max normalisation values of the NuSTAR map
    self.dmax = dmax
    # Tidy up before plotting
    dd[dd < dmin]=0
    nm = sunpy.map.Map(dd, self.nustar_map.meta)
    if rebin_factor != 1:
        #can rebin the pixels if we want to further bring out faint features
        #set to 1 means no actual rebinning
        nx,ny = np.shape(nm.data)
        if rebin_factor >= 1/nx and rebin_factor >= 1/ny:
            dimensions = u.Quantity([nx*rebin_factor, ny*rebin_factor], u.pixel)
            rsn_map = nm.resample(dimensions)
        else:
            raise TypeError(f'\nRebin factor must be greater than one over the x,y dimensions (1/{nx} and '
                            f'1/{ny}) as to rebin to get one, or more, pixel(s) fro the entire image, i.e. can\'t rebin to half a pixel.')
    elif rebin_factor == 1:
        rsn_map = nm
    del nm
    if norm == 'linear':
        #change all zeros to NaNs so they appear white in the plot otherwise zeros appear as the lowest colour
        #on the colourbar
        rsn_map_data = rsn_map.data
        rsn_map_data[rsn_map_data <= 0] = self.sub_lt_zero
        rsn_map = sunpy.map.Map(rsn_map_data, rsn_map.meta)
        # Setup the scaling of the map and colour table
        rsn_map.plot_settings['norm'] = colors.Normalize(vmin=dmin,vmax=dmax)
        rsn_map.plot_settings['cmap'] = cm.get_cmap('Spectral_r')
    elif norm == 'lognorm':
        #log(0) produces a NaN (-inf) here anyway so appears white
        # Setup the scaling of the map and colour table
        rsn_map.plot_settings['norm'] = colors.LogNorm(vmin=dmin,vmax=dmax)
        rsn_map.plot_settings['cmap'] = cm.get_cmap('Spectral_r')
    self.rsn_map = rsn_map
    return rsn_map
# --- plot appearance defaults used by nustar_plot() ---
annotations = {'apply':False, 'text':'Some text', 'position':(0,0), 'color':'black', 'fontsize':12, 'weight':'normal'}
rcParams_default_setup = True # set False to keep your own matplotlib rcParams
cbar_title = 'Counts'
ax_label_size = 18
@staticmethod
def draw_solar_grid(rsnmap, axes):
    """Draw the solar limb plus a tick- and label-free heliographic
    (Stonyhurst) grid overlay on the given WCS axes."""
    rsnmap.draw_limb(color='black', linewidth=1, linestyle='dashed', zorder=0)
    # Manually plot a heliographic overlay - hopefully future no_ticks option in draw_grid
    overlay = axes.get_coords_overlay('heliographic_stonyhurst')
    grid_lon, grid_lat = overlay[0], overlay[1]
    # hide every tick and tick label on both overlay coordinates
    for coord in (grid_lon, grid_lat):
        coord.set_ticks_visible(False)
        coord.set_ticklabel_visible(False)
    grid_lon.coord_wrap = 180
    grid_lon.set_major_formatter('dd')
    overlay.grid(color='grey', linewidth=0.5, linestyle='dashed', zorder=0)
# optional list of strings exec'd by execute_plt() before the grid is drawn
# in nustar_plot(); None disables the feature
plt_plot_lines = None
@staticmethod
def execute_plt(*arg):
    """
    Run each given string as a line of Python code (intended for extra
    matplotlib plotting commands applied to the current figure).

    SECURITY NOTE: exec() runs arbitrary code -- only pass strings you
    wrote yourself, never untrusted input.

    # Example
    file = 'file_sunpos.evt'
    nu = nustardo.NustarDo(file)
    plt.figure(figsize=(10,10))
    nu.nustar_setmap(submap="fov")
    x,y = [0, 200], [0, 200]
    nu.plt_plot_lines = [f'plt.plot({x},{y}, marker="o", ms=10, c="r")']
    nu.nustar_plot(show_fig=False)
    plt.show()
    """
    for a in arg:
        exec(a)
def nustar_plot(self, boxes=None, show_fig=True, save_fig=None, usr_title=None, draw_grid=True):
    """Plot self.rsn_map (built by nustar_setmap) with title, axis labels,
    colour bar, optional heliographic grid, user annotations and optional
    rectangular region boxes.

    Parameters
    ----------
    boxes : None, [bx,by,tx,ty] or list of such lists
        Rectangles (arcsec) to draw on the map; also stored as self.rectangles.
    show_fig : bool
        Call plt.show() at the end.
    save_fig : str or None
        If given, filename to save the figure to (300 dpi).
    usr_title : str or None
        Override the auto-generated title.
    draw_grid : bool
        Draw the limb + heliographic grid via draw_solar_grid().
    """
    # adapted from Iain's python code
    if self.rcParams_default_setup:
        matplotlib.rcParams['font.sans-serif'] = "Arial"
        matplotlib.rcParams['font.family'] = "sans-serif"
        plt.rcParams["figure.figsize"] = (10,8)
        plt.rcParams['font.size'] = 18
        plt.rcParams['axes.facecolor']='white'
        plt.rcParams['savefig.facecolor']='white'
    # Start the plot - many things here just to make matplotlib look decent
    self.rectangles = boxes
    #fig = plt.figure(figsize=(9, 8), frameon=False)
    ax = plt.subplot(projection=self.rsn_map, frame_on=False) #rsn_map nustar_submap
    self.axes = ax
    ax.set_facecolor((1.0, 1.0, 1.0))
    self.rsn_map.plot()
    # can't plot properly if the grid is drawn first so this allows plt.plot lines to be passed an executed before the grid in drawn
    if type(self.plt_plot_lines)!=type(None):
        self.execute_plt(*self.plt_plot_lines)
    if self.annotations['apply'] == True:
        plt.annotate(self.annotations['text'], self.annotations['position'], color=self.annotations['color'], fontsize=self.annotations['fontsize'], weight=self.annotations['weight'])
    if draw_grid:
        self.draw_solar_grid(self.rsn_map, ax)
    # Tweak the titles and labels
    title_obsdate = self.rsn_map.date.strftime('%Y-%b-%dT%H:%M:%S.%f')[:-13] #'{:.20}'.format('{:%Y-%b-%d}'.format(self.rsn_map.date))
    fpm = 'FPM'+self.fpm
    title_obstime_start = self.time_range[0][-8:]
    title_obstime_end = self.time_range[1][-8:]
    if type(usr_title) == type(None):
        if self.chu_state == 'not_split':
            ax.set_title('NuSTAR '+self.e_range_str+' keV '+fpm+' '+ title_obsdate+' '+title_obstime_start+' to '+title_obstime_end)
        else:
            ax.set_title('NuSTAR '+self.e_range_str+' keV '+fpm+' '+self.chu_state+' '+ title_obsdate+' '+title_obstime_start+' to '+title_obstime_end)
    else:
        ax.set_title(usr_title)
    ax.set_ylabel('y [arcsec]', fontsize=self.ax_label_size)
    ax.set_xlabel('x [arcsec]', fontsize=self.ax_label_size)
    tx, ty = ax.coords
    tx.set_major_formatter('s')
    ty.set_major_formatter('s')
    ax.grid(False)
    # Add a colour bar
    if self.time_norm == True:
        plt.colorbar(fraction=0.035, pad=0.03,label=self.cbar_title+' $s^{-1}$')
    else:
        plt.colorbar(fraction=0.035, pad=0.03,label=self.cbar_title)
    if boxes is not None:
        # a single box [bx,by,tx,ty], else a list of boxes (each gets a label)
        if np.shape(boxes)==(4,):
            rect = boxes
            bottom_left_rectangle = SkyCoord(rect[0]*u.arcsec, rect[1]*u.arcsec, frame=self.rsn_map.coordinate_frame)
            length = rect[2] - rect[0]
            height = rect[3] - rect[1]
            self.rsn_map.draw_rectangle(bottom_left_rectangle, width=length*u.arcsec, height=height*u.arcsec, color='black')
        else:
            b = 1
            for rect in boxes:
                bottom_left_rectangle = SkyCoord(rect[0]*u.arcsec, rect[1]*u.arcsec, frame=self.rsn_map.coordinate_frame)
                length = rect[2] - rect[0]
                height = rect[3] - rect[1]
                self.rsn_map.draw_rectangle(bottom_left_rectangle, width=length*u.arcsec, height=height*u.arcsec, color='black')
                for_text = self.arcsec_to_pixel([rect[0]-10,rect[3]+20], centre_pix_val= [self.rsn_map.meta['crpix1']+0.5, self.rsn_map.meta['crpix2']])
                plt.text(for_text[0][0], for_text[0][1], 'Box '+str(b), fontsize=10)
                b += 1
    if save_fig != None:
        plt.savefig(save_fig, dpi=300, bbox_inches='tight')
    if show_fig == True:
        # NOTE(review): plt.show() takes a `block` argument; 'all' is just truthy
        # here -- confirm plt.show() alone was intended
        plt.show('all')
def nustar_peek(self):
    """Quick-look helper: build the map with all default settings, then
    display it (equivalent to nustar_setmap() followed by nustar_plot())."""
    self.nustar_setmap()
    self.nustar_plot()
@staticmethod
def stepped_lc_from_hist(x, y, inc_edges=True):
"""Takes an x and y input, duplicates the x values and y values with the offset as to produce a new x and y which
will produce a stepped graph once all the scatter points are plotted.
Parameters
----------
x : 1-d list/array
This is the original set of x values or, in the case for a histogram, the bin edges.
y : 1-d list/array
This is the original set of y values.
inc_edges : bool
This determines whether the ends should go from their value to zero (True) or stop where they are (False).
Default: True
Returns
-------
New x and y values that, when plotted, will produce a stepped graph. Can be used to represent binning along the x
axis.
"""
if len(x) == len(y)+1: #since histogram gives one more as they are the boundaries of the bins
old_x = x
x = x[:-1]
elif len(x) == len(y):
x = x #not necessary, but more readable just now
else:
raise ValueError('Either the x-axis array is the edge of the bins (len(x) == len(y)+1) or the x-axis is the '
'value for the beginning of each bin (len(x) == len(y)), you haven\'t satisfied either of '
'these.')
new_x = np.array(np.zeros(2*len(x)))
new_y = np.array(np.zeros(2*len(y)))
for i in range(len(x)): #x and y should be the same length to plot anyway
if i == 0: #start with the 1st and 2nd x value having the same y.
new_x[i] = x[i]
new_y[2*i], new_y[2*i+1] = y[i], y[i]
elif i == len(x)-1: #the last new_x should be one beyond the last x as this value for the start of its bin
if len(x) == len(y)+1:
new_x[2*i-1], new_x[2*i], new_x[2*i+1] = x[i], x[i], old_x[-1]
elif len(x) == len(y):
new_x[2*i-1], new_x[2*i], new_x[2*i+1] = x[i], x[i], x[i]+(x[i]-x[i-1])
new_y[2*i] , new_y[2*i+1] = y[i], y[i]
break
else: #else keep the pattern going that two adjacent x's should share a y
new_x[2*i-1], new_x[2*i] = x[i], x[i]
new_y[2*i], new_y[2*i+1] = y[i], y[i]
if inc_edges == True: #create first and last coordinates to have a new_y of zero
new_x = np.insert(new_x, 0, [new_x[0]])
new_x = np.append(new_x,[new_x[-1]])
new_y = np.insert(new_y, 0, [0])
new_y = np.append(new_y,[0])
return new_x, new_y
@staticmethod
def dt_to_md(dt_array):
    """Convert a datetime (or list of datetimes) into matplotlib's float
    date representation, returning a numpy array of the converted values."""
    seq = dt_array if type(dt_array) == list else [dt_array]
    converted = np.zeros(len(seq))
    for idx, moment in enumerate(seq):
        converted[idx] = mdates.date2num(moment)
    return converted
@staticmethod
def spatial_filter(evt_data, sub_region_in_pixels):
x = evt_data['X']
y = evt_data['Y']
#find indices within the x and y pixel range
indices = (sub_region_in_pixels[0][0] < x)&(x<= sub_region_in_pixels[1][0]) & \
(sub_region_in_pixels[0][1] < y)&(y <= sub_region_in_pixels[1][1])
evt_data = evt_data[:len(indices)][indices] # [:len(indices)] is a quick fix, doesn't work otherwise if cleanevt is loaded from pickle
return evt_data
@staticmethod
def time_filter(evtdata, tmrng=None):
''' ***** From filter function ***** >4x quicker to just filter with time than with full filter ***** '''
if tmrng is None:
tmrng = [evtdata['TIME'][0], evtdata['TIME'][-1]]
elif tmrng is not None:
tstart = data_handling.getTimeFromFormat(tmrng[0]) #date must be in this format 'yyyy/mm/dd, HH:MM:SS'
tend = data_handling.getTimeFromFormat(tmrng[1])
rel_t = data_handling.getTimeFromFormat("2010/01/01, 00:00:00") #the date NuSTAR times are defined from
tstart_s = (tstart - rel_t).total_seconds() #both dates are converted to number of seconds from 2010-Jan-1
tend_s = (tend - rel_t).total_seconds()
tmrng = [tstart_s, tend_s]
time_filter = ( (evtdata['TIME']>tmrng[0]) & (evtdata['TIME']<tmrng[1]) )
inds = (time_filter).nonzero()
goodinds=inds[0]
return evtdata[goodinds]
@staticmethod
def nustar_file_finder(start_directory='', obs_id='', descriptor='', fpm='', ext=''):
full_filename = None
file_directory = None
file_name = None
#expression for everything that ends in a slash
search_directory_regex = re.compile(r'\w+/')
#find all the folders in the evt directory (they end with a slash)
search_directory = search_directory_regex.findall(start_directory)
# search the folder the evt file is in first
sd = '/'+''.join(search_directory)
for in_dir in os.listdir(sd):
if in_dir == 'nu' + obs_id + fpm + descriptor + ext:
full_filename = os.path.join(sd, in_dir)
file_directory = sd
file_name = in_dir
return full_filename, file_directory, file_name
#don't includce the last folder to go back a directory
search_directory = '/'+''.join(search_directory[:-1]) #go back a directory to search for the house keeping file
for _dirpath, _dirnames, _filenames in os.walk(search_directory):
for _file in _filenames:
if _file == 'nu' + obs_id + fpm + descriptor + ext:
full_filename = os.path.join(_dirpath, _file)
file_directory = _dirpath
file_name = _file
return full_filename, file_directory, file_name
return full_filename, file_directory, file_name
def livetime(self, hk_filename=None, set_up_plot=True, show_fig=True):
    #file = '/Users/kris/Documents/PhD/data/nustar/nu80414201001A_fpm.hk'
    '''
    Load the housekeeping (.hk) file matching the loaded .evt file and store
    its times (self.hk_times) and livetime fractions (self.hk_livetimes);
    optionally plot livetime fraction against time.

    This has to be moved above the time profile function so it is defined to be called
    '''
    if self.rcParams_default_setup:
        matplotlib.rcParams['font.sans-serif'] = "Arial"
        matplotlib.rcParams['font.family'] = "sans-serif"
        plt.rcParams["figure.figsize"] = (10,6)
        plt.rcParams['font.size'] = 18
    if hk_filename == None:
        # no file given: look for the .hk next to (or above) the .evt file
        hk_filename, self.hk_directory, self.hk_filename = self.nustar_file_finder(start_directory=self.evt_directory, obs_id=self.obs_id, descriptor='_fpm', fpm=self.fpm, ext='.hk')
        if hk_filename == None: #if there is still no hk_filename then there won't be one used
            print('Unable to find appropriate .hk file.')
            self.hk_times = 0
            self.hk_livetimes = [] # so the this length is 0
            return #stops the function here but doesn't stop the code, this is the same as 'return None'
    # extract the obs id (digits) and FPM (capital letter) from the filename
    name_of_hk_file_regex = re.compile(r'\w+\.\w+')
    name_of_hk_file = name_of_hk_file_regex.findall(hk_filename)[0]
    hk_obs_id_regex = re.compile(r'\d+')
    hk_obs_id = hk_obs_id_regex.findall(name_of_hk_file)[0]
    hk_fpm_regex = re.compile(r'[A-Z]')
    hk_fpm = hk_fpm_regex.findall(name_of_hk_file)[0]
    #check .evt file and .hk file match
    assert self.obs_id == hk_obs_id, 'The observation id from the .evt file and the .hk are different, i.e. {} =/= {}'.format(self.obs_id, hk_obs_id)
    assert self.fpm == hk_fpm, 'The FPM from the .evt file and the .hk are different, i.e. {} =/= {}'.format(self.fpm, hk_fpm)
    hdulist = fits.open(hk_filename)
    self.hk_header = hdulist[1].header
    self.hk_data = hdulist[1].data
    hdulist.close()
    #check .hk filename matches its header info
    assert self.hk_header['OBS_ID'] == hk_obs_id, 'The observation id from the .hk file header and the .hk filename are different, i.e. {} =/= {}'.format(self.hk_header['OBS_ID'], hk_obs_id)
    assert self.hk_header['INSTRUME'][-1] == hk_fpm, 'The FPM from the .hk header and the .hk filename are different, i.e. {} =/= {}'.format(self.hk_header['INSTRUME'][-1], hk_fpm)
    self.hk_times = self.hk_data['time']
    self.lvt_times = [(self.rel_t + timedelta(seconds=t)) for t in self.hk_times]
    self.hk_livetimes = self.hk_data['livetime']
    if set_up_plot:
        hktime = self.hk_times - self.hk_times[0]
        dt_times = self.lvt_times
        lt_start_hhmmss = str((self.rel_t + timedelta(seconds=np.min(self.hk_times))).strftime('%Y/%m/%d, %H:%M:%S'))
        fig = plt.figure()
        ax = plt.axes()
        plt.semilogy(self.dt_to_md(dt_times), self.hk_livetimes, drawstyle='steps-mid')
        plt.title('Livetime - '+lt_start_hhmmss[:10]) #get the date in the title
        plt.xlabel('Start Time - '+lt_start_hhmmss[12:])
        plt.ylabel('Livetime Fraction')
        plt.xlim([data_handling.getTimeFromFormat(t) for t in self.time_range])#[dt_times[0], dt_times[-1]])
        plt.ylim([0,1])
        fmt = mdates.DateFormatter('%H:%M')
        ax.xaxis.set_major_formatter(fmt)
        ax.xaxis.set_major_locator(self.xlocator) # xlocator was plt.LinearLocator(9)
        plt.xticks(rotation=30)
        if show_fig == True:
            plt.show()
# light-curve binning: bin width in seconds and whether edges are 'approx'
# (np.histogram picks them) or 'exact' (built by hand in light_curve)
t_bin = {'seconds_per_bin':10, 'method':'approx'}
def light_curve(self, cleanevt=None, hdr=None, sub_reg=None, tstart=None, tend=None,
                count_rate=True, house_keeping_file=None, show_fig=True):
    """Bin the (filtered) event list into a light curve and plot it.

    Parameters
    ----------
    cleanevt : FITS_rec or None
        Event list; defaults to self.cleanevt.
    hdr : header or None
        Defaults to self.evt_header.
    sub_reg : None, 'boxes', [bx,by,tx,ty] or list of such lists
        Optional spatial sub-region(s); 'boxes' reuses self.rectangles.
    tstart, tend : str ('yyyy/mm/dd, HH:MM:SS') or None
        Time range; None uses the first/last event time.
    count_rate : bool
        Livetime-correct the counts into counts/s (needs a .hk file).
    house_keeping_file : str or None
        Passed to self.livetime().
    show_fig : bool
        Call plt.show() at the end.

    Results are stored on self (lc_counts, lc_count_rates, lc_times,
    lc_livetimes, t_bin_edges).
    """
    if self.rcParams_default_setup:
        matplotlib.rcParams['font.sans-serif'] = "Arial"
        matplotlib.rcParams['font.family'] = "sans-serif"
        plt.rcParams["figure.figsize"] = (10,6)
        plt.rcParams['font.size'] = 18
    if cleanevt == None:
        cleanevt = self.cleanevt
    if hdr == None:
        hdr = self.evt_header
    if sub_reg == 'boxes':
        sub_reg = self.rectangles
    self.sub_reg_lc = sub_reg
    single_lc = True # just start by assuming one light curve, don't worry, this only gets set to False if not
    # start/end times: keep in seconds relative to the NuSTAR epoch (self.rel_t)
    if tstart == None:
        tstart = np.min(cleanevt['TIME'])
        self.rel_tstart = tstart #already relative to 1/1/2010 and in seconds
    else:
        tstart = data_handling.getTimeFromFormat(tstart)
        self.rel_tstart = (tstart - self.rel_t).total_seconds()
    if tend == None:
        tend = np.max(cleanevt['TIME'])
        self.rel_tend = tend #already relative to 1/1/2010 and in seconds
    else:
        tend = data_handling.getTimeFromFormat(tend)
        self.rel_tend = (tend - self.rel_t).total_seconds()
    if count_rate == True:
        self.livetime(hk_filename=house_keeping_file, set_up_plot=False, show_fig=False) #run to get times and livetimes
        if len(self.hk_times) == 0:
            # no .hk file: offer a plain counts plot instead of counts/s
            decision = input('No livetimes present. Do you just want to see the counts vs. time instead: ')
            if decision in ['Yes', 'yes', 'Y', 'y']:
                count_rate = False
            else:
                print('Will not show plot.')
                return
    self.lc_livetimes = 0 # just to have it defined
    if self.t_bin['method'] == 'approx':
        if (type(cleanevt) == astropy.io.fits.fitsrec.FITS_rec) and (sub_reg == None): #data form of NuSTAR
            t_bin_conversion = int((self.rel_tend - self.rel_tstart) // self.t_bin['seconds_per_bin']) #get approximately t_bin seconds per bin as start and end of
            #data are fixed when the histogram is created
            assert t_bin_conversion >= 1, 'Number of bins cannot be <1. Decrease \'t_bin\' value to get more bins.'
            counts = np.histogram(cleanevt['TIME'], t_bin_conversion) #gives out bin values and bin edges
            self.lc_counts = counts[0]
            times = counts[1][:-1]
            self.t_bin_edges = counts[1]
            start_hhmmss = str((self.rel_t + timedelta(seconds=np.min(times))).strftime('%H:%M:%S'))
            start_yyyymmdd = str((self.rel_t + timedelta(seconds=np.min(times))).strftime('%Y/%m/%d'))
        elif (type(cleanevt) == astropy.io.fits.fitsrec.FITS_rec) and (sub_reg != None):
            #this is to plot the light curve of a sub-region.
            print('Inconvenient to approximate the time bins for the light curve of a sub_region.'
                  '\nChanging to \'exact\'.')
            self.t_bin['method'] = 'exact'
        else:
            raise TypeError('\'astropy.io.fits.fitsrec.FITS_rec\' is the only supported data type at the moment.')
    if self.t_bin['method'] == 'exact': #if since if the 'approx' flag is up and also submap!=None then time profile should be made here
        t_bin_number = int((self.rel_tend - self.rel_tstart) // self.t_bin['seconds_per_bin']) #get whole number of bins that are t_bin seconds long and
        #doesn't include any time at the end that only has data for some of the last range
        assert t_bin_number >= 1, 'Number of bins cannot be <1. Decrease \'t_bin\' value to get more bins.'
        # hand-build evenly spaced bin edges starting at rel_tstart
        edge = self.rel_tstart
        self.t_bin_edges = np.zeros(t_bin_number+1) #+1 for the last edge
        for t in range(len(self.t_bin_edges)):
            self.t_bin_edges[t] = edge
            edge += self.t_bin['seconds_per_bin']
        times = self.t_bin_edges[:-1]
        start_hhmmss = str((self.rel_t + timedelta(seconds=np.min(times))).strftime('%H:%M:%S'))
        start_yyyymmdd = str((self.rel_t + timedelta(seconds=np.min(times))).strftime('%Y/%m/%d'))
        if (type(cleanevt) == astropy.io.fits.fitsrec.FITS_rec) and (sub_reg == None): #data form of NuSTAR
            counts = np.histogram(cleanevt['TIME'], self.t_bin_edges) #gives out bin values and bin edges
            self.lc_counts = counts[0]
        elif (type(cleanevt) == astropy.io.fits.fitsrec.FITS_rec) and (sub_reg != None):
            if np.shape(sub_reg) == (4,):
                # one region: spatially filter once, then count events per time bin
                counts = []
                pixels = self.arcsec_to_pixel([sub_reg[0],sub_reg[1]], [sub_reg[2],sub_reg[3]])
                spatial_evtdata = self.spatial_filter(self.cleanevt, pixels)
                for t in range(len(self.t_bin_edges)-1):
                    # ts = (datetime.datetime(1970, 1, 1) + timedelta(seconds=(float(self.rel_t.strftime("%s"))+self.t_bin_edges[t]))).strftime('%Y/%m/%d, %H:%M:%S')
                    # te = (datetime.datetime(1970, 1, 1) + timedelta(seconds=(float(self.rel_t.strftime("%s"))+self.t_bin_edges[t+1]))).strftime('%Y/%m/%d, %H:%M:%S')
                    ts = (self.rel_t + timedelta(seconds=self.t_bin_edges[t])).strftime('%Y/%m/%d, %H:%M:%S')
                    te = (self.rel_t + timedelta(seconds=self.t_bin_edges[t+1])).strftime('%Y/%m/%d, %H:%M:%S')
                    sub_cleanevt = self.time_filter(spatial_evtdata, tmrng=[ts, te])
                    counts.append(len(sub_cleanevt['TIME']))
                self.lc_counts = np.array(counts)
            elif np.shape(sub_reg)[1] == 4:
                # several regions: one light curve (and one figure) per box
                all_counts = {}
                all_count_rates = {}
                for b, sub_r in enumerate(sub_reg, start=1):
                    counts = []
                    pixels = self.arcsec_to_pixel([sub_r[0],sub_r[1]], [sub_r[2],sub_r[3]])
                    spatial_evtdata = self.spatial_filter(self.cleanevt, pixels)
                    for t in range(len(self.t_bin_edges)-1):
                        ts = (self.rel_t + timedelta(seconds=self.t_bin_edges[t])).strftime('%Y/%m/%d, %H:%M:%S')
                        te = (self.rel_t + timedelta(seconds=self.t_bin_edges[t+1])).strftime('%Y/%m/%d, %H:%M:%S')
                        sub_cleanevt = self.time_filter(spatial_evtdata, tmrng=[ts, te])
                        counts.append(len(sub_cleanevt['TIME']))
                    box = ' (Box '+str(b)+')'
                    all_counts[box] = np.array(counts)
                    #if make_final_graph == True:
                    if count_rate == True:
                        #livetime correction
                        livetimes = np.zeros(len(self.t_bin_edges)-1)
                        for t in range(len(self.t_bin_edges)-1):
                            indices = ((self.hk_times>=self.t_bin_edges[t]) & (self.hk_times<self.t_bin_edges[t+1]))
                            ltimes_in_range = self.hk_livetimes[indices]
                            livetimes[t] = np.average(ltimes_in_range)
                        self.lc_livetimes = livetimes
                        counts_per_second = np.array(counts) / (livetimes * (times[1]-times[0]))
                        fig = plt.figure()
                        ax = plt.axes()
                        dt_times = [(self.rel_t + timedelta(seconds=t)) for t in times]
                        plt.plot(*self.stepped_lc_from_hist(self.dt_to_md(dt_times), counts_per_second))
                        plt.title('NuSTAR FPM'+self.fpm+' '+self.e_range_str+' keV Light Curve - '+start_yyyymmdd + box)
                        plt.xlim([data_handling.getTimeFromFormat(t) for t in self.time_range])
                        plt.xlabel('Start Time - '+start_hhmmss)
                        plt.ylim([0, np.max(counts_per_second[np.isfinite(counts_per_second)])*1.05])
                        plt.ylabel('Counts $s^{-1}$')
                        fmt = mdates.DateFormatter('%H:%M')
                        ax.xaxis.set_major_formatter(fmt)
                        ax.xaxis.set_major_locator(self.xlocator)
                        plt.xticks(rotation=30)
                        #plt.show()
                        all_count_rates[box] = counts_per_second
                    else:
                        fig = plt.figure()
                        ax = plt.axes()
                        dt_times = [(self.rel_t + timedelta(seconds=t)) for t in times]
                        plt.plot(*self.stepped_lc_from_hist(self.dt_to_md(dt_times), counts))
                        plt.title('NuSTAR FPM'+self.fpm+' '+self.e_range_str+' keV Light Curve - '+start_yyyymmdd + box)
                        plt.xlim([data_handling.getTimeFromFormat(t) for t in self.time_range])
                        plt.xlabel('Start Time - '+start_hhmmss)
                        plt.ylim([0, np.max(counts[np.isfinite(counts)])*1.05])
                        plt.ylabel('Counts')
                        fmt = mdates.DateFormatter('%H:%M')
                        ax.xaxis.set_major_formatter(fmt)
                        ax.xaxis.set_major_locator(self.xlocator)
                        plt.xticks(rotation=30)
                        #plt.show()
                self.lc_counts = all_counts
                # NOTE(review): all_count_rates is a dict, so `== []` is never
                # True -- lc_count_rates is always assigned the dict here
                if all_count_rates == []:
                    self.lc_count_rates = None
                else:
                    self.lc_count_rates = all_count_rates
                self.lc_times = dt_times
                if show_fig:
                    plt.show()
                single_lc = False
            else:
                raise TypeError('Check the form of the sub-region was given in, e.g. need [bx,by,tx,ty] or [[bx,by,tx,ty], ...].')
        else:
            raise TypeError('\'astropy.io.fits.fitsrec.FITS_rec\' is the only supported data type at the moment.')
    else:
        if (self.t_bin['method'] != 'exact') and (self.t_bin['method'] != 'approx'):
            raise ValueError('Only options for the time bins is \'approx\' or \'exact\'.')
    if single_lc == True: #only in case multiple regions are plotted then they are handled in its own 'for' loop
        if count_rate == True:
            #livetime correction
            livetimes = np.zeros(len(self.t_bin_edges)-1)
            for t in range(len(self.t_bin_edges)-1):
                indices = ((self.hk_times>=self.t_bin_edges[t]) & (self.hk_times<self.t_bin_edges[t+1]))
                ltimes_in_range = self.hk_livetimes[indices]
                livetimes[t] = np.average(ltimes_in_range)
            self.lc_livetimes = livetimes
            counts_per_second = self.lc_counts / (livetimes * (times[1]-times[0]))
            fig = plt.figure()
            ax = plt.axes()
            dt_times = [(self.rel_t + timedelta(seconds=t)) for t in times]
            plt.plot(*self.stepped_lc_from_hist(self.dt_to_md(dt_times), counts_per_second))
            plt.title('NuSTAR FPM'+self.fpm+' '+self.e_range_str+' keV Light Curve - '+start_yyyymmdd)
            plt.xlim([data_handling.getTimeFromFormat(t) for t in self.time_range])
            plt.xlabel('Start Time - '+start_hhmmss)
            plt.ylim([0, np.max(counts_per_second[np.isfinite(counts_per_second)])*1.05])
            plt.ylabel('Counts $s^{-1}$')
            fmt = mdates.DateFormatter('%H:%M')
            ax.xaxis.set_major_formatter(fmt)
            ax.xaxis.set_major_locator(self.xlocator)
            plt.xticks(rotation=30)
            #plt.show()
            self.lc_times = dt_times
            self.lc_count_rates = counts_per_second
        else:
            fig = plt.figure()
            ax = plt.axes()
            dt_times = [(self.rel_t + timedelta(seconds=t)) for t in times]
            plt.plot(*self.stepped_lc_from_hist(self.dt_to_md(dt_times), self.lc_counts))
            plt.title('NuSTAR FPM'+self.fpm+' '+self.e_range_str+' keV Light Curve - '+start_yyyymmdd)
            plt.xlim([data_handling.getTimeFromFormat(t) for t in self.time_range])
            plt.xlabel('Start Time - '+start_hhmmss)
            plt.ylim([0, np.max(self.lc_counts[np.isfinite(self.lc_counts)])*1.05])
            plt.ylabel('Counts')
            fmt = mdates.DateFormatter('%H:%M')
            ax.xaxis.set_major_formatter(fmt)
            ax.xaxis.set_major_locator(self.xlocator)
            plt.xticks(rotation=30)
            #plt.show()
            self.lc_times = dt_times
            self.lc_count_rates = None
        if show_fig:
            plt.show()
def full_obs_chus(self, start_directory=None, obs_id=None, descriptor='_chu123', ext='.fits' ,show_fig=True):
    '''
    Read the CHU (camera head unit) file for the observation, classify each
    time stamp into a CHU combination (1, 2, 12, 3, 13, 23, 123) and plot
    the CHU state against time. Stores self.chu_all, self.chu_times and
    self.chu_reference.

    Apapted from:
    https://github.com/ianan/nustar_sac/blob/master/idl/load_nschu.pro
    and
    https://github.com/NuSTAR/nustar_solar/blob/master/depricated/solar_mosaic_20150429/read_chus.pro
    '''
    if start_directory == None:
        start_directory=self.evt_directory
    if obs_id == None:
        obs_id=self.obs_id
    chu_filename, self.chu_directory, self.chu_filename = self.nustar_file_finder(start_directory=start_directory, obs_id=obs_id, descriptor=descriptor, ext=ext)
    #not self.chu_filename as fits.open needs to know the full path to the file
    hdulist = fits.open(chu_filename)
    data1 = hdulist[1].data
    data2 = hdulist[2].data
    data3 = hdulist[3].data
    hdulist.close()
    # easier to work with numpy arrays later
    data_c1 = np.array(data1)
    data_c2 = np.array(data2)
    data_c3 = np.array(data3)
    maxres = 20 # maximum allowed 'RESIDUAL' for a CHU sample to count as valid
    for chu_num, dat in enumerate([data_c1, data_c2, data_c3]):
        # quality cuts for this CHU's samples
        chu_bool = ((dat['VALID']==1) &
                    (dat['RESIDUAL']<maxres) &
                    (dat['STARSFAIL']<dat['OBJECTS']) &
                    (dat['CHUQ'][:,3]!=1))
        chu_01 = chu_bool*1 # change true/false into 1/0
        chu_mask = chu_01* (chu_num+1)**2 # give each chu a unique number that when it is added to another it gives a unique chu combo, like file permissions
        if chu_num == 0:
            chu_all = chu_mask # after chu 1 file have an array with 1s and 0s
        else:
            chu_all += chu_mask # after the others (chu2 and chu3) have an array with 1,4,9,5,10,13,14
        # last data array in the for loop can give the time, no. of seconds from 1-Jan-2010
        chu_time = dat['TIME']
    # reassigned values are at 100, etc. as to not accidently double sort the values again
    # e.g. if mask value was changed to 10, then if it was accidently run again it would get sorted into chu state 13 etc.
    chu_all[chu_all == 1] = 100 #chu1 # mask value in array is changed to chu state, e.g. mask value=5, chu state is 12, and value 102
    chu_all[chu_all == 4] = 101 #chu2
    chu_all[chu_all == 5] = 102 #chu12
    chu_all[chu_all == 9] = 103 #chu3
    chu_all[chu_all == 10] = 104 #chu13
    chu_all[chu_all == 13] = 105 #chu23
    chu_all[chu_all == 14] = 106 #chu123
    chu_time = chu_time[chu_all > 0] # if there is still no chu assignment for that time then remove
    chu_all = chu_all[chu_all > 0]
    self.chu_all = chu_all
    self.chu_reference = {'chu1':100, 'chu2':101, 'chu12':102, 'chu3':103, 'chu13':104, 'chu23':105, 'chu123':106}
    tick_labels = ['','1', '2', '12', '3', '13', '23', '123']
    # NOTE(review): `datetime.timedelta` is used here while the rest of the class
    # uses bare `timedelta` -- confirm both names are importable in this module
    self.chu_times = [(self.rel_t + datetime.timedelta(seconds=t)) for t in chu_time]
    dt_times = self.chu_times
    fig = plt.figure(figsize=(10,5))
    ax = plt.axes()
    plt.plot(dt_times, chu_all,'x')
    plt.title('CHU States of NuSTAR on ' + dt_times[0].strftime('%Y/%m/%d')) #get the date in the title
    plt.xlabel('Start Time - ' + dt_times[0].strftime('%H:%M:%S'))
    plt.ylabel('NuSTAR CHUs')
    plt.xlim([data_handling.getTimeFromFormat(t) for t in self.time_range])
    fmt = mdates.DateFormatter('%H:%M')
    ax.xaxis.set_major_formatter(fmt)
    ax.xaxis.set_major_locator(self.xlocator)
    ax.axes.set_yticklabels(tick_labels)
    plt.xticks(rotation=30)
    if show_fig == True:
        plt.show()
# parameters for lightcurves_3D(); time_range=None means use self.time_range
lc_3D_params = {'energy_low':1.6, 'energy_high':80, 'time_range':None} # start at 1.6 keV as this is the lowest (yet not trusted) bin for NuSTAR for binning in 0.04 keV steps
def lightcurves_3D(self, all_evt_data=None, energy_increment=0.04, aspect=6):
    '''***Under Construction***

    Build a 2-D (energy vs. time) counts image: one light curve per
    energy bin of width `energy_increment`, displayed with plt.imshow.
    '''
    if all_evt_data == None:
        all_evt_data = self.evt_data
    if self.lc_3D_params['time_range'] == None:
        self.lc_3D_params['time_range'] = self.time_range
    cleaned_all_evt = filter_with_tmrng.event_filter(all_evt_data, fpm = self.fpm,
                                                     energy_low = self.lc_3D_params['energy_low'],
                                                     energy_high = self.lc_3D_params['energy_high'],
                                                     tmrng=self.lc_3D_params['time_range'])
    energies = np.arange(1.6 , self.lc_3D_params['energy_high'], energy_increment)
    no_of_time = 200
    # NOTE(review): np.arange(no_of_time, 1) with start > stop is an *empty*
    # array; `times` is unused below, but confirm np.arange(1, no_of_time)
    # or similar was intended
    times = np.arange(no_of_time, 1)
    er_and_tc = []
    for e in range(len(energies)-1):
        # counts-vs-time histogram for this energy slice
        specific_lc_inds = filter_with_tmrng.by_energy(cleaned_all_evt, energies[e], energies[e+1])
        specific_lc_data = cleaned_all_evt[specific_lc_inds]
        counts = np.histogram(specific_lc_data['TIME'], no_of_time)[0]
        er_and_tc.append(counts)
    er_and_tc = np.array(er_and_tc)
    print(np.max(er_and_tc))
    fig = plt.figure(figsize=(6,8))
    plt.imshow(er_and_tc, origin='lower', aspect=aspect, vmax=1)
    plt.ylim([self.lc_3D_params['energy_low'], self.lc_3D_params['energy_high']])
    plt.xlabel('Time')
    plt.ylabel('Energy')
    plt.show()
    ## event list for each energy bin (get energy filter function)
    ## get lightcurve for each energy bin
    ## Get 2D array for counts for each energy along rows, and time steps along the columns
    ## 1D array for the energies, 1D array for time steps
    ## get seperate, static method for 3D plot creation, return axis object
    ## axis limits to 2.5--80 keV (range of NuSTAR that's well calibrated)
def detectors(self, show_fig=True):
    """Plot, per detector (DET_ID 0-3), the binned counts over time plus
    each detector's percentage contribution to the total counts.
    Stores the per-detector event times and percentages in self.all_detectors.
    Requires self.t_bin_edges (set by light_curve)."""
    self.all_detectors = {}
    plt.figure()
    ax = plt.axes()
    for d in range(4):
        # if the detector is the one I want then I want the time of it, else leave it alone
        self.all_detectors['det'+str(d)] = [self.cleanevt['TIME'][c] for c,i in enumerate(self.cleanevt['DET_ID']) if i==d]
        # get percentage of counts each detector contributed to the full time
        self.all_detectors['det'+str(d)+'%'] = len(self.all_detectors['det'+str(d)]) / len(self.cleanevt['TIME']) * 100
        dets = np.histogram(self.all_detectors['det'+str(d)], self.t_bin_edges) #gives out bin values and bin edges
        dt_times = [(self.rel_t + timedelta(seconds=t)) for t in dets[1]]
        plt.plot(*self.stepped_lc_from_hist(self.dt_to_md(dt_times), dets[0]), label='det'+str(d)+': '+'{:.1f}'.format(self.all_detectors['det'+str(d)+'%'])+'%')
    plt.legend()
    fmt = mdates.DateFormatter('%H:%M')
    ax.xaxis.set_major_formatter(fmt)
    ax.xaxis.set_major_locator(self.xlocator)
    plt.xticks(rotation=30)
    plt.title('Detector Contribution '+self.e_range_str+" keV")
    plt.ylabel('Counts from detector')
    plt.xlabel('Time')
    if show_fig:
        plt.show()
    #return plt.g
def plotChuTimes(self, span=True, axis=None):
    """Mark the time interval covered by each NuSTAR CHU combination on a plot.

    Looks for the per-CHU sunpos .evt files next to the main event file and,
    for each one found, records its first/last event time and draws it on the
    current (or given) axis. Sets ``self.chuChanges`` to
    ``{chu_name: [start_datetime, end_datetime]}``.

    Parameters
    ----------
    span : bool
        If True, shade each CHU interval (``axvspan``); if False, draw only
        the start/end boundaries as vertical lines.
        Default: True
    axis : matplotlib axis or None
        Axis to draw on; if None, draw via ``plt`` directly.
        Default: None
    """
    # remember to show_fig=False for the plotting methods as to allow alterations of the figures once run
    # look for and get the start and end times for each CHU file
    chus = ['chu1', 'chu2', 'chu12', 'chu3', 'chu13', 'chu23', 'chu123']
    colours = ['k', 'r', 'g', 'c', 'm', 'b', 'y']
    chuChanges = {}
    # wrap in a dict so the same attribute-call syntax works for plt and a real axis
    axis = {'ax':plt} if axis is None else {'ax':axis}
    # files may use either pipeline naming convention ("_S_" or "_N_")
    pipeline_modes = ["_S_", "_N_"]
    for c, chu in enumerate(chus):
        for pm in pipeline_modes:
            chuFile = self.evt_directory+'nu' + self.obs_id + self.fpm + '06_' + chu + pm + 'cl_sunpos.evt'
            if isfile(chuFile):
                break
        # no file for this CHU combination in either pipeline mode: skip it
        if not isfile(chuFile):
            continue
        hdulist = fits.open(chuFile)
        evt_data = hdulist[1].data
        hdulist.close()
        # first/last event time (seconds relative to self.rel_t) converted to datetimes
        chuChanges[chu] = [self.rel_t + timedelta(seconds=min(evt_data['time'])),
                           self.rel_t + timedelta(seconds=max(evt_data['time']))]
        # plot a shaded region or just the time boundaries for the chu changes
        if span:
            axis['ax'].axvspan(*chuChanges[chu], alpha=0.1, color=colours[c])
        else:
            axis['ax'].axvline(chuChanges[chu][0], color=colours[c])
            axis['ax'].axvline(chuChanges[chu][1], color=colours[c])
    self.chuChanges = chuChanges
def save(self, save_dir='./', folder_name=None, overwrite=False, **kwargs):
    #replace folder of saved data if run twice or just make a new one?
    """Pickle this object's main data products into a folder structure on disk.

    Creates ``<save_dir>/<obs_id><fpm>[_<chu_state>]_nustar_folder/`` (or
    ``folder_name`` if given) containing sub-folders with pickles of the evt
    file info, house-keeping info, livetimes, map data and light-curve data,
    plus a pickle of the whole ``self.__dict__`` that ``load()`` can restore.

    Parameters
    ----------
    save_dir : str
        Directory in which the output folder is created. Default: './'
    folder_name : str or None
        Explicit output folder name; if None, a name is built from the
        observation ID, FPM and CHU state. Default: None
    overwrite : bool
        If True, an existing folder of the same name is deleted and replaced;
        if False, a new folder with a numeric suffix is created. Default: False
    **kwargs :
        Any extra objects to pickle into 'kwargs_data.pickle'.

    Can I automate the process using dir(nu) since this has every variable created?
    Or at least add to a list of attributes to be saved.
    Use os module to create appropriate directory structure for saved attributes.
    """
    #print(dir(nuA))
    '''
    Variables/info to save:
    ***** evt_file_used *****
    ~evt_directory, evt_filename, evt_data, evt_header #where did the data come from?
    ~meta data, chu_state, energy range, fpm, obs id
    ***** house_keeping_file_used *****
    ~self.hk_directory, self.hk_filename, hk_data, hk_header #what hk file was used?
    ***** nustar_livetime_data *****
    ~hk_livetimes, hk_times, livetimes plot
    ***** nustar_map_data *****
    ~rsn_map and plot (for plot need to run the nustar_plot() with save enabled) #what does it look like?
    ~gaussian filter applied, rectangle coordinates
    ***** nustar_light_curve_data *****
    ~lc_counts/lc_count_rates, lc_times, lightcurve plot(s)
    ~rectangle coordinates
    New stuff to save:
    ***** chu function ***** deconvolve settings *****
    '''
    # build the default folder name; include the CHU state when the data was split by CHU
    if self.chu_state != 'not_split' and folder_name is None:
        nustar_folder = save_dir + self.obs_id + self.fpm + '_' + self.chu_state + '_nustar_folder'
    elif folder_name is not None:
        nustar_folder = folder_name
    else:
        nustar_folder = save_dir + self.obs_id + self.fpm + '_nustar_folder'
    # Create target Directory if don't exist
    if not os.path.exists(nustar_folder + '/'):
        nustar_folder = nustar_folder + '/'
        os.mkdir(nustar_folder) #make empty folder
        print("Directory " , nustar_folder , " Created.", end='')
    # If the folder exists and overwrite is True then replace the first one
    elif os.path.exists(nustar_folder + '/') and (overwrite == True):
        nustar_folder = nustar_folder + '/'
        subprocess.check_output(['rm', '-r', nustar_folder]) #remove evrything in it too
        os.mkdir(nustar_folder) #make empty folder
        print("Replacing directory " , nustar_folder, end='')
    # If the folder exists and overwrite is False then just make another file with an index
    elif os.path.exists(nustar_folder + '/') and (overwrite == False):
        # NOTE(review): the "(N)" suffix counts every folder containing 'nustar_folder'
        # in save_dir, not just ones with this base name — could collide; confirm intent.
        number_exist = len(np.nonzero(['nustar_folder' in f for f in os.listdir(save_dir)])[0])
        nustar_folder = nustar_folder + '(' + str(number_exist) + ')/'
        os.mkdir(nustar_folder)
        print("Directory " , nustar_folder , " already exists. Creating another.", end='')
    self.nustar_folder = nustar_folder
    # Now 'nustar_folder' is the folder things will be save into
    # Start with evt file information
    evt_folder = nustar_folder + 'evt_file_used/'
    os.mkdir(evt_folder)
    evt_list_to_save = ['evt_directory', 'evt_filename', 'obs_id', 'fpm', 'chu_state', 'energy_range',
                        'time_range', 'evt_data', 'evt_header', 'cleanevt']
    # intersect with dir(self) so only attributes that actually exist are pickled
    evt_info = list(set(dir(self)) & set(evt_list_to_save))
    evt_to_store = {}
    for name in evt_info:
        evt_to_store[name] = self.__dict__[name]
    with open(evt_folder + 'evt_file_info.pickle', 'wb') as evt_save_file:
        pickle.dump(evt_to_store, evt_save_file, protocol=pickle.HIGHEST_PROTOCOL)
    # hk file information
    hk_folder = nustar_folder + 'hk_file_used/'
    os.mkdir(hk_folder)
    hk_list_to_save = ['hk_directory', 'hk_filename', 'hk_data', 'hk_header']
    hk_info = list(set(dir(self)) & set(hk_list_to_save))
    hk_to_store = {}
    for name in hk_info:
        hk_to_store[name] = self.__dict__[name]
    with open(hk_folder + 'hk_file_info.pickle', 'wb') as hk_save_file:
        pickle.dump(hk_to_store, hk_save_file, protocol=pickle.HIGHEST_PROTOCOL)
    # Livetime info
    lvt_folder = nustar_folder + 'livetime_data/'
    os.mkdir(lvt_folder)
    lvt_list_to_save = ['hk_times', 'hk_livetimes']
    lvt_info = list(set(dir(self)) & set(lvt_list_to_save))
    lvt_to_store = {}
    for name in lvt_info:
        lvt_to_store[name] = self.__dict__[name]
    with open(lvt_folder + 'livetime_data.pickle', 'wb') as lvt_save_file:
        pickle.dump(lvt_to_store, lvt_save_file, protocol=pickle.HIGHEST_PROTOCOL)
    # Map info
    map_folder = nustar_folder + 'map_data/'
    os.mkdir(map_folder)
    map_list_to_save = ['rsn_map', 'gaussian_filter', 'time_norm', 'rectangles']
    map_info = list(set(dir(self)) & set(map_list_to_save))
    map_to_store = {}
    for name in map_info:
        try:
            map_to_store[name] = self.__dict__[name]
        except KeyError:
            # attribute lives on the class (default), not on this instance
            map_to_store[name] = NustarDo.__dict__[name]
    with open(map_folder + 'map_data.pickle', 'wb') as map_save_file:
        pickle.dump(map_to_store, map_save_file, protocol=pickle.HIGHEST_PROTOCOL)
    # Light curve info
    lc_folder = nustar_folder + 'light_curve_data/'
    os.mkdir(lc_folder)
    lc_list_to_save = ['lc_times', 'lc_counts', 'lc_count_rates', 'sub_reg_lc', 'lc_livetimes']
    lc_info = list(set(dir(self)) & set(lc_list_to_save))
    lc_to_store = {}
    for name in lc_info:
        lc_to_store[name] = self.__dict__[name]
    with open(lc_folder + 'light_curve_data.pickle', 'wb') as lc_save_file:
        pickle.dump(lc_to_store, lc_save_file, protocol=pickle.HIGHEST_PROTOCOL)
    # Can save your own stuff
    if len(kwargs) > 0:
        own_folder = nustar_folder
        with open(own_folder + 'kwargs_data.pickle', 'wb') as own_save_file:
            pickle.dump(kwargs, own_save_file, protocol=pickle.HIGHEST_PROTOCOL)
    # save the object that can be loaded back in
    with open(nustar_folder + nustar_folder[:-1].split('/')[-1] + '.pickle', 'wb') as object_file:
        pickle.dump(self.__dict__, object_file, protocol=pickle.HIGHEST_PROTOCOL)
    # remember where the full-object pickle lives so load() can find it
    self.object_file = nustar_folder + nustar_folder[:-1].split('/')[-1] + '.pickle'
    print(' Now Populated.')
def load(self, object_file=None):
    """Restore this object's attributes from a pickle created by ``save()``.

    Parameters
    ----------
    object_file : str or None
        Path to the pickle of ``self.__dict__``; if None, the
        ``self.object_file`` attribute set by ``save()`` is used. An explicit
        argument always takes priority over the attribute.
        Default: None
    """
    if object_file is None and not hasattr(self, 'object_file'):
        print('\'object_file\' attribute and input to this function are both \'None\', please provide one. \n Note: the input for this method takes priority.')
        return
    source = self.object_file if object_file is None else object_file
    with open(source, "rb") as input_file:
        self.__dict__ = pickle.load(input_file)
def shift(evt_data, pix_xshift=None, pix_yshift=None):
    """Shift the pixel coordinates of an event list in place.

    Parameters
    ----------
    evt_data : structured/record array
        Event list with 'X' and 'Y' columns; modified in place.
    pix_xshift, pix_yshift : number or None
        Amount to add to every value of the 'X'/'Y' column; None leaves the
        column unchanged.
        Default: None

    Returns
    -------
    The same (mutated) event list.
    """
    # 'is not None' rather than '!= None': identity check is the Python idiom
    # and a shift of 0 is still applied consistently.
    if pix_xshift is not None:
        # vectorised column update, equivalent to looping over every row
        evt_data['X'] = evt_data['X'] + pix_xshift
    if pix_yshift is not None:
        evt_data['Y'] = evt_data['Y'] + pix_yshift
    return evt_data
def nustars_synth_count(temp_response_dataxy, plasma_temp, plasma_em, source_area, errors=None, Tresp_syserror=0, log_data=False):
    """Takes data for a channel's temperature response, plasma temperature and emission measure and area of source and
    returns the expected DN/s per pixel.

    *** Check output and make sure your units work ***

    Parameters
    ----------
    temp_response_dataxy : dict
        The x and y data for the temperature response of the channel of interest, e.g. {'x':[...], 'y':[...]}.
    plasma_temp : float
        Temperature of the response you want in MK.
    plasma_em : float
        Volumetric emission measure of the plasma in cm^-3.
        (If you have column emission measure, i.e. cm^-5, then set source_area=1.)
    source_area : float
        Area of the source in cm^2.
    errors : dict
        A dictionary of dictionaries containing the errors on T and EM, e.g. {'T':{'+':a, '-':b},
        'EM':{'+':c, '-':d}}.
        Default: None
    Tresp_syserror : float
        Fractional systematic error on the temperature response, e.g. 20% error on temp_response_dataxy['y'] means Tresp_error=0.2
        Default: 0
    log_data : bool
        Do you want the data (x and y) logged (base 10) for the interpolation?
        Default: False

    Returns
    -------
    A dictionary of floats that is the synthetic DN/s per pixel for the data given, temperature response,
    temperature, and emission measure with units and errors ('errors' is None when no errors were given).
    """
    # find temperature response at the given plasma temperature in DN cm^5 pix^-1 s^-1
    if log_data:
        # interpolate in log-log space, then transform back
        f = interpolate.interp1d(np.log10(temp_response_dataxy['x']), np.log10(temp_response_dataxy['y']))
        temp_response = [10**f(np.log10(plasma_temp))]
    else:
        f = interpolate.interp1d(temp_response_dataxy['x'], temp_response_dataxy['y'])
        temp_response = [f(plasma_temp)]
    syn_flux = [tr * plasma_em * (1 / source_area) for tr in temp_response]
    # For errors
    if errors is not None:
        min_T, max_T = plasma_temp - errors['T']['-'], plasma_temp + errors['T']['+']
        min_EM, max_EM = plasma_em - errors['EM']['-'], plasma_em + errors['EM']['+']
        e_response = []
        for Ts in [min_T, max_T]:
            # find temperature response at the given plasma temperature in DN cm^5 pix^-1 s^-1
            r = [f(Ts)]
            e_response.append(r[0])
        # temperature at which the response peaks
        temp_max_response = temp_response_dataxy['x'][np.argmax(temp_response_dataxy['y'])]
        # what if there is a bump between central value and error range
        if (e_response[0] < temp_response[0]) and (e_response[1] < temp_response[0]):
            if min_T < temp_max_response < plasma_temp:
                e_response[0] = np.max(temp_response_dataxy['y'])
            elif plasma_temp < temp_max_response < max_T:
                e_response[1] = np.max(temp_response_dataxy['y'])
        min_R, max_R = e_response[0], e_response[1] #R from min_T and R from max_T
        # include temperature response error
        up_resp = 1 + Tresp_syserror
        down_resp = 1 - Tresp_syserror
        #flux from min_T(max_EM) and flux from max_T(min_EM)
        min_flux, max_flux = min_R * max_EM * (1 / source_area), max_R * min_EM * (1 / source_area)
        flux_range = [min_flux, max_flux]
        # drop any NaN/inf entries before taking max/min below
        e_response = np.array(e_response)[np.isfinite(e_response)]
        flux_range = np.array(flux_range)[np.isfinite(flux_range)]
        # max flux could be up_resp more, and min flux could be be down_resp more
        f_err = [up_resp*np.max(flux_range) - syn_flux[0], syn_flux[0] - down_resp*np.min(flux_range)]
        # fixed: loop variable renamed from `f` — it shadowed the interpolation function above
        for i, fe in enumerate(f_err):
            if fe < 0:
                f_err[i] = np.max(f_err)
        errors = {'syn_flux_err':{'+': f_err[0], '-':f_err[1]},
                  't_res_err':{'+': abs(up_resp*np.max(e_response) - temp_response[0]), '-':abs(temp_response[0] - down_resp*np.min(e_response))},
                  't_res_syserr':[Tresp_syserror*100, '%'],
                  'T_err':{'+': errors['T']['+'], '-':errors['T']['-']},
                  # fixed: the '-' key previously had a stray leading space (' -')
                  'EM_err':{'+': errors['EM']['+'], '-':errors['EM']['-']}}
    return {'syn_flux':[syn_flux[0],'DN pix^-1 s^-1'], 't_res':[temp_response, 'DN cm^5 pix^-1 s^-1'], 'T':[plasma_temp, 'MK'], 'EM':[plasma_em, 'cm^-3'], 'errors':errors}
def timefilter_evt(file, time_range=None, save_dir=None):
    """Takes a .evt file and filters the events list to a given time range. Only for region selection, do not use directly with spectral fitting software.

    Parameters
    ----------
    file : Str
        File (or directory/file) of the .evt file to be filtered by time.
    time_range : list
        A list of length 2 with the start and end date and time. Must be given in a specific format, e.g. time_range=['2018/09/10, 16:22:30', '2018/09/10, 16:24:30'].
        Default: None
    save_dir : Str
        String of the directory for the filtered file to be saved.
        Default: None

    Returns
    -------
    Creates a new file with '_tf' before the file extension (meaning time filtered) and returns the name of
    the new file, or None if no time_range was given.
    """
    # 'is None' rather than '== None': identity check is the Python idiom
    if time_range is None:
        print('No time_range given. Nothing will be done.')
        return
    file_regex = re.compile(r'.\w+') # form to split up filename string
    ext = file_regex.findall(file) # splits up file into all components, directories, filename, extension
    if save_dir is None:
        new_file_name = ''.join(ext[:-1]) + '_tf' + ext[-1] # '_tf' for time filtered
    else:
        # keep only the original base name (ext[-2]) and extension in the new directory
        new_file_name = save_dir + ext[-2] + '_tf' + ext[-1]
    hdulist = fits.open(file)
    evtdata = hdulist[1].data # data to be filtered
    evt_in_time = NustarDo().time_filter(evtdata, tmrng=time_range) # picks events inside time range
    hdulist[1].data = evt_in_time # replaces this hdu with the filtered events list
    hdulist.writeto(new_file_name, overwrite=True) # saves the edited file, original stays as is
    hdulist.close()
    return new_file_name
def CheckGrade0ToAllGrades(evtFile, wholeRangeToo=False, saveFig=None, timeRange=None, printOut=False, shortTitle=""):
    """Takes a NuSTAR evt file and compares the grade 0 events to the events of all grades.

    Adapted from: https://github.com/ianan/ns_proc_test/blob/main/test_proc_jun20_002.ipynb

    Parameters
    ----------
    evtFile : str
        The .evt file.
    wholeRangeToo : Bool
        If you want to plot the whole energy range in a second plot, next to the one ranging from
        1.6--10 keV, set this to True.
        Default: False
    saveFig : str
        If you want to save the figure made as a PDF then set this to a string of the save name.
        Default: None
    timeRange : list, 2 strings
        If you only want a certain time range of the total file's spectrum to be plotted, e.g.
        ["%Y/%m/%d, %H:%M:%S", "%Y/%m/%d, %H:%M:%S"].
        Default: None
    printOut : Bool
        If you want to print out the output nicely(-ish) set this to True.
        Default: False
    shortTitle : Str
        Add a quick title to help keep track of the plots
        Default: ""

    Returns
    -------
    Dictionary containing the file name used ["file"], the time range of the file ["fileTimeRange"],
    time range you asked it to plot ["timeRangeGivenToPlot"], effective exposure of full file ["eff_exp"],
    ontime of full file ["ontime"], and percentage livetime ["lvtime_percent"] of full file given;
    plus the list of matplotlib axes created.
    """
    # read in .pha files for grade 0 and all grades
    hdulist = fits.open(evtFile)
    evt_data = hdulist[1].data
    evt_header = hdulist[1].header
    hdulist.close()
    # what is the time range of the file before filtering with time if you want
    ## nustar times are measured in seconds from this date
    rel_t = data_handling.getTimeFromFormat("2010/01/01, 00:00:00")
    file_start = str((rel_t + timedelta(seconds=np.min(evt_data["time"]))).strftime('%Y/%m/%d, %H:%M:%S'))
    file_end = str((rel_t + timedelta(seconds=np.max(evt_data["time"]))).strftime('%Y/%m/%d, %H:%M:%S'))
    # filter evt file by time?
    if type(timeRange) == list:
        if len(timeRange) == 2:
            evt_data = NustarDo().time_filter(evt_data, tmrng=timeRange)
    # get the data
    # PI channel -> energy in keV via E = pi*0.04 + 1.6, binned at 0.04 keV
    hist_gradeAll, be_gradeAll = np.histogram(evt_data['pi']*0.04+1.6,bins=np.arange(1.6,79,0.04))
    # work out the grade 0 spectra as well
    data_grade0 = evt_data['pi'][evt_data['grade']==0]
    hist_grade0, be_grade0 = np.histogram(data_grade0*0.04+1.6,bins=np.arange(1.6,79,0.04))
    # plotting info
    width = 11 if wholeRangeToo else 5
    columns = 2 if wholeRangeToo else 1
    y_lims_spec = [1e-1, 1.1*np.max(hist_gradeAll)]
    ratio = hist_gradeAll/hist_grade0
    # mask out inf/NaN bins (where hist_grade0 was 0); name kept as-is (typo for "finite_vals")
    fintie_vals = np.isfinite(ratio)
    # restrict the ratio limits to <10 keV when only the close-up plot is drawn
    y_lims_ratio = [0.95, 1.05*np.max(ratio[fintie_vals])] if wholeRangeToo else [0.95, 1.05*np.max(ratio[fintie_vals][:int((10-1.6)/0.04)])]
    axes_made = []
    plt.figure(figsize=(width,7))
    # define subplots for close look
    ax1 = plt.subplot2grid((4, columns), (0, 0), colspan=1, rowspan=3)
    axes_made.append(ax1)
    ax2 = plt.subplot2grid((4, columns), (3, 0), colspan=1, rowspan=1)
    axes_made.append(ax2)
    plt.tight_layout()
    # axis 1: the plots for all grades and grade 0
    ax1.plot(be_gradeAll[:-1], hist_gradeAll, drawstyle="steps-pre", label="Grade All")
    ax1.plot(be_grade0[:-1], hist_grade0, drawstyle="steps-pre", label="Grade 0")
    ax1.set_yscale("log")
    ax1.set_ylim(y_lims_spec)
    ax1.set_ylabel("Counts")# s$^{-1}$ keV$^{-1}$")
    plt.setp(ax1.get_xticklabels(), visible=False)
    ax1.set_xlim([1.6,10])
    ax1.set_title("Grade 0 vs All Grades - "+shortTitle)
    ax1.legend()
    # axis 2: the difference between all grades and grade 0
    ax2.plot(be_grade0[:-1], ratio, drawstyle="steps-pre", color='k')
    ax2.set_ylabel("All Grades / Grade0")
    ax2.set_ylim(y_lims_ratio)
    ax2.set_xlim([1.6,10])
    ax2.set_xlabel("Energy [keV]")
    ax2.grid(axis='y')
    # define subplots for whole energy range
    if wholeRangeToo:
        # define subplots for close look
        ax3 = plt.subplot2grid((4, 2), (0, 1), colspan=1, rowspan=3)
        axes_made.append(ax3)
        ax4 = plt.subplot2grid((4, 2), (3, 1), colspan=1, rowspan=1)
        axes_made.append(ax4)
        plt.tight_layout()
        # axis 1: the plots for all grades and grade 0
        ax3.plot(be_gradeAll[:-1], hist_gradeAll, drawstyle="steps-pre", label="Grade All")
        ax3.plot(be_grade0[:-1], hist_grade0, drawstyle="steps-pre", label="Grade 0")
        ax3.set_yscale("log")
        ax3.set_ylim(y_lims_spec)
        ax3.set_xscale("log")
        plt.setp(ax3.get_xticklabels(), visible=False)
        ax3.set_xlim([1.6,79])
        ax3.set_title("Same But Whole E-range")
        ax3.legend()
        # axis 2: the difference between all grades and grade 0
        ax4.plot(be_grade0[:-1], ratio, drawstyle="steps-pre", color='k')
        ax4.set_ylim(y_lims_ratio)
        ax4.set_xscale("log")
        ax4.set_xlim([1.6,79])
        ax4.set_xlabel("Energy [keV]")
        ax4.grid(axis='y')
    if type(saveFig) == str:
        plt.savefig(saveFig, bbox_inches="tight")
    # plt.show()
    inform = {"file":evtFile,
              "fileTimeRange":[file_start, file_end],
              "timeRangeGivenToPlot":timeRange,
              "eff_exp":evt_header['livetime'],
              "ontime":evt_header['ontime'],
              "lvtime_percent":100*evt_header['livetime']/evt_header['ontime']}
    if printOut:
        for key in inform.keys():
            print(key, " : ", inform[key])
    return inform, axes_made
def CheckGrade0ToAnyGrades(evtFile, grades, wholeRangeToo=False, saveFig=None, timeRange=None, printOut=False, shortTitle="", xlims=None):
    """Takes a NuSTAR evt file and compares the grade 0 events to the events of the chosen grades.

    Adapted from: https://github.com/ianan/ns_proc_test/blob/main/test_proc_jun20_002.ipynb

    Parameters
    ----------
    evtFile : str
        The .evt file.
    grades : list of length 1 or 2 list
        A list of the lists of grades you want the grade 0 counts to be compared against. E.g. grades=[[1], [0,4]]
        means that grade zero will be checked against grade 1 counts and grade 0-4 counts inclusive.
    wholeRangeToo : Bool
        If you want to plot the whole energy range in a second plot, next to the one ranging from
        1.6--10 keV, set this to True.
        Default: False
    saveFig : str
        If you want to save the figure made as a PDF then set this to a string of the save name.
        Default: None
    timeRange : list, 2 strings
        If you only want a certain time range of the total file's spectrum to be plotted, e.g.
        ["%Y/%m/%d, %H:%M:%S", "%Y/%m/%d, %H:%M:%S"].
        Default: None
    printOut : Bool
        If you want to print out the output nicely(-ish) set this to True.
        Default: False
    shortTitle : Str
        Add a quick title to help keep track of the plots
        Default: ""
    xlims : list or None
        X-axis (energy, keV) limits for the close-up plots; defaults to [1.6, 10].
        Default: None

    Returns
    -------
    Dictionary containing the file name used ["file"], the time range of the file ["fileTimeRange"],
    time range you asked it to plot ["timeRangeGivenToPlot"], effective exposure of full file ["eff_exp"],
    ontime of full file ["ontime"], percentage livetime ["lvtime_percent"] of full file given, Grade 0
    plotting info, and your custom grade info too; plus the list of matplotlib axes created.
    """
    # read in .pha files for grade 0 and all grades
    hdulist = fits.open(evtFile)
    evt_data = hdulist[1].data
    evt_header = hdulist[1].header
    hdulist.close()
    # what is the time range of the file before filtering with time if you want
    ## nustar times are measured in seconds from this date
    rel_t = data_handling.getTimeFromFormat("2010/01/01, 00:00:00")
    file_start = str((rel_t + timedelta(seconds=np.min(evt_data["time"]))).strftime('%Y/%m/%d, %H:%M:%S'))
    file_end = str((rel_t + timedelta(seconds=np.max(evt_data["time"]))).strftime('%Y/%m/%d, %H:%M:%S'))
    # filter evt file by time?
    if type(timeRange) == list:
        if len(timeRange) == 2:
            evt_data = NustarDo().time_filter(evt_data, tmrng=timeRange)
    # work out the grade 0 spectra as well
    data_grade0 = evt_data['pi'][evt_data['grade']==0]
    # PI channel -> energy in keV via E = pi*0.04 + 1.6, binned at 0.04 keV
    hist_grade0, be_grade0 = np.histogram(data_grade0*0.04+1.6,bins=np.arange(1.6,79,0.04))
    other_grades = {}
    ratios = []
    max_ratios, min_ratios = [], []
    # get the data
    for g in grades:
        if len(g)==1:
            # a single grade, e.g. [1]
            data_grade = evt_data['pi'][evt_data['grade']==g[0]]
            g_str = "Grade "+str(g[0])
            other_grades[g_str] = np.histogram(data_grade*0.04+1.6,bins=np.arange(1.6,79,0.04))
        else:
            # an inclusive grade range, e.g. [0, 4]
            data_grade = evt_data['pi'][(evt_data['grade']>=g[0]) & (evt_data['grade']<=g[1])]
            g_str = "Grade "+str(g[0])+"-"+str(g[1])
            other_grades[g_str] = np.histogram(data_grade*0.04+1.6,bins=np.arange(1.6,79,0.04))
        ratio = other_grades[g_str][0]/hist_grade0
        ratios.append(ratio)
        # when only the close-up plot is drawn, limit the ratio range to <10 keV
        maximum = np.max(ratio[np.isfinite(ratio)]) if wholeRangeToo else np.max(ratio[np.isfinite(ratio)][:int((10-1.6)/0.04)])
        minimum = np.min(ratio[np.isfinite(ratio)]) if wholeRangeToo else np.min(ratio[np.isfinite(ratio)][:int((10-1.6)/0.04)])
        max_ratios.append(maximum)
        min_ratios.append(minimum)
    # plotting info
    width = 11 if wholeRangeToo else 5
    columns = 2 if wholeRangeToo else 1
    y_lims_spec = [1e-1, 1.1*np.max(hist_grade0)]
    y_lims_ratio = [0.95*np.min(min_ratios), 1.05*np.max(max_ratios)]
    axes_made = []
    plt.figure(figsize=(width,7))
    # define subplots for close look
    ax1 = plt.subplot2grid((4, columns), (0, 0), colspan=1, rowspan=3)
    axes_made.append(ax1)
    ax2 = plt.subplot2grid((4, columns), (3, 0), colspan=1, rowspan=1)
    axes_made.append(ax2)
    plt.tight_layout()
    # axis 1: the plots for the chosen grades and grade 0
    for key, r in zip(other_grades.keys(), ratios):
        ax1.plot(other_grades[key][1][:-1], other_grades[key][0], drawstyle="steps-pre", label=key)
        ax2.plot(other_grades[key][1][:-1], r, drawstyle="steps-pre")
    ax1.plot(be_grade0[:-1], hist_grade0, drawstyle="steps-pre", label="Grade 0")
    ax1.set_yscale("log")
    ax1.set_ylim(y_lims_spec)
    ax1.set_ylabel("Counts")# s$^{-1}$ keV$^{-1}$")
    plt.setp(ax1.get_xticklabels(), visible=False)
    xlims = xlims if type(xlims)!=type(None) else [1.6,10]
    ax1.set_xlim(xlims)
    ax1.set_title("Grade 0 vs Chosen Grades - "+shortTitle)
    ax1.legend()
    # axis 2: the difference between the chosen grades and grade 0
    # ax2.plot(be_grade0[:-1], ratio, drawstyle="steps-pre", color='k')
    ax2.set_ylabel("Chosen Grades / Grade0")
    ax2.set_ylim(y_lims_ratio)
    ax2.set_xlim(xlims)
    ax2.set_xlabel("Energy [keV]")
    ax2.grid(axis='y')
    # define subplots for whole energy range
    if wholeRangeToo:
        # define subplots for close look
        ax3 = plt.subplot2grid((4, 2), (0, 1), colspan=1, rowspan=3)
        axes_made.append(ax3)
        ax4 = plt.subplot2grid((4, 2), (3, 1), colspan=1, rowspan=1)
        axes_made.append(ax4)
        plt.tight_layout()
        # axis 1: the plots for the chosen grades and grade 0
        for key, r in zip(other_grades.keys(), ratios):
            ax3.plot(other_grades[key][1][:-1], other_grades[key][0], drawstyle="steps-pre", label=key)
            ax4.plot(other_grades[key][1][:-1], r, drawstyle="steps-pre")
        ax3.plot(be_grade0[:-1], hist_grade0, drawstyle="steps-pre", label="Grade 0")
        ax3.set_yscale("log")
        ax3.set_ylim(y_lims_spec)
        ax3.set_xscale("log")
        plt.setp(ax3.get_xticklabels(), visible=False)
        ax3.set_xlim([1.6,79])
        ax3.set_title("Same But Whole E-range")
        ax3.legend()
        # axis 2: the difference between the chosen grades and grade 0
        # ax4.plot(be_grade0[:-1], ratio, drawstyle="steps-pre", color='k')
        ax4.set_ylim(y_lims_ratio)
        ax4.set_xscale("log")
        ax4.set_xlim([1.6,79])
        ax4.set_xlabel("Energy [keV]")
        ax4.grid(axis='y')
    if type(saveFig) == str:
        plt.savefig(saveFig, bbox_inches="tight")
    # plt.show()
    inform = {"file":evtFile,
              "fileTimeRange":[file_start, file_end],
              "timeRangeGivenToPlot":timeRange,
              "eff_exp":evt_header['livetime'],
              "ontime":evt_header['ontime'],
              "lvtime_percent":100*evt_header['livetime']/evt_header['ontime'],
              "Grade 0":[hist_grade0, be_grade0],
              **other_grades}
    if printOut:
        for key in inform.keys():
            print(key, " : ", inform[key])
    return inform, axes_made
## functions to help find the FoV rotation
def collectSameXs(rawx, rawy, solx, soly):
    """ Returns a dictionary where each column is given a unique entry with a list
    of the rows that correspond to that one column from the evt file. Also saves the
    solar coordinates for that raw coordinate column with the rawx column key+"map2sol".

    Parameters
    ----------
    rawx, rawy : lists
        Raw coordinates of the evt counts.
    solx, soly : lists
        Solar coordinates of the sunpos evt counts.

    Returns
    -------
    A dictionary.

    Examples
    --------
    rawx, rawy = [1,2,3,3], [7,8,4,9]
    solx, soly = [101, 102, 103, 104], [250, 252, 254, 256]
    collectSameXs(rawx, rawy, solx, soly)
    >>> {"1":[7], "1map2sol":[[101, 250]],
         "2":[8], "2map2sol":[[102, 252]],
         "3":[4, 9], "3map2sol":[[103, 254], [104, 256]]}
    """
    # fixed docstring: "map2sol" values are always a list of [solx, soly] pairs,
    # even for a single entry (the old example showed them un-nested).
    output = {}
    for x, y, sx, sy in zip(rawx, rawy, solx, soly):
        key = str(x)
        if key not in output:
            output[key] = [y]
            output[key + "map2sol"] = [[sx, sy]]
        else:
            output[key].append(y)
            output[key + "map2sol"].append([sx, sy])
    return output
def minRowInCol(columns):
    """ Returns a dictionary where each key is the solar X position of each raw
    coordinate chosen (edges between det0&3 and 1&2) with its value being the
    solar Y coordinate, picking the *minimum* raw row in each column.

    Parameters
    ----------
    columns : dictionary
        Information of the raw and solar coordinates of the counts in order to
        each other (as produced by ``collectSameXs``).

    Returns
    -------
    A dictionary.

    Examples
    --------
    cols = {"1":[7], "1map2sol":[[101, 250]],
            "2":[8], "2map2sol":[[102, 252]],
            "3":[4, 9], "3map2sol":[[103, 254], [104, 256]]}
    minRowInCol(cols)
    >>> {"101":250, "102":252, "103":254}
    """
    # fixed docstring: the example input now nests the "map2sol" pairs, matching
    # what collectSameXs actually produces (the old, flat example would crash here).
    output_sol = {}
    for key in columns.keys():
        if "map2sol" in key:
            continue
        # find the corresponding solar coords to the minimum rawy
        sol_coords = columns[key + "map2sol"][np.argmin(columns[key])]
        # now have the solarX key with the solarY as its value
        output_sol[str(sol_coords[0])] = sol_coords[1]
    return output_sol
def maxRowInCol(columns):
    """ Returns a dictionary where each key is the solar X position of each raw
    coordinate chosen (edges between det0&3 and 1&2) with its value being the
    solar Y coordinate, picking the *maximum* raw row in each column.

    Parameters
    ----------
    columns : dictionary
        Information of the raw and solar coordinates of the counts in order to
        each other (as produced by ``collectSameXs``).

    Returns
    -------
    A dictionary.

    Examples
    --------
    cols = {"1":[7], "1map2sol":[[101, 250]],
            "2":[8], "2map2sol":[[102, 252]],
            "3":[4, 9], "3map2sol":[[103, 254], [104, 256]]}
    maxRowInCol(cols)
    >>> {"101":250, "102":252, "104":256}
    """
    # fixed docstring: the example previously called minRowInCol (wrong function)
    # and showed un-nested "map2sol" pairs.
    output_sol = {}
    for key in columns.keys():
        if "map2sol" in key:
            continue
        # find the corresponding solar coords to the maximum rawy
        sol_coords = columns[key + "map2sol"][np.argmax(columns[key])]
        # now have the solarX key with the solarY as its value
        output_sol[str(sol_coords[0])] = sol_coords[1]
    return output_sol
def getXandY(colsAndRows):
    """ Returns solar X and Y coordinates.

    Parameters
    ----------
    colsAndRows : dictionary
        Keys are solar X coordinates (as strings), values are solar Y coordinates.

    Returns
    -------
    Two numpy arrays: the solar X values (as ints) and the solar Y values.

    Examples
    --------
    colsAndRows = {"101":250, "102":252, "104":256}
    getXandY(colsAndRows)
    >>> [101, 102, 104], [250, 252, 256]
    """
    # dict keys were stringified upstream, so convert them back to ints here
    solar_x = np.array([int(key) for key in colsAndRows])
    solar_y = np.array([value for value in colsAndRows.values()])
    return solar_x, solar_y
def getDegrees(grad):
    """ Returns angle of rotation in degrees.

    Parameters
    ----------
    grad : float
        Gradient (slope) of a fitted line.

    Returns
    -------
    Angle in degrees.

    Examples
    --------
    grad = 1
    getDegrees(grad)
    >>> 45
    """
    # arctan gives radians; np.degrees converts (multiplies by 180/pi)
    return np.degrees(np.arctan(grad))
def straightLine(x, m, c):
    """ A straight line model.

    Parameters
    ----------
    x : scalar, list or numpy array
        X positions.
    m : float
        Gradient.
    c : float
        Y-intercept.

    Returns
    -------
    Ys for a straight line (numpy array/scalar).

    Examples
    --------
    x, m, c = [1, 2], 0.25, 1
    straightLine(x, m, c)
    >>> [1.25, 1.5]
    """
    # np.asarray generalizes the model to plain lists/scalars as well as arrays
    # (the documented example previously failed for a plain list input);
    # existing array callers (e.g. curve_fit) are unaffected.
    return m * np.asarray(x) + c
def getAngle_plot(rawx, rawy, solx, soly, det, **kwargs):
    """ Returns the rotation of the NuSTAR FoV from the gradient of the edges between
    det0&3 and 1&2 for whatever detector(s) you give it.

    Parameters
    ----------
    rawx, rawy : lists
        Raw coordinates of the evt counts.
    solx, soly : lists
        Solar coordinates of the sunpos evt counts.
    det : int
        The detector for the counts (0--3).
    **kwargs : Can pass an axis to it.

    Returns
    -------
    A float of the rotation from "North" in degrees where anticlockwise is positive.
    This assumes the rotation is between 90 and -90 degrees.

    Examples
    --------
    fig, axs = plt.subplots(2,2, figsize=(14,10))
    # get orientation from the nustar_swguide.pdf, Figure 3
    gradient0 = getAngle_plot(rawx0, rawy0, solx0, soly0, 0, axes=axs[0][0])
    gradient1 = getAngle_plot(rawx1, rawy1, solx1, soly1, 1, axes=axs[0][1])
    gradient2 = getAngle_plot(rawx2, rawy2, solx2, soly2, 2, axes=axs[1][1])
    gradient3 = getAngle_plot(rawx3, rawy3, solx3, soly3, 3, axes=axs[1][0])
    plt.show()
    """
    # default to pyplot if no axis is supplied
    k = {"axes": plt}
    k.update(kwargs)
    # dets 0 and 2 have their raw axes swapped relative to 1 and 3; for each raw
    # column, keep the edge row (max) and its matching solar coordinates.
    # NOTE(review): all four branches currently pick maxRowInCol — confirm that
    # is intended for every detector.
    if det==0:
        cols = collectSameXs(rawy, rawx, solx, soly)
        m_row_per_col = maxRowInCol(cols)
    elif det==1:
        cols = collectSameXs(rawx, rawy, solx, soly)
        m_row_per_col = maxRowInCol(cols)
    elif det==2:
        cols = collectSameXs(rawy, rawx, solx, soly)
        m_row_per_col = maxRowInCol(cols)
    elif det==3:
        cols = collectSameXs(rawx, rawy, solx, soly)
        m_row_per_col = maxRowInCol(cols)
    # working with rawx and y to make sure using correct edge then find the
    # corresponding entries in solar coords
    aAndY = getXandY(m_row_per_col)
    x, y = aAndY[0], aAndY[1]
    xlim, ylim = [np.min(x)-5, np.max(x)+5], [np.min(y)-5, np.max(y)+5]
    #if det in [0, 1]:
    #    x = x[y>np.median(y)]
    #    y = y[y>np.median(y)]
    #elif det in [2, 3]:
    #    x = x[y<np.median(y)]
    #    y = y[y<np.median(y)]
    # fit a straight line to the detector edge; its slope gives the FoV rotation
    popt, pcov = curve_fit(straightLine, x, y, p0=[0, np.mean(y)])
    k["axes"].plot(x, y, '.')
    k["axes"].plot(x, straightLine(x, *popt))
    # pyplot and Axes use different method names for limits/labels
    if k["axes"] != plt:
        k["axes"].set_ylim(ylim)
        k["axes"].set_xlim(xlim)
        k["axes"].set_ylabel("Solar-Y")
        k["axes"].set_xlabel("Solar-X")
    else:
        k["axes"].ylim(ylim)
        k["axes"].xlim(xlim)
        k["axes"].ylabel("Solar-Y")
        k["axes"].xlabel("Solar-X")
    k["axes"].text(np.min(x), (ylim[0]+ylim[1])/2+5, "Grad: "+str(popt[0]))
    k["axes"].text(np.min(x), (ylim[0]+ylim[1])/2, "Angle: "+str(np.arctan(popt[0]))+" rad")
    k["axes"].text(np.min(x), (ylim[0]+ylim[1])/2-5, "Angle: "+str(np.arctan(popt[0])*(180/np.pi))+" deg")
    k["axes"].text(np.max(x)*0.99, ylim[0]*1.001, "DET: "+str(det), fontweight="bold")
    # fixed: the return line was corrupted with fused dataset-metadata text,
    # which was a syntax error
    return np.arctan(popt[0])*(180/np.pi)
# setup.py (edamontology/edam2json)
# coding: utf-8
import os, sys
from setuptools import setup, find_packages

# Package metadata.
NAME = "edam2json"
VERSION = "1.0dev1"

# Read the long description from the README that sits next to this script.
SETUP_DIR = os.path.dirname(__file__)
README = os.path.join(SETUP_DIR, 'README.md')
# fixed: use a context manager so the README file handle is closed rather than leaked
with open(README) as readme_file:
    readme = readme_file.read()

# Runtime dependencies.
REQUIRES = ["rdflib", "rdflib-jsonld"]

setup(
    name=NAME,
    version=VERSION,
    description="edam2json automates the export of the EDAM ontology to various JSON-based formats",
    author='<NAME>',
    author_email="<EMAIL>",
    url="https://github.com/edamontology/edam2json",
    packages=find_packages(),
    install_requires=REQUIRES,
    license="MIT",
    keywords=["Bioinformatics", "OWL", "JSON", "JSON-LD", "Ontology"],
    entry_points={
        'console_scripts': [
            'edam2json=edam2json.__main__:main',
        ]
    }
)
| 1.523438 | 2 |
# DDPG/utils.py (sergfer26/Quadcopter-Deep-RL)
import numpy as np
import gym
import random
from numpy import floor
from collections import deque
from .env.quadcopter_env import QuadcopterEnv
# Ornstein-Ulhenbeck Process
# Taken from #https://github.com/vitchyr/rlkit/blob/master/rlkit/exploration_strategies/ou_strategy.py
class OUNoise(object):
    """Ornstein-Uhlenbeck process noise for exploration in continuous action spaces.

    Taken from https://github.com/vitchyr/rlkit/blob/master/rlkit/exploration_strategies/ou_strategy.py

    The noise state mean-reverts towards ``mu`` at rate ``theta`` while being
    perturbed by Gaussian increments scaled by ``sigma``, which linearly decays
    from ``max_sigma`` to ``min_sigma`` over ``decay_period`` steps.
    """

    def __init__(self, action_space, mu=0.0, theta=0.15, max_sigma=1.0, min_sigma=0.1, decay_period=1e5):
        self.mu = mu
        self.theta = theta
        self.sigma = max_sigma
        self.max_sigma = max_sigma
        self.min_sigma = min_sigma
        self.decay_period = decay_period
        self.action_dim = action_space.shape[0]
        self.low = action_space.low
        self.high = action_space.high
        self.reset()

    def reset(self):
        # restart the process at its long-run mean
        self.state = np.ones(self.action_dim) * self.mu

    def evolve_state(self):
        # one Euler step of the OU process: mean reversion plus Gaussian noise
        drift = self.theta * (self.mu - self.state)
        diffusion = self.sigma * np.random.randn(self.action_dim)
        self.state = self.state + drift + diffusion
        return self.state

    def get_action(self, action, t=0):
        ou_state = self.evolve_state()
        # linearly anneal sigma from max_sigma down to min_sigma over decay_period
        decay = min(1.0, t / self.decay_period)
        self.sigma = self.max_sigma - (self.max_sigma - self.min_sigma) * decay
        # keep the noisy action inside the action-space bounds
        return np.clip(action + ou_state, self.low, self.high)
# https://github.com/openai/gym/blob/master/gym/core.py
class NormalizedEnv(QuadcopterEnv):
    """Wrap actions: map normalized actions to/from the env's real action range."""

    def __init__(self, env):
        QuadcopterEnv.__init__(self)

    def _action(self, action):
        # affine map from [-1, 1] onto [low, high]
        half_range = (self.action_space.high - self.action_space.low) / 2.
        midpoint = (self.action_space.high + self.action_space.low) / 2.
        return half_range * action + midpoint

    def _reverse_action(self, action):
        # inverse affine map from [low, high] back onto [-1, 1]
        inv_half_range = 2. / (self.action_space.high - self.action_space.low)
        midpoint = (self.action_space.high + self.action_space.low) / 2.
        return inv_half_range * (action - midpoint)
class Memory:
    """Bounded FIFO replay buffer of (state, action, reward, next_state, done) tuples."""

    def __init__(self, max_size):
        self.max_size = max_size
        # deque(maxlen=...) silently drops the oldest entry once full.
        self.buffer = deque(maxlen=max_size)

    def push(self, state, action, reward, next_state, done):
        # The scalar reward is wrapped in a 1-element array so batches stack.
        self.buffer.append((state, action, np.array([reward]), next_state, done))

    def sample(self, batch_size):
        """Return batch_size random transitions as five parallel lists."""
        columns = ([], [], [], [], [])
        for transition in random.sample(self.buffer, batch_size):
            for column, value in zip(columns, transition):
                column.append(value)
        return columns

    def __len__(self):
        return len(self.buffer)

    def remove(self):
        # Drop half of the buffer, one random index in [0, n) at a time.
        half = int(floor(len(self.buffer) / 2))
        for _ in range(half):
            del self.buffer[np.random.randint(half)]
        print('Se elimino el 50% de la infomacion!!')
| 2.53125 | 3 |
stores/apps/users/admin.py | diassor/CollectorCity-Market-Place | 135 | 12758631 | from models import *
from django.contrib import admin
admin.site.register(Profile)
admin.site.register(EmailVerify)
| 1.289063 | 1 |
gantk2/args/utils.py | MLIA/gantk2 | 11 | 12758632 | # Copyright 2021 <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import configargparse
def create_argparser():
    """Return the base configargparse parser for the NTK GANs program."""
    return configargparse.ArgumentParser(
        prog='NTK GANs',
        description='NTK GANs.',
        formatter_class=configargparse.ArgumentDefaultsHelpFormatter,
    )
def create_default_parser_wrapper(create_args_fn):
    """Wrap *create_args_fn* so it can be called with or without a parser.

    The returned callable builds a default parser via create_argparser()
    when none is supplied, then delegates to create_args_fn.
    """
    def create_parser(p=None):
        parser = create_argparser() if p is None else p
        return create_args_fn(parser)
    return create_parser
def get_arg_group(p, title):
    """Return the argument group of parser *p* whose title equals *title*.

    Raises Warning when no group, or more than one group, has that title
    (kept as-is for caller compatibility).
    """
    matching = [group for group in p._action_groups if group.title == title]
    if not matching:
        raise Warning('No groups with this title')
    if len(matching) > 1:
        raise Warning('More than 1 group with the same title')
    return matching[0]
| 2.1875 | 2 |
app/backend/mails/apps.py | NicholasCF/meridien | 1 | 12758633 | from django.apps import AppConfig
class MailsConfig(AppConfig):
    """Django application configuration for the ``mails`` app."""

    name = 'mails'
| 1.265625 | 1 |
src/chapter2/exercise7.py | Group12BSE1/BSE-2021 | 0 | 12758634 | <reponame>Group12BSE1/BSE-2021
# This program makes change for a dollar amount, largest denomination first.


def compute_change(amount):
    """Break *amount* (in dollars) into bill/coin counts, largest first.

    Works in integer cents: the original float "%" arithmetic accumulated
    rounding error and could drop coins (e.g. the penny for 0.41).
    Returns a list of (count, denomination-name) pairs, in display order.
    """
    cents = int(round(amount * 100))
    denominations = [
        (2000, "twenties"),
        (1000, "tens"),
        (500, "fives"),
        (100, "ones"),
        (25, "quarters"),
        (10, "dimes"),
        (5, "nickels"),
        (1, "pennies"),
    ]
    breakdown = []
    for value, name in denominations:
        count, cents = divmod(cents, value)
        breakdown.append((count, name))
    return breakdown


if __name__ == "__main__":
    change = float(input("Enter an amount to make change for :"))
    print("Your change is..")
    for count, name in compute_change(change):
        print(count, name)
| 4.09375 | 4 |
itdagene/graphql/object_types.py | itdagene-ntnu/itdagene | 9 | 12758635 | import graphene
from django.db.models import Q
from graphene import relay
from graphene_django import DjangoObjectType
from graphene_django.registry import Registry
from itdagene.app.career.models import Joblisting as ItdageneJoblisting
from itdagene.app.career.models import Town as ItdageneTown
from itdagene.app.company.models import Company as ItdageneCompany
from itdagene.app.company.models import KeyInformation as ItdageneKeyInformation
from itdagene.app.events.models import Event as ItdageneEvent
from itdagene.app.pages.models import Page as ItdagenePage
from itdagene.app.stands.models import DigitalStand as ItdageneStand
from itdagene.core.models import Preference
from itdagene.core.models import User as ItdageneUser
from itdagene.graphql.types import CountableConnectionBase, OpengraphMetadata
from itdagene.graphql.utils import resize_image
# Relay node exposing an ItdageneTown row (id and name only).
class Town(DjangoObjectType):
    class Meta:
        model = ItdageneTown
        interfaces = (relay.Node,)
        description = "Town entity"
        only_fields = ("id", "name")
# GraphQL type for a job listing, linked to its Company and Towns.
class Joblisting(DjangoObjectType):
    # The m2m towns relation is exposed as a non-null list of non-null Towns.
    towns = graphene.NonNull(graphene.List(graphene.NonNull(Town)))

    class Meta:
        model = ItdageneJoblisting
        connection_class = CountableConnectionBase
        # filter_fields = [
        #     'type',
        #     'to_year',
        #     'from_year',
        # ]
        description = "Joblisting entity"
        only_fields = (
            "id",
            "towns",
            "company",
            "title",
            "type",
            "description",
            "image",
            "deadline",
            "from_year",
            "to_year",
            "url",
            "date_created",
            "slug",
            "video_url",
            "is_summerjob_marathon",
        )
        interfaces = (relay.Node, OpengraphMetadata)

    def resolve_towns(self, info, **kwargs):
        return self.towns.all()

    def resolve_sharing_image(self, info, **kwargs):
        # No image when the company has no logo; otherwise resize to the
        # standard Opengraph sharing dimensions (1200x630).
        if not self.company.logo:
            return None
        return resize_image(self.company.logo, width=1200, height=630)

    def resolve_company(self, info, **kwargs):
        # Batched lookup through the dataloader to avoid N+1 queries.
        return info.context.loaders.Companyloader.load(self.company_id)

    @classmethod
    def get_queryset(cls):
        return ItdageneJoblisting.objects.all()

    @classmethod
    def get_node(cls, context, id):
        # Relay node lookup; any lookup failure is logged and mapped to None.
        try:
            return ItdageneJoblisting.objects.get(pk=id)
        except Exception as e:
            print(e)
            return None
# GraphQL type for a public info page; only non-auth, active pages are served.
class Page(DjangoObjectType):
    class Meta:
        model = ItdagenePage
        interfaces = (relay.Node, OpengraphMetadata)
        description = "(info)Page entity"
        only_fields = (
            "slug",
            "title",
            "language",
            "menu",
            "content",
            "ingress",
            "date_saved",
            "date_created",
        )

    def resolve_description(self, info, **kwargs):
        # The Opengraph description is the page's ingress (lead paragraph).
        return self.ingress

    @classmethod
    def get_queryset(cls):
        # Never expose pages that require authentication or are inactive.
        return ItdagenePage.objects.filter(need_auth=False, active=True)
# GraphQL type for an itdagene user/board member.
class User(DjangoObjectType):
    # Derived fields resolved from model methods rather than columns.
    full_name = graphene.String()
    role = graphene.String()
    photo = graphene.Field(graphene.String, height=graphene.Int(), width=graphene.Int())

    class Meta:
        model = ItdageneUser
        interfaces = (relay.Node,)
        description = "User entity"
        only_fields = ("id", "firstName", "lastName", "email", "year", "role")

    def resolve_full_name(self, info):
        return self.get_full_name()

    def resolve_role(self, info):
        return self.role()

    def resolve_photo(self, info, **kwargs):
        # JPEG at quality 80; height/width come from the query arguments.
        return resize_image(self.photo, format="JPEG", quality=80, **kwargs)
# GraphQL type for events. Stand-bound events are hidden from the global
# listing unless they are "promoted stand events" (type 7).
class Event(DjangoObjectType):
    class Meta:
        model = ItdageneEvent
        description = "Small event type"
        only_fields = (
            "id",
            "title",
            "time_start",
            "time_end",
            "description",
            "type",
            "location",
            "company",
            "uses_tickets",
            "max_participants",
            "date",
        )
        interfaces = (relay.Node,)

    @classmethod
    def get_queryset(cls):
        """
        When fetching all events, we do not want stand events,
        unless they are of the type 'promoted stand event' (7)
        """
        return ItdageneEvent.objects.filter(Q(stand=None) | Q(type=7))
# GraphQL type for a (digital) company stand.
class Stand(DjangoObjectType):
    events = graphene.List(
        graphene.NonNull(Event), description="The stand's associated events"
    )

    class Meta:
        model = ItdageneStand
        description = "A company stand"
        only_fields = (
            "slug",
            "description",
            "livestream_url",
            "qa_url",
            "chat_url",
            "active",
            "company",
        )
        interfaces = (relay.Node,)

    def resolve_company(self, info, **kwargs):
        # Batched lookup through the dataloader to avoid N+1 queries.
        return info.context.loaders.Companyloader.load(self.company_id)

    def resolve_events(self, info, **kwargs):
        return ItdageneEvent.objects.filter(stand=self)

    @classmethod
    def get_queryset(cls):
        # Only active stands are exposed.
        return ItdageneStand.objects.filter(active=True)
# Simple name/value fact attached to a company.
class KeyInformation(DjangoObjectType):
    class Meta:
        model = ItdageneKeyInformation
        interfaces = (relay.Node,)
        description = "Key information about a company"
        only_fields = ("id", "name", "value")
# GraphQL type for a participating company.
class Company(DjangoObjectType):
    logo = graphene.Field(
        graphene.String,
        height=graphene.Int(),
        width=graphene.Int(),
        padding=graphene.Boolean(),
    )
    key_information = graphene.List(
        graphene.NonNull(KeyInformation),
        description="Key information about the company.",
    )

    class Meta:
        model = ItdageneCompany
        description = "Company entity"
        only_fields = (
            "id",
            "name",
            "url",
            "logo",
            "description",
            "is_collabrator",
            "joblistings",
        )
        interfaces = (relay.Node,)

    @classmethod
    def get_queryset(cls):
        # Companies attending either day of the fair (queryset union via |).
        return ItdageneCompany.get_last_day() | ItdageneCompany.get_first_day()

    @classmethod
    def get_node(cls, context, id):
        # Relay node lookup; any lookup failure is logged and mapped to None.
        try:
            return cls.get_queryset().get(pk=id)
        except Exception as e:
            print(e)
            return None

    def resolve_logo(self, info, **kwargs):
        # kwargs (height/width/padding) are forwarded to the resizer.
        return resize_image(self.logo, **kwargs)

    def resolve_key_information(self, info, **kwargs):
        return ItdageneKeyInformation.objects.filter(company=self)

    def resolve_stand(self, info, **kwargs):
        # NOTE(review): "stand" is not listed in only_fields above — confirm
        # this resolver is actually reachable through the schema.
        return Stand.get_queryset().filter(company=self).first()
# Company subtype for the single main collaborator (HSP), with extra
# intro/video/poster fields sourced from the current Preference row.
class MainCollaborator(Company):
    class Meta:
        model = ItdageneCompany
        description = "Main collaborator company entity"
        only_fields = (
            "id",
            "name",
            "url",
            "logo",
            "description",
            "joblistings",
            "intro",
            "video",
            "poster",
        )
        interfaces = (relay.Node,)
        # This has to be added to avoid GraphQL using this definition for all company references
        registry = Registry()

    intro = graphene.String()
    video = graphene.String()
    poster = graphene.String()

    def resolve_intro(self, info):
        return Preference.current_preference().hsp_intro

    def resolve_video(self, info):
        return Preference.current_preference().hsp_video

    def resolve_poster(self, info):
        return Preference.current_preference().hsp_poster
# Aggregated metadata about the current year's itDAGENE event, backed by
# the Preference model. Several resolvers return None (hidden) unless the
# matching Preference feature flag is enabled.
class MetaData(DjangoObjectType):
    companies_first_day = graphene.List(graphene.NonNull(Company))
    companies_last_day = graphene.List(graphene.NonNull(Company))
    collaborators = graphene.List(
        graphene.NonNull(Company),
        description="List the collaborators, not including the main collaborator",
    )
    main_collaborator = graphene.Field(
        MainCollaborator, description="Main collaborator for current years event"
    )
    board_members = graphene.NonNull(graphene.List(graphene.NonNull(User)))
    interest_form = graphene.String()

    def resolve_main_collaborator(self, info):
        if self.view_hsp:
            return ItdageneCompany.get_main_collaborator()

    def resolve_companies_first_day(self, info):
        if self.view_companies:
            return ItdageneCompany.get_first_day()

    def resolve_companies_last_day(self, info):
        if self.view_companies:
            return ItdageneCompany.get_last_day()

    def resolve_collaborators(self, info):
        if self.view_sp:
            return ItdageneCompany.get_collaborators()

    def resolve_board_members(self, info):
        return (
            ItdageneUser.objects.filter(year=self.year, is_active=True)
            .all()
            .prefetch_related("groups")
        )

    def resolve_interest_form(self, info):
        if self.show_interest_form:
            return self.interest_form_url

    class Meta:
        model = Preference
        description = "Metadata about the current years itdagene"
        only_fields = (
            "id",
            "start_date",
            "end_date",
            "year",
            "nr_of_stands",
            # Bug fix: these two entries were previously one fused string —
            # '"companies_first_day" "companies_last_day"' with a missing
            # comma concatenates into a single (nonexistent) field name.
            "companies_first_day",
            "companies_last_day",
            "collaborators",
            "main_collaborator",
            "board_members",
            "interest_form",
        )
        interfaces = (relay.Node,)
# Union of all searchable entity types returned by the search query.
class SearchResult(graphene.Union):
    class Meta:
        types = (Joblisting, Company, Page, Town)
| 1.960938 | 2 |
pdf_api/test.py | Rob192/pdf_api | 0 | 12758636 | from pathlib import Path
import os
# Sanity-check script: read the uploaded test file back as raw bytes.
text_file = Path(os.getcwd()) / 'pdf_api' / 'api_uploaded_files' / 'test.txt'
output = text_file.read_bytes()
scripts/loading/ontology/edam.py | dougli1sqrd/SGDBackend-Nex2 | 5 | 12758637 | import urllib.request, urllib.parse, urllib.error
from datetime import datetime
import sys
import os
import importlib
importlib.reload(sys) # Reload does the trick!
from src.models import Source, Edam, EdamUrl, EdamAlia, EdamRelation, Ro
from scripts.loading.database_session import get_session
from scripts.loading.ontology import read_owl
__author__ = 'sweng66'
## Created on May 2017
## This script is used to update EDAM ontology in NEX2.
log_file = 'scripts/loading/ontology/logs/edam.log'
ontology = 'EDAM'
src = 'EDAM'
CREATED_BY = os.environ['DEFAULT_USER']
def load_ontology(ontology_file):
    """Load/refresh the EDAM ontology in NEX2 from the given OWL file.

    Builds lookup maps of the current database state, parses the OWL file,
    delegates the insert/update work to load_new_data(), and writes a
    summary to the log file.
    """

    nex_session = get_session()

    # Current database state, keyed for fast lookup during the load.
    source_to_id = dict([(x.display_name, x.source_id) for x in nex_session.query(Source).all()])
    edamid_to_edam = dict([(x.edamid, x) for x in nex_session.query(Edam).all()])
    term_to_ro_id = dict([(x.display_name, x.ro_id) for x in nex_session.query(Ro).all()])

    # edam_id -> list of (alias display_name, alias_type) currently stored.
    edam_id_to_alias = {}
    for x in nex_session.query(EdamAlia).all():
        aliases = []
        if x.edam_id in edam_id_to_alias:
            aliases = edam_id_to_alias[x.edam_id]
        aliases.append((x.display_name, x.alias_type))
        edam_id_to_alias[x.edam_id] = aliases

    # child edam_id -> list of parent edam_ids currently stored.
    edam_id_to_parent = {}
    for x in nex_session.query(EdamRelation).all():
        parents = []
        if x.child_id in edam_id_to_parent:
            parents = edam_id_to_parent[x.child_id]
        parents.append(x.parent_id)
        edam_id_to_parent[x.child_id] = parents

    ####################################

    fw = open(log_file, "w")

    # NOTE(review): is_sgd_term appears unused in this function.
    is_sgd_term = {}

    data = read_owl(ontology_file, ontology)

    [update_log, to_delete_list] = load_new_data(nex_session, data,
                                                 source_to_id,
                                                 edamid_to_edam,
                                                 term_to_ro_id['is a'],
                                                 edam_id_to_alias,
                                                 edam_id_to_parent,
                                                 fw)

    write_summary_and_send_email(fw, update_log, to_delete_list)

    nex_session.close()
    fw.close()
def load_new_data(nex_session, data, source_to_id, edamid_to_edam, ro_id, edam_id_to_alias, edam_id_to_parent, fw):
    """Insert new EDAM terms and update existing ones from parsed OWL data.

    For each term: re-activate or rename existing rows, or create a new Edam
    row plus its external URLs, parent relations and aliases. Terms present
    in the database but absent from the OWL file are marked obsolete.
    Returns [update_log, to_delete] where update_log counts updated/added
    rows and to_delete lists (edamid, display_name) of dropped terms.
    """

    active_edamid = []
    update_log = {}
    for count_name in ['updated', 'added', 'deleted']:
        update_log[count_name] = 0

    # Dedup guards so the same relation/alias is not inserted twice per run.
    relation_just_added = {}
    alias_just_added = {}
    for x in data:
        edam_id = None
        if "EDAM:" not in x['id']:
            continue
        if x['id'] in edamid_to_edam:
            ## in database
            y = edamid_to_edam[x['id']]
            edam_id = y.edam_id
            if y.is_obsolete is True:
                # Term re-appeared in the release: re-activate it.
                y.is_obsolete = '0'
                nex_session.add(y)
                nex_session.flush()
                update_log['updated'] = update_log['updated'] + 1
                # NOTE(review): this log line prints the *new* value ('0') as
                # the "from" value because it runs after the assignment.
                fw.write("The is_obsolete for " + x['id'] + " has been updated from " + y.is_obsolete + " to " + 'False' + "\n")
            if x['term'] != y.display_name:
                ## update term
                fw.write("The display_name for " + x['id'] + " has been updated from " + y.display_name + " to " + x['term'] + "\n")
                y.display_name = x['term']
                nex_session.add(y)
                nex_session.flush()
                update_log['updated'] = update_log['updated'] + 1
                print("UPDATED: ", y.edamid, y.display_name, x['term'])
            # else:
            #    print "SAME: ", y.edamid, y.display_name, x['definition'], x['aliases'], x['parents']
            active_edamid.append(x['id'])
        else:
            # Brand-new term: create the Edam row and its satellite rows.
            fw.write("NEW entry = " + x['id'] + " " + x['term'] + "\n")
            this_x = Edam(source_id = source_to_id[src],
                          format_name = x['id'],
                          edamid = x['id'],
                          display_name = x['term'],
                          edam_namespace = x['namespace'],
                          description = x['definition'],
                          obj_url = '/edam/' + x['id'],
                          is_obsolete = '0',
                          created_by = CREATED_BY)
            nex_session.add(this_x)
            nex_session.flush()
            edam_id = this_x.edam_id
            update_log['added'] = update_log['added'] + 1
            # print "NEW: ", x['id'], x['term'], x['definition']

            ## add three URLs
            link_id = x['id'].replace(':', '_')
            insert_url(nex_session, source_to_id['Ontobee'], 'Ontobee', edam_id,
                       'http://www.ontobee.org/ontology/EDAM?iri=http://purl.obolibrary.org/obo/'+link_id,
                       fw)
            insert_url(nex_session, source_to_id['BioPortal'], 'BioPortal', edam_id,
                       'http://bioportal.bioontology.org/ontologies/EDAM/?p=classes&conceptid=http%3A%2F%2Fpurl.obolibrary.org%2Fobo%2F' + link_id,
                       fw)
            insert_url(nex_session, source_to_id['OLS'], 'OLS', edam_id,
                       'http://www.ebi.ac.uk/ols/ontologies/edam/terms?iri=http%3A%2F%2Fpurl.obolibrary.org%2Fobo%2F' + link_id,
                       fw)

            ## add RELATIONS
            for parent_edamid in x['parents']:
                parent = edamid_to_edam.get(parent_edamid)
                if parent is not None:
                    parent_id = parent.edam_id
                    child_id = edam_id
                    insert_relation(nex_session, source_to_id[src], parent_id,
                                    child_id, ro_id, relation_just_added, fw)

            ## add ALIASES
            for (alias, alias_type) in x['aliases']:
                insert_alias(nex_session, source_to_id[src], alias,
                             alias_type, edam_id, alias_just_added, fw)

        ## update RELATIONS
        # print x['id'], "RELATION", edam_id_to_parent.get(edam_id), x['parents']
        update_relations(nex_session, edam_id, edam_id_to_parent.get(edam_id), x['parents'],
                         source_to_id[src], edamid_to_edam, ro_id, relation_just_added, fw)

        ## update ALIASES
        # print x['id'], "ALIAS", edam_id_to_alias.get(edam_id), x['aliases']
        update_aliases(nex_session, edam_id, edam_id_to_alias.get(edam_id), x['aliases'],
                       source_to_id[src], edamid_to_edam, alias_just_added, fw)

    # Mark database terms that vanished from this release as obsolete
    # (NTR = "new term request" entries are skipped).
    to_delete = []
    for edamid in edamid_to_edam:
        if edamid in active_edamid:
            continue
        x = edamid_to_edam[edamid]
        if edamid.startswith('NTR'):
            continue
        to_delete.append((edamid, x.display_name))
        if x.is_obsolete is False:
            x.is_obsolete = '1'
            # nex_session.add(x)
            # nex_session.flush()
            update_log['updated'] = update_log['updated'] + 1
            fw.write("The is_obsolete for " + x.edamid + " has been updated from " + x.is_obsolete +" to " + 'True' + "\n")

    nex_session.commit()

    return [update_log, to_delete]
def update_aliases(nex_session, edam_id, curr_aliases, new_aliases, source_id, edamid_to_edam, alias_just_added, fw):
    """Synchronise the alias rows of *edam_id* with *new_aliases*.

    Inserts (name, type) pairs missing from the database and deletes rows
    whose pair no longer appears in the ontology file.
    """
    existing = curr_aliases if curr_aliases is not None else []

    for alias, alias_type in new_aliases:
        if (alias, alias_type) not in existing:
            insert_alias(nex_session, source_id, alias, alias_type, edam_id, alias_just_added, fw)

    for alias, alias_type in existing:
        if (alias, alias_type) not in new_aliases:
            ## remove the old one
            stale = nex_session.query(EdamAlia).filter_by(edam_id=edam_id, display_name=alias, alias_type=alias_type).first()
            nex_session.delete(stale)
            fw.write("The old alias = " + alias + " has been deleted for edam_id = " + str(edam_id) + "\n")
def update_relations(nex_session, child_id, curr_parent_ids, new_parents, source_id, edamid_to_edam, ro_id, relation_just_added, fw):
    """Synchronise the parent relations of *child_id* with *new_parents*.

    Inserts missing parent links (parents not yet in the database are
    skipped) and deletes links whose parent no longer appears in the file.
    """
    existing = curr_parent_ids if curr_parent_ids is not None else []

    new_ids = []
    for parent_edamid in new_parents:
        parent = edamid_to_edam.get(parent_edamid)
        if parent is None:
            continue
        new_ids.append(parent.edam_id)
        if parent.edam_id not in existing:
            insert_relation(nex_session, source_id, parent.edam_id, child_id,
                            ro_id, relation_just_added, fw)

    for parent_id in existing:
        if parent_id not in new_ids:
            ## remove the old one
            stale = nex_session.query(EdamRelation).filter_by(child_id=child_id, parent_id=parent_id).first()
            nex_session.delete(stale)
            fw.write("The old parent: parent_id = " + str(parent_id) + " has been deleted for edam_id = " + str(child_id) + "\n")
def insert_url(nex_session, source_id, display_name, edam_id, url, fw):
    """Insert an EdamUrl row pointing *edam_id* at an external ontology browser."""
    new_url = EdamUrl(
        display_name=display_name,
        url_type=display_name,  # type mirrors the browser name (Ontobee/BioPortal/OLS)
        source_id=source_id,
        edam_id=edam_id,
        obj_url=url,
        created_by=CREATED_BY,
    )
    nex_session.add(new_url)
    nex_session.flush()
    fw.write("Added new URL: " + url + " for edam_id = " + str(edam_id) + "\n")
def insert_alias(nex_session, source_id, display_name, alias_type, edam_id, alias_just_added, fw):
    """Insert an EdamAlia row, skipping pairs already added this run."""
    key = (edam_id, display_name, alias_type)
    if key in alias_just_added:
        return
    alias_just_added[key] = 1

    row = EdamAlia(
        display_name=display_name,
        alias_type=alias_type,
        source_id=source_id,
        edam_id=edam_id,
        created_by=CREATED_BY,
    )
    nex_session.add(row)
    nex_session.flush()
    fw.write("Added new ALIAS: " + display_name + " for edam_id = " + str(edam_id) + "\n")
def insert_relation(nex_session, source_id, parent_id, child_id, ro_id, relation_just_added, fw):
    """Insert an EdamRelation (parent->child) row, skipping pairs already added this run."""
    key = (parent_id, child_id)
    if key in relation_just_added:
        return
    relation_just_added[key] = 1

    row = EdamRelation(
        parent_id=parent_id,
        child_id=child_id,
        source_id=source_id,
        ro_id=ro_id,
        created_by=CREATED_BY,
    )
    nex_session.add(row)
    nex_session.flush()
    fw.write("Added new PARENT: parent_id = " + str(parent_id) + " for edam_id = " + str(child_id) + "\n")
def write_summary_and_send_email(fw, update_log, to_delete_list):
    """Write a human-readable run summary to *fw* and echo it to stdout.

    NOTE(review): despite the name, no email is actually sent here.
    """
    lines = ["Updated: " + str(update_log['updated']) + "\n",
             "Added: " + str(update_log['added']) + "\n"]
    if len(to_delete_list) > 0:
        lines.append("The following EDAM terms are not in the current release:\n")
        for edamid, term in to_delete_list:
            lines.append("\t" + edamid + " " + term + "\n")

    summary = "".join(lines)
    fw.write(summary)
    print(summary)
if __name__ == "__main__":

    # Download the pinned EDAM release and load it into NEX2.
    # http://edamontology.org/EDAM_1.20.owl
    url_path = "http://edamontology.org/"
    owl_file = "EDAM_1.20.owl"

    urllib.request.urlretrieve(url_path + owl_file, owl_file)

    load_ontology(owl_file)
msspec/read/resolution.py | MatteoLacki/msspec | 0 | 12758638 | import re
pattern = re.compile(r"(\d+([.,]\d*)?|([.,]\d*))([a-zA-Z]+)")


def parse(x='0.0Da'):
    """Parse a resolution specification.

    Accepts a bare number (interpreted as ppm) or a string such as
    '5ppm', '4mmu', '.02Da'. Returns ``(value, kind)`` where *kind* is
    'abs' for Da/Th/mmu and 'rel' for ppm (value scaled to a fraction).
    """
    try:
        value = float(x)
        unit = 'ppm'
    except ValueError:
        compact = x.replace(" ", "")
        match = re.match(pattern, compact)
        unit = match[4].lower()
        assert unit in ('da', 'th', 'mmu', 'ppm'), "Wrong or missing unit."
        value = float(match[1].replace(',', '.'))
    kind = 'abs'
    if unit == 'mmu':
        value /= 1000.
    if unit == 'ppm':
        kind = 'rel'
        value *= 1e-6
    if value == 0:
        print("WARNING: infinite resolution. God mode on?")
    return value, kind
def test_parse():
    """Exercise parse() on absolute, relative and bare-numeric inputs."""
    cases = [
        ("0.05Da", (0.05, 'abs')),
        ("0.05Th", (0.05, 'abs')),
        ("0,05Th", (0.05, 'abs')),
        ("0,05Da", (0.05, 'abs')),
        ("50.0mmu", (0.05, 'abs')),
        ("50mmu", (0.05, 'abs')),
        ("5.0ppm", (5.0 * 1e-6, 'rel')),
        ("5,0ppm", (5.0 * 1e-6, 'rel')),
        (",2ppm", (.2 * 1e-6, 'rel')),
        (".3ppm", (.3 * 1e-6, 'rel')),
        (.3, (.3 * 1e-6, 'rel')),
        (3, (3 * 1e-6, 'rel')),
    ]
    for raw, expected in cases:
        assert parse(raw) == expected
projetosiniciantes/desenvolvimento2.py | lordvinick/Python | 0 | 12758639 | # Escolha2 (passageira)
# Choice 2 ("the passenger" storyline).

# NOTE(review): `nick` (the player's name) must be defined earlier in the
# game script before this fragment runs — confirm when assembling the game.
print(f'''{nick} era gerente em uma loja de roupas no shopping da
barra, numa noite de quinta feira,encerrando seu expediente,
quando foi chamar um uber para ir pra casa,
ela notou que seu celular, que mais cedo tinha colocado-o
pra carregar, não estava conectado e a bateria dele estava nos
3%, ela então decidiu descer e ir para o ponto de ônibus,
quando chegou no ponto, o unico ônibus que servia pra ela,
tinha acabado de sair, sendo que ele so passava a cada 1 hora.''')  # bug fix: the closing ")" was missing (SyntaxError)
print('O que vc decide fazer esperar o proximo onibus[o] ou pedir um táxi[t]?')
decisao1 = str(input('-> '))
# Decision "o": wait for the next bus.
# NOTE(review): both branches below print unconditionally; branching on
# `decisao1` appears to be unimplemented still.
print(f'''Ela então resolvi esperar o proximo ônibus, no ponto havia umas 5 pessoas,
{nick} resolve se sentar um pouco já q a espera ia ser longa,
o tempo vai passando e ponto vai esvaziando, até quw ele fica
completamente vazio e local se torna dezerto.''')
print('Você ainda assim, continua esperando o ônibus ou tenta chamar um táxi')
# Decision "t": take a taxi.
print('''ela então decidiu pegar um tâxi, pois não estava a fim de ficar esperando,
entrou no táxi, o motorista perguntou qual era o destino,
ela dissse que ia pro alto do coqueirinho, quando o taxista diz a ela o valor da corrida,
ela percebe que é insuficiente o dinheiro q tinha na carteira.''')
print('Você continua e quando chegar ao destino resolve[1] ou desse e espera o proximo ônibus?[2]')
dec = int(input('-> '))
# dec: answer for the "t" decision (unused so far).
| 3.8125 | 4 |
src/posts/urls.py | eduardkh/matkonim2 | 0 | 12758640 | <filename>src/posts/urls.py
from django.urls import path
import posts.views
# for media files in test env
from django.conf import settings
from django.conf.urls.static import static
# URL namespace for reversing, e.g. "posts:index".
app_name = 'posts'
urlpatterns = [
    path('', posts.views.index, name='index'),
    # Recipe detail pages are addressed by slug.
    path('<slug:slug>', posts.views.detail, name='recipe_detail'),
]
# Serve user-uploaded media through Django itself in development only.
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL,
                          document_root=settings.MEDIA_ROOT)
| 1.835938 | 2 |
2019/day01/day01_part1.py | boffman/adventofcode | 0 | 12758641 | <gh_stars>0
def calc_fuel(mass):
    """Return the fuel required to launch a module of the given mass.

    Fuel is mass divided by three, rounded down, minus two (AoC 2019 day 1).
    Uses integer floor division instead of int(mass / 3): the float form
    loses precision for very large masses. Masses are assumed non-negative.
    """
    return mass // 3 - 2
# Read one module mass per line and print the total fuel requirement.
with open("input.txt") as infile:
    fuel_sum = sum(calc_fuel(int(raw.strip())) for raw in infile)
    print(fuel_sum)
| 3.53125 | 4 |
synth/data_pipeline/SDN.py | MTG/content_choral_separation | 5 | 12758642 | # Generator functions to generate batches of data.
import numpy as np
import os
import time
import h5py
import matplotlib.pyplot as plt
import collections
from synth.config import config
from synth.utils import utils
def data_gen_SDN(mode = 'Train', sec_mode = 0):
    """Yield (features, noisy STFT, speaker indices) batches for SDN training.

    mode selects the train or validation file list; sec_mode is currently
    unused. Each yielded batch contains config.batch_size windows of
    config.max_phr_len frames, with random noise and backing-track bleed
    mixed into the vocal STFTs.
    """

    # Per-dimension normalisation bounds, padded slightly to keep values
    # strictly inside (0, 1) after scaling.
    with h5py.File(config.stat_file, mode='r') as stat_file:
        max_feat = stat_file["feats_maximus"][()] + 0.001
        min_feat = stat_file["feats_minimus"][()] - 0.001

    # Vocal feature files from every configured dataset except DAMP
    # (dataset name is the first "_"-separated token of the file name).
    voc_list = [x for x in os.listdir(config.feats_dir) if x.endswith('.hdf5') and x.split('_')[0].upper() in [x for x in config.datasets if x != "DAMP"]]

    # DAMP files are filtered by singer instead of dataset name.
    if "DAMP" in config.datasets:
        damp_list = [x for x in os.listdir(config.feats_dir) if x.endswith('.hdf5') and x.split('_')[1] in config.damp_singers]
        voc_list = voc_list+damp_list

    # if config.SDN_mix:
    #     back_list = [x for x in os.listdir(config.backing_dir) if x.endswith('.hdf5')]

    # voc_list = [x for x in voc_list if x not in ['csd_alto1_NinoDios_14.hdf5', 'jvs_jvs023_raw_song_unique_11.hdf5', 'jvs_jvs024_raw_song_unique_2.hdf5', 'csd_soprano3_NinoDios_18.hdf5', 'csd_tenor1_ElRossinyol_13.hdf5', 'csd_soprano3_NinoDios_5.hdf5', 'csd_tenor3_NinoDios_8.hdf5', 'csd_tenor2_NinoDios_13.hdf5', 'jvs_jvs047_raw_song_unique_4.hdf5', 'jvs_jvs098_raw_song_unique_1.hdf5', 'jvs_jvs023_raw_song_unique_9.hdf5', 'jvs_jvs023_raw_song_unique_14.hdf5', 'csd_soprano2_NinoDios_13.hdf5', 'csd_tenor4_LocusIste_12.hdf5', 'csd_bass4_NinoDios_5.hdf5', 'jvs_jvs014_raw_song_unique_15.hdf5', 'csd_soprano2_NinoDios_2.hdf5', 'csd_bass4_NinoDios_12.hdf5', 'jvs_jvs041_raw_song_unique_14.hdf5', 'csd_alto3_LocusIste_25.hdf5', 'jvs_jvs023_raw_song_unique_16.hdf5', 'jvs_jvs092_raw_song_unique_12.hdf5', 'jvs_jvs074_raw_song_unique_6.hdf5', 'jvs_jvs017_raw_song_unique_2.hdf5']]

    # NOTE(review): train excludes session '04' files but then re-adds the
    # first 90% of ALL files, so the splits can overlap — confirm intended.
    train_list = [x for x in voc_list if not x.split('_')[2]=='04'] + voc_list[:int(len(voc_list)*0.9)]

    val_list = [x for x in voc_list if x.split('_')[2]=='04']+ voc_list[int(len(voc_list)*0.9):]

    # Number of files sampled per batch so that files * samples = batch_size.
    max_files_to_process = int(config.batch_size/config.autovc_samples_per_file)

    if mode == "Train":
        num_batches = config.autovc_batches_per_epoch_train
        file_list = train_list
    else:
        num_batches = config.autovc_batches_per_epoch_val
        file_list = val_list

    for k in range(num_batches):

        feats_targs = []
        stfts_targs = []
        targets_speakers = []

        # if config.SDN_mix:
        #     back_index = np.random.randint(0,len(back_list))
        #     back_to_open = back_list[back_index]
        #     with h5py.File(os.path.join(config.backing_dir,back_to_open), "r") as hdf5_file:
        #         back = hdf5_file['backing_stft'][()]
        #     back = np.clip(back, 0.0, 1.0)

        for i in range(max_files_to_process):

            voc_index = np.random.randint(0,len(file_list))
            voc_to_open = file_list[voc_index]

            with h5py.File(os.path.join(config.feats_dir,voc_to_open), "r") as hdf5_file:
                mel = hdf5_file['feats'][()]
                back = hdf5_file['back_stft'][()]
                stfts = hdf5_file['stfts'][()]

            back = np.clip(back, 0.0, 1.0)

            # Replace unvoiced frames (f0 == 0 in the second-to-last column)
            # with the file's median voiced f0 to avoid zero outliers.
            f0 = mel[:,-2]
            med = np.median(f0[f0 > 0])
            f0[f0==0] = med
            mel[:,-2] = f0

            # Speaker identity is the second "_"-separated token of the name.
            speaker_name = voc_to_open.split('_')[1]
            speaker_index = config.singers.index(speaker_name)

            # Min-max normalise features; clamp STFT magnitudes to [0, 1].
            mel = (mel - min_feat)/(max_feat-min_feat)
            stfts = np.clip(stfts, 0.0, 1.0)

            assert mel.max()<=1.0 and mel.min()>=0.0, "Error in file {}, max: {}, min: {}".format(voc_to_open, mel.max(), mel.min())

            # Draw several random windows from this file and corrupt the
            # STFT with noise, random vocal gain, and backing-track bleed.
            for j in range(config.autovc_samples_per_file):
                voc_idx = np.random.randint(0,len(mel)-config.max_phr_len)
                feats_targs.append(mel[voc_idx:voc_idx+config.max_phr_len])
                noise = np.random.rand(config.max_phr_len,stfts.shape[-1])*np.random.uniform(0.0,config.noise_threshold)
                back_gain = np.random.uniform(0.0, config.back_threshold)
                stft = stfts[voc_idx:voc_idx+config.max_phr_len]*np.random.uniform(back_gain, 1.0) + noise
                back_sample = back[voc_idx:voc_idx+config.max_phr_len]
                stft = stft + back_sample * back_gain

                # if config.SDN_mix:
                #     back_idx = np.random.randint(0,len(back)-config.max_phr_len)
                #     back_sample = back[back_idx:back_idx+config.max_phr_len]
                #     stft = stft + back_sample * back_gain

                stfts_targs.append(stft)
                targets_speakers.append(speaker_index)

        feats_targs = np.array(feats_targs)
        stfts_targs = np.array(stfts_targs)

        yield feats_targs, stfts_targs, np.array(targets_speakers)
sherpa_client/models/role_update.py | kairntech/sherpa-client | 0 | 12758643 | <filename>sherpa_client/models/role_update.py
from typing import Any, Dict, List, Type, TypeVar, Union, cast
import attr
from ..types import UNSET, Unset
T = TypeVar("T", bound="RoleUpdate")


@attr.s(auto_attribs=True)
class RoleUpdate:
    """Partial role-update payload: an optional label and permission list."""

    label: Union[Unset, str] = UNSET
    permissions: Union[Unset, List[str]] = UNSET

    def to_dict(self) -> Dict[str, Any]:
        # Only fields that were explicitly set are serialised.
        serialized: Dict[str, Any] = {}
        if self.label is not UNSET:
            serialized["label"] = self.label
        if not isinstance(self.permissions, Unset):
            serialized["permissions"] = self.permissions
        return serialized

    @classmethod
    def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:
        data = src_dict.copy()
        return cls(
            label=data.pop("label", UNSET),
            permissions=cast(List[str], data.pop("permissions", UNSET)),
        )
| 2.125 | 2 |
interview/test/test_cracking_the_coding_interview.py | ssavinash1/Algorithm_stanford | 0 | 12758644 | # -*- coding: utf-8 -*-
import unittest
from cracking_the_coding_interview import *
class CrackingTheCodeInterview(unittest.TestCase):
# Chapter 1: Arrays and Strings
def test_problem_1_1(self):
data = 'alexandru'
self.assertFalse(problem_1_1(data), 'should detect duplicate chars')
data = 'alex'
self.assertTrue(problem_1_1(data), 'all chars are unique')
data = 'alexandru'
self.assertFalse(problem_1_1_bis(data), 'should detect duplicate chars')
data = 'alex'
self.assertTrue(problem_1_1_bis(data), 'all chars are unique')
def test_problem_1_2(self):
data = 'alex$'
expected = 'xela$'
actual = problem_1_2(data)
self.assertEqual(actual, expected, 'should invert correctly')
def test_problem_1_3(self):
data = ""
expected = ""
self.assertEqual(problem_1_3(data), expected, 'removed duplicate consecutive chars')
data = "a"
expected = "a"
self.assertEqual(problem_1_3(data), expected, 'removed duplicate consecutive chars')
data = "abc"
expected = "abc"
self.assertEqual(problem_1_3(data), expected, 'removed duplicate consecutive chars')
data = "abcc"
expected = "abc"
self.assertEqual(problem_1_3(data), expected, 'removed duplicate consecutive chars')
data = "aabc"
expected = "abc"
self.assertEqual(problem_1_3(data), expected, 'removed duplicate consecutive chars')
data = "abca"
expected = "abca"
self.assertEqual(problem_1_3(data), expected, 'removed duplicate consecutive chars')
data = "aaaa"
expected = "a"
self.assertEqual(problem_1_3(data), expected, 'removed duplicate consecutive chars')
def test_problem_1_4(self):
s1 = 'cat'
s2 = 'act'
self.assertTrue(problem_1_4(s1, s2), 'are anagrams')
s1 = 'cats'
s2 = 'act'
self.assertFalse(problem_1_4(s1, s2), 'are not anagrams')
s1 = 'aab'
s2 = 'aba'
self.assertTrue(problem_1_4(s1, s2), 'are anagrams')
s1 = 'aab'
s2 = 'abc'
self.assertFalse(problem_1_4(s1, s2), 'are not anagrams')
def test_problem_1_5(self):
s = ' '
expected = '%20%20%20'
actual = problem_1_5(s)
self.assertEqual(actual, expected, 'correct url encode spaces')
s = ' a '
expected = '%20a%20'
actual = problem_1_5(s)
self.assertEqual(actual, expected, 'correct url encode spaces')
s = 'ab'
expected = 'ab'
actual = problem_1_5(s)
self.assertEqual(actual, expected, 'correct url encode spaces')
def test_problem_1_6(self):
arr = [
[ 1, 2, 3, 4, 5],
[ 6, 7, 8, 9, 10],
[11, 12, 13, 14, 15],
[16, 17, 18, 19, 20],
[21, 22, 23, 24, 25],
]
actual = problem_1_6(arr)
expected = [
[21, 16, 11, 6, 1],
[22, 17, 12, 7, 2],
[23, 18, 13, 8, 3],
[24, 19, 14, 9, 4],
[25, 20, 15, 10, 5]
]
self.assertItemsEqual(actual, expected, 'should rotate array')
def test_problem_1_7(self):
arr = [
[ 0, 2, 3, 4, 5],
[ 6, 7, 8, 0, 10],
[ 0, 12, 0, 14, 15],
[16, 17, 18, 19, 20],
[21, 22, 23, 24, 25],
]
expected = [
[ 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0],
[ 0, 17, 0, 0, 20],
[ 0, 22, 0, 0, 25],
]
actual = problem_1_7(arr)
self.assertItemsEqual(actual, expected, 'should turn correct items to zero')
def test_problem_1_8(self):
s1 = 'waterbottle'
s2 = 'erbottlewat'
self.assertTrue(problem_1_8(s1, s2), 'should detect rotated string')
s1 = 'waterbottle'
s2 = 'wabottleter'
self.assertFalse(problem_1_8(s1, s2), 'should not detect any rotated string')
s1 = 'abcd'
s2 = 'bcdab'
self.assertFalse(problem_1_8(s1, s2), 'should not detect any rotated string')
# Chapter 2: Linked Lists.
def test_problem_2_1(self):
initial = SingleLinkedListNode.from_list([1,2,2,2,3,4,5])
actual = problem_2_1(initial)
expected = [1,2,3,4,5]
self.assertEqual(actual.to_list(), expected, 'should remove duplicate 2s')
def test_problem_2_2(self):
    """Find the n-th-to-last node of a singly linked list."""
    l = SingleLinkedListNode.from_list([1, 2, 3, 4, 5])
    actual = problem_2_2(l, 2).key
    self.assertEqual(actual, 4, 'should detect the correct value')
    # A one-element list has no 2nd-to-last node; expect None, not a key.
    # BUG FIX: a stale `expected = 4` assignment used to precede this
    # assertIsNone, which was dead code and misleading — removed.
    l = SingleLinkedListNode.from_list([1])
    actual = problem_2_2(l, 2)
    self.assertIsNone(actual, 'should detect index error')
    self.assertRaises(Exception, problem_2_2, None, -2,
        'should detect bad input params and raise exception')
    # n == 1 is the last node.
    l = SingleLinkedListNode.from_list([1, 2, 3, 4, 5])
    actual = problem_2_2(l, 1).key
    self.assertEqual(actual, 5, 'should detect the correct value')
    # n == len(list) is the head node.
    l = SingleLinkedListNode.from_list([1, 2, 3, 4, 5])
    actual = problem_2_2(l, 5).key
    self.assertEqual(actual, 1, 'should detect the correct value')
def test_problem_2_3(self):
l = SingleLinkedListNode.from_list([1,2,3,4,5])
node = l.next.next # Node with key 3.
problem_2_3(node)
actual = l.to_list()
expected = [1,2,4,5]
self.assertEqual(actual, expected, 'should have removed the key 3')
l = SingleLinkedListNode.from_list([1,2,3,4,5])
node = l.next.next.next.next # Node with key 5.
self.assertRaises(Exception, problem_2_3, node,
'should detect the last node in the list')
def test_problem_2_4(self):
l1 = SingleLinkedListNode.from_list([2,3,4])
l2 = SingleLinkedListNode.from_list([1,2,3])
expected = SingleLinkedListNode.from_list([3,5,7])
actual = problem_2_4(l1, l2)
self.assertEqual(actual.to_list(), expected.to_list(),
'should compute sum of two regular numbers')
l1 = SingleLinkedListNode.from_list([])
l2 = SingleLinkedListNode.from_list([1,2,3])
expected = SingleLinkedListNode.from_list([1,2,3])
actual = problem_2_4(l1, l2)
self.assertEqual(actual.to_list(), expected.to_list(),
'should compute sum when the other element is empty')
l1 = SingleLinkedListNode.from_list([1,2,3])
l2 = SingleLinkedListNode.from_list([2])
expected = SingleLinkedListNode.from_list([3,2,3])
actual = problem_2_4(l1, l2)
self.assertEqual(actual.to_list(), expected.to_list(),
'should compute sum when one number has less digits')
l1 = SingleLinkedListNode.from_list([9,9,9])
l2 = SingleLinkedListNode.from_list([9,9,9])
expected = SingleLinkedListNode.from_list([8,9,9,1])
actual = problem_2_4(l1, l2)
self.assertEqual(actual.to_list(), expected.to_list(),
'should compute sum when digit overflow occurs')
def test_problem_2_5(self):
l = SingleLinkedListNode.from_list([1,2,3,4,5,6,7])
start = l.next.next # Node with key 3.
last = start.next.next.next.next # Node with key 7.
last.next = start
actual = problem_2_5(l)
self.assertEqual(actual, start, 'should detect the start node')
# Chapter 3: Stacks and Queues
def test_problem_3_1(self):
ThreeStacks = problem_3_1()
stacks = ThreeStacks()
stacks.push(0, 1)
stacks.push(0, 2)
stacks.push(1, 3)
stacks.push(1, 4)
stacks.push(2, 5)
stacks.push(2, 6)
expected = [6, 12, 18, 0, None, 1, 0, 3, 2, 1, None, 3, 1, 9, 4, 2, None, 5, 2, 15, 6]
self.assertEqual(stacks.arr, expected, 'array should look ok after all those inserts')
value = stacks.pop(0)
self.assertEqual(value, 2, 'stack #0 produces 2')
value = stacks.pop(0)
self.assertEqual(value, 1, 'stack #0 produces 1')
value = stacks.pop(0)
self.assertIsNone(value, 'stack #0 is now empty')
value = stacks.pop(1)
self.assertEqual(value, 4, 'stack #1 produces 4')
value = stacks.pop(1)
self.assertEqual(value, 3, 'stack #1 produces 3')
value = stacks.pop(1)
self.assertIsNone(value, 'stack #1 is now empty')
value = stacks.pop(2)
self.assertEqual(value, 6, 'stack #2 produces 6')
value = stacks.pop(2)
self.assertEqual(value, 5, 'stack #2 produces 5')
value = stacks.pop(2)
self.assertIsNone(value, 'stack #2 is now empty')
expected = [None, None, None, '__', '__', '__', '__', '__', '__', '__', '__', '__', '__', '__', '__', '__', '__', '__', '__', '__', '__']
self.assertEqual(stacks.arr, expected, 'stacks are all empty')
def test_problem_3_2(self):
MinStack = problem_3_2()
s = MinStack()
s.push(2)
self.assertEqual(s.min(), 2, 'should compute the min value')
s.push(1)
self.assertEqual(s.min(), 1, 'should compute the min value')
s.pop()
self.assertEqual(s.min(), 2, 'should compute the min value')
s.pop()
self.assertIsNone(s.min(), 'no more components in the stack')
def test_problem_3_3(self):
SetOfStacks = problem_3_3()
s = SetOfStacks(2)
s.push(1)
s.push(2)
s.push(3)
s.push(4)
s.push(5)
self.assertEqual(len(s.stacks), 3, 'should have built three stacks')
self.assertEqual(s.stacks[0], [1, 2], 'should have filled the first stack')
self.assertEqual(s.stacks[1], [3, 4], 'should have filled the second stack')
self.assertEqual(s.stacks[2], [5], 'should have added to the third stack')
value = s.pop()
self.assertEqual(value, 5, 'removed the last value')
self.assertEqual(s.stacks[2], [], 'last stack is now empty')
value = s.pop()
self.assertEqual(value, 4, 'removed the last value')
self.assertEqual(len(s.stacks), 2, 'only two stacks are left')
self.assertEqual(s.stacks[1], [3], 'last stack now has only one element')
s.pop()
s.pop()
value = s.pop()
self.assertEqual(value, 1, 'removed the last element from the set of stacks')
self.assertEqual(len(s.stacks), 1, 'only one stack left')
self.assertEqual(s.stacks[0], [], 'last stack left is empty')
value = s.pop()
self.assertIsNone(value, 'no more data in the set of stacks')
self.assertEqual(len(s.stacks), 0, 'all stacks have been deleted')
s.push(1)
s.push(2)
s.push(3)
s.push(4)
s.push(5)
value = s.popAt(1)
self.assertEqual(value, 4, 'should have returned the last value of the second stack')
self.assertEqual(s.stacks[0], [1, 2], 'should have filled the first stack')
self.assertEqual(s.stacks[1], [3, 5], 'should have the value from the last stack')
self.assertEqual(s.stacks[2], [], 'should be left an empty stack')
def test_problem_3_4(self):
actual = problem_3_4([1,2,3,4,5,6], [], [])
expected = ([], [], [1,2,3,4,5,6])
self.assertEqual(actual, expected,
'should have moved the disks to the last rod')
def test_problem_3_5(self):
    """FIFO queue built from two stacks: enqueue two, dequeue until empty."""
    MyQueue = problem_3_5()
    queue = MyQueue()
    for value in (1, 2):
        queue.enqueue(value)
    self.assertEqual(len(queue), 2, 'two elements in the queue')
    self.assertEqual(queue.dequeue(), 1, 'should return the first value')
    self.assertEqual(queue.dequeue(), 2, 'should return the second value')
    self.assertIsNone(queue.dequeue(), 'queue is empty')
def test_problem_3_6(self):
l = Stack()
for i in [7, 2, 1, 8, 3]:
l.push(i)
problem_3_6(l)
for i in [8, 7, 3, 2, 1]:
self.assertEqual(i, l.pop(), 'should have sorted the queue')
# Chapter 4: Trees and Graphs
def test_problem_4_1(self):
    """Check balanced-tree detection on one unbalanced and one balanced tree."""
    # Unbalanced tree: the n3 branch is two levels deeper than its siblings.
    # (Fixed spelling: comments/messages said 'inballanced'/'ballanced'.)
    r = TreeNode('r')
    n1 = TreeNode('1')
    n2 = TreeNode('2')
    n3 = TreeNode('3')
    n4 = TreeNode('4')
    n5 = TreeNode('5')
    n6 = TreeNode('6')
    r.children = [n1, n2, n3]
    n3.children = [n4, n5]
    n4.children = [n6]
    self.assertFalse(problem_4_1(r), 'should detect that the tree is unbalanced')
    # Balanced tree: all leaves within one level of each other.
    r = TreeNode('r')
    n1 = TreeNode('1')
    n2 = TreeNode('2')
    n3 = TreeNode('3')
    n4 = TreeNode('4')
    n5 = TreeNode('5')
    n6 = TreeNode('6')
    r.children = [n1, n2, n3]
    n1.children = [n4, n5]
    n2.children = [n6]
    self.assertTrue(problem_4_1(r), 'should detect that the tree is balanced')
def test_problem_4_2(self):
v1 = GraphVertex('1')
v2 = GraphVertex('2')
v3 = GraphVertex('3')
v4 = GraphVertex('4')
v5 = GraphVertex('5')
v1.adjacent = [v2, v3]
v2.adjacent = [v4]
v3.adjacent = [v4]
v5.adjacent = [v4]
self.assertTrue(problem_4_2(v1, v4), 'there is a direct route from v1 to v4')
self.assertTrue(problem_4_2(v5, v4), 'there is a direct route from v5 to v4')
self.assertFalse(problem_4_2(v1, v5), 'there is no direct route from v1 to v5')
def test_problem_4_3(self):
arr = [1,2,3,4,5,6]
actual = problem_4_3(arr)
self.assertEqual(actual.key, 3)
self.assertEqual(actual.children[0].key, 1)
self.assertEqual(actual.children[0].children[1].key, 2)
self.assertEqual(actual.children[1].key, 5)
self.assertEqual(actual.children[1].children[0].key, 4)
self.assertEqual(actual.children[1].children[1].key, 6)
def test_problem_4_4(self):
r = TreeNode('r')
n1 = TreeNode('1')
n2 = TreeNode('2')
n3 = TreeNode('3')
n4 = TreeNode('4')
n5 = TreeNode('5')
n6 = TreeNode('6')
r.children = [n1, n2]
n1.children = [n3, n4]
n2.children = [n5, n6]
lists = problem_4_4(r)
self.assertEqual(len(lists), 3, 'three lists are produces')
self.assertEqual(lists[0].key, r, 'the first list contains the root')
self.assertEqual(lists[1].key, n1, 'first level of nodes')
self.assertEqual(lists[1].next.key, n2, 'first level of nodes')
self.assertEqual(lists[2].key, n3, 'second level of nodes')
self.assertEqual(lists[2].next.key, n4, 'second level of nodes')
self.assertEqual(lists[2].next.next.key, n5, 'second level of nodes')
self.assertEqual(lists[2].next.next.next.key, n6, 'second level of nodes')
def test_problem_4_5(self):
""" The tree under test is the following:
(4)
/ \
(2) (6)
/ \ / \
(1) (3) (5) (7)
"""
n1 = BinaryTreeNode('1')
n2 = BinaryTreeNode('2')
n3 = BinaryTreeNode('3')
n4 = BinaryTreeNode('4')
n5 = BinaryTreeNode('5')
n6 = BinaryTreeNode('6')
n7 = BinaryTreeNode('7')
n4.left = n2; n4.right = n6
n2.left = n1; n2.right = n3
n6.left = n5; n6.right = n7
n2.parent = n6.parent = n4
n1.parent = n3.parent = n2
n5.parent = n7.parent = n6
self.assertEqual(problem_4_5(n4), n5, 'successor to 4 is 5')
self.assertEqual(problem_4_5(n6), n7, 'successor to 6 is 7')
self.assertEqual(problem_4_5(n1), n2, 'successor to 1 is 2')
def test_problem_4_6(self):
""" The tree under test is the following:
(1)
/ | \
(2) (3) (4)
| | \
(5) (6) (7)
"""
n1 = TreeNode('1')
n2 = TreeNode('2')
n3 = TreeNode('3')
n4 = TreeNode('4')
n5 = TreeNode('5')
n6 = TreeNode('6')
n7 = TreeNode('7')
n1.children = [n2, n3, n4]
n2.children = [n5]
n3.children = [n6, n7]
n2.parent = n3.parent = n4.parent = n1
n5.parent = n2
n6.parent = n7.parent = n3
self.assertEqual(problem_4_6(n5, n7), n1, '1 is root for 5 and 7')
self.assertEqual(problem_4_6(n5, n2), n2, '2 is root for 5 and 2')
def test_problem_4_7(self):
    """Verify subtree detection: the subtree rooted at n7 is part of n1's tree."""
    n1 = BinaryTreeNode(1)
    n2 = BinaryTreeNode(2)
    n3 = BinaryTreeNode(3)
    n4 = BinaryTreeNode(4)
    n5 = BinaryTreeNode(5)
    n6 = BinaryTreeNode(6)
    n7 = BinaryTreeNode(7)
    n8 = BinaryTreeNode(8)
    n1.left = n4
    n1.right = n7
    n4.left = n3
    n4.right = n5  # BUG FIX: was 'n4.rigth' (typo), which left n4.right unset
    n7.left = n6
    n7.right = n8
    n3.left = n2
    self.assertTrue(problem_4_7(n1, n7), '7 is the child of 1 so a subtree')
def test_problem_4_8(self):
# TODO make this work!
pass
# Chapter 5: Bit Manipulation.
def test_problem_5_1(self):
n = int('10000000000', 2)
m = int('10101', 2)
i = 2
j = 6
actual = problem_5_1(n, m, i, j)
expected = int('10001010100', 2)
self.assertEqual(actual, expected, 'should produce the correct value')
n = int('11111111111', 2)
m = int('10101', 2)
i = 2
j = 6
actual = problem_5_1(n, m, i, j)
expected = int('11111010111', 2)
self.assertEqual(actual, expected, 'should produce the correct value')
def test_problem_5_2(self):
n = 3.75
actual = problem_5_2(n)
expected = '11,11'
self.assertEqual(actual, expected, 'should return the correct value')
n = 3.17
self.assertRaises(Exception, problem_5_2, n,
'should raise because no accurate representation can be computed')
def test_problem_5_3(self):
n = int('111', 2)
self.assertRaises(Exception, problem_5_3, n, 'should detect that '+
'there is no smaller number but with the same number of set bits')
n = int('110', 2)
actual = problem_5_3(n)
expected = (int('101', 2), int('1001', 2))
self.assertEqual(actual, expected, 'should produce the correct values')
def test_problem_5_5(self):
    """Count how many bit positions differ between two integers."""
    # 31 = 0b11111, 14 = 0b01110 -> 2 differing bits;
    # 67 = 0b1000011, 143 = 0b10001111 -> 4 differing bits.
    for lhs, rhs, want in ((31, 14, 2), (67, 143, 4)):
        self.assertEqual(problem_5_5(lhs, rhs), want,
            'should compute the number of different bits')
def test_problem_5_6(self):
n = int('10', 2)
expected = int('01', 2)
actual = problem_5_6(n)
self.assertEqual(actual, expected, 'should swap bits correctly')
n = int('111011', 2)
expected = int('110111', 2)
actual = problem_5_6(n)
self.assertEqual(actual, expected, 'should swap bits correctly')
n = int('01110101', 2)
expected = int('10111010', 2)
actual = problem_5_6(n)
self.assertEqual(actual, expected, 'should swap bits correctly')
def x_test_problem_5_7(self):
arr = [1,2,4,5,6,7,8,9]
expected = 3
actual = problem_5_7(arr)
self.assertEqual(actual, expected, 'should detect the missing value')
# Chapter 8: Recursion
def test_problem_8_1(self):
self.assertEqual(problem_8_1(3), 3,
'should correctly compute the fib number')
def test_problem_8_2(self):
actual = problem_8_2(2)
expected = 2
self.assertEqual(actual, expected, 'only two ways to get from the '+
'top left to the botton right corners')
actual = problem_8_2(3)
expected = 6
self.assertEqual(actual, expected, 'only two ways to get from the '+
'top left to the botton right corners')
def test_problem_8_2_bis(self):
grid = [
[0, 0],
[1, 0]
]
actual = problem_8_2_bis(grid)
expected = [[(0,0), (0,1), (1,1)]]
self.assertItemsEqual(actual, expected,
'should compute the correct available paths')
grid = [
[0, 0, 0],
[0, 1, 0],
[0, 1, 0]
]
actual = problem_8_2_bis(grid)
expected = [[(0,0), (0,1), (0,2), (1,2), (2,2)]]
self.assertItemsEqual(actual, expected,
'should compute the correct available paths')
grid = [
[0, 0, 0],
[1, 1, 0],
[0, 0, 0]
]
actual = problem_8_2_bis(grid)
expected = [[(0,0), (0,1), (0,2), (1,2), (2,2)]]
self.assertItemsEqual(actual, expected,
'should compute the correct available paths')
grid = [
[0, 0, 0],
[1, 1, 0],
[1, 1, 0]
]
actual = problem_8_2_bis(grid)
expected = [[(0,0), (0,1), (0,2), (1,2), (2,2)]]
self.assertItemsEqual(actual, expected,
'should compute the correct available paths')
grid = [
[0, 0, 0],
[0, 1, 0],
[0, 0, 0]
]
actual = problem_8_2_bis(grid)
expected = [[(0,0), (0,1), (0,2), (1,2), (2,2)],
[(0,0), (1,0), (2,0), (2,1), (2,2)]]
self.assertItemsEqual(actual, expected,
'should compute the correct available paths')
def test_problem_8_3(self):
data = set([1,2,3])
expected = [set([1]), set([2]), set([3]), set([1,2]), set([1,3]),
set([2,3]), set([1,2,3]), set([])]
actual = problem_8_3(data)
self.assertItemsEqual(actual, expected,
'should compute all the subsets of a set')
def test_problem_8_4(self):
data = 'abc'
expected = ['abc', 'acb', 'bac', 'bca', 'cab', 'cba']
actual = problem_8_4(data)
self.assertItemsEqual(actual, expected,
'should compute all permutations')
def test_problem_8_5(self):
expected = ['()()()', '()(())', '(())()', '((()))', '(()())']
actual = problem_8_5(3)
self.assertItemsEqual(actual, expected,
'should produce correct prantheses')
def test_problem_8_6(self):
canvas = [
[1, 0, 0, 1, 0],
[1, 0, 1, 1, 0],
[0, 1, 1, 1, 0],
[0, 1, 1, 0, 1],
[1, 0, 0, 0, 1]
]
expected = [
[1, 0, 0, 2, 0],
[1, 0, 2, 2, 0],
[0, 2, 2, 2, 0],
[0, 2, 2, 0, 1],
[1, 0, 0, 0, 1]
]
problem_8_6(canvas, (2, 2), 2)
self.assertItemsEqual(canvas, expected,
'should color only on in straight line, not diagonals')
def test_problem_8_7(self):
change = 3
expected = 1
actual = problem_8_7(change)
self.assertEqual(actual, expected,
'should compute the number of combinations')
change = 6
expected = 2
actual = problem_8_7(change)
self.assertEqual(actual, expected,
'should compute the number of combinations')
change = 27
expected = 32
actual = problem_8_7(change)
self.assertEqual(actual, expected,
'should compute the number of combinations')
def test_problem_8_8(self):
actual = problem_8_8(4)
expected = 2
self.assertEqual(len(actual), expected, 'should compute how many ways '+
'one can arrange 4 queens on a 4x4 table so that they do not attach each other')
actual = problem_8_8(8)
expected = 92
self.assertEqual(len(actual), expected, 'should compute how many ways '+
'one can arrange 8 queens on a 8x8 table so that they do not attach each other')
# Chapter 9: Searching and Sorting
def test_problem_9_1(self):
arr1 = [1, 2, 3, 4, None, None, None]
arr2 = [0, 5, 6]
expected = [0, 1, 2, 3, 4, 5, 6]
actual = problem_9_1(arr1, arr2)
self.assertEqual(actual, expected, 'should merge the two arrays')
arr1 = [1, 3, 5, 7, None, None, None, None]
arr2 = [0, 2, 4, 6]
expected = [0, 1, 2, 3, 4, 5, 6, 7]
actual = problem_9_1(arr1, arr2)
self.assertEqual(actual, expected, 'should merge the two arrays')
arr1 = [2, 3, 4, None, None]
arr2 = [0, 1]
expected = [0, 1, 2, 3, 4]
actual = problem_9_1(arr1, arr2)
self.assertEqual(actual, expected, 'should merge the two arrays')
def test_problem_9_2(self):
    """Sort a list of strings so that anagrams end up adjacent."""
    strings = ['cat', 'act', 'boo', 'foo', 'baz']
    expected = ['cat', 'act', 'baz', 'boo', 'foo']
    actual = problem_9_2(strings)
    # FIX: assertEquals is a deprecated alias of assertEqual.
    self.assertEqual(actual, expected,
        'should sort the list such that anagrams are near-by')
def test_problem_9_3(self):
arr = [15, 16, 19, 20, 25, 1, 3, 4, 5, 7, 10, 14]
actual = problem_9_3(arr, 5)
expected = 8 # index of 5 in arr.
self.assertEqual(actual, expected, 'should compute the correct index')
arr = [8, 9, 1, 2, 3, 4, 5, 6, 7]
actual = problem_9_3(arr, 5)
expected = 6 # index of 5 in arr.
self.assertEqual(actual, expected, 'should compute the correct index')
arr = [3, 4, 5, 6, 7, 8, 9, 1, 2]
actual = problem_9_3(arr, 5)
expected = 2 # index of 5 in arr.
self.assertEqual(actual, expected, 'should compute the correct index')
def test_problem_9_5(self):
arr = ["at", "", "", "", "ball", "", "", "car", "", "", "dad", "", ""]
actual = problem_9_5(arr, 'ball')
expected = 4
self.assertEqual(actual, expected, 'should find the correct position of the word')
arr = ["at", "", "", "", "", "ball", "car", "", "", "dad", "", ""]
actual = problem_9_5(arr, 'ballcar')
expected = -1
self.assertEqual(actual, expected, 'should find the correct position of the word')
def test_problem_9_6(self):
mat = [
[1,2,3,4,5],
[6,7,8,9,10],
[11,12,13,14,15],
[16,17,18,19,20],
[21,22,23,24,25]
]
self.assertTrue(problem_9_6(mat, 15), 'should find the element 15')
self.assertTrue(problem_9_6(mat, 9), 'should find the element 9')
self.assertTrue(problem_9_6(mat, 22), 'should find the element 22')
self.assertFalse(problem_9_6(mat, 35), 'should not find element 35')
self.assertFalse(problem_9_6(mat, -5), 'should not find element -5')
def x_test_problem_9_7(self):
    """Circus-tower problem: longest stackable (height, weight) sequence."""
    data = [(75, 190), (70, 150), (68, 110), (65, 100), (60, 95), (56, 90)]
    # BUG FIX: the tuples below were juxtaposed without commas, which Python
    # parses as calls — (56, 90)(60, 95)... — and raises TypeError at runtime.
    expected = [(56, 90), (60, 95), (65, 100), (68, 110), (70, 150), (75, 190)]
    actual = problem_9_7(data)
    self.assertItemsEqual(actual, expected, 'should compute the highest tower')
    # (58, 140) breaks the height/weight ordering, so it must be excluded.
    data = [(75, 190), (70, 150), (58, 140), (65, 100), (60, 95), (56, 90)]
    expected = [(75, 190), (70, 150), (65, 100), (60, 95), (56, 90)]
    actual = problem_9_7(data)
    self.assertItemsEqual(actual, expected, 'should compute the highest tower')
# Chapter 10: Mathematical
def test_problem_10_4(self):
    """Arithmetic implemented via addition only: check all four operators."""
    cases = [
        ('+', 10, 20, 30, 'should produce the sum of the operands'),
        ('-', 20, 10, 10, 'should produce the diff of the operands'),
        ('*', 4, 5, 20, 'should produce the multiplication of the operands'),
        ('/', 5, 4, 1, 'should produce the div of the operands'),  # integer division
    ]
    for op, lhs, rhs, want, msg in cases:
        self.assertEqual(problem_10_4(op, lhs, rhs), want, msg)
def test_problem_10_5(self):
expected = (0, 1)
actual = problem_10_5(({'x': 1, 'y': 1}, {'x': 2, 'y': 1}),
({'x': 3, 'y': 1}, {'x': 4, 'y': 1}))
self.assertEqual(actual, expected,
'should correctly compute the line between the two centers')
def test_problem_10_6(self):
points = [(2,3), (4,5), (6,7), (8,9), (1,1), (2,2), (3,3)]
expected = {(2,3), (4,5), (6,7), (8,9)}
actual = problem_10_6(points)
self.assertEqual(actual, expected, 'should find largest set of points')
def test_problem_10_7(self):
# NOTE: THIS IS INCORRECT!
self.assertEqual(problem_10_7(0), 3*5*7, 'should have worked')
self.assertEqual(problem_10_7(1), 3*3*5*7, 'should have worked')
self.assertEqual(problem_10_7(2), 3*5*5*7, 'should have worked')
self.assertEqual(problem_10_7(3), 3*5*7*7, 'should have worked')
self.assertEqual(problem_10_7(4), 3*3*3*5*7, 'should have worked')
self.assertEqual(problem_10_7(5), 3*3*5*5*7, 'should have worked')
self.assertEqual(problem_10_7(6), 3*3*5*7*7, 'should have worked')
self.assertEqual(problem_10_7(7), 3*5*5*5*7, 'should have worked')
self.assertEqual(problem_10_7(8), 3*3*3*3*5*7, 'should have worked')
self.assertEqual(problem_10_7(9), 3*5*5*7*7, 'should have worked')
self.assertEqual(problem_10_7(10), 3*3*3*5*5*7, 'should have worked')
self.assertEqual(problem_10_7(11), 3*5*7*7*7, 'should have worked')
self.assertEqual(problem_10_7(12), 3*3*3*5*7*7, 'should have worked')
self.assertEqual(problem_10_7(13), 3*3*5*5*7*7, 'should have worked')
self.assertEqual(problem_10_7(14), 3*3*3*5*5*7*7, 'should have worked')
self.assertEqual(problem_10_7(15), 3*3*5*5*5*7*7, 'should have worked')
def test_problem_10_7_bis(self):
self.assertEqual(problem_10_7_bis(0), 3*5*7, 'should have worked')
self.assertEqual(problem_10_7_bis(1), 3*3*5*7, 'should have worked')
self.assertEqual(problem_10_7_bis(2), 3*5*5*7, 'should have worked')
self.assertEqual(problem_10_7_bis(3), 3*5*7*7, 'should have worked')
self.assertEqual(problem_10_7_bis(4), 3*3*3*5*7, 'should have worked')
self.assertEqual(problem_10_7_bis(5), 3*3*5*5*7, 'should have worked')
self.assertEqual(problem_10_7_bis(6), 3*3*5*7*7, 'should have worked')
self.assertEqual(problem_10_7_bis(7), 3*5*5*5*7, 'should have worked')
self.assertEqual(problem_10_7_bis(8), 3*3*3*3*5*7, 'should have worked')
self.assertEqual(problem_10_7_bis(9), 3*5*5*7*7, 'should have worked')
self.assertEqual(problem_10_7_bis(10), 3*3*3*5*5*7, 'should have worked')
self.assertEqual(problem_10_7_bis(11), 3*5*7*7*7, 'should have worked')
self.assertEqual(problem_10_7_bis(12), 3*3*3*5*7*7, 'should have worked')
self.assertEqual(problem_10_7_bis(13), 3*3*5*5*5*7, 'should have worked')
self.assertEqual(problem_10_7_bis(14), 3*3*3*3*3*5*7, 'should have worked')
self.assertEqual(problem_10_7_bis(15), 3*3*5*5*7*7, 'should have worked')
# Chapter 19. Additional Review Problems: Moderate
def test_problem_19_1(self):
    """Swap two numbers without a temporary variable."""
    first, second = 1, 2
    first, second = problem_19_1(first, second)
    self.assertEqual(first, 2, 'replace value of a with value of b')
    self.assertEqual(second, 1, 'replace value of b with value of a')
def test_problem_19_2(self):
table = [
[1, 0, 1],
[1, 0, 0],
[0, 1, 0]
]
self.assertIsNone(problem_19_2(table), 'no one wins this game')
table = [
[1, 1, 1],
[1, 0, 0],
[0, 1, 0]
]
self.assertTrue(problem_19_2(table), '1 wins the game')
table = [
[0, 1, 1],
[1, 0, 0],
[0, 1, 0]
]
self.assertFalse(problem_19_2(table), '0 wins the game')
def test_problem_19_3(self):
    """Count trailing zeros of n! (driven by the number of factors of five)."""
    self.assertEqual(problem_19_3(6), 1, '6! has only 1 trailing zeros')
    self.assertEqual(problem_19_3(10), 2, '10! has only 2 trailing zeros')
    self.assertEqual(problem_19_3(26), 6, '26! has 6 trailing zeros')
    # BUG FIX: the message said '10!' but the input under test is 100.
    self.assertEqual(problem_19_3(100), 24, '100! has 24 trailing zeros')
def test_problem_19_4(self):
    """Max of two numbers computed without comparison operators."""
    for args, want, msg in (
        ((10, 5), 10, 'should find max to be 10'),
        ((5, 6), 6, 'should find max to be 6'),
        ((20, 20), 20, 'numbers are equal'),
    ):
        self.assertEqual(problem_19_4(*args), want, msg)
def test_problem_19_5(self):
actual = problem_19_5('RGGB', 'YRGB')
expected = (2, 1)
self.assertEqual(actual, expected,
'should return the accurate hits and pseudo-hits')
def test_problem_19_6(self):
num = 12
expected = 'Twelve'
actual = problem_19_6(num)
self.assertEqual(actual, expected, 'should print the number in letters')
num = 67
expected = 'Sixty Seven'
actual = problem_19_6(num)
self.assertEqual(actual, expected, 'should print the number in letters')
num = 1000
expected = 'One Thousand'
actual = problem_19_6(num)
self.assertEqual(actual, expected, 'should print the number in letters')
num = 1001
expected = 'One Thousand, One'
actual = problem_19_6(num)
self.assertEqual(actual, expected, 'should print the number in letters')
num = 123
expected = 'One Hundred and Twenty Three'
actual = problem_19_6(num)
self.assertEqual(actual, expected, 'should print the number in letters')
num = 99909
expected = 'Ninty Nine Thousands, Nine Hundreds and Nine'
actual = problem_19_6(num)
self.assertEqual(actual, expected, 'should print the number in letters')
num = 999999
expected = 'Nine Hundreds and Ninty Nine Thousands, Nine Hundreds and Ninty Nine'
actual = problem_19_6(num)
self.assertEqual(actual, expected, 'should print the number in letters')
num = 1234
expected = 'One Thousand, Two Hundreds and Thirty Four'
actual = problem_19_6(num)
self.assertEqual(actual, expected, 'should print the number in letters')
| 3.8125 | 4 |
TidyTuesday/20210216-dubois-challenge.py | vivekparasharr/Challenges-and-Competitions | 6 | 12758645 |
import numpy as np
import pandas as pd
import plotly.express as px
# Pull the 2021-02-16 #TidyTuesday "Du Bois Challenge" datasets straight from
# the rfordatascience GitHub repository (requires network access at runtime).
georgia_pop = pd.read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-02-16/georgia_pop.csv')
census = pd.read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-02-16/census.csv')
furniture = pd.read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-02-16/furniture.csv')
city_rural = pd.read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-02-16/city_rural.csv')
income = pd.read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-02-16/income.csv')
freed_slaves = pd.read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-02-16/freed_slaves.csv')
occupation = pd.read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-02-16/occupation.csv')
conjugal = pd.read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-02-16/conjugal.csv')
# dubois challenge 1
# Population change by race in Georgia.
# Line chart with years on the y-axis (Du Bois style) and population on x.
yr = georgia_pop.Year.values
xC = georgia_pop.Colored.values
xW = georgia_pop.White.values
import plotly.graph_objects as go
fig = go.Figure()
fig.update_layout(width=500, height=700,
    legend=dict(yanchor="top", y=0.99, xanchor="left", x=0.55), #showlegend=False,
    title="Population change by race in Georgia",title_x=0.5,)
# One trace per racial group; years run along the y-axis.
fig.add_trace(go.Scatter(x=xC, y=yr, mode='lines', name='African American'))
fig.add_trace(go.Scatter(x=xW, y=yr, mode='lines', name='White American'))
# Footer annotation crediting the challenge and the author.
annotations=[]
annotations.append(dict(xref='paper', yref='paper', x=0.5, y=-0.1,
    xanchor='center', yanchor='top',
    text='#TidyTuesday - 2021/02/16 | twitter.com/vivekparasharr | github.com/vivekparasharr',
    font=dict(family='Arial', size=12, color='grey'),
    showarrow=False))
fig.update_layout(annotations=annotations)
fig.show()
# dubois challenge 2
# Marriage status
# Prepare data: rename and reshape the conjugal table from wide to long so
# each row is (Population, Age, Conjugal_Status, value).
conjugal = conjugal.replace('Negroes','African Americans')
conjugal.columns = ['Population', 'Age', 'c_Single', 'c_Married', 'c_Divorced_and_Widowed']
conjugal = pd.wide_to_long(conjugal, stubnames='c', i=['Population', 'Age'], j='Conjugal_Status', sep='_', suffix=r'\w+').reset_index()
conjugal.columns = ['Population', 'Age', 'Conjugal_Status', 'Conjugal_Status_Value']
import plotly.graph_objects as go
fig = go.Figure()
fig.update_layout(
    template="simple_white",
    yaxis=dict(title_text="Age"), xaxis=dict(title_text="Race Share Pct"),
    barmode="stack",
    legend=dict(yanchor="top", y=1.25, xanchor="left", x=0.50), #showlegend=False,
    title="Conjugal condition",title_x=0.5,
)
colors = ['firebrick','olive','dodgerblue']#,'blueviolet','dimgrey','tomato','sienna','darkorange','forestgreen','steelblue','royalblue','orchid']
#selected_colors = colors[:y_axis_levels]
labels={'Single':'Single', 'Married':'Married', 'Divorced_and_Widowed':'Divorced and Widowed'}
# One stacked horizontal bar trace per conjugal status, grouped by age/population.
for r, c in zip(conjugal.Conjugal_Status.unique(), colors):
    plot_df = conjugal[conjugal.Conjugal_Status == r]
    fig.add_trace(
        go.Bar(y=[plot_df.Age, plot_df.Population], x=plot_df.Conjugal_Status_Value, name=labels[r], marker_color=c, orientation='h', ),
    )
fig
# dubois challenge 3
# occupation
# Recreates the Du Bois fan-shaped pie: two opposing wedge groups separated
# by white spacer slices, rotated so the blank sectors sit left and right.
import plotly.graph_objects as go
labels = occupation.Category.tolist()
labels = ['Negroes: Agriculture, Fisheries and Mining',
    'Negroes: Manufacturing and Mechanical Industries',
    'Negroes: Domestic and Personal Service',
    'Negroes: Professions',
    'Negroes: Trade and Transportation',
    'Blank: Right',
    'Whites: Agriculture, Fisheries and Mining',
    'Whites: Manufacturing and Mechanical Industries',
    'Whites: Domestic and Personal Service',
    'Whites: Professions',
    'Whites: Trade and Transportation',
    'Blank: Left']
white_space=50 # this can be modified as needed
values = occupation.Percentage.tolist()
# Hard-coded percentages plus two white_space spacer slices.
values = [62.0, 5.0, 28.0, 0.8, 4.5, white_space, 64.0, 12.5, 5.5, 4.0, 13.0, white_space]
color_list = ['dimgray', 'firebrick', 'olive', 'saddlebrown', 'steelblue', 'white', 'dimgray', 'firebrick', 'olive', 'saddlebrown', 'steelblue', 'white']
# Rotation centers the first spacer slice so the fan opens symmetrically.
fig = go.Figure(data=[go.Pie(labels=None, values=values,
    direction='clockwise',
    rotation=(-((white_space/sum(values))*360)),
    sort=False, showlegend=False,
    title='Occupation by race')]) # , labels=labels, hole=0.4 to make a donut
fig.update_traces(marker=dict(colors=color_list), textinfo='none') #, line=dict(color='#000000', width=2)))
fig.show()
| 2.5625 | 3 |
ide/tasks/gist.py | Ramonrlb/cloudpebble | 147 | 12758646 | <filename>ide/tasks/gist.py
import json
import github
from celery import task
from django.db import transaction
from django.conf import settings
from ide.models.user import User
from ide.models.project import Project
from ide.utils.sdk import load_manifest_dict
from ide.models.files import SourceFile, ResourceFile, ResourceIdentifier, ResourceVariant
from ide.utils.project import APPINFO_MANIFEST, PACKAGE_MANIFEST
from ide.utils import generate_half_uuid
from utils.td_helper import send_td_event
from collections import defaultdict
import urllib2
@task(acks_late=True)
def import_gist(user_id, gist_id):
    """Create a new CloudPebble project for the given user from a GitHub gist.

    Looks up the gist, infers the project type from its files, merges the
    settings from any bundled manifest (package.json / appinfo.json) over a
    set of defaults, then creates the Project plus its source files and
    resources inside one database transaction. Returns the new project's id.
    Raises if the gist cannot be found or is a package-type project.
    """
    user = User.objects.get(pk=user_id)
    g = github.Github()
    try:
        gist = g.get_gist(gist_id)
    except github.UnknownObjectException:
        send_td_event('cloudpebble_gist_not_found', data={'data': {'gist_id': gist_id}}, user=user)
        raise Exception("Couldn't find gist to import.")
    files = gist.files
    default_name = gist.description or 'Sample project'
    # Baseline settings; any non-None values from the gist's manifest
    # override these further down.
    default_settings = {
        'name': default_name,
        'app_short_name': default_name,
        'app_long_name': default_name,
        'app_company_name': user.username,
        'app_version_label': '1.0',
        'app_is_watchface': False,
        'app_is_hidden': False,
        'app_is_shown_on_communication': False,
        'app_capabilities': '[]',
        'app_keys': '{}',
        'project_type': 'native',
        'app_modern_multi_js': False,
        'sdk_version': '2'
    }
    # A single-file gist (optionally plus one manifest) is classified by its
    # well-known entry-point filename.
    if len(files) == 1 or ((APPINFO_MANIFEST in files or PACKAGE_MANIFEST in files) and len(files) == 2):
        if 'simply.js' in files:
            default_settings['project_type'] = 'simplyjs'
        elif 'app.js' in files:
            default_settings['project_type'] = 'pebblejs'
        elif 'index.js' in files:
            default_settings['project_type'] = 'rocky'
    # If all files are .js or .json and there is an index.js, assume it's a rocky project.
    if all(x.endswith(('.js', '.json')) for x in gist.files) and 'index.js' in files:
        default_settings['project_type'] = 'rocky'
        default_settings['sdk_version'] = '3'
        default_settings['app_modern_multi_js'] = True
    media = []
    # Using defaultdict we can load project settings from a manifest dict which
    # has values that default to None, so load_manifest_dict can probe for
    # optional keys without KeyErrors.
    if PACKAGE_MANIFEST in files:
        content = json.loads(files[PACKAGE_MANIFEST].content)
        package = defaultdict(lambda: None)
        package.update(content)
        package['pebble'] = defaultdict(lambda: None)
        package['pebble'].update(content.get('pebble', {}))
        manifest_settings, media, dependencies = load_manifest_dict(package, PACKAGE_MANIFEST, default_project_type=None)
        # package.json projects always target SDK 3 with modern JS handling.
        default_settings['app_keys'] = '[]'
        default_settings['sdk_version'] = '3'
        default_settings['app_modern_multi_js'] = True
    elif APPINFO_MANIFEST in files:
        content = json.loads(files[APPINFO_MANIFEST].content)
        package = defaultdict(lambda: None)
        package.update(content)
        manifest_settings, media, dependencies = load_manifest_dict(package, APPINFO_MANIFEST, default_project_type=None)
    else:
        manifest_settings = {}
        dependencies = {}
    # Settings that must never be overridden by manifest contents.
    fixed_settings = {
        'owner': user,
        'app_uuid': generate_half_uuid()
    }
    # Precedence: fixed > manifest (non-None values only) > defaults.
    project_settings = {}
    project_settings.update(default_settings)
    project_settings.update({k: v for k, v in manifest_settings.iteritems() if v is not None})
    project_settings.update(fixed_settings)
    with transaction.atomic():
        project = Project.objects.create(**project_settings)
        project.set_dependencies(dependencies)
        project_type = project.project_type
        # NOTE(review): message typo -- "support" should read "supported".
        if project_type == 'package':
            raise Exception("Gist imports are not yet support for packages.")
        if project_type != 'simplyjs':
            for filename in gist.files:
                target = 'app'
                # Only source-like files are imported; manifests were
                # consumed above.
                if not filename.endswith(('.c', '.h', '.js', '.json')):
                    continue
                if filename in ('appinfo.json', 'package.json'):
                    continue
                # JS files belong to the phone-side (pkjs) target for native
                # projects; for rocky only app.js is phone-side.
                if project_type == 'native':
                    if filename.endswith(('.js', '.json')):
                        target = 'pkjs'
                elif project_type == 'rocky':
                    if filename == 'app.js':
                        target = 'pkjs'
                source_file = SourceFile.objects.create(project=project, file_name=filename, target=target)
                source_file.save_text(gist.files[filename].content)
            resources = {}
            for resource in media:
                kind = resource['type']
                def_name = resource['name']
                filename = resource['file']
                regex = resource.get('characterRegex', None)
                tracking = resource.get('trackingAdjust', None)
                memory_format = resource.get('memoryFormat', None)
                storage_format = resource.get('storageFormat', None)
                space_optimisation = resource.get('spaceOptimization', None)
                is_menu_icon = resource.get('menuIcon', False)
                compatibility = resource.get('compatibility', None)
                # Manifest may reference files that aren't in the gist.
                if filename not in gist.files:
                    continue
                # One ResourceFile per filename; multiple identifiers may
                # point at it.
                if filename not in resources:
                    resources[filename] = ResourceFile.objects.create(project=project, file_name=filename, kind=kind,
                                                                      is_menu_icon=is_menu_icon)
                # Fetch the raw file from GitHub instead of using .content,
                # which the gist API has already decoded to unicode and
                # would corrupt binary resources.
                default_variant = ResourceVariant.objects.create(resource_file=resources[filename], tags=ResourceVariant.TAGS_DEFAULT)
                default_variant.save_file(urllib2.urlopen(gist.files[filename].raw_url))
                ResourceIdentifier.objects.create(
                    resource_file=resources[filename],
                    resource_id=def_name,
                    character_regex=regex,
                    tracking=tracking,
                    compatibility=compatibility,
                    memory_format=memory_format,
                    storage_format=storage_format,
                    space_optimisation=space_optimisation
                )
        else:
            # simplyjs projects consist of a single app.js built from simply.js.
            source_file = SourceFile.objects.create(project=project, file_name='app.js')
            source_file.save_text(gist.files['simply.js'].content)
    send_td_event('cloudpebble_gist_import', data={'data': {'gist_id': gist_id}}, project=project)
    return project.id
| 2.109375 | 2 |
lib/candy_editor/core/Command.py | lihaochen910/Candy | 1 | 12758647 | <gh_stars>1-10
import logging
from . import signals
from abc import ABCMeta, abstractmethod
class ICommand(metaclass=ABCMeta):
    """Minimal interface for an executable command."""

    def canExecute(self, *params):
        """Return True when the command may run with the given parameters."""
        return True

    @abstractmethod
    def execute(self, *params):
        """Carry out the command; must be implemented by subclasses."""
        pass
class EditorCommandMeta(type):
    """Metaclass that auto-registers command classes declaring a 'name'."""

    def __init__(cls, clsname, bases, namespace):
        super(EditorCommandMeta, cls).__init__(clsname, bases, namespace)
        fullname = namespace.get('name')
        if fullname:
            # Only classes that declare a class-level 'name' are registered.
            EditorCommandRegistry.get().registerCommand(fullname, cls)
##----------------------------------------------------------------##
class EditorCommand(object, metaclass=EditorCommandMeta):
    """Base class for undoable editor commands.

    Subclasses that declare a class-level ``name`` ('<group>/<name>') are
    auto-registered with EditorCommandRegistry by EditorCommandMeta.
    """

    def init(self, **kwargs):
        """Initialise with user arguments; return False to abort dispatch."""
        pass

    def redo(self):
        """Apply (or re-apply) the command's effect; return False on failure."""
        return

    def undo(self):
        """Revert the command's effect; return False on failure."""
        return

    def canMerge(self, prevCommand):
        """Return True to merge with the previous command on the undo stack."""
        return False

    def canUndo(self):
        return True

    def hasHistory(self):
        """Return False for commands that bypass the undo/redo stacks."""
        return True

    def getResult(self):
        return None

    def __repr__(self):
        # Bug fix: EditorCommandRegistry.doCommand stores the dispatch name
        # on '_fullname', not 'fullname', so the old 'self.fullname' lookup
        # raised AttributeError. Fall back to the class name for commands
        # instantiated outside the registry.
        return getattr(self, '_fullname', type(self).__name__)
##----------------------------------------------------------------##
class EditorCommandStack(object):
    """Undo/redo stack with a bounded history and command merging."""

    def __init__(self, stackLimit=100):
        # Oldest entries are dropped once undoStack exceeds stackLimit.
        self.undoStack = []
        self.redoStack = []
        self.stackLimit = stackLimit

    def clear(self):
        """Drop all undo and redo history."""
        self.undoStack = []
        self.redoStack = []

    def canUndo(self):
        return len(self.undoStack) > 0

    def canRedo(self):
        return len(self.redoStack) > 0

    def pushCommand(self, cmd, redo=False):
        """Execute *cmd* and record it for undo.

        Returns cmd.redo()'s result for history-less commands, False if
        execution failed, True otherwise. When *redo* is False a fresh
        command clears the redo stack.
        """
        if not cmd.hasHistory():
            # History-less commands run immediately and are never recorded.
            return cmd.redo()
        # Bug fix: the intent of the TODO below is to warn about a command
        # that cannot be undone, so test the command, not the stack.
        if not cmd.canUndo():
            # TODO: Warning about non-undoable action
            pass
        assert not hasattr(cmd, 'inStack')
        count = len(self.undoStack)
        cmd.inStack = True
        cmd.merged = False
        if count > 0:
            lastCommand = self.undoStack[count - 1]
            if cmd.canMerge(lastCommand):
                # Merged commands are undone together (see undoCommand).
                cmd.merged = True
        if count >= self.stackLimit:
            self.undoStack.pop(0)
        self.undoStack.append(cmd)
        if cmd.redo() == False:  # failed
            self.undoStack.pop()
            return False
        if not redo:
            signals.emit('command.new', cmd, self)
            self.redoStack = []
        else:
            signals.emit('command.redo', cmd, self)
        return True

    def undoCommand(self, popCommandOnly=False):
        """Undo the newest command; recursively undo merged predecessors."""
        count = len(self.undoStack)
        if count > 0:
            cmd = self.undoStack[count - 1]
            if not popCommandOnly:
                if cmd.undo() == False:
                    return False
            self.undoStack.pop()
            # Bug fix: clear the 'inStack' marker so that redoCommand can
            # push this command again without tripping pushCommand's
            # "assert not hasattr(cmd, 'inStack')" invariant.
            del cmd.inStack
            self.redoStack.append(cmd)
            signals.emit('command.undo', cmd, self)
            if cmd.merged:
                return self.undoCommand(popCommandOnly)
            else:
                return True
        return False

    def redoCommand(self):
        """Re-apply the most recently undone command."""
        if not self.canRedo(): return False
        cmd = self.redoStack.pop()
        return self.pushCommand(cmd, True)
        # TODO: redo merged commands
##----------------------------------------------------------------##
class EditorCommandRegistry(object):
    """Singleton registry mapping '<group>/<name>' ids to command classes
    and per-group undo stacks."""

    _singleton = None

    @staticmethod
    def get():
        """Return the global registry, creating it on first access."""
        if not EditorCommandRegistry._singleton:
            return EditorCommandRegistry()
        return EditorCommandRegistry._singleton

    def __init__(self):
        assert not EditorCommandRegistry._singleton
        EditorCommandRegistry._singleton = self
        self.stacks = {}    # stack name -> EditorCommandStack
        self.commands = {}  # fullname -> (stackName, cmdName, cmdClass)

    def createCommandStack(self, name):
        """Create, register and return an EditorCommandStack for *name*."""
        stack = EditorCommandStack()
        self.stacks[name] = stack
        return stack

    def getCommandStack(self, name):
        return self.stacks.get(name, None)

    def registerCommand(self, fullname, cmdClass):
        """Register *cmdClass* under 'group/name'; the group selects the stack."""
        cmdBlobs = fullname.split('/')
        assert len(cmdBlobs) == 2, 'command name must be <group>/<name>'
        stackName, cmdName = cmdBlobs[0], cmdBlobs[1]
        self.commands[fullname] = (stackName, cmdName, cmdClass)
        # logging.info ( 'register command: %s / %s' % (stackName, cmdName) )

    def doCommand(self, fullname, **kwargs):
        """Instantiate, init and push the named command.

        Returns the command instance on success, or None if the command or
        its stack is unknown, init() vetoes, or execution fails.
        """
        entry = self.commands.get(fullname, None)
        if not entry:
            # logging.warn is a deprecated alias; use logging.warning.
            logging.warning('command not found %s ' % fullname)
            return None
        (stackName, cmdName, cmdClass) = entry
        stack = self.getCommandStack(stackName)
        if not stack:
            logging.warning('command stack not found %s ' % stackName)
            return None
        cmd = cmdClass()
        # __repr__ and callers read the dispatch name from '_fullname'.
        cmd._fullname = fullname
        if cmd.init(**kwargs) == False: return None
        if stack.pushCommand(cmd):
            return cmd
        else:
            return None
##----------------------------------------------------------------##
class RemoteCommandMeta(type):
    """Metaclass registering remote command classes that declare a 'name'."""

    def __init__(cls, clsname, bases, namespace):
        super(RemoteCommandMeta, cls).__init__(clsname, bases, namespace)
        fullname = namespace.get('name')
        if fullname:
            RemoteCommandRegistry.get().registerCommand(fullname, cls)
##----------------------------------------------------------------##
class RemoteCommand ( object ):
	# NOTE(review): '__metaclass__' is Python 2 syntax and is ignored by
	# Python 3 (the rest of this file uses the Python 3 'metaclass='
	# keyword, e.g. EditorCommand). As written, subclasses that declare a
	# 'name' are never auto-registered with RemoteCommandRegistry; this
	# should likely read 'class RemoteCommand(object, metaclass=RemoteCommandMeta)'.
	__metaclass__ = RemoteCommandMeta

	# NOTE(review): 'self' is missing here -- via the bound call
	# 'cmd.run(*args)' in RemoteCommandRegistry.doCommand the instance binds
	# to 'argv'. Subclasses appear to override run(), but confirm intent.
	def run ( argv ):
		pass
##----------------------------------------------------------------##
class RemoteCommandRegistry(object):
    """Singleton lookup table of remotely invokable commands."""

    _singleton = None

    @staticmethod
    def get():
        """Return the global registry, creating it on first access."""
        if RemoteCommandRegistry._singleton:
            return RemoteCommandRegistry._singleton
        return RemoteCommandRegistry()

    def __init__(self):
        RemoteCommandRegistry._singleton = self
        self.commands = {}

    def registerCommand(self, name, cmdClas):
        self.commands[name] = cmdClas

    def doCommand(self, argv, output):
        """Look up argv[0] and run the command with the remaining arguments.

        Unknown commands are logged; exceptions raised by the command are
        caught and logged rather than propagated.
        """
        if not argv:
            return
        cmdName = argv[0]
        clas = self.commands.get(cmdName, None)
        if not clas:
            logging.warning('no remote command found:' + cmdName)
            return
        cmd = clas()
        args = argv[1:] if len(argv) > 1 else []
        try:
            cmd.run(*args)
        except Exception as e:
            logging.exception(e)
# Eagerly create the shared RemoteCommandRegistry singleton at import time.
RemoteCommandRegistry ()
| 2.703125 | 3 |
tests/test_parametric_components/test_DivertorITER.py | PullRequest-Agent/paramak | 0 | 12758648 | <reponame>PullRequest-Agent/paramak<filename>tests/test_parametric_components/test_DivertorITER.py
import unittest
import paramak
class test_DivertorITER(unittest.TestCase):

    @staticmethod
    def _build_divertor():
        # Fresh ITER-type divertor for each check.
        return paramak.ITERtypeDivertor()

    def test_DivertorITER_creation(self):
        """The ITERtypeDivertor parametric component should yield a
        cadquery solid."""
        divertor = self._build_divertor()
        assert divertor.solid is not None

    def test_DivertorITER_STP_export(self):
        """export_stp should write a stp file for an ITER-type divertor."""
        divertor = self._build_divertor()
        divertor.export_stp("tests/ITER_div")

    def test_DivertorITER_faces(self):
        """The divertor solid should expose the expected number of faces,
        both for the full revolution and for a 180-degree rotation."""
        divertor = self._build_divertor()
        assert len(divertor.areas) == 12
        assert len(set(divertor.areas)) == 12
        divertor.rotation_angle = 180
        assert len(divertor.areas) == 14
        assert len(set(divertor.areas)) == 13
| 2.8125 | 3 |
dev/notes.py | negrinho/research_toolbox | 14 | 12758649 |
# NOTE: some stuff can be moved to the tb_numpy.
#
# Implementation requests
# * tb_augmentation.py
# * tb_data.py
# * tb_debugging.py
# * tb_experiments.py
# * tb_filesystem.py
# * tb_io.py
# * tb_logging.py
# * tb_plotting.py
# * Add grid plots.
# * Add histograms (1D, 2D).
# * Add scatter plots (1D, 2D, 3D).
# * tb_preprocessing.py
# * tb_project.py
# * tb_random.py
# * tb_remote.py
# * tb_resource.py
# * tb_training.py
# * Model checkpoint and resuming.
# * tb_tensorflow.py
# * tb_metrics.py
# * tb_pytorch.py
# * tb_serving.py
# * tb_deep_architect.py
# * tb_numpy (?)
# Additional aspects to contemplate.
# tb_tensorflow and tb_pytorch (I'm not sure what to do here; I think that
# certain things are probably useful to have such as what is the best way of
# defining new models and perhaps some class definition.)
# Design principles:
# * Working with lists most of the time
# * d stands for dictionary.
# * Suggests the conceptualization of recurring transformations that would otherwise be spread out throughout the code.
# * Keep it simple and extensible.
# * Tools are overly general; we provide a simplification.
# * It should be natural for the person using the functionality.
# * Translation layer from more flexible language to a more simple language.
# * Collection of utility scripts rather than a big system with a lot of interactions.
# tb_features.py
# * Implement the different combinators for features.
# Other potential aspects to consider
# * Model serving.
# * Email notifications when job or process terminates.
# * Tools to work with a server in a more interactive way.
# Design principle: loosely decoupled models.
# general implementation:
# * Python 3 compatibility
# * Additional tests for functionality.
# add more plot objects that are easy to use for different configurations.
# Going about making this more useful to you
# * Understand the format to run the code.
# * Understand the creation of examples.
# * Understand running on the server.
# * Adapt the examples to run on the server to suit your needs.
# * Simple interfaces for models.
# * Tools for preprocessing and training.
# * Add more system information (GPU that was ran on).
# * Easy data preprocessing.
# * Loading data
# * Working with models
# * Evaluation
# * Visualization of model predictions.
# * Common error checking operations.
# * Operate directly on files.
# * Easy to use tensorboard for simple things.
# * Get all files matching a pattern in a directory.
# * Simple scripts for working with images and videos.
# * Get to the point
# * Improve the loading of experiments and the manipulation of experiments.
# * Creating different hyperparameter configurations.
# * Add common step size strategies.
# * Add more common training logic like step size reduction and what not.
# * Make it trivial to run on a remote machine (these configurations should be easy to set up).
# * Improve plotting functionality.
# * Add functionality for darch with some common models.
# * Make sure that it is easy to process the resulting logs.
# * Inspect the variations among different dictionaries.
# * Command line based tools for working directly from the command line (perhaps some batch file). An example is tb_interact.py run --alias sync_folder_to_matrix (stuff like that).
# * Get available resources from the command line.
# * Add some simple hyperparameter tuning.
# * Add some simple graph visualization.
# * Working easily on remote paths.
# * Download all files of a given type from a webpage.
# * Support for JSON formatting?
# * Working easily with ordered dicts.
# * Profiling and debugging hooks.
# * Working with numpy for the computational interface of the jobs.
# * Combining a few of these files.
# * Better tools for managing directories.
# * Write down typical use cases where it would be appropriate to extend the functionality.
# * Make it really easy to run a command like it was on the server.
# * Ability to easily extend (in a sane way) the models that are in there.
# * Better tools for featurization.
# * Support for nested experiments.
# * Exploration of CSV files can be done through pandas.
# * Add some support to easily work with an hyperparameter optimization toolbox.
# * Functionality to inspect the mistakes made by a model.
# * Run directly from a config file.
# * Add creation of Tikz figures in python.
# * Logging is mostly done through JSON.
# * Adding your own interaction commands.
# * Print available commands in interact.
# * Run on a SLURM managed clustered with all the dependencies.
# * Plot quantiles easily.
# * Map a file line by line somehow (e.g., by calling some function on it).
# * Scripts for managing environments and configurations. These can be done in Python, e.g., setting the appropriate environment variables.
# * Simple processing of output from bash commands. Going seamlessly between bash commands and Python processing. Should be able to pipe to a Python command.
# * Functionality to help write reentrant code.
# * Run configurations to reduce the amount of repetition that is needed when running things.
# * Running any command of the toolbox via tb_interact. Useful for file manipulation.
# * Fix the randomness for Tensorflow and Pytorch and potentially other code.
# * Add test for the currently implemented functionality.
# * Easy to use functionality to easily generate processing folders.
# * For the interact, I also want to generate code that is bash instead of Python. Does this work for function calls.
# * Dynamic definition of new function calls. Potentially by combining with some simple iteration.
# * Tools for data collection (this just means constructing a JSON incrementally, I suppose), potentially with some more processing information.
# * More standard loggers to make sure that it can work easily with different models.
# * Some common tools for crawling and such. Download all pdfs from a webpage.
# * Create a tb_web_crawling.py for downloading stuff from the web; this is similar to going over folder in some sense.
# * Very simple to create a simple dataset by crawling data from the web.
# * Packaging data in some of these simple formats.
# * Common readers and writers for some simple data.
# * Model serving based on the type of the data.
# * Keeping running configs in a JSON is nice. fast, noop, default.
# * Easy processing of text log files, either via matching. Something that would be interesting is to combine mapping over files with something better. A lot of it is just iteration over files.
# * How to plot sequences directly from the command line, and how can this be done.
# * Ability to very easily interface with the C code. What are examples of C functionality that would be better ran in C.
# * APIs for training and serving.
# * Easy download of data and what not.
# * Including the toolbox as a submodule, or copying the needed files.
# * Install a specific version locally.
# * Easy setup of the environment to run on certain types of data.
# * Clear a latex file from comments.
# * Nice interfaces for data.
# * Define some general formats to deal with data.
# * Very simple way of defining simple trainers and hyperparameters.
# * Managing dependencies.
# * Addressing some of the Pytorch pitfalls
# * Easy masking and masking working for different functionality of the model.
# * Easy definition of a conv net and LSTM and some training code.
# * Functionality to make the server as a seamless extension of the local machine.
# * Easy logging emails with information about how the results are going. This can be useful to deal with the model.
# * Use the inspect module for some code manipulation.
# * Some automatic experiment generation for running certain types of experiments automatically.
# * Create a simple repository of some Pytorch and Tensorflow models.
# * Simple language independent training schedules.
# * Tools for model inspection.
# * Smarter tools for running configuration folders and to process their results.
# * Creation of composite tools.
# * Easy to create videos and simple animations with images.
# * Add better naming conventions for running different experiments.
# * Treating the experiment folder nicely.
# * Registering commands locally should help run things very easily, like a sequence
# of commands to run.
# * Working with tree dictionaries.
# * Conditional logging information.
# * Add functionality to run periodically.
# * Have a config manager to write experiments easily.
# * Improve the run on a SLURM managed cluster such that it is easy to adapt to a new one.
# * Pretrained models.
# * Tools for helping generating all the results of a paper based on a single script.
# * Easy to stress test a model and make sure that it works.
# * Dataset manager that allows to download the data in disk.
# * Write a mini-scheduler for non-managed node servers.
# * List files in a remote folder.
# * Easy to apply regular expressions and substitutions.
# * Send a downloaded version of the experiments upon completion.
# * Design some simple instructions/procedure to get things set up in a server.
# * Design some simple procedure to get a Docker container running with the experiments.
# * Setting up ssh id without a server. (create a new key, )
# * Download files of a certain type from a webpage.
# * Add option to run command locally: run_on_local
# * Add abort if it exists to the file checks.
# * Add a function to draw the dependencies of a project, assuming that there is a root from which files are called.
# * Add functionality to see how much time has been waiting on the queue.
# * Functionality to know my jobs in the queue.
# * Simplify the run on lithium node. it still has too many options for most cases.
# * Adding a function to extract from the toolbox is convenient. Check how to do this. This is convenient when we don't want to depend on the whole toolbox..
# * Add functionality to make it easy to work with the asynchronous computation.
# * Add scripts to periodically clean the folder. It would be nice to add one of these scripts in Python
# * Make sure that I can specify the axis of the plotter to be easier to use (like log vs linear).
# * Add some functionality to make sure that I can easily substitute all occurrences of strings in a code.
# * Add functionality to take the top elements of a vector
# it should be easy to take a folder and run it on the cluster with some
# configuration, then it should be easy to get the results back, maybe waiting
# or not for it to finish.
# what is a good description of a featurizer. I think that the definition of
# the data is relatively stable, but featurizers are malleable, and depend
# on design choices. the rest is mostly independent from design choices.
# binary search like featurization. think about featurization for
# directory like structures.
# this is binarization. check how they do it in scikit-learn.
# download certain files periodically or when they terminates. this allows to
# give insight into what is happening locally.
# there is syncing functionality
# make it easy to use the rsync command.
# managing jobs after they are running in the server.
# TODO: write a file with information about the system that did the run.
# gathering information about the system is important
# TODO: and perhaps kill some of the jobs, or have some other file
# that say ongoing. define what is reasonable to keep there.
# why would you change an experiment configuration. only if it was due to
# bug. what if the code changes, well, you have to change the other experiments
# according to make sure that you can still run the exact same code, or maybe
# not.
# TODO: perhaps it is possible to run the model in such a way that makes
# it easy to wait for the end of the process, and then just gets the results.
# for example, it puts it on the server, sends the command, and waits for
# it to terminate. gets the data every time.
# TODO: the question is which are the cpus that are free.
# NOTE: it is a good idea to think about one ssh call as being one remote
# function call.
# ideally, you just want to run things once.
# also, perhaps, I would like to just point at the folder and have it work.
# TODO: develop functions to look at the most recent experiments, or the files
# that were change most recently.
# the commands are only pushed to the server for execution.
# easy to do subparsers and stuff, as we can always do something to
# put the subparsers in the same state are the others.
# can use these effectively.
# TODO: add something to run periodically. for example, to query the server
# or to sync folders. this would be useful.
# TODO: it is possible to have something that sync folders
# between two remote hosts, but it would require using the
# current computer for that.
# add error checking
# TODO: check output or make sure that things are working the correct way.
# it may be preferable to do things through ssh sometimes, rather than
# use some of the subprocess with ssh.
# NOTE: it is important to be on the lookout for some problems with the
# current form of the model. it may happen that things are not properly
# between brackets. this part is important
# there is questions about being interactive querying what there is still
# to do there.
# there is only part of the model that it is no up. this is better.
# there is more stuff that can go into the toolbox, but for now, I think that
# this is sufficient to do what I want to do.
# add stuff for error analysis.
# TODO: functionality for periodically running some function.
# TODO: more functionality to deal with the experiment folders.
# for reading and writing CSV files, it is interesting to consider the existing
# CSV functionality in python
# look at interfacing nicely with pandas for some dataframe preprocessing.
# NOTE: neat tools for packing and unpacking are needed. this is necessary
# to handle this information easily.
# TODO: mapping the experiment folder can perhaps be done differently as this is
# not very interesting.
# dealing with multiple dictionaries without merging them.
# going over something and creating a list out of it through function
# calls, I think that is the best way of going.
# NOTE: a list is like a nested dictionary with indices, so it
# should work the same way.
# NOTE: for flatten and stuff like that, I can add some extra parts to the model
# that should work nicely, for example, whether it is a list of lists
# or not. that is nicer.
# there are also iterators, can be done directly. this is valid, like
# [][][][][]; returns a tuple of that form. (s, s, ...)
# this should work nicely.
# question about iterator and map,
# I think that based on the iterator, I can do the map, but it is probably
# a bit inefficient. I think that
# NOTE: some of these recursive functions can be done with a recursive map.
# most support for dictionaries and list and nested mixtures of both.
# although, I think that dictionaries are more useful.
# TODO: make it easier to transfer a whole experiment folder to the server and
# execute it right way.
# develop tools for model inspection.
# TODO: some easy interface to regular expressions.
# TODO: stuff to inspect the examples and look at the ones that have the most
# mistakes. this easily done by providing a function
# there is stuff that needs interfacing with running experiments. that is the
# main use of this part of this toolbox.
# think about returning the node and the job id, such that I can kill those
# jobs easily in case of a mistake.
# TODO: add stuff for coupled iteration. this is hard to do currently.
# think about the structure code.
# TODO: work on featurizers. this one should be simple to put together.
# NOTE: it is a question of looking at the number of mistakes of a
# model and see how they are split between types of examples.
# or generate confusion matrices easily, perhaps with bold stuff for the largest
# off-diagonal entries.
# Working with the configs, I think that that is interesting.
# the folders do not matter so much, but I think that it is possible
# do a mirror of a list [i:]
# TODO: add ML models unit tests.
# like running for longer should improve performance.
# performance should be within some part of the other.
# do it in terms of the problems.
# <IMPORTANT> TODO: logging of different quantities for debugging.
# greatly reduce the problem.
# copy the repository somewhere, for example, the debug experiments.
# TODO: these can be improved, this is also information that can be added
# to the dictionary without much effort.
# TODO: function to get a minimal description of the machine in which
# the current model is running on.
# loss debugging. they should go to zero.
# TODO: dumb data for debugging. this is important to check that the model is working correctly.
# question about the debugging. should be a sequence of
# objects with some descriptive string. the object should probably also
# generate a string to print to a file.
# this makes sense and I think that it is possible.
# TODO: for example, if I'm unsure about the correctness of some variable
# it would be nice to register its computation. how to do that.
# I would need to litter the code with it.
# TODO: passing a dictionary around is a good way of registering information
# that you care about. this is not done very explicitly. using just a dictionary
# is not a good way. perhaps reflection about the calling function would be
# a good thing, and some ordering on what elements were called and why.
# TODO: add gradient checking functionality.
# TODO: add some easy way of adding tests to the experiment folders. like
# something as to be true for all experiments.
# also something like, experiments satisfying some property, should
# also satisfy some other property.
# NOTE: bridges is SLURM managed. I assume it is only slightly different.
# do something to easily register conditions to test that the experiments
# should satisfy.
# add a function to say which one should be the empty cell placeholder in the
# table.
# TODO: stuff for error analysis. what are the main mistakes that the model
# is doing. there should exist a simple way of filtering examples by the number
# of mistakes.
# curious how debugging of machine learning systems work. based on performance.
# differential testing.
# have default values for the hyperparameters that you are looking at.
# for example, for step sizes, there should exist a default search range.
# TODO: do the featurizer and add a few common featurizers for cross product
# and bucketing and stuff.
# think about how to get the featurizer, that can be done either through
# it's creation or it is assumed that the required information is passed as
# argument.
# this is a simple map followed by a sort or simply sorting by the number of
# mistakes or something like that.
# perhaps something more explicit about error analysis.
# generation of synthetic data.
# TODO: think about the implementation of some stateful elements.
# I think that the implementation of beam search in our framework is something
# worthy.
# simple binarization,
# TODO: check utils to train models online. there may exist stuff that
# can be easily used.
# TODO: interface with Pandas to get feature types of something like that
# I think that nonetheless, there is still information that needs to
# be kept.
# tests to make the model fail very rapidly if there are bugs.
# functions to analyze those results very rapidly.
# TODO: stuff for checkpointing, whatever that means.
# add functionality to send email once stops or finishes.
# this helps keep track of stuff.
# TODO: some easy interfacing with Pandas would be nice.
# for example to create some dictionaries and stuff like that.
# very easy to work on some of these problems, for example,
# by having some patterns that we can use. this is interesting in terms of
# model composition, what are typical model composition strategies?
# for example, for models with embeddings.
# TODO: functionality to work with ordered products.
# NOTE: perhaps it is possible to keep scripts out of the main library folder,
# by making a scripts folder. check if this is interesting or not.
# still, the entry point for running this code would be the main folder.
# of course, this can be changed, but perhaps it does not matter too much.
# NOTE: what about stateful feature extraction. that seems a bit more
# tricky.
# TODO: online changes to configurations of the experiment.
# this would require loading all the files, and substituting them by some
# alteration. this is kind of like a map over the experiments folder.
# it does not have to return anything, but it can really, I think.
# returning the prefix to the folder is the right way of doing things.
# make it easy to do the overfit tests.
# it is a matter of passing a lot of fit functions. this can be
# done online or by sampling a bunch of models from some cross product.
# NOTE: some of these aspects can be better done using the structure
# information that we have introduced before.
# TODO: add functionality to make it easy to load models and look at it
# an analyze them.
# TODO: error inspection is something that is important to understand
# some subset of the indices can be ortho while other can be prod.
# TODO: in the config generation, it needs to be done independently for
# each of the models. for example, the same variables may be there
# but the way they are group is different.
# TODO: for the copy update, it is not a matter of just copying
# I can also change some grouping around. for example, by grouping
# some previously ungrouped variables.
# it is possible by rearranging the groups.
# the new grouping overrides the previous arrangement.
# composite parameters that get a list of numbers are interesting, but have
# not been used much yet. it is interesting to see
# how they are managed by the model.
# TODO: add the change learning rate to the library for Pytorch.
# TODO: I have to make sure that the patience counters work the way I expect
# them to when they are reset, like what is the initial value in that case.
# for example, this makes sense in the case where what is the initial
# number that we have to improve upon.
# if None is passed, it has to improve from the best possible.
# for the schedules, it is not just the prev value, it is if it improves on the
# the prev value or not.
# TODO: functionality to rearrange dictionaries, for example, by reshuffling things
# this is actually quite tricky.
# TODO: have an easy way of doing a sequence of transformations to some
# objects. this is actually quite simple. can just just
# TODO: do some form of applying a function over some sequence,
# while having some of the arguments fixed to some values.
# the function needs to have all the other arguments fixed.
# TODO: also make it simple to apply a sequence of transformations
# to the elements of a string.
# TODO: dict updates that returns the same dictionary, such that they
# can be sequenced easily.
# easy to run things conditionally, like only runs a certain function
# if some condition is true, otherwise returns the object unchanged.
# TODO: a trailing call is going to be difficult, as it is not an object in
# general, nesting things, makes it less clear. otherwise, it can
# pass as a sequence of things to execute. looks better.
# conditions.
# NOTE: a bunch of featurizers that take an object and compute something based
# on that object.
# sequence of transformations.
# TODO: code to run a function whenever some condition is verified.
# TODO: code to handle the rates and to change the schedules whenever some
# condition is verified.
# TODO: stuff with glob looks nice.
# TODO: an interesting thing is to have an LSTM compute an embedding for the
# unknown words. I think that this is a good compromise between
# efficiency and generalization.
# NOTE: some auxiliary functions that allows to easily create a set of experiments
# there are also questions.
# each config may have a description attached.
# NOTE: some of the stuff from the plots can come out of current version of
# of the plots.
# the run script for a group of experiments is going to be slightly different
# what can be done there. it would be inconvenient for large numbers to send
# it one by one. I guess it is possible to easily do it, so not too much of a
# problem.
# TODO: ortho increments to a current model that may yield improvements.
# or just better tools to decide on what model to check.
# I think that it is going to be important to manage the standard ways of
# finding good hyperparameters.
# TODO: add function to check everybody's usage on matrix. same thing for
# lithium and matrix.
# TODO: functions to inspect the mistakes made by the model.
# NOTE: some of these consistency checks can be done automatically.
# TODO: something to randomize the training data, and to keep some set of sentences
# aligned.
# TODO: some tests like checking agreement of dimensions.
# or for paired iteration: that is pretty much just izip or something like that.
# TODO: another test that may be worth validating is that the model,
# may have some property that must be satisfied between pairs of the models.
# using stuff in terms of dictionaries is nice.
# TODO: differential testing, given the same prediction files, are there mistakes
# that one model makes that the other does not.
# having a function to check this is interesting.
# stuff to go back and train on all data. check this.
### some useful assert checks.
# NOTE: this is recursive, and should be applied only to sequences
# of examples.
# TODO: something that can be computed per length.
### TODO: another important thing is managing the experiments.
# this means that
# TODO: perhaps important to keep a few torch models
# and stuff.
# TODO: the Tensorflow models can be kept in a different file.
# TODO: add code to deal with the exploration of the results.
# TODO: also some tools for manipulation with LSTMs.
# TODO: stuff to build ensembles easily.
# TODO: add stuff for initial step size tuning.
# TODO: add functionality to run DeepArchitect on the matrix server.
# TODO: add function doing cross validation, that is then ran on the
# full dataset.
# TODO: batchification
# TODO: stuff for cross validation . it has to be more general than the
# stuff that was done in scikit-learn.
# are there things that are common to all models and all experiments.
# handling multiple datasets is interesting. what can be done there?
# how to manage main files and options more effectively.
# TODO: it is interesting to think about how can the featurizers be applied to
# a sequence context. that is more tricky. think about CRFs and stuff like that.
# TODO: think about an easy map of neural network code to C code, or even to
# more efficient python code, but that is tricky.
# think about when does something become so slow that it turns impractical?
# TODO: look into how to use multi-gpu code.
# TODO: tools for masking and batching for sequence models and stuff.
# these are interesting.
# check mongo db or something like that, and see if it is worth it.
# how would beam search look on Tensorflow, it would require executing with the
# current parameters, and the graph would have to be fixed.
# possible to add words in some cases with small descriptions to the experiments.
# by default, the description can be the config name.
# the only thing that how approach changes is the way the model is trained.
# beam search only changes model training.
# is it possible to use the logger to save information to generate results or
# graphs.
# in Tensorflow and torch, models essentially communicate through
# variables that they pass around.
# for trading of computation and safety of predictions.
# check that this is in fact possible. multi-pass predictions.
# TODO: an interesting interface for many things with state is the
# step; and query functions that overall seem to capture what we care about.
# TODO: encode, decode, step,
# I think that this is actually a good way of putting this interface.
# TODO: have something very standard to sweep learning rates.
# what can be done here?
# NOTE: it is not desirable for the rate counter to always return a step size,
# because the optimizers have state which can be thrown away in the case of a
# TODO: stuff to deal with unknown tokens.
# TODO: sort of the best performance of the model is when it works with
# there is all this logic about training that needs to be reused.
# this is some nice analysis there.
# some of the stuff that I mention for building networks.
## TODO: interface for encoder decoder models.
# TODO: stuff that handles LSTM masking and unmasking.
# pack and unpack. functionality. keeping track of the batch dimension.
# use of map and reduce with hidden layers.
# TODO: perhaps the results can be put in the end of the model.
# padding seems to be done to some degree by the other model.
# TODO: the notion of a replay settings. a set of variables that is kept track
# during various places.
# TODO: notion of driving training, and keeping some set of variables
# around that allow me to do that.
# keeping the stuff way is a good way of not cluttering the output.
# batching is quite important, and I need to do something that easily goes
# from sequences to batches.
# there is a problem with the resets.
# once it resets, you have to consider where did it reset to, so you can
# reduce the step size from there.
# basically, resetting gets much harder.
# TODO: basically switch between the format of column and row for sequences
# of dictionaries. this is going to be simple. also, can just provide an
# interface for this.
# TODO: checkpoints with resetting seem like a nice pattern to the model.
# TODO: extend this information about the model to make sure that the model
# is going to look the way
# it is going to be nice to extend the config generator to make sure that
# things work out. for example, different number of arguments, or rather
# just sequence multiple config generators or something like that.
# that is simple to do. it is just a matter of extending a lot of stuff.
# improve debugging messages because this is hard to read.
# TODO: add some plotting scripts for typical things like training and
# validation error. make sure that this is easy to do from the checkpoints and
# stuff.
# TODO: perhaps for the most part, these models could be independent.
# this means that they would not be a big deal. this is going to be interesting.
# also, the notion of copying something or serializing it to disk is relevant.
# managing a lot of hyperparameters that keep growing is troublesome.
# might also be a good idea to use a tuner.
# TODO: clean for file creation and deletion, this may be useful to group
# common operations.
# TODO: registering various clean up operations and stuff like that.
# that can be done at the
# TODO: tests through config validation.
# TODO: possibility of running multiple times and getting the median.
# TODO: what is a good way of looking at different results of the model.
# it would be interesting to consider the case
# TODO: those runs about rate patiences are the right way of doing things.
# TODO: add easy support for running once and then running much slower.
# TODO: rates make a large difference.
# TODO: add stuff that forces the reloading of all modules.
# TODO: equivalence between training settings.
# TODO: also, for exploration of different models. what is important to
# check layer by layer.
# TODO: visualization of different parts of the model, and statistics. this is
# going to be interesting. like exploring how different parts of the model.
# change with training time.
# NOTE: things that you do once per epoch do not really matter, as they will be
# probably very cheap computationally comparing to the epoch.
# TODO: support for differential programming trained end to end.
# TODO: even to find a good configuration, it should be possible to figure
# out reasonable configurations by hand.
# equivalence tests. this running multiple configs that you know that
# should give the same result.
# TODO: it may be worth to come with some form of prefix code for the experiments
# it may be worth to sort them according to the most recent.
# TODO: some stuff to do an ensemble. it should be simple. just average the
# predictions of the top models, or do some form of weighting.
# it should be trivial to do, perhaps stack a few of these images, or tie
# the weights completely. maybe not necessary.
# TODO: processing the data is going to be interesting.
# TODO: stuff for easily doing ensembles of models.
# if just based on full predictions, that is fine, but if transitioning
# requires running the model, it gets a bit more complicated. multi-gpu
# options are possible, and doable. it is a matter of model serving.
# TODO: throw away 5 percent of the data where you make the most mistakes and
# retrain. perhaps these are training mistakes.
# transpose a nested dict.
# TODO: several step functions to featurize, and perhaps visualize the
# results of that model.
# TODO: totally add stuff for cross validation.
# use the same train and test splits.
# TODO: perhaps base it around iterables.
# TODO: do train_dev split in a reasonable way.
# perhaps with indices. how did I do it in CONLL-2000 integrate that.
# TODO: for the seed add support for other things that may have independent
# seeds.
# TODO: can add hyperparameters for the data loading too. it
# it just get more complicated.
# TODO: look at writing reentrant code based on checkpoint.json
# TODO: allow aliasing in some configurations. what is the right way
# of going about this. I can be done in the experiments preprocessing part.
# NOTE: the aliasing idea for configs is quite nice.
# NOTE: better logging and accumulation of information.
# I think that this can be done through registering, and passing
# information that uses that registered function to do something.
# it is quite clean like this.
# TODO: stuff to maintain aliasing, or to run alias in slightly different
# configurations; e.g., sharing some parameters, but having the other ones
# fixed to some relevant specification.
# for managing experiments and comparing.
# to develop something for experiment management, it is convenient to have
# a way of interacting with them online. this would make it.
# add information for each time a logging event of a given type is called.
# also add the option to do things to the terminal.
# it would be convenient to make sure that I can debug a model in such a way
# that it makes it easy to look at differential parts of the score.
# what to print in the terminal. there is also the question about suppression,
# in the beginning,. the probes that we can use make sense.
# it would be nice to have a way of deactivating the logging somehow.
# TODO: to get configs that have variable number of arguments, it has to be
# possible to specify what are the options that are defining the arguments that we care about.
# only if these are set, will those arguments be available.
# the fact that the configs are variable will mean that I will have to pass a
# dictionary to the create_experiment function.
# TODO: it should be possible to call a function with a dictionary with
# a superset of the arguments of a function.
# if that function has defaults, use those if those elements are not in the
# dictionary.
# this is mostly for convenience, for functions that have a lot of
# arguments. it may be important to structure and unstructure a dictionary
# to retrieve these types of arguments, because it gets really messy.
# namespace for arguments.
# NOTE: certain things should be more compositional.
# TODO: perhaps add the is sparse information.
# there is information about the unknown tokens and stuff like that?
# there are things that can be done through indexing.
# can featurize a set of objects, perhaps.
# do it directly to each of the xs passed.
# the construction of the dictionary can be done incrementally.
# this should handle integer featurization, string featurization, float
# featurization
# subset featurizers, and simple ways of featurizing models.
# features from side information.
# this is kind of a bipartite graph.
# each field may have different featurizers
# one field may have multiple featurizers
# one featurizers may be used in multiple fields
# one featurizer may be used across fields.
# featurizers may have a set of fields, and should be able to handle these
# easily.
# features from history.
# features from different types of data. it should be easy to integrate.
# easy to featurize sets of elements,
# handling gazeteers and stuff like that.
# it is essentially a dictionary.
# there is stuff that I can do through
# I can also register functions that take something and compute something.
# and the feature is that. work around simple feature types.
# come up with some reasonable interface for featurizers.
# TODO: have a few operations defined on featurizers.
# NOTE: that there may exist different featurizers.
# NOTE: I can have some form of function that guesses the types of elements.
# NOTE: for each CSV field, I can register multiple features.
# NOTE that this is mainly for a row. what about for things with state.
# compressing and decompressing folders.
# TODO: add stuff to do animations easily.
# TODO: add plotting functionality to generate grid plots easily.
# what is the difference between axis and figures. plot, axis, figures
# NOTE: for example, you can keep the labels and do something with the
# the rest of the model.
# you can do a lot of thing.
# another type of graph that is common.
# which is.
# TODO: log plots vs non log plots. more properties to change.
# TODO: updatable figure.
# # TODO: also have some way of adding
# subplots with animation. that would be nice. multiple animations side by side.
# rather than classes, it may be worth
# NOTE: this is going to be done in the head node of the servers for now.
# NOTE: may return information about the different files.
# may do something with the verbose setting.
# TODO: this can be more sophisticated to make sure that I can run this
# by just getting a subset of the files that are in the source directory.
# there is also questions about how does this interact with the other file
# management tools.
# NOTE: this might be useful but for the thing that I have in mind, these
# are too many options.
# NOTE: this is not being used yet.
# there is the override and transfer everything, and remove everything.
# NOTE: the use case is mostly to transfer stuff that exists somewhere
# from stuff that does not exist.
# there is compression stuff and other stuff.
# NOTE: the stuff above is useful, but it is a little bit too much.
# only transfer newer.
# delete on destination and stuff like that. I think that these are too many
# options.
# remote deletes are probably reasonable.
# imagine that keeps stuff that should not been there.
# can also, remove the folder and start from scratch
# this is kind of tricky to get right. for now, let us just assume that
| 1.851563 | 2 |
backend/notifiers/services/discord/discord.py | hibare/Moni | 1 | 12758650 | <gh_stars>1-10
"""Discord notification service"""
import json
import logging
from typing import List, Optional, Tuple

from django.conf import settings

from moni.utils.requests_proxy import requests_post
from notifiers.services import NotifierService
# Module-level logger named after this module's import path.
logger = logging.getLogger(__name__)
class Discord(NotifierService):
    """Discord webhook notifier.

    Builds a Discord-compatible JSON payload — either the default test
    message or a service up/down message rendered from a JSON template —
    and POSTs it to a webhook URL via ``requests_post``.
    """

    def __init__(self) -> None:
        # Default payload used when send() is called without a prior
        # prep_payload() (i.e. a plain test notification).
        self.payload = json.dumps({
            "content": "Moni: Test notification",
            "embeds": None
        }).encode("utf-8")

        self.HEADERS = {
            "Content-type": "application/json"
        }

        # JSON message templates for the two notification outcomes.
        self.SERVICE_DOWN_TEMPLATE = settings.BASE_DIR / \
            "notifiers/services/discord/template_service_down.json"
        self.SERVICE_UP_TEMPLATE = settings.BASE_DIR / \
            "notifiers/services/discord/template_service_up.json"

    def prep_payload(self, title: str, health_check_url: str, success: bool,
                     expected_status: List, received_status: int,
                     error: Optional[str] = None) -> None:
        """Render the up/down template with check details into ``self.payload``.

        Chooses the "up" template when ``success`` is truthy, otherwise the
        "down" template, then fills it with printf-style ``%`` substitution.

        NOTE(review): ``%``-substitution into a JSON template means values
        containing ``%`` or JSON metacharacters (quotes, backslashes) can
        corrupt the payload — confirm templates/inputs are safe.
        """
        template_path = self.SERVICE_UP_TEMPLATE if success else self.SERVICE_DOWN_TEMPLATE

        with open(template_path) as ft:
            template_data = ft.read()

        template_data = template_data % (
            title, health_check_url, expected_status, received_status, error)
        self.payload = template_data.encode("utf-8")

    def send(self, webhook: str) -> Tuple[bool, Optional[int], Optional[str]]:
        """POST the prepared payload to the given Discord webhook URL.

        Returns:
            A ``(success, http_status, error)`` tuple. ``success`` is True
            only for HTTP 204 (Discord's success response for webhook
            executions). ``http_status`` is None when the request itself
            raised. ``error`` is ``repr()`` of the raised exception, or
            None when the request completed.

        (Fixes the previous ``-> bool`` annotation: this method has always
        returned a 3-tuple, never a bare bool.)
        """
        try:
            response = requests_post(webhook, self.payload, self.HEADERS)

            logger.info("Response from Discord, status_code=%s, response=%s",
                        response.status, response.data)

            if response.status == 204:
                return True, response.status, None
            return False, response.status, None
        except Exception as err:
            # Boundary catch: log full traceback and report the failure to
            # the caller instead of propagating.
            logger.exception("Discord notification exception")
            return False, None, repr(err)
| 2.3125 | 2 |