content stringlengths 5 1.05M |
|---|
from winrm.protocol import Protocol
import sys
'''
Author: Mike Felch (c) 2020, @ustayready
-
Copyright 2020 Mike Felch
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
def main(computer, username, password, command):
    """Run a single command on a remote Windows host over WinRM (HTTPS/NTLM).

    Args:
        computer: Hostname or IP of the target machine (port 5986 is used).
        username: Account name for NTLM authentication.
        password: Password for the account.
        command: Command line to execute in the remote shell.

    Returns:
        Tuple ``(std_out, std_err, status_code)`` from the remote command.
        (Previously the output was fetched and silently discarded.)
    """
    protocol = Protocol(
        endpoint='https://{}:5986/wsman'.format(computer),
        transport='ntlm',
        username=username,
        password=password,
        # NOTE(review): certificate validation is disabled -- acceptable for a
        # lab tool, but unsafe against MITM in production.
        server_cert_validation='ignore'
    )
    shell_id = protocol.open_shell()
    try:
        command_id = protocol.run_command(shell_id, command, [])
        try:
            std_out, std_err, status_code = protocol.get_command_output(shell_id, command_id)
        finally:
            # Always release the command, even if fetching output fails.
            protocol.cleanup_command(shell_id, command_id)
    finally:
        # Always close the remote shell so resources are freed on the target.
        protocol.close_shell(shell_id)
    return std_out, std_err, status_code
if __name__ == '__main__':
    # Guard the CLI: previously a missing argument raised a bare IndexError.
    if len(sys.argv) != 5:
        sys.exit('usage: {} <computer> <username> <password> <command>'.format(sys.argv[0]))
    computer, username, password, command = sys.argv[1:5]
    main(computer, username, password, command)
# https://leetcode.com/problems/linked-list-cycle/description/
class ListNode:
    """Singly linked list node (LeetCode scaffolding)."""

    def __init__(self, x):
        self.val = x
        self.next = None


class Solution:
    """Several solutions to LeetCode 141 (Linked List Cycle)."""

    def hasCycle(self, head: ListNode) -> bool:
        """Straightforward approach: walk the list once, remembering every
        node already seen (by identity)."""
        if not head:
            return False
        seen = set()
        node = head
        while node.next is not None:
            node_id = id(node)
            if node_id in seen:
                return True
            seen.add(node_id)
            node = node.next
        return False

    def hasCycle_v0_5(self, head: ListNode) -> bool:
        """Same bookkeeping, but the pointer advances two nodes per step,
        which shaves a little time on long lists."""
        if not head:
            return False
        seen = set()
        node = head
        while node.next is not None and node.next.next is not None:
            node_id = id(node)
            if node_id in seen:
                return True
            seen.add(node_id)
            node = node.next.next
        return False

    def hasCycle_v1(self, head: ListNode) -> bool:
        """Same idea, but marks visited nodes with an ad-hoc attribute.
        Relies on Python's dynamic attributes, so it does not translate to
        most other languages (and it mutates the input nodes)."""
        node = head
        while node is not None:
            if not hasattr(node, 'check'):
                node.check = True
            elif node.check:
                return True
            node = node.next
        return False

    def hasCycle_v2(self, head: ListNode) -> bool:
        """Floyd's tortoise-and-hare: a fast pointer laps the slow one
        if and only if there is a cycle. O(1) extra space."""
        slow = head
        fast = head
        while slow is not None and fast is not None and fast.next is not None:
            slow = slow.next
            fast = fast.next.next
            if slow is fast:
                return True
        return False
|
from flask import Flask, request, jsonify
from flask_cors import CORS
from os.path import dirname, realpath
from os import listdir
from analyze_documents import build_lda_model
app = Flask(__name__)
# Enable CORS for all routes so a separately-hosted frontend can call this API.
CORS(app)
def json_response(status_code, data):
    """Serialize *data* as a JSON response carrying the given HTTP status."""
    response = jsonify(data)
    response.status_code = status_code
    return response
@app.route('/get_CIKs', methods=['GET'])
def available_CIKs():
    """Return the list of CIK directories found under data/14d9."""
    data_dir = dirname(realpath(__file__)) + "/data/14d9"
    return json_response(200, listdir(data_dir))
@app.route('/get_topics', methods=['GET'])
def get_topics():
    """Build an LDA topic model for the requested CIKs.

    Query parameters:
        CIKs (repeatable): company identifiers to analyze.
        num_topics: number of topics to extract (integer).
        ngram_num: n-gram size to use (integer).
    """
    CIKs = request.args.getlist('CIKs')
    # type=int makes Flask return None for missing/non-numeric values instead
    # of letting int(None) raise an unhandled TypeError (HTTP 500) as before.
    num_topics = request.args.get('num_topics', type=int)
    ngram_num = request.args.get('ngram_num', type=int)
    if not CIKs or num_topics is None or ngram_num is None:
        return json_response(
            400,
            {'error': 'CIKs, num_topics and ngram_num are required; '
                      'num_topics and ngram_num must be integers'},
        )
    topic_data = build_lda_model(CIKs, num_topics, ngram_num)
    return json_response(200, topic_data)
@app.route('/')
def index():
    """Landing page: a minimal banner confirming the service is up."""
    banner = "<h1>Topic Modeling API</h1>"
    return banner
if __name__ == '__main__':
    # threaded=True lets the Flask dev server handle concurrent requests.
    app.run(threaded=True)
from urllib.parse import urljoin
from scrapy import Request
from product_spider.items import RawData
from product_spider.utils.maketrans import formula_trans
from product_spider.utils.spider_mixin import BaseSpider
class VivanSpider(BaseSpider):
    """Spider for vivanls.com: crawls the product listing and detail pages."""

    name = "vivan"
    # BUG FIX: attribute was misspelled `allowd_domains`, so Scrapy's offsite
    # filter was never configured.  The value must also be a bare domain, not
    # a URL fragment with a trailing slash.
    allowed_domains = ["vivanls.com"]
    start_urls = ["https://vivanls.com/products/all/all/default"]
    base_url = "https://vivanls.com/"

    def parse(self, response):
        """Follow every product link on a listing page, then paginate."""
        rel_urls = response.xpath('//h5/a/@href').getall()
        for rel_url in rel_urls:
            yield Request(url=urljoin(self.base_url, rel_url), callback=self.parse_detail)
        # The pagination widget marks the current page with aria-current; the
        # following sibling is the "next" page link.
        next_page = response.xpath('//li[@aria-current]/following-sibling::li/a/@href').get()
        if next_page:
            yield Request(next_page, callback=self.parse)

    def parse_detail(self, response):
        """Extract a single product's fields from its detail page."""
        tmp = '//h4[contains(text(), {!r})]/following-sibling::p/text()'
        rel_img = response.xpath('//div[contains(@class, "product-detail-image")]/figure/img/@src').get()
        sym = response.xpath('//ul[contains(@class, "synmlist")]/li/text()').getall()
        sym = filter(bool, map(str.strip, sym))
        d = {
            'brand': 'vivan',
            'cat_no': response.xpath(tmp.format('Catalogue No.:')).get(),
            'en_name': response.xpath('//div[@class="product-detail"]//h2/text()').get(),
            'cas': response.xpath(tmp.format('CAS No. :')).get(),
            'mf': formula_trans(response.xpath(tmp.format('Mol. Formula :')).get()),
            'mw': response.xpath(tmp.format('Mol. Weight :')).get(),
            # rel_img may be None; short-circuit keeps img_url None in that case.
            'img_url': rel_img and urljoin(self.base_url, rel_img),
            'info1': ';'.join(sym),
            'prd_url': response.url,
        }
        yield RawData(**d)
|
import numpy as np
from tabulate import tabulate
def _derive_coco_results(self, coco_eval, iou_type, class_names=None):
    """
    Derive the desired score numbers from summarized COCOeval.

    Args:
        coco_eval (None or COCOEval): None represents no predictions from model.
        iou_type (str): one of "bbox", "segm" or "keypoints".
        class_names (None or list[str]): if provided, will use it to compute
            per-category AP as well.

    Returns:
        a dict of {metric name: score}
    """
    # BUG FIX: itertools is used below but was never imported anywhere in
    # scope; import locally so the per-category path does not NameError.
    import itertools

    metrics = {
        "bbox": ["AP", "AP50", "AP75", "APs", "APm", "APl"],
        "segm": ["AP", "AP50", "AP75", "APs", "APm", "APl"],
        "keypoints": ["AP", "AP50", "AP75", "APm", "APl"],
    }[iou_type]

    if coco_eval is None:
        # Logger.warn is deprecated in favor of Logger.warning.
        self._logger.warning("No predictions from the model! Set scores to -1")
        return {metric: -1 for metric in metrics}

    # The standard metrics: COCOeval.stats is ordered to match `metrics`.
    results = {metric: float(coco_eval.stats[idx] * 100) for idx, metric in enumerate(metrics)}
    self._logger.info(
        "Evaluation results for {}: \n".format(iou_type) + create_small_table(results)
    )

    if class_names is None or len(class_names) <= 1:
        return results

    # Compute per-category AP
    # from https://github.com/facebookresearch/Detectron/blob/a6a835f5b8208c45d0dce217ce9bbda915f44df7/detectron/datasets/json_dataset_evaluator.py#L222-L252  # noqa
    precisions = coco_eval.eval["precision"]
    # precision has dims (iou, recall, cls, area range, max dets)
    assert len(class_names) == precisions.shape[2]

    results_per_category = []
    for idx, name in enumerate(class_names):
        # area range index 0: all area ranges
        # max dets index -1: typically 100 per image
        precision = precisions[:, :, idx, 0, -1]
        # -1 entries mark absent ground truth; exclude them from the mean.
        precision = precision[precision > -1]
        ap = np.mean(precision) if precision.size else float("nan")
        results_per_category.append(("{}".format(name), float(ap * 100)))

    # Tabulate per-category AP in up to 3 (category, AP) column pairs.
    N_COLS = min(6, len(results_per_category) * 2)
    results_flatten = list(itertools.chain(*results_per_category))
    results_2d = itertools.zip_longest(*[results_flatten[i::N_COLS] for i in range(N_COLS)])
    table = tabulate(
        results_2d,
        tablefmt="pipe",
        floatfmt=".3f",
        headers=["category", "AP"] * (N_COLS // 2),
        numalign="left",
    )
    self._logger.info("Per-category {} AP: \n".format(iou_type) + table)

    results.update({"AP-" + name: ap for name, ap in results_per_category})
    return results
|
#! /usr/bin/env python
import rospy
from multimodal_writer.msg import HandInfoList, HandInfo, FingerInfo
from visualization_msgs.msg import Marker, MarkerArray
import tf
from geometry_msgs.msg import Pose, Vector3, Quaternion, Point
from tf import transformations as transf
from std_msgs.msg import Header, ColorRGBA
# Node setup: publishes Leap Motion hand/finger data as TF frames and RViz markers.
br = tf.TransformBroadcaster()
rospy.init_node("leap_viz")
marker_pub = rospy.Publisher("hand_markers", MarkerArray, queue_size=10)
# Topic carrying HandInfoList messages from the Leap driver.
TOPIC_HAND_INFO = "hands_topic"
def leap_callback(msg):
    """Convert one HandInfoList message into TF frames plus an RViz MarkerArray.

    For every hand: broadcast a TF frame and add a palm cube marker.
    For every finger: broadcast a fingertip frame, add a small cube marker,
    and append a palm->fingertip segment to a per-hand LINE_LIST marker.

    Fixes over the previous version: removed an unused `Pose()` local, hoisted
    the per-finger orientation (it was identical to the hand orientation and
    rebuilt every iteration), and dropped a redundant stamp re-assignment.
    """
    marker_array = MarkerArray()
    rospy.loginfo("Frame has %d hands!", len(msg.hands))
    for hand in msg.hands:
        # Leap positions are millimetres; convert to metres and remap axes
        # (Leap x/y/z -> this node's -z/-x/y convention).
        hand_origin = (-hand.pose.position.z / 1000,
                       -hand.pose.position.x / 1000,
                       hand.pose.position.y / 1000)
        hand_orientation = (-hand.pose.orientation.x,
                            -hand.pose.orientation.y,
                            -hand.pose.orientation.z,
                            hand.pose.orientation.w)
        br.sendTransform(hand_origin, hand_orientation, hand.header.stamp, "leap", hand.header.frame_id)

        # Palm marker.
        hand_marker = Marker(type=Marker.CUBE, color=ColorRGBA(0.0, 1.0, 0.0, 0.8))
        hand_marker.header = hand.header
        hand_marker.ns = "leap"
        hand_marker.id = hand.id
        hand_marker.scale.x = 0.1
        hand_marker.scale.y = 0.07
        hand_marker.scale.z = 0.02
        # Short lifetime so stale markers vanish when tracking drops out.
        hand_marker.lifetime = rospy.Duration(0.1)
        marker_array.markers.append(hand_marker)

        # One LINE_LIST marker per hand holding palm->fingertip segments.
        lines_marker = Marker()
        lines_marker.header.frame_id = "leap"
        lines_marker.header.stamp = hand.header.stamp
        lines_marker.ns = "leap_lines"
        lines_marker.id = hand.id
        lines_marker.type = Marker.LINE_LIST
        lines_marker.action = Marker.ADD
        lines_marker.scale.x = 0.02
        lines_marker.scale.y = 0.02
        lines_marker.scale.z = 0.02
        lines_marker.color.r = 0.9
        lines_marker.color.g = 0.1
        lines_marker.color.b = 0.1
        lines_marker.color.a = 0.8
        lines_marker.lifetime = rospy.Duration.from_sec(0.1)

        for finger in hand.fingers:
            tip_origin = (-finger.tip_position.z / 1000,
                          -finger.tip_position.x / 1000,
                          finger.tip_position.y / 1000)
            # Fingertips reuse the hand orientation (loop-invariant).
            br.sendTransform(tip_origin, hand_orientation, hand.header.stamp, "leap", finger.header.frame_id)

            # Fingertip marker.
            finger_marker = Marker()
            finger_marker.header.frame_id = finger.header.frame_id
            finger_marker.header.stamp = finger.header.stamp
            finger_marker.ns = hand.header.frame_id
            finger_marker.id = finger.id
            finger_marker.type = Marker.CUBE
            finger_marker.action = Marker.ADD
            finger_marker.scale.x = 0.02
            finger_marker.scale.y = 0.02
            finger_marker.scale.z = 0.02
            finger_marker.color.r = 0.7
            finger_marker.color.g = 0.7
            finger_marker.color.b = 0.3
            finger_marker.color.a = 0.8
            finger_marker.lifetime = rospy.Duration.from_sec(0.1)
            marker_array.markers.append(finger_marker)

            # Palm->fingertip segment (LINE_LIST consumes points in pairs).
            hand_point = Point(x=hand_origin[0], y=hand_origin[1], z=hand_origin[2])
            tip_point = Point(x=tip_origin[0], y=tip_origin[1], z=tip_origin[2])
            lines_marker.points.append(hand_point)
            lines_marker.points.append(tip_point)
        marker_array.markers.append(lines_marker)

    # Publish only when there is something to show.
    if len(marker_array.markers) > 0:
        marker_pub.publish(marker_array)
# Subscribe once everything above is initialized, then hand control to ROS.
# (A second TransformBroadcaster used to be created here; it was redundant --
# `br` is already constructed at module scope -- so it has been removed.)
leap_sub = rospy.Subscriber(TOPIC_HAND_INFO, HandInfoList, leap_callback)
rospy.spin()
|
# -*- coding: utf-8 -*-
"""Edit tests for the flow module."""
#
# (C) Pywikibot team, 2015
#
# Distributed under the terms of the MIT license.
#
from __future__ import unicode_literals
__version__ = '$Id: 9844f5d04efa704b740caff527921bade7038828 $'
from pywikibot.flow import Board, Topic, Post
from pywikibot.tools import PY2
from tests.aspects import TestCase
if not PY2:
    # Python 3 has no `unicode` builtin; alias it to `str` so the
    # isinstance checks in the tests below work on both major versions.
    unicode = str
class TestFlowCreateTopic(TestCase):

    """Test the creation of Flow topics."""

    family = 'test'
    code = 'test'

    user = True
    write = True

    def test_create_topic(self):
        """Create a new topic on a board and verify its root post content."""
        content = 'If you can read this, the Flow code in Pywikibot works!'
        site = self.get_site()
        board = Board(site, 'Talk:Pywikibot test')
        topic = board.new_topic('Pywikibot test', content, 'wikitext')
        root_post = topic.replies()[0]
        root_wikitext = root_post.get(format='wikitext')
        # Only the requested format should have been fetched/cached.
        self.assertIn('wikitext', root_post._content)
        self.assertNotIn('html', root_post._content)
        self.assertIsInstance(root_wikitext, unicode)
        self.assertEqual(root_wikitext, content)
class TestFlowReply(TestCase):

    """Test replying to existing posts.

    These tests run against a live topic on the test wiki
    ('Topic:Sl4ssgh123c3e1bh'), so each test both mutates remote state and
    asserts on the resulting reply lists.
    """

    family = 'test'
    code = 'test'

    user = True
    write = True

    def test_reply_to_topic(self):
        """Test replying to "topic" (really the topic's root post)."""
        # Setup
        content = 'I am a reply to the topic. Replying works!'
        topic = Topic(self.site, 'Topic:Sl4ssgh123c3e1bh')
        # force=True re-fetches from the server; copy so the later length
        # comparison is against a snapshot, not a live list.
        old_replies = topic.replies(force=True)[:]
        # Reply
        reply_post = topic.reply(content, 'wikitext')
        # Test content: only the requested format should be cached.
        wikitext = reply_post.get(format='wikitext')
        self.assertIn('wikitext', reply_post._content)
        self.assertNotIn('html', reply_post._content)
        self.assertIsInstance(wikitext, unicode)
        self.assertEqual(wikitext, content)
        # Test reply list in topic
        new_replies = topic.replies(force=True)
        self.assertEqual(len(new_replies), len(old_replies) + 1)

    def test_reply_to_topic_root(self):
        """Test replying to the topic's root post directly."""
        # Setup
        content = "I am a reply to the topic's root post. Replying still works!"
        topic = Topic(self.site, 'Topic:Sl4ssgh123c3e1bh')
        topic_root = topic.root
        old_replies = topic_root.replies(force=True)[:]
        # Reply
        reply_post = topic_root.reply(content, 'wikitext')
        # Test content
        wikitext = reply_post.get(format='wikitext')
        self.assertIn('wikitext', reply_post._content)
        self.assertNotIn('html', reply_post._content)
        self.assertIsInstance(wikitext, unicode)
        self.assertEqual(wikitext, content)
        # Test reply list in topic
        new_replies = topic_root.replies(force=True)
        self.assertEqual(len(new_replies), len(old_replies) + 1)

    def test_reply_to_post(self):
        """Test replying to an ordinary post."""
        # Setup
        content = 'I am a nested reply to a regular post. Still going strong!'
        topic = Topic(self.site, 'Topic:Sl4ssgh123c3e1bh')
        root_post = Post(topic, 'smjnql768bl0h0kt')
        old_replies = root_post.replies(force=True)[:]
        # Reply
        reply_post = root_post.reply(content, 'wikitext')
        # Test content
        wikitext = reply_post.get(format='wikitext')
        self.assertIn('wikitext', reply_post._content)
        self.assertNotIn('html', reply_post._content)
        self.assertIsInstance(wikitext, unicode)
        self.assertEqual(wikitext, content)
        # Test reply list in topic
        new_replies = root_post.replies(force=True)
        self.assertEqual(len(new_replies), len(old_replies) + 1)

    def test_nested_reply(self):
        """Test replying to a previous reply to a topic."""
        # Setup
        first_content = 'I am a reply to the topic with my own replies. Great!'
        second_content = 'I am a nested reply. This conversation is getting pretty good!'
        topic = Topic(self.site, 'Topic:Sl4ssgh123c3e1bh')
        topic_root = topic.root
        # First reply
        old_root_replies = topic_root.replies(force=True)[:]
        first_reply_post = topic_root.reply(first_content, 'wikitext')
        # Test first reply's content
        first_wikitext = first_reply_post.get(format='wikitext')
        self.assertIn('wikitext', first_reply_post._content)
        self.assertNotIn('html', first_reply_post._content)
        self.assertIsInstance(first_wikitext, unicode)
        self.assertEqual(first_wikitext, first_content)
        # Test reply list in topic
        new_root_replies = topic_root.replies(force=True)
        self.assertEqual(len(new_root_replies), len(old_root_replies) + 1)
        # Nested reply
        old_nested_replies = first_reply_post.replies(force=True)[:]
        self.assertListEqual(old_nested_replies, [])
        second_reply_post = first_reply_post.reply(second_content,
                                                   'wikitext')
        # Test nested reply's content
        second_wikitext = second_reply_post.get(format='wikitext')
        self.assertIn('wikitext', second_reply_post._content)
        self.assertNotIn('html', second_reply_post._content)
        self.assertIsInstance(second_wikitext, unicode)
        self.assertEqual(second_wikitext, second_content)
        # Test reply list in first reply
        # Broken due to current Flow reply structure (T105438)
        # new_nested_replies = first_reply_post.replies(force=True)
        # self.assertEqual(len(new_nested_replies), len(old_nested_replies) + 1)
        # Current test for nested reply list: nested replies surface as
        # additional replies on the root post instead.
        self.assertListEqual(old_nested_replies, [])
        more_root_replies = topic_root.replies(force=True)
        self.assertEqual(len(more_root_replies), len(new_root_replies) + 1)
|
"""Combine all winner solutions in previous challenges (AutoCV, AutoCV2,
AutoNLP and AutoSpeech).
"""
import os
import sys
here = os.path.dirname(os.path.abspath(__file__))
# Make this directory importable (os.path.join(here, "") is just `here` with
# a trailing separator; note the loop below appends it a second time).
sys.path.append(os.path.join(here, ""))
# Sub-directories holding per-domain winner solutions that must be importable
# as top-level packages.
model_dirs = [
    "",  # current directory
    "Auto_Tabular",
]
for model_dir in model_dirs:
    sys.path.append(os.path.join(here, model_dir))
def meta_domain_2_model(domain):
    """Return the winner ``Model`` class for the given inferred domain.

    Imports are done lazily so only the chosen domain's solution (which may
    pull in heavy frameworks at import time) is loaded.

    Args:
        domain: one of "image", "video", "text", "speech"; anything else
            falls through to the tabular model.

    Returns:
        The domain-specific ``Model`` class.
    """
    if domain == "image":
        path = os.path.join(here, "Auto_Image")
        # Guard against unbounded sys.path growth when called repeatedly.
        if path not in sys.path:
            sys.path.append(path)
        from Auto_Image.model import Model as AutoImageModel
        return AutoImageModel
    elif domain == "video":
        path = os.path.join(here, "Auto_Video")
        if path not in sys.path:
            sys.path.append(path)
        from Auto_Video.model import Model as AutoVideoModel
        return AutoVideoModel
    elif domain == "text":
        from model_nlp import Model as AutoNlpModel
        return AutoNlpModel
    elif domain == "speech":
        from at_speech.model import Model as AutoSpeechModel
        return AutoSpeechModel
    else:
        from Auto_Tabular.model import Model as TabularModel
        return TabularModel
class Model:
    """A model that combines all winner solutions.

    The input domain is inferred from the dataset metadata, and train/test
    calls are delegated to the winner solution for that domain.
    """

    def __init__(self, metadata):
        """
        Args:
            metadata: an AutoDLMetadata object. Its definition can be found in
                AutoDL_ingestion_program/dataset.py
        """
        self.done_training = False
        self.metadata = metadata
        self.domain = infer_domain(metadata)
        domain_model_cls = meta_domain_2_model(self.domain)
        self.domain_model = domain_model_cls(self.metadata)
        self.has_exception = False
        self.y_pred_last = None

    def train(self, dataset, remaining_time_budget=None):
        """Delegate training to the domain-specific model and mirror its
        done_training flag."""
        self.domain_model.train(dataset, remaining_time_budget)
        self.done_training = self.domain_model.done_training

    def test(self, dataset, remaining_time_budget=None):
        """Delegate prediction to the domain-specific model.

        Once training has finished (or an exception was recorded), the last
        prediction is returned again without re-running inference.
        """
        if self.done_training is True or self.has_exception is True:
            return self.y_pred_last
        self.y_pred_last = self.domain_model.test(
            dataset, remaining_time_budget=remaining_time_budget)
        self.done_training = self.domain_model.done_training
        return self.y_pred_last
def infer_domain(metadata):
    """Infer the input domain from the shape of the 4-D example tensor.

    Args:
        metadata: an AutoDLMetadata object.

    Returns:
        One of "tabular", "image", "text", "speech" or "video".
    """
    row_count, col_count = metadata.get_matrix_size(0)
    sequence_size = metadata.get_sequence_size()
    channel_to_index_map = metadata.get_channel_to_index_map()
    if sequence_size == 1:
        # Single time step: a flat vector is tabular, a 2-D grid is an image.
        return "tabular" if row_count == 1 or col_count == 1 else "image"
    if row_count == 1 and col_count == 1:
        # 1x1 spatial extent over time: a channel/vocabulary map means token
        # indices (text); otherwise raw samples (speech).
        return "text" if channel_to_index_map else "speech"
    return "video"
|
from dataclasses import dataclass, field
from typing import List, Optional
__NAMESPACE__ = "sdformat/v1.2/sensor.xsd"
# NOTE(review): this binding appears to be generated (xsdata-style) from
# sdformat/v1.2/sensor.xsd -- confirm before hand-editing; regenerating from
# the schema is usually the right way to change it.
@dataclass
class Sensor:
    """
    The sensor tag describes the type and properties of a sensor.

    Parameters
    ----------
    always_on: If true the sensor will always be updated according to
        the update rate.
    update_rate: The frequency at which the sensor data is generated. If
        left unspecified, the sensor will generate data every cycle.
    visualize: If true, the sensor is visualized in the GUI
    pose: This is the pose of the sensor, relative to the parent link
        reference frame.
    topic: Name of the topic on which data is published. This is
        necessary for visualization
    plugin: A plugin is a dynamically loaded chunk of code. It can exist
        as a child of world, model, and sensor.
    camera: These elements are specific to camera sensors.
    ray: These elements are specific to the ray (laser) sensor.
    contact: These elements are specific to the contact sensor.
    rfidtag:
    rfid:
    name: A unique name for the sensor. This name must not match another
        model in the model.
    type: The type name of the sensor. By default, gazebo supports types
        camera, depth, stereocamera, contact, imu, ir and ray.
    """

    class Meta:
        # XML element name in the schema.
        name = "sensor"

    always_on: bool = field(
        default=False,
        metadata={
            "type": "Element",
            "namespace": "",
            "required": True,
        },
    )
    update_rate: float = field(
        default=0.0,
        metadata={
            "type": "Element",
            "namespace": "",
            "required": True,
        },
    )
    visualize: bool = field(
        default=False,
        metadata={
            "type": "Element",
            "namespace": "",
            "required": True,
        },
    )
    # Pose is a 6-tuple "x y z roll pitch yaw" string; the pattern enforces
    # six whitespace-separated numbers (plain or scientific notation).
    pose: str = field(
        default="0 0 0 0 0 0",
        metadata={
            "type": "Element",
            "namespace": "",
            "required": True,
            "pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){5}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
        },
    )
    topic: str = field(
        default="__default",
        metadata={
            "type": "Element",
            "namespace": "",
            "required": True,
        },
    )
    plugin: List["Sensor.Plugin"] = field(
        default_factory=list,
        metadata={
            "type": "Element",
            "namespace": "",
        },
    )
    camera: Optional["Sensor.Camera"] = field(
        default=None,
        metadata={
            "type": "Element",
            "namespace": "",
        },
    )
    ray: Optional["Sensor.Ray"] = field(
        default=None,
        metadata={
            "type": "Element",
            "namespace": "",
        },
    )
    contact: Optional["Sensor.Contact"] = field(
        default=None,
        metadata={
            "type": "Element",
            "namespace": "",
        },
    )
    rfidtag: Optional[str] = field(
        default=None,
        metadata={
            "type": "Element",
            "namespace": "",
        },
    )
    rfid: Optional[str] = field(
        default=None,
        metadata={
            "type": "Element",
            "namespace": "",
        },
    )
    name: Optional[str] = field(
        default=None,
        metadata={
            "type": "Attribute",
            "required": True,
        },
    )
    type: Optional[str] = field(
        default=None,
        metadata={
            "type": "Attribute",
            "required": True,
        },
    )

    @dataclass
    class Plugin:
        """A plugin is a dynamically loaded chunk of code.

        It can exist as a child of world, model, and sensor.

        Parameters
        ----------
        any_element: This is a special element that should not be
            specified in an SDFormat file. It automatically copies child
            elements into the SDFormat element so that a plugin can
            access the data.
        name: A unique name for the plugin, scoped to its parent.
        filename: Name of the shared library to load. If the filename is
            not a full path name, the file will be searched for in the
            configuration paths.
        """
        any_element: List[object] = field(
            default_factory=list,
            metadata={
                "type": "Wildcard",
                "namespace": "##any",
            },
        )
        name: Optional[str] = field(
            default=None,
            metadata={
                "type": "Attribute",
                "required": True,
            },
        )
        filename: Optional[str] = field(
            default=None,
            metadata={
                "type": "Attribute",
                "required": True,
            },
        )

    @dataclass
    class Camera:
        """
        These elements are specific to camera sensors.

        Parameters
        ----------
        horizontal_fov: Horizontal field of view
        image: The image size in pixels and format.
        clip: The near and far clip planes. Objects closer or farther
            than these planes are not rendered.
        save: Enable or disable saving of camera frames.
        depth_camera: Depth camera parameters
        """
        horizontal_fov: float = field(
            default=1.047,
            metadata={
                "type": "Element",
                "namespace": "",
                "required": True,
            },
        )
        image: Optional["Sensor.Camera.Image"] = field(
            default=None,
            metadata={
                "type": "Element",
                "namespace": "",
                "required": True,
            },
        )
        clip: Optional["Sensor.Camera.Clip"] = field(
            default=None,
            metadata={
                "type": "Element",
                "namespace": "",
                "required": True,
            },
        )
        save: Optional["Sensor.Camera.Save"] = field(
            default=None,
            metadata={
                "type": "Element",
                "namespace": "",
            },
        )
        depth_camera: Optional["Sensor.Camera.DepthCamera"] = field(
            default=None,
            metadata={
                "type": "Element",
                "namespace": "",
            },
        )

        @dataclass
        class Image:
            """
            The image size in pixels and format.

            Parameters
            ----------
            width: Width in pixels
            height: Height in pixels
            format:
                (L8|R8G8B8|B8G8R8|BAYER_RGGB8|BAYER_BGGR8|BAYER_GBRG8|BAYER_GRBG8)
            """
            width: int = field(
                default=320,
                metadata={
                    "type": "Element",
                    "namespace": "",
                    "required": True,
                },
            )
            height: int = field(
                default=240,
                metadata={
                    "type": "Element",
                    "namespace": "",
                    "required": True,
                },
            )
            format: str = field(
                default="R8G8B8",
                metadata={
                    "type": "Element",
                    "namespace": "",
                    "required": True,
                },
            )

        @dataclass
        class Clip:
            """The near and far clip planes.

            Objects closer or farther than these planes are not
            rendered.

            Parameters
            ----------
            near: Near clipping plane
            far: Far clipping plane
            """
            near: float = field(
                default=0.1,
                metadata={
                    "type": "Element",
                    "namespace": "",
                    "required": True,
                },
            )
            far: float = field(
                default=100.0,
                metadata={
                    "type": "Element",
                    "namespace": "",
                    "required": True,
                },
            )

        @dataclass
        class Save:
            """
            Enable or disable saving of camera frames.

            Parameters
            ----------
            path: The path name which will hold the frame data. If path
                name is relative, then directory is relative to current
                working directory.
            enabled: True = saving enabled
            """
            path: str = field(
                default="__default__",
                metadata={
                    "type": "Element",
                    "namespace": "",
                    "required": True,
                },
            )
            enabled: Optional[bool] = field(
                default=None,
                metadata={
                    "type": "Attribute",
                    "required": True,
                },
            )

        @dataclass
        class DepthCamera:
            """
            Depth camera parameters.

            Parameters
            ----------
            output: Type of output
            """
            output: str = field(
                default="depths",
                metadata={
                    "type": "Element",
                    "namespace": "",
                    "required": True,
                },
            )

    @dataclass
    class Ray:
        """
        These elements are specific to the ray (laser) sensor.

        Parameters
        ----------
        scan:
        range: specifies range properties of each simulated ray
        """
        scan: Optional["Sensor.Ray.Scan"] = field(
            default=None,
            metadata={
                "type": "Element",
                "namespace": "",
                "required": True,
            },
        )
        range: Optional["Sensor.Ray.Range"] = field(
            default=None,
            metadata={
                "type": "Element",
                "namespace": "",
                "required": True,
            },
        )

        @dataclass
        class Scan:
            # Horizontal sweep is mandatory; vertical is optional (2-D scanners).
            horizontal: Optional["Sensor.Ray.Scan.Horizontal"] = field(
                default=None,
                metadata={
                    "type": "Element",
                    "namespace": "",
                    "required": True,
                },
            )
            vertical: Optional["Sensor.Ray.Scan.Vertical"] = field(
                default=None,
                metadata={
                    "type": "Element",
                    "namespace": "",
                },
            )

            @dataclass
            class Horizontal:
                """
                Parameters
                ----------
                samples: The number of simulated rays to generate per
                    complete laser sweep cycle.
                resolution: This number is multiplied by samples to
                    determine the number of range data points returned.
                    If resolution is less than one, range data is
                    interpolated. If resolution is greater than one,
                    range data is averaged.
                min_angle:
                max_angle: Must be greater or equal to min_angle
                """
                samples: int = field(
                    default=640,
                    metadata={
                        "type": "Element",
                        "namespace": "",
                        "required": True,
                    },
                )
                resolution: float = field(
                    default=1.0,
                    metadata={
                        "type": "Element",
                        "namespace": "",
                        "required": True,
                    },
                )
                min_angle: float = field(
                    default=0.0,
                    metadata={
                        "type": "Element",
                        "namespace": "",
                        "required": True,
                    },
                )
                max_angle: float = field(
                    default=0.0,
                    metadata={
                        "type": "Element",
                        "namespace": "",
                        "required": True,
                    },
                )

            @dataclass
            class Vertical:
                """
                Parameters
                ----------
                samples: The number of simulated rays to generate per
                    complete laser sweep cycle.
                resolution: This number is multiplied by samples to
                    determine the number of range data points returned.
                    If resolution is less than one, range data is
                    interpolated. If resolution is greater than one,
                    range data is averaged.
                min_angle:
                max_angle: Must be greater or equal to min_angle
                """
                samples: int = field(
                    default=1,
                    metadata={
                        "type": "Element",
                        "namespace": "",
                        "required": True,
                    },
                )
                resolution: float = field(
                    default=1.0,
                    metadata={
                        "type": "Element",
                        "namespace": "",
                        "required": True,
                    },
                )
                min_angle: float = field(
                    default=0.0,
                    metadata={
                        "type": "Element",
                        "namespace": "",
                        "required": True,
                    },
                )
                max_angle: float = field(
                    default=0.0,
                    metadata={
                        "type": "Element",
                        "namespace": "",
                        "required": True,
                    },
                )

        @dataclass
        class Range:
            """
            specifies range properties of each simulated ray.

            Parameters
            ----------
            min: The minimum distance for each ray.
            max: The maximum distance for each ray.
            resolution: Linear resolution of each ray.
            """
            min: float = field(
                default=0.0,
                metadata={
                    "type": "Element",
                    "namespace": "",
                    "required": True,
                },
            )
            max: float = field(
                default=0.0,
                metadata={
                    "type": "Element",
                    "namespace": "",
                    "required": True,
                },
            )
            resolution: float = field(
                default=0.0,
                metadata={
                    "type": "Element",
                    "namespace": "",
                    "required": True,
                },
            )

    @dataclass
    class Contact:
        """
        These elements are specific to the contact sensor.

        Parameters
        ----------
        collision: name of the collision element within a link that acts
            as the contact sensor.
        topic: Topic on which contact data is published.
        """
        collision: str = field(
            default="__default__",
            metadata={
                "type": "Element",
                "namespace": "",
                "required": True,
            },
        )
        topic: str = field(
            default="__default_topic__",
            metadata={
                "type": "Element",
                "namespace": "",
                "required": True,
            },
        )
|
"""Classes for representing properties of STIX Objects and Cyber Observables."""
import base64
import binascii
import copy
import inspect
import re
import uuid
from .base import _STIXBase
from .exceptions import (
CustomContentError, DictionaryKeyError, MissingPropertiesError,
MutuallyExclusivePropertiesError, STIXError,
)
from .parsing import parse, parse_observable
from .registry import STIX2_OBJ_MAPS
from .utils import _get_dict, get_class_hierarchy_names, parse_into_datetime
from .version import DEFAULT_VERSION
try:
    # `Mapping` lives in collections.abc since Python 3.3; the fallback keeps
    # Python 2 compatibility.
    from collections.abc import Mapping
except ImportError:
    from collections import Mapping

# STIX 2.0 type names: lowercase alphanumerics joined by single hyphens
# (the pattern also tolerates an optional leading/trailing hyphen).
TYPE_REGEX = re.compile(r'^\-?[a-z0-9]+(-[a-z0-9]+)*\-?$')
# STIX 2.1 additionally requires the name to begin with a letter.
TYPE_21_REGEX = re.compile(r'^([a-z][a-z0-9]*)+(-[a-z0-9]+)*\-?$')
# Uniform error message used when a STIX identifier fails validation.
ERROR_INVALID_ID = (
    "not a valid STIX identifier, must match <object-type>--<UUID>: {}"
)
def _check_uuid(uuid_str, spec_version):
    """
    Check whether the given UUID string is valid with respect to the given STIX
    spec version.  STIX 2.0 requires UUIDv4; 2.1 only requires the RFC 4122
    variant.

    :param uuid_str: A UUID as a string
    :param spec_version: The STIX spec version
    :return: True if the UUID is valid, False if not
    :raises ValueError: If uuid_str is malformed
    """
    parsed = uuid.UUID(uuid_str)
    # Every STIX version requires the RFC 4122 variant.
    if parsed.variant != uuid.RFC_4122:
        return False
    # 2.0 additionally pins the version to 4; 2.1+ accepts any version.
    if spec_version == "2.0":
        return parsed.version == 4
    return True
def _validate_id(id_, spec_version, required_prefix):
    """
    Check the STIX identifier for correctness, raise an exception if there are
    errors.

    :param id_: The STIX identifier
    :param spec_version: The STIX specification version to use
    :param required_prefix: The required prefix on the identifier, if any.
        This function doesn't add a "--" suffix to the prefix, so callers must
        add it if it is important.  Pass None to skip the prefix check.
    :raises ValueError: If there are any errors with the identifier
    """
    if required_prefix and not id_.startswith(required_prefix):
        raise ValueError("must start with '{}'.".format(required_prefix))
    try:
        if required_prefix:
            uuid_part = id_[len(required_prefix):]
        else:
            uuid_part = id_[id_.index("--") + 2:]
        uuid_ok = _check_uuid(uuid_part, spec_version)
    except ValueError:
        # Replace the underlying ValueError (bad UUID / missing "--")
        # with our uniform message.
        raise ValueError(ERROR_INVALID_ID.format(id_))
    if not uuid_ok:
        raise ValueError(ERROR_INVALID_ID.format(id_))
def _validate_type(type_, spec_version):
    """
    Check the STIX type name for correctness, raise an exception if there are
    errors.

    :param type_: The STIX type name
    :param spec_version: The STIX specification version to use
    :raises ValueError: If there are any errors with the identifier
    """
    if spec_version == "2.0":
        if not re.match(TYPE_REGEX, type_):
            raise ValueError(
                "Invalid type name '%s': must only contain the "
                "characters a-z (lowercase ASCII), 0-9, and hyphen (-)." %
                type_,
            )
    elif not re.match(TYPE_21_REGEX, type_):  # 2.1+
        raise ValueError(
            "Invalid type name '%s': must only contain the "
            "characters a-z (lowercase ASCII), 0-9, and hyphen (-) "
            "and must begin with an a-z character" % type_,
        )
    if not 3 <= len(type_) <= 250:
        raise ValueError(
            "Invalid type name '%s': must be between 3 and 250 characters." % type_,
        )
class Property(object):
    """Represent a property of STIX data type.

    Subclasses can define the following attributes as keyword arguments to
    ``__init__()``.

    Args:
        required (bool): If ``True``, the property must be provided when
            creating an object with that property. No default value exists for
            these properties. (Default: ``False``)
        fixed: This provides a constant default value. Users are free to
            provide this value explicitly when constructing an object (which
            allows you to copy **all** values from an existing object to a new
            object), but if the user provides a value other than the ``fixed``
            value, it will raise an error. This is semantically equivalent to
            defining both:

            - a ``clean()`` function that checks if the value matches the fixed
              value, and
            - a ``default()`` function that returns the fixed value.

    Subclasses can also define the following functions:

    - ``def clean(self, value) -> any:``
        - Return a value that is valid for this property. If ``value`` is not
          valid for this property, this will attempt to transform it first. If
          ``value`` is not valid and no such transformation is possible, it
          should raise an exception.
    - ``def default(self):``
        - provide a default value for this property.
        - ``default()`` can return the special value ``NOW`` to use the current
          time. This is useful when several timestamps in the same object
          need to use the same default value, so calling now() for each
          property-- likely several microseconds apart-- does not work.

    Subclasses can instead provide a lambda function for ``default`` as a
    keyword argument. ``clean`` should not be provided as a lambda since
    lambdas cannot raise their own exceptions.

    When instantiating Properties, ``required`` and ``default`` should not be
    used together. ``default`` implies that the property is required in the
    specification so this function will be used to supply a value if none is
    provided. ``required`` means that the user must provide this; it is
    required in the specification and we can't or don't want to create a
    default value.
    """

    def _default_clean(self, value):
        # installed as clean() when a fixed value is configured
        if value != self._fixed_value:
            raise ValueError("must equal '{}'.".format(self._fixed_value))
        return value

    def __init__(self, required=False, fixed=None, default=None):
        self.required = required

        if required and default:
            raise STIXError(
                # fixed: original message read "Cant't...'required'really"
                "Can't use 'required' and 'default' together. 'required' "
                "really means 'the user must provide this.'",
            )

        # "is not None" so falsy fixed values (0, "", False) are honored;
        # the original truthiness test silently ignored them
        if fixed is not None:
            self._fixed_value = fixed
            self.clean = self._default_clean
            self.default = lambda: fixed
        if default is not None:
            self.default = default

    def clean(self, value):
        """Default clean: accept any value unchanged."""
        return value

    def __call__(self, value=None):
        """Used by ListProperty to handle lists that have been defined with
        either a class or an instance.
        """
        return value
class ListProperty(Property):

    def __init__(self, contained, **kwargs):
        """
        ``contained`` should be a Property class or instance, or a _STIXBase
        subclass.
        """
        self.contained = None

        if inspect.isclass(contained):
            if issubclass(contained, Property):
                # Property classes get instantiated
                self.contained = contained()
            elif issubclass(contained, _STIXBase):
                # _STIXBase subclasses are kept as-is
                self.contained = contained
        elif isinstance(contained, Property):
            self.contained = contained

        if self.contained is None:
            raise TypeError(
                "Invalid list element type: {}".format(str(contained)),
            )

        super(ListProperty, self).__init__(**kwargs)

    def clean(self, value):
        """Clean every element of *value* against the contained type."""
        try:
            iter(value)
        except TypeError:
            raise ValueError("must be an iterable.")

        # a lone object or string counts as a one-element list
        if isinstance(value, (_STIXBase, str)):
            value = [value]

        if isinstance(self.contained, Property):
            cleaned = [self.contained.clean(element) for element in value]
        else:
            cleaned = [self._clean_element(element) for element in value]

        # STIX spec forbids empty lists
        if not cleaned:
            raise ValueError("must not be empty.")

        return cleaned

    def _clean_element(self, element):
        # self.contained is a _STIXBase subclass on this path
        if isinstance(element, self.contained):
            return element
        if isinstance(element, Mapping):
            # attempt a mapping-like usage...
            return self.contained(**element)
        raise ValueError(
            "Can't create a {} out of {}".format(
                self.contained._type, str(element),
            ),
        )
class StringProperty(Property):

    def __init__(self, **kwargs):
        super(StringProperty, self).__init__(**kwargs)

    def clean(self, value):
        """Coerce *value* to ``str`` if it is not one already."""
        return value if isinstance(value, str) else str(value)
class TypeProperty(Property):
    """Property whose value is fixed to a single STIX type name.

    The type name is validated against the naming rules of *spec_version*
    when the property itself is constructed, so bad names fail early.
    """

    def __init__(self, type, spec_version=DEFAULT_VERSION):
        _validate_type(type, spec_version)  # fail fast on an invalid type name
        self.spec_version = spec_version
        super(TypeProperty, self).__init__(fixed=type)
class IDProperty(Property):
    """Property for STIX identifiers of a given object type."""

    def __init__(self, type, spec_version=DEFAULT_VERSION):
        self.required_prefix = type + "--"
        self.spec_version = spec_version
        super(IDProperty, self).__init__()

    def clean(self, value):
        """Validate the identifier's prefix and UUID part."""
        _validate_id(value, self.spec_version, self.required_prefix)
        return value

    def default(self):
        """Generate a fresh random identifier for this type."""
        return "{}{}".format(self.required_prefix, uuid.uuid4())
class IntegerProperty(Property):
    """Property for integer values with optional inclusive min/max bounds."""

    def __init__(self, min=None, max=None, **kwargs):
        self.min = min
        self.max = max
        super(IntegerProperty, self).__init__(**kwargs)

    def clean(self, value):
        """Coerce *value* to int and range-check it.

        :raises ValueError: if the value is not convertible to an integer or
            falls outside the configured bounds
        """
        try:
            value = int(value)
        except (ValueError, TypeError, OverflowError):
            # narrowed from a bare `except Exception`: these are the
            # failures int() actually raises for bad input
            raise ValueError("must be an integer.")

        if self.min is not None and value < self.min:
            msg = "minimum value is {}. received {}".format(self.min, value)
            raise ValueError(msg)

        if self.max is not None and value > self.max:
            msg = "maximum value is {}. received {}".format(self.max, value)
            raise ValueError(msg)

        return value
class FloatProperty(Property):
    """Property for float values with optional inclusive min/max bounds."""

    def __init__(self, min=None, max=None, **kwargs):
        self.min = min
        self.max = max
        super(FloatProperty, self).__init__(**kwargs)

    def clean(self, value):
        """Coerce *value* to float and range-check it.

        :raises ValueError: if the value is not convertible to a float or
            falls outside the configured bounds
        """
        try:
            value = float(value)
        except (ValueError, TypeError, OverflowError):
            # narrowed from a bare `except Exception`: these are the
            # failures float() actually raises for bad input
            raise ValueError("must be a float.")

        if self.min is not None and value < self.min:
            msg = "minimum value is {}. received {}".format(self.min, value)
            raise ValueError(msg)

        if self.max is not None and value > self.max:
            msg = "maximum value is {}. received {}".format(self.max, value)
            raise ValueError(msg)

        return value
class BooleanProperty(Property):

    def clean(self, value):
        """Coerce common boolean spellings ('true'/'t'/'1', 1/0, ...) to bool.

        :raises ValueError: if the value has no recognized boolean meaning
        """
        if isinstance(value, bool):
            return value

        try:
            lowered = value.lower()
        except AttributeError:
            # not a string: accept the exact numeric values 1 and 0
            if value == 1:
                return True
            if value == 0:
                return False
        else:
            if lowered in ('true', 't', '1'):
                return True
            if lowered in ('false', 'f', '0'):
                return False

        raise ValueError("must be a boolean value.")
class TimestampProperty(Property):
    """Property for timestamp values.

    :param precision: required timestamp precision — forwarded verbatim to
        parse_into_datetime (allowed values defined there; TODO confirm)
    :param precision_constraint: how the precision is enforced — also
        forwarded to parse_into_datetime
    """

    def __init__(self, precision="any", precision_constraint="exact", **kwargs):
        self.precision = precision
        self.precision_constraint = precision_constraint
        super(TimestampProperty, self).__init__(**kwargs)

    def clean(self, value):
        """Parse *value* into a datetime honoring the configured precision."""
        return parse_into_datetime(
            value, self.precision, self.precision_constraint,
        )
class DictionaryProperty(Property):

    def __init__(self, spec_version=DEFAULT_VERSION, **kwargs):
        self.spec_version = spec_version
        super(DictionaryProperty, self).__init__(**kwargs)

    def clean(self, value):
        """Convert *value* to a dict and validate every key.

        :raises ValueError: for non-dict input or an empty dict
        :raises DictionaryKeyError: for a key violating length/charset rules
        """
        try:
            dictified = _get_dict(value)
        except ValueError:
            raise ValueError("The dictionary property must contain a dictionary")

        for key in dictified:
            self._check_key(key)

        if not dictified:
            raise ValueError("must not be empty.")

        return dictified

    def _check_key(self, key):
        # spec-version-specific length limits, then a shared charset check
        if self.spec_version == '2.0':
            if len(key) < 3:
                raise DictionaryKeyError(key, "shorter than 3 characters")
            elif len(key) > 256:
                raise DictionaryKeyError(key, "longer than 256 characters")
        elif self.spec_version == '2.1':
            if len(key) > 250:
                raise DictionaryKeyError(key, "longer than 250 characters")
        if not re.match(r"^[a-zA-Z0-9_-]+$", key):
            msg = (
                "contains characters other than lowercase a-z, "
                "uppercase A-Z, numerals 0-9, hyphen (-), or "
                "underscore (_)"
            )
            raise DictionaryKeyError(key, msg)
# Map of normalized (upper-cased, hyphen-stripped) hash algorithm names to a
# tuple of (validation regex for the digest string, canonical vocabulary name).
HASHES_REGEX = {
    "MD5": (r"^[a-fA-F0-9]{32}$", "MD5"),
    "MD6": (r"^[a-fA-F0-9]{32}|[a-fA-F0-9]{40}|[a-fA-F0-9]{56}|[a-fA-F0-9]{64}|[a-fA-F0-9]{96}|[a-fA-F0-9]{128}$", "MD6"),
    "RIPEMD160": (r"^[a-fA-F0-9]{40}$", "RIPEMD-160"),
    "SHA1": (r"^[a-fA-F0-9]{40}$", "SHA-1"),
    "SHA224": (r"^[a-fA-F0-9]{56}$", "SHA-224"),
    "SHA256": (r"^[a-fA-F0-9]{64}$", "SHA-256"),
    "SHA384": (r"^[a-fA-F0-9]{96}$", "SHA-384"),
    "SHA512": (r"^[a-fA-F0-9]{128}$", "SHA-512"),
    "SHA3224": (r"^[a-fA-F0-9]{56}$", "SHA3-224"),
    "SHA3256": (r"^[a-fA-F0-9]{64}$", "SHA3-256"),
    "SHA3384": (r"^[a-fA-F0-9]{96}$", "SHA3-384"),
    "SHA3512": (r"^[a-fA-F0-9]{128}$", "SHA3-512"),
    "SSDEEP": (r"^[a-zA-Z0-9/+:.]{1,128}$", "SSDEEP"),
    "WHIRLPOOL": (r"^[a-fA-F0-9]{128}$", "WHIRLPOOL"),
    "TLSH": (r"^[a-fA-F0-9]{70}$", "TLSH"),
}
class HashesProperty(DictionaryProperty):

    def clean(self, value):
        """Validate known hash values and normalize their algorithm names.

        Unrecognized algorithm names pass through untouched; recognized ones
        are validated against HASHES_REGEX and renamed to the canonical
        vocabulary spelling.
        """
        clean_dict = super(HashesProperty, self).clean(value)
        # iterate over a snapshot since keys may be renamed in clean_dict
        for key, hash_value in copy.deepcopy(clean_dict).items():
            lookup = key.upper().replace('-', '')
            if lookup not in HASHES_REGEX:
                continue
            pattern, vocab_key = HASHES_REGEX[lookup]
            if vocab_key == "SSDEEP" and self.spec_version == "2.0":
                vocab_key = vocab_key.lower()
            if not re.match(pattern, hash_value):
                raise ValueError("'{0}' is not a valid {1} hash".format(hash_value, vocab_key))
            if key != vocab_key:
                clean_dict[vocab_key] = clean_dict[key]
                del clean_dict[key]
        return clean_dict
class BinaryProperty(Property):

    def clean(self, value):
        """Validate that *value* is a base64-encoded string.

        :raises ValueError: if the value is not valid base64
        """
        try:
            # validate=True makes b64decode reject non-alphabet characters;
            # without it, they are silently discarded and strings like
            # "ab!cd!" would wrongly pass validation
            base64.b64decode(value, validate=True)
        except (binascii.Error, TypeError):
            raise ValueError("must contain a base64 encoded string")
        return value
class HexProperty(Property):

    def clean(self, value):
        """Validate that *value* is hex with an even number of digits."""
        if re.match(r"^([a-fA-F0-9]{2})+$", value) is None:
            raise ValueError("must contain an even number of hexadecimal characters")
        return value
class ReferenceProperty(Property):

    def __init__(self, valid_types=None, invalid_types=None, spec_version=DEFAULT_VERSION, **kwargs):
        """
        references sometimes must be to a specific object type
        """
        self.spec_version = spec_version

        # These checks need to be done prior to the STIX object finishing construction
        # and thus we can't use base.py's _check_mutually_exclusive_properties()
        # in the typical location of _check_object_constraints() in sdo.py
        if valid_types and invalid_types:
            raise MutuallyExclusivePropertiesError(self.__class__, ['invalid_types', 'valid_types'])
        elif valid_types is None and invalid_types is None:
            raise MissingPropertiesError(self.__class__, ['invalid_types', 'valid_types'])

        # normalize a single type name into a one-element list
        if valid_types and type(valid_types) is not list:
            valid_types = [valid_types]
        elif invalid_types and type(invalid_types) is not list:
            invalid_types = [invalid_types]

        self.valid_types = valid_types
        self.invalid_types = invalid_types

        super(ReferenceProperty, self).__init__(**kwargs)

    def clean(self, value):
        """Validate a reference value against the allow/deny type lists."""
        if isinstance(value, _STIXBase):
            value = value.id
        value = str(value)

        # the object type is everything before the "--" separator
        prefix = value[:value.index('--')]

        if self.valid_types:
            allowed = enumerate_types(self.valid_types, self.spec_version)
            if prefix not in allowed:
                raise ValueError("The type-specifying prefix '%s' for this property is not valid" % (prefix))
            required_prefix = prefix
        elif self.invalid_types:
            forbidden = enumerate_types(self.invalid_types, self.spec_version)
            if prefix in forbidden:
                raise ValueError("An invalid type-specifying prefix '%s' was specified for this property" % (prefix))
            required_prefix = prefix

        _validate_id(value, self.spec_version, required_prefix)

        return value
def enumerate_types(types, spec_version):
    """
    `types` is meant to be a list; it may contain specific object types and/or
    any of the words "SCO", "SDO", or "SRO".  Those three are general
    categories covering many specific object types, so each category word
    found is removed from the result and replaced by its member types — this
    way objects can't be created with the literal types "SCO", "SDO", or
    "SRO".  The input list itself is never mutated.
    """
    expanded = list(types)

    if "SDO" in types:
        expanded.remove("SDO")
        expanded.extend(STIX2_OBJ_MAPS[spec_version]['objects'].keys())
    if "SCO" in types:
        expanded.remove("SCO")
        expanded.extend(STIX2_OBJ_MAPS[spec_version]['observables'].keys())
    if "SRO" in types:
        expanded.remove("SRO")
        expanded.extend(['relationship', 'sighting'])

    return expanded
# Granular-marking selector syntax: either the literal "id", or a property
# path whose first segment is 3-250 chars of [a-z0-9_-], followed by dotted
# steps that are either "[<index>]" or 1-250 chars of [a-z0-9_-].
SELECTOR_REGEX = re.compile(r"^([a-z0-9_-]{3,250}(\.(\[\d+\]|[a-z0-9_-]{1,250}))*|id)$")
class SelectorProperty(Property):

    def clean(self, value):
        """Validate *value* against the granular-marking selector syntax."""
        if SELECTOR_REGEX.match(value) is None:
            raise ValueError("must adhere to selector syntax.")
        return value
class ObjectReferenceProperty(StringProperty):

    def __init__(self, valid_types=None, **kwargs):
        """String property for references within an observable container."""
        # normalize a single type name into a one-element list
        if valid_types and type(valid_types) is not list:
            self.valid_types = [valid_types]
        else:
            self.valid_types = valid_types
        super(ObjectReferenceProperty, self).__init__(**kwargs)
class EmbeddedObjectProperty(Property):

    def __init__(self, type, **kwargs):
        self.type = type
        super(EmbeddedObjectProperty, self).__init__(**kwargs)

    def clean(self, value):
        """Instantiate a plain dict as self.type; pass instances through."""
        if type(value) is dict:
            return self.type(**value)
        if not isinstance(value, self.type):
            raise ValueError("must be of type {}.".format(self.type.__name__))
        return value
class EnumProperty(StringProperty):

    def __init__(self, allowed, **kwargs):
        """String property restricted to the values in *allowed*."""
        if type(allowed) is not list:
            allowed = list(allowed)
        self.allowed = allowed
        super(EnumProperty, self).__init__(**kwargs)

    def clean(self, value):
        """Stringify *value*, then require it to be one of the allowed values."""
        cleaned = super(EnumProperty, self).clean(value)
        if cleaned in self.allowed:
            return cleaned
        raise ValueError("value '{}' is not valid for this enumeration.".format(cleaned))
class PatternProperty(StringProperty):
    """Property for STIX pattern strings; currently identical to StringProperty."""
    pass
class ObservableProperty(Property):
    """Property for holding Cyber Observable Objects.
    """

    def __init__(self, spec_version=DEFAULT_VERSION, allow_custom=False, *args, **kwargs):
        self.allow_custom = allow_custom
        self.spec_version = spec_version
        super(ObservableProperty, self).__init__(*args, **kwargs)

    def clean(self, value):
        """Parse each entry of an observable-objects mapping.

        :raises ValueError: for non-dict or empty-dict input
        """
        try:
            # deep-copy because _get_dict() hands back the same dict when
            # given one, and we mutate the result below
            dictified = copy.deepcopy(_get_dict(value))
        except ValueError:
            raise ValueError("The observable property must contain a dictionary")
        if not dictified:
            raise ValueError("The observable property must contain a non-empty dictionary")

        # map of local keys to object types, used to resolve *_ref properties
        valid_refs = {key: obj['type'] for key, obj in dictified.items()}

        for key, obj in dictified.items():
            dictified[key] = parse_observable(
                obj,
                valid_refs,
                allow_custom=self.allow_custom,
                version=self.spec_version,
            )

        return dictified
class ExtensionsProperty(DictionaryProperty):
    """Property for representing extensions on Observable objects.
    """

    def __init__(self, spec_version=DEFAULT_VERSION, allow_custom=False, enclosing_type=None, required=False):
        self.allow_custom = allow_custom
        self.enclosing_type = enclosing_type
        super(ExtensionsProperty, self).__init__(spec_version=spec_version, required=required)

    def clean(self, value):
        """Instantiate each extension entry using the registered extension
        classes for the enclosing observable type.

        :raises ValueError: for non-dict input or undeterminable entries
        :raises CustomContentError: for unknown extensions when custom
            content is not allowed
        """
        try:
            # deep-copy because _get_dict() hands back the same dict when
            # given one, and we mutate the result below
            dictified = copy.deepcopy(_get_dict(value))
        except ValueError:
            raise ValueError("The extensions property must contain a dictionary")

        extension_map = STIX2_OBJ_MAPS[self.spec_version]['observable-extensions'].get(self.enclosing_type, {})

        for name, subvalue in dictified.items():
            if name not in extension_map:
                if self.allow_custom:
                    dictified[name] = subvalue
                    continue
                raise CustomContentError("Can't parse unknown extension type: {}".format(name))

            cls = extension_map[name]
            if type(subvalue) is dict:
                if self.allow_custom:
                    subvalue['allow_custom'] = True
                dictified[name] = cls(**subvalue)
            elif type(subvalue) is cls:
                # an existing _Extension instance is assumed to be valid
                dictified[name] = subvalue
            else:
                raise ValueError("Cannot determine extension type.")

        return dictified
class STIXObjectProperty(Property):
    """Property for holding STIX objects, e.g. the contents of a Bundle."""

    def __init__(self, spec_version=DEFAULT_VERSION, allow_custom=False, *args, **kwargs):
        # forwarded to parse() so custom content can round-trip
        self.allow_custom = allow_custom
        self.spec_version = spec_version
        super(STIXObjectProperty, self).__init__(*args, **kwargs)

    def clean(self, value):
        """Accept an already-parsed STIX object as-is, or parse a dict.

        :raises ValueError: for non-dict input, empty dicts, nested Bundles,
            or spec-version mismatches in a 2.0 bundle
        """
        # Any STIX Object (SDO, SRO, or Marking Definition) can be added to
        # a bundle with no further checks.
        if any(
            x in ('_DomainObject', '_RelationshipObject', 'MarkingDefinition')
            for x in get_class_hierarchy_names(value)
        ):
            # A simple "is this a spec version 2.1+ object" test. For now,
            # limit 2.0 bundles to 2.0 objects. It's not possible yet to
            # have validation co-constraints among properties, e.g. have
            # validation here depend on the value of another property
            # (spec_version). So this is a hack, and not technically spec-
            # compliant.
            if 'spec_version' in value and self.spec_version == '2.0':
                raise ValueError(
                    "Spec version 2.0 bundles don't yet support "
                    "containing objects of a different spec "
                    "version.",
                )
            return value
        try:
            dictified = _get_dict(value)
        except ValueError:
            raise ValueError("This property may only contain a dictionary or object")
        if dictified == {}:
            raise ValueError("This property may only contain a non-empty dictionary or object")
        if 'type' in dictified and dictified['type'] == 'bundle':
            # bundles must not nest other bundles
            raise ValueError("This property may not contain a Bundle object")
        if 'spec_version' in dictified and self.spec_version == '2.0':
            # See above comment regarding spec_version.
            raise ValueError(
                "Spec version 2.0 bundles don't yet support "
                "containing objects of a different spec version.",
            )
        parsed_obj = parse(dictified, allow_custom=self.allow_custom)
        return parsed_obj
|
###################################################################################
# Author: Bert Van Acker (bva.bmkr@gmail.com)
# Version: 0.1.0
# License: LGPL-3.0 (GNU Lesser General Public License version 3)
#
# Description: Documentor class handling document and template generation
###################################################################################
#imports
import nbformat as nbf
from os import mkdir
from os.path import exists, dirname, join
import jinja2
from docx import Document
from docx.shared import Mm
import python_markdown_maker
class Documentor():
    """
    Documentor: Class representing the generic documentation handler

    :param bool DEBUG: setting the verbose
    :param object LOGGER: Logger object for uniform logging
    """
    def __init__(self, DEBUG=True, LOGGER=None):
        # verbose and logging
        self.DEBUG = DEBUG
        self.LOGGER = LOGGER

    @staticmethod
    def _cell_title(cell):
        """Return the markdown heading line for a content cell.

        Returns '' for an unrecognized cell.Format — the original code left
        `title` unbound (or reused the previous cell's title) in that case.
        """
        headings = {'intro': '# ', 'subsection': '## ', 'subsubsection': '### '}
        prefix = headings.get(cell.Format)
        return prefix + cell.Title + '\n' if prefix else ''

    def _append_cell(self, content, cell):
        """Convert one content cell into notebook cells and append them."""
        title = self._cell_title(cell)
        if cell.Type == 'markdown':
            content.append(nbf.v4.new_markdown_cell(title + cell.Text))
        if cell.Type == 'code':
            # code cells carry a markdown description followed by the code
            content.append(nbf.v4.new_markdown_cell(title + cell.Text))
            content.append(nbf.v4.new_code_cell(cell.Code))

    def generateNotebook(self, NotebookObject, output):
        """
        Function to generate a jupyter notebook

        :param object NotebookObject: Jupyter notebook class
        :param string output: Path to the output location
        """
        self.notebook = nbf.v4.new_notebook()
        content = []
        for cell in NotebookObject.Content:
            self._append_cell(content, cell)
        # add all cells to the notebook
        self.notebook['cells'] = content
        # generate the jupyter notebook and store in output
        fname = output + NotebookObject.Name + '.ipynb'
        with open(fname, 'w') as f:
            nbf.write(self.notebook, f)

    def generateSTEAM_notebook(self, SteamObject, output):
        """
        Function to generate a jupyter notebook STEAM lesson

        :param object SteamObject: SteamObject class
        :param string output: Path to the output location
        """
        self.notebook = nbf.v4.new_notebook()
        content = []
        # fixed lead sections: introduction (h1), supported devices (h2),
        # related modules (h2)
        content.append(nbf.v4.new_markdown_cell(
            '# ' + SteamObject.Introduction.Title + '\n' + SteamObject.Introduction.Text))
        content.append(nbf.v4.new_markdown_cell(
            '## ' + SteamObject.SupportedDevices.Title + '\n' + SteamObject.SupportedDevices.Text))
        content.append(nbf.v4.new_markdown_cell(
            '## ' + SteamObject.RelatedModules.Title + '\n' + SteamObject.RelatedModules.Text))
        # other content, formatted per-cell
        for cell in SteamObject.Content:
            self._append_cell(content, cell)
        # add all cells to the notebook
        self.notebook['cells'] = content
        # generate the jupyter notebook and store in output
        fname = output + SteamObject.Name + '.ipynb'
        with open(fname, 'w') as f:
            nbf.write(self.notebook, f)

    def generateFirmwareTemplate_python(self, FirmwareObject, output):
        """Generate the device-specific part of a python firmware prototype.

        :param object FirmwareObject: object passed to the jinja template
        :param string output: folder in which 'Generated/' is created
        """
        # create the output folder
        srcgen_folder = join(output, 'Generated')
        if not exists(srcgen_folder):
            mkdir(srcgen_folder)
        # initialize the template engine
        # NOTE(review): hard-coded, machine-specific template root; should be
        # made configurable or derived from __file__
        jinja_env = jinja2.Environment(
            loader=jinja2.FileSystemLoader('/Users/bertvanacker/Documents/BertVanAcker/03_RemoteRepositories/steam-jack/steam_jack/'),
            trim_blocks=True,
            lstrip_blocks=True)
        # load the firmware template
        template = jinja_env.get_template('Documentor/templates/Firmware_Python_DeviceFunctions.template')
        # generate device-specific part of python firmware prototype
        with open((join(srcgen_folder, 'DeviceSpecific.py')), 'w') as f:
            f.write(template.render(object=FirmwareObject))

    def generateDocx(self, docList, output):
        """Generate a .docx document from a list of document objects.

        :param list docList: documents whose Content elements are rendered
        :param string output: path of the .docx file to write
        """
        # start a new docx document
        document = Document()
        for doc in docList:
            for docElement in doc.Content:
                kind = str(type(docElement))
                if 'Header' in kind:
                    document.add_heading(docElement.Text, docElement.Level)
                if 'Paragraph' in kind:
                    document.add_paragraph(docElement.Text)
                if 'Image' in kind:
                    document.add_picture(docElement.ImagePath, width=Mm(int(docElement.Width)), height=Mm(int(docElement.Height)))
                if 'ListSection' in kind:
                    if docElement.Bullet == "-":
                        for listItem in docElement.ListElements:
                            document.add_paragraph(listItem, style='List Bullet')
                    elif docElement.Bullet == "1.":
                        # BUG FIX: the original emitted a single (stale or
                        # undefined) `listItem` instead of iterating the list
                        for listItem in docElement.ListElements:
                            document.add_paragraph(listItem, style='List Number')
                if 'CodeSection' in kind:
                    document.add_paragraph(docElement.Code)
        document.save(output)

    def generateMarkdown(self, docList, output):
        """Generate a markdown document from a list of document objects.

        :param list docList: documents whose Content elements are rendered
        :param string output: path of the .md file to write
        """
        # start a new md document
        document = python_markdown_maker.Document()
        content = []
        for doc in docList:
            for docElement in doc.Content:
                kind = str(type(docElement))
                if 'Header' in kind:
                    content.append(python_markdown_maker.headers(docElement.Text, level=docElement.Level))
                    content.append('\n')
                if 'Paragraph' in kind:
                    content.append(docElement.Text)
                    content.append('\n')
                if 'Image' in kind:
                    # RESOLVE github asset location from the local path
                    githubPath = "https://github.com/BertVanAcker/steam-jack/blob/main/Resources"
                    path = docElement.ImagePath.split("Resources")
                    remotePath = githubPath + path[1]
                    content.append('\n<img src="' + remotePath + '?raw=True"/>\n')  # TODO: no width and height setting now, check if needed!
                    content.append('\n')
                if 'ListSection' in kind:
                    if docElement.Bullet == "-":
                        content.append(python_markdown_maker.lists(docElement.ListElements))
                        content.append('\n')
                    elif docElement.Bullet == "1.":
                        # BUG FIX: build the ordered list first; the original
                        # appended list.append()'s None return values instead
                        listContent = ['order'] + list(docElement.ListElements)
                        content.append(python_markdown_maker.lists(listContent))
                        content.append('\n')
                if 'CodeSection' in kind:
                    content.append(python_markdown_maker.code_block(docElement.Code, lang=docElement.Formalism))
                    content.append('\n')
        # defensively drop any None entries before writing
        content_cleaned = [entry for entry in content if entry is not None]
        document.write(content_cleaned)
        with open(output, 'w') as file:
            document.render.save_as_md(file)
|
from rest_framework import serializers
#from .models import Account
from django.contrib.auth.models import User
from django.contrib.auth import get_user_model
# class UserSerializer(serializers.ModelSerializer):
# def create(self, *args, **kwargs):
# user = super().create(*args, **kwargs)
# p = user.password
# user.set_password(p)
# user.save()
# return user
# def update(self, *args, **kwargs):
# user = super().update(*args, **kwargs)
# p = user.password
# user.set_password(p)
# user.save()
# return user
# class Meta:
# model = get_user_model()
# This is the RegistrationSerializer that will be used in api.py
# It extends the ModelSerializer class from django rest framework.
# Serializer converts sql data to json data facilitating the use in web apps
# It takes data from the User table in the database and parses the fields mentioned in 'fields' Any extra fields required can be added there itself.
class RegistrationSerializer(serializers.ModelSerializer):
    """Serializer for creating and updating users.

    Passwords are always routed through set_password() so they are stored
    hashed, and are marked write-only so the hash is never serialized back
    out in API responses.
    """
    class Meta:
        model = User
        fields = ['username','first_name','last_name','password','email','is_staff']
        # SECURITY FIX: without write_only the (hashed) password was included
        # in serialized output
        extra_kwargs = {'password': {'write_only': True}}

    #These are overriden methods given by django itself. They are used so that when new user is added, his password is saved after encrypting.
    #One can add methods to directly add users to the User model. But passwords will not be crypted.
    def create(self, validated_data):
        """Create a user, hashing the password via set_password()."""
        password = validated_data.pop('password', None)
        instance = self.Meta.model(**validated_data)
        if password is not None:
            instance.set_password(password)
        instance.save()
        return instance

    #Password changes are taken care of
    def update(self, instance, validated_data):
        """Update a user; password changes go through set_password()."""
        for attr, value in validated_data.items():
            if attr == 'password':
                instance.set_password(value)
            else:
                setattr(instance, attr, value)
        instance.save()
        return instance
#This can be used to log in to one's account. This uses username and password to login. It can be changed to email and password or anything else too if required.
class LoginSerializer(serializers.ModelSerializer):
    """Serializer for login payloads (username + password)."""
    class Meta:
        model = User
        fields = ['username','password']
        # SECURITY FIX: the password is input-only; never echo it back
        extra_kwargs = {'password': {'write_only': True}}
"""DSM Upgrade data and actions."""
class SynoCoreUpgrade:
    """Class containing upgrade data and actions."""

    API_KEY = "SYNO.Core.Upgrade"
    API_SERVER_KEY = API_KEY + ".Server"

    def __init__(self, dsm):
        """Constructor method."""
        self._dsm = dsm
        self._data = {}

    def update(self):
        """Updates Upgrade data."""
        raw_data = self._dsm.get(self.API_SERVER_KEY, "check")
        if not raw_data:
            return
        payload = raw_data["data"]
        # some responses nest the info under "update"; fall back to the
        # whole payload otherwise
        self._data = payload.get("update", payload)

    @property
    def update_available(self):
        """Gets available update info."""
        return self._data.get("available")

    @property
    def available_version(self):
        """Gets available version info."""
        return self._data.get("version")

    @property
    def available_version_details(self):
        """Gets details about the available version."""
        return self._data.get("version_details")

    @property
    def reboot_needed(self):
        """Gets info if reboot is needed."""
        return self._data.get("reboot")

    @property
    def service_restarts(self):
        """Gets info if services are restarted."""
        return self._data.get("restart")
import os
import shutil
import time
import click
from flask import Flask, current_app
from flask.cli import with_appcontext
from flask_sqlalchemy import SQLAlchemy
import flask_uploads
from flask_uploads import UploadSet, configure_uploads
import flask_login
from flask_apscheduler import APScheduler
from flask_bootstrap import Bootstrap
from flask_limiter import Limiter
from flask_limiter.util import get_remote_address
from flask_alembic import Alembic
from AM_Nihoul_website import settings
from AM_Nihoul_website.base_filters import filters
# init modules (shared extension singletons, bound to the app in create_app)
# NOTE: expire_on_commit=False keeps ORM objects usable after commit
db = SQLAlchemy(session_options={'expire_on_commit': False})
uploads_set = UploadSet('uploads', flask_uploads.DEFAULTS)  # file-upload handling
login_manager = flask_login.LoginManager()
scheduler = APScheduler()  # background jobs (bot)
bootstrap = Bootstrap()
alembic = Alembic()  # database migrations
limiter = Limiter(key_func=get_remote_address)  # rate limiting keyed by client IP
class User(flask_login.UserMixin):
    """Minimal flask-login user: the only account is the admin, identified
    solely by its login name (see load_user below)."""

    def __init__(self, id_):
        super().__init__()
        self.id = id_  # `id` is the attribute flask-login's UserMixin relies on
@login_manager.user_loader
def load_user(login):
    """Resolve a session user id; only the configured admin login exists."""
    if login == settings.APP_CONFIG['USERNAME']:
        return User(login)
    return None
@click.command('init')
@with_appcontext
def init_command():
    """Initializes stuffs:

    + directories
    + database
    + bootstrap data
    """
    # directories
    data_dir = settings.DATA_DIRECTORY
    upload_dir = current_app.config['UPLOADED_UPLOADS_DEST']
    # WARNING: destructive — any existing data directory is wiped first
    if os.path.exists(data_dir):
        shutil.rmtree(data_dir)
    os.mkdir(data_dir)
    print('!! Data directory in {}'.format(data_dir))
    # WARNING: destructive — any existing upload directory is wiped first
    if os.path.exists(upload_dir):
        shutil.rmtree(upload_dir)
    os.mkdir(upload_dir)
    print('!! Upload directory in {}'.format(upload_dir))
    # DB:
    db.create_all()
    print('!! database created')
    # bootstrap (local import — presumably avoids a circular import; TODO confirm)
    from AM_Nihoul_website.app_bootstrap import bootstrap
    bootstrap()
    alembic.stamp()  # stamp the version of the database as being the latest version (with all the migrations)
@click.command('bot')
@with_appcontext
def bot_command():
    """Run the bot loop forever: one iteration, then sleep for the interval
    of the first configured job."""
    from AM_Nihoul_website import bot  # local import — presumably avoids a circular import; TODO confirm
    while True:
        bot.bot_iteration()
        time.sleep(settings.APP_CONFIG['JOBS'][0]['seconds'])
def create_app():
    """Application factory: build the Flask app, bind all extensions,
    register CLI commands, blueprints and jinja filters, and optionally
    start the bot scheduler."""
    app = Flask(__name__)
    app.config.update(settings.APP_CONFIG)
    # bind the module-level extension singletons to this app
    db.init_app(app)
    db.app = app
    configure_uploads(app, (uploads_set, ))
    login_manager.init_app(app)
    login_manager.login_view = 'admin.login'  # automatic redirection
    bootstrap.init_app(app)
    alembic.init_app(app, db)
    limiter.init_app(app)
    # add cli
    app.cli.add_command(init_command)
    app.cli.add_command(bot_command)
    # add blueprint(s) — imported here, presumably to avoid circular imports
    from AM_Nihoul_website.visitor.views import visitor_blueprint
    app.register_blueprint(visitor_blueprint)
    from AM_Nihoul_website.admin.views import admin_blueprint
    app.register_blueprint(admin_blueprint)
    # add filters
    app.jinja_env.filters.update(**filters)
    # launch bot, if any
    if settings.APP_CONFIG['LAUNCH_BOT']:
        scheduler.init_app(app)
        scheduler.start()
    return app
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#------------------------------------------------------
# @ File : cfgs.py
# @ Description:
# @ Author : Alex Chung
# @ Contact : yonganzhong@outlook.com
# @ License : Copyright (c) 2017-2018
# @ Time : 2020/9/7 下午2:45
# @ Software : PyCharm
#-------------------------------------------------------
from __future__ import division, print_function, absolute_import
import os
import tensorflow as tf
# ------------------------------------------------
# VERSION = 'FPN_Res101_20181201'
VERSION = 'LSTM_IMDB_20200908'
NET_NAME = 'lstm_imdb'
# ---------------------------------------- System_config
# project root: three directory levels above this file
ROOT_PATH = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
print (20*"++--")
print (ROOT_PATH)
GPU_GROUP = "4"  # GPU device id(s) to use
SHOW_TRAIN_INFO_INTE = 10  # iterations between console log lines
SMRY_ITER = 100  # iterations between summary writes
SAVE_WEIGHTS_INTE = 10000  # iterations between checkpoint saves
# output locations (all under ROOT_PATH/outputs)
SUMMARY_PATH = ROOT_PATH + '/outputs/summary'
INFERENCE_SAVE_PATH = ROOT_PATH + '/outputs/inference_results'
TEST_SAVE_PATH = ROOT_PATH + '/outputs/test_results'
INFERENCE_IMAGE_PATH = ROOT_PATH + '/outputs/inference_image'
# INFERENCE_SAVE_PATH = ROOT_PATH + '/tools/inference_results'
TRAINED_CKPT = os.path.join(ROOT_PATH, 'outputs/trained_weights')
EVALUATE_DIR = ROOT_PATH + '/outputs/evaluate_result'
WORD_INDEX = ROOT_PATH + '/outputs/word_index.json'
#------------------------network config--------------------------------
BATCH_SIZE = 32
MAX_LENGTH = 500  # the number in a single time dimension of a single sequence of input data
FEATURE_SIZE = 10000  # vocabulary size
EMBEDDING_SIZE = 100
# NUM_UNITS = [128, 64, 32]
NUM_UNITS = [32, 16]  # hidden units per LSTM layer
NUM_LAYERS = 2
#-------------------------train config-------------------------------
EMBEDDING_TRANSFER = False  # whether to load pre-trained embeddings
LEARNING_RATE = 0.01
NUM_EPOCH = 10
KEEP_PROB = 0.8  # dropout keep probability
# data
SPLIT_RATIO = 0.2  # validation split fraction
|
# user_model.py
# Library for data visualization
# Author : Dr.-Ing. The Anh Vuong
# + your Name
# Model Template
# yc(x) = function(y(x), reprod, Incubation Period, recovery rate)
# x = date time
# implement
# yc[] = my_model_x(y[],faktor,Tau, gesund)
# faktor = R -Factor Fix or realtiv
# Tau = Incubation period
# gesund = recovery rate
import math
import numpy as np
def my_model_1(ys,y, faktor, Tau, gesund) :
    """Fill *ys* in place with the scaled running sum of *y*.

    After the call, ys[k] == faktor * (y[0] + y[1] + ... + y[k]).
    *Tau* (incubation period) and *gesund* (recovery rate) belong to the
    model-template signature but are not used by this particular model.

    Returns an empty dict, per the template convention.
    """
    ys[0] = 0  # default when y is empty; overwritten on the first iteration otherwise
    running_total = 0
    for index, sample in enumerate(y):
        running_total += sample * faktor
        ys[index] = running_total
    return {}
|
# -*- coding: utf-8 -*-
from django.contrib import admin
from django.urls import path, include
from .views import (
Go_theme_Page,
Theme1_View,
PubTopic_View,
Index_View,
default_index,
Topic_Content_View,
Topic_Content_View1,
Go_Page,
Go_Comment_Page,
Theme2_View,
delete_topic,
search1,
search2,
Go_Search_Page,
topicmodify,
modifypage,
)
app_name = "topic"  # URL namespace: reverse with "topic:<name>"
# Routes for the topic app. Django matches top to bottom, so order matters.
urlpatterns = [
    path("", default_index, name="index"),
    path("create/<str:username>/", PubTopic_View.as_view(), name="create_topic"),
    path("page/<int:page_id>/", Index_View.as_view(), name="page"),
    path(
        "content/<int:content_id>/", Topic_Content_View.as_view(), name="topic_content"
    ),
    path(
        "content/<int:content_id>/<int:page_id>",
        Topic_Content_View1.as_view(),
        name="topic_content1",
    ),
    path("theme/<int:theme_id>/", Theme1_View.as_view(), name="theme1"),
    # NOTE(review): this route reuses name="theme1" (also used just above), so
    # reverse("topic:theme1") resolves to this later entry. Probably intended
    # name="theme2" — confirm templates before renaming.
    path("theme/<int:theme_id>/<int:page_id>/", Theme2_View.as_view(), name="theme1"),
    path("page/go/", Go_Page, name="go"),
    path("theme/go/<int:theme_id>/", Go_theme_Page, name="theme_change"),
    # path('info/go/<str:username1>/',Go_info_page, name='info_Pagego'),
    path("search/<int:page_id>/", search1, name="search1"),
    path("search/<str:keywords>/<int:page_id>/", search2, name="search2"),
    path(
        "delete/<str:title1>/<str:name>/", delete_topic.as_view(), name="delete_topic"
    ),
    path("search/go/", Go_Search_Page, name="go_search_page"),
    path("modifypage/<int:content_id>/", modifypage.as_view(), name="modifypage"),
    path("topicmodify/<int:content_id>/", topicmodify.as_view(), name="topicmodify"),
    path("comment/go/<int:content_id>", Go_Comment_Page.as_view(), name="go_comment"),
]
|
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from shared import config
# Shared SQLAlchemy bootstrap objects for the application.
engine = create_engine(config.get_engine_uri())  # connection URI comes from shared config
Session = sessionmaker(bind=engine)  # session factory bound to the engine
Base = declarative_base()  # declarative base class all ORM models inherit from
|
from .nested import * |
import ezdxf
import numpy as np
import matplotlib.pyplot as plt
def inch_to_mm(val):
    """Convert a length from inches to millimeters (1 in = 25.4 mm)."""
    return val * 25.4
def mm_to_inch(val):
    """Convert a length from millimeters to inches (25.4 mm = 1 in)."""
    inches = val / 25.4
    return inches
def get_sawtooth_point_list(param):
    """Return the sawtooth arena outline as a list of (x, y) tuples.

    Points alternate between an inner and an outer radius so that
    consecutive line segments form teeth around the full circle.

    :param param: dict with keys 'arena_radius' (nominal radius),
        'sawtooth_depth' (radial tooth height) and 'sawtooth_width'
        (chord length of one tooth on the inner radius).
    :return: list of (x, y) point tuples; first and last points coincide
        (angle 0 and 2*pi on the inner radius).
    """
    inner_radius = param['arena_radius'] - 0.5*param['sawtooth_depth']
    outer_radius = param['arena_radius'] + 0.5*param['sawtooth_depth']
    # Compute angle between points on inner radius (law of cosines on the chord)
    angle_rad = np.arccos(1.0 - (param['sawtooth_width']**2)/(2.0*inner_radius**2))
    angle_deg = np.rad2deg(angle_rad)
    # Modify slightly so that it evenly divides circle
    num_angle = int(360/angle_deg)
    angle_deg = 360/num_angle
    # BUG FIX: converting the adjusted angle back to radians requires
    # np.deg2rad; the original called np.rad2deg here (the value was unused,
    # so output is unchanged, but the stored angle was wrong).
    angle_rad = np.deg2rad(angle_deg)
    # Create list of points for sawtooth (alternating inner and outer radii)
    angle_array = np.linspace(0, 2.0*np.pi, 2*num_angle+1, endpoint=True)
    pt_list = []
    for i, angle in enumerate(angle_array):
        # Even indices sit on the inner radius, odd indices on the outer.
        radius = inner_radius if i % 2 == 0 else outer_radius
        pt_list.append((radius*np.cos(angle), radius*np.sin(angle)))
    return pt_list
def create_sawtooth_arena(filename, param, display=False):
    """Write a single sawtooth arena outline to a DXF file.

    :param filename: output DXF path passed to doc.saveas.
    :param param: sawtooth parameters (see get_sawtooth_point_list).
    :param display: when True, preview the outline with matplotlib first.
    """
    doc = ezdxf.new('R2010')
    doc.units = ezdxf.units.IN
    msp = doc.modelspace()
    doc.layers.new(name='sawtooth', dxfattribs={'linetype': 'SOLID', 'color': 7})
    pt_list = get_sawtooth_point_list(param)
    if display:
        xs = [pt[0] for pt in pt_list]
        ys = [pt[1] for pt in pt_list]
        plt.plot(xs, ys)
        plt.axis('equal')
        plt.show()
    # Emit one LINE entity per consecutive pair of outline points.
    for start, end in zip(pt_list, pt_list[1:]):
        msp.add_line(start, end, dxfattribs={'layer': 'sawtooth'})
    doc.saveas(filename)
def create_sawtooth_test_array(filename, param_list, margin=0.5, display=False):
    """Write several sawtooth arenas side by side into one DXF file.

    Arenas are laid out along the x axis; neighbouring centers are
    separated by the sum of their radii plus *margin*.

    :param filename: output DXF path passed to doc.saveas.
    :param param_list: list of parameter dicts (see get_sawtooth_point_list).
    :param margin: gap between neighbouring arena edges.
    :param display: when True, preview all outlines with matplotlib first.
    """
    doc = ezdxf.new('R2010')
    doc.units = ezdxf.units.IN
    msp = doc.modelspace()
    doc.layers.new(name='sawtooth', dxfattribs={'linetype': 'SOLID', 'color': 7})
    # x offset of each arena center: previous offset + both radii + margin.
    offset_list = [0]
    for prev_param, curr_param in zip(param_list, param_list[1:]):
        step = prev_param['arena_radius'] + curr_param['arena_radius'] + margin
        offset_list.append(offset_list[-1] + step)
    for param, offset in zip(param_list, offset_list):
        pt_list = [(x + offset, y) for (x, y) in get_sawtooth_point_list(param)]
        if display:
            xs = [pt[0] for pt in pt_list]
            ys = [pt[1] for pt in pt_list]
            plt.plot(xs, ys)
            plt.axis('equal')
        for start, end in zip(pt_list, pt_list[1:]):
            msp.add_line(start, end, dxfattribs={'layer': 'sawtooth'})
    if display:
        plt.show()
    doc.saveas(filename)
# -----------------------------------------------------------------------------
# Script section. The `if 1:` / `if 0:` literals are manual debug toggles used
# to pick which output to generate when this module is run directly.
if 1:
    # Single 6-inch-diameter arena with 3 mm deep / 1.5 mm wide teeth,
    # previewed in matplotlib before saving.
    param = {
        'arena_radius' : 3.0,
        'sawtooth_depth' : mm_to_inch(3.0),
        'sawtooth_width' : mm_to_inch(1.5),
    }
    create_sawtooth_arena('sawtooth.dxf', param, display=True)
if 0:
    # Disabled: a row of three small test arenas with increasing tooth size.
    param_list = [
        {
            'arena_radius' : 0.5,
            'sawtooth_depth' : mm_to_inch(1.0),
            'sawtooth_width' : mm_to_inch(0.5),
        },
        {
            'arena_radius' : 0.5,
            'sawtooth_depth' : mm_to_inch(2.0),
            'sawtooth_width' : mm_to_inch(1.0),
        },
        {
            'arena_radius' : 0.5,
            'sawtooth_depth' : mm_to_inch(3.0),
            'sawtooth_width' : mm_to_inch(1.5),
        },
    ]
    create_sawtooth_test_array('sawtooth_array.dxf', param_list, margin=0.25, display=True)
|
# -*- coding: utf-8 -*-
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from nose.tools import eq_, ok_
from mozillians.common.tests import TestCase
from mozillians.groups.models import Group, GroupAlias, GroupMembership
from mozillians.groups.tests import (GroupAliasFactory, GroupFactory,
SkillFactory)
from mozillians.users.tests import UserFactory
class GroupBaseTests(TestCase):
    """Tests for behavior shared by all GroupBase subclasses (Group, Skill):
    name normalization, aliases, merging, membership and join/leave rules.
    """
    def test_groups_are_saved_lowercase(self):
        group = GroupFactory.create(name='FooBAR')
        eq_(group.name, 'foobar')
    def test_group_has_alias(self):
        group = GroupFactory.create()
        ok_(GroupAlias.objects.get(alias=group))
    def test_group_has_url(self):
        group = GroupFactory.create()
        ok_(group.url)
    def test_merge_groups(self):
        # Merging must absorb aliases transitively and delete merged groups.
        master_group = GroupFactory.create()
        merge_group_1 = GroupFactory.create()
        merge_group_2 = GroupFactory.create()
        nested_group = GroupFactory.create()
        merge_group_1.merge_groups([nested_group])
        master_group.merge_groups([merge_group_1, merge_group_2])
        # 4 aliases: master's own plus one per absorbed group.
        eq_(master_group.aliases.count(), 4)
        for group in [merge_group_1, merge_group_2, nested_group]:
            ok_(master_group.aliases.filter(name=group.name,
                                            url=group.url).exists())
            ok_(not Group.objects.filter(pk=group.pk).exists())
    def test_merge_group_members(self):
        # Test merging groups that have members
        master_group = GroupFactory.create()
        merge_group_1 = GroupFactory.create()
        merge_group_2 = GroupFactory.create()
        user1 = UserFactory.create()
        user2 = UserFactory.create()
        user3 = UserFactory.create()
        user4 = UserFactory.create()
        user5 = UserFactory.create()
        master_group.add_member(user1.userprofile, GroupMembership.MEMBER)
        master_group.add_member(user2.userprofile, GroupMembership.PENDING)
        master_group.add_member(user5.userprofile, GroupMembership.PENDING)
        merge_group_1.add_member(user1.userprofile, GroupMembership.PENDING)
        merge_group_1.add_member(user2.userprofile, GroupMembership.MEMBER)
        merge_group_2.add_member(user2.userprofile, GroupMembership.PENDING)
        merge_group_2.add_member(user3.userprofile, GroupMembership.PENDING)
        merge_group_2.add_member(user4.userprofile, GroupMembership.MEMBER)
        merge_group_2.add_member(user5.userprofile, GroupMembership.PENDING)
        master_group.merge_groups([merge_group_1, merge_group_2])
        # user1 should not have been demoted in master group
        ok_(master_group.has_member(user1.userprofile))
        # user2 gets promoted because they were full member of merged group
        ok_(master_group.has_member(user2.userprofile))
        # user3 was not in master, pending in merged group, so only get to be pending in result
        ok_(master_group.has_pending_member(user3.userprofile))
        # user4 was a full member of the merged group, not in master, now full member of master
        ok_(master_group.has_member(user4.userprofile))
        # user5 pending in both, and is still pending
        ok_(master_group.has_pending_member(user5.userprofile))
    def test_search(self):
        # Search matches substrings, but only on visible groups.
        group = GroupFactory.create(visible=True)
        GroupFactory.create(visible=False)
        eq_(set(Group.search(group.name)), set([group]))
        # NOTE(review): 'roup'.format(group.name) is a no-op — the literal has
        # no placeholder, so this searches the fixed substring 'roup'. Probably
        # meant '{}'.format(...) or a slice of group.name; confirm intent.
        eq_(set(Group.search('roup'.format(group.name))), set([group]))
    def test_search_case_insensitive(self):
        group = GroupFactory.create(visible=True)
        query = 'GROUP'
        eq_(set(Group.search(query)), set([group]))
    def test_search_no_query(self):
        eq_(len(Group.search('invalid')), 0)
    def test_search_matches_alias(self):
        group_1 = GroupFactory.create(name='lalo', visible=True)
        GroupAliasFactory.create(alias=group_1, name='foo')
        eq_(set(Group.search('foo')), set([group_1]))
    def test_search_distict_results(self):
        # Multiple matching aliases must still yield the group only once.
        group_1 = GroupFactory.create(name='automation', visible=True)
        GroupAliasFactory.create(alias=group_1, name='automation development')
        GroupAliasFactory.create(alias=group_1, name='automation services')
        results = Group.search('automation')
        eq_(len(results), 1)
        eq_(results[0], group_1)
    def test_add_member(self):
        skill = SkillFactory.create()
        user = UserFactory.create()
        ok_(user.userprofile not in skill.members.all())
        skill.add_member(userprofile=user.userprofile)
        ok_(user.userprofile in skill.members.all())
        ok_(skill.has_member(userprofile=user.userprofile))
    def test_remove_member(self):
        skill = SkillFactory.create()
        user = UserFactory.create()
        skill.members.add(user.userprofile)
        skill.remove_member(userprofile=user.userprofile)
        ok_(not skill.has_member(userprofile=user.userprofile))
        ok_(user.userprofile not in skill.members.all())
    def test_has_member(self):
        skill = SkillFactory.create()
        user = UserFactory.create()
        ok_(not skill.has_member(userprofile=user.userprofile))
        skill.members.add(user.userprofile)
        ok_(skill.has_member(userprofile=user.userprofile))
    def test_can_join(self):
        group = GroupFactory.create(accepting_new_members='yes')
        user = UserFactory.create()
        ok_(group.user_can_join(user.userprofile))
    def test_can_join_by_request(self):
        group = GroupFactory.create(accepting_new_members='by_request')
        user = UserFactory.create()
        ok_(group.user_can_join(user.userprofile))
    def test_unvouched_cant_join(self):
        group = GroupFactory.create(accepting_new_members='yes')
        user = UserFactory.create(vouched=False)
        ok_(not group.user_can_join(user.userprofile))
    def test_member_cant_join(self):
        group = GroupFactory.create(accepting_new_members='yes')
        user = UserFactory.create()
        group.add_member(user.userprofile)
        ok_(not group.user_can_join(user.userprofile))
    def test_pending_cant_join(self):
        group = GroupFactory.create(accepting_new_members='yes')
        user = UserFactory.create()
        group.add_member(user.userprofile, GroupMembership.PENDING)
        ok_(not group.user_can_join(user.userprofile))
    def test_cant_join_antisocial_group(self):
        group = GroupFactory.create(accepting_new_members='no')
        user = UserFactory.create()
        ok_(not group.user_can_join(user.userprofile))
    def test_member_can_leave(self):
        group = GroupFactory.create(members_can_leave=True)
        user = UserFactory.create()
        group.add_member(user.userprofile)
        ok_(group.user_can_leave(user.userprofile))
    def test_pending_can_leave(self):
        group = GroupFactory.create(members_can_leave=True)
        user = UserFactory.create()
        group.add_member(user.userprofile, GroupMembership.PENDING)
        ok_(group.user_can_leave(user.userprofile))
    def test_curator_cant_leave(self):
        # Curators may not abandon their own group.
        group = GroupFactory.create(members_can_leave=True)
        user = UserFactory.create()
        group.curator = user.userprofile
        group.save()
        group.add_member(user.userprofile)
        ok_(not group.user_can_leave(user.userprofile))
    def test_nonmember_cant_leave(self):
        group = GroupFactory.create(members_can_leave=True)
        user = UserFactory.create()
        ok_(not group.user_can_leave(user.userprofile))
    def test_cant_leave_unleavable_group(self):
        group = GroupFactory.create(members_can_leave=False)
        user = UserFactory.create()
        group.add_member(user.userprofile)
        ok_(not group.user_can_leave(user.userprofile))
    def test_unique_name(self):
        # A group name colliding with another group's alias must not validate.
        group = GroupFactory.create()
        GroupAliasFactory.create(alias=group, name='bar')
        group_2 = GroupFactory.build(name='bar')
        self.assertRaises(ValidationError, group_2.clean)
class GroupTests(TestCase):
    """Tests specific to the Group model: visibility, functional areas,
    curator deletion, and membership add/remove semantics.
    """
    def test_visible(self):
        group_1 = GroupFactory.create(visible=True)
        GroupFactory.create(visible=False)
        eq_(set(Group.objects.visible()), set([group_1]))
    def test_get_non_functional_areas(self):
        UserFactory.create()
        UserFactory.create()
        GroupFactory.create(functional_area=True)
        cgroup_2 = GroupFactory.create(functional_area=False)
        eq_(set(Group.get_non_functional_areas()), set([cgroup_2]))
    def test_get_functional_areas(self):
        GroupFactory.create()
        GroupFactory.create()
        UserFactory.create()
        UserFactory.create()
        cgroup_1 = GroupFactory.create(functional_area=True)
        GroupFactory.create(functional_area=False)
        eq_(set(Group.get_functional_areas()), set([cgroup_1]))
    def test_deleted_curator_sets_null(self):
        # Deleting a curator user must null out the FK, not delete the group.
        user = UserFactory.create()
        group = GroupFactory.create(curator=user.userprofile)
        user.delete()
        group = Group.objects.get(id=group.id)
        eq_(group.curator, None)
    def test_remove_member(self):
        user = UserFactory.create()
        group = GroupFactory.create()
        GroupMembership.objects.create(userprofile=user.userprofile, group=group,
                                       status=GroupMembership.MEMBER)
        ok_(group.has_member(user.userprofile))
        group.remove_member(user.userprofile)
        ok_(not GroupMembership.objects.filter(userprofile=user.userprofile, group=group).exists())
        ok_(not group.has_member(user.userprofile))
    def test_add_member(self):
        user = UserFactory.create()
        group = GroupFactory.create()
        ok_(not group.has_member(user.userprofile))
        group.add_member(user.userprofile)
        ok_(GroupMembership.objects.filter(userprofile=user.userprofile, group=group,
                                           status=GroupMembership.MEMBER).exists())
        ok_(group.has_member(user.userprofile))
        group.add_member(user.userprofile, status=GroupMembership.PENDING)
        # never demotes anyone
        ok_(GroupMembership.objects.filter(userprofile=user.userprofile, group=group,
                                           status=GroupMembership.MEMBER).exists())
        ok_(group.has_member(user.userprofile))
    def test_has_member(self):
        user = UserFactory.create()
        group = GroupFactory.create()
        ok_(not group.has_member(user.userprofile))
        GroupMembership.objects.create(userprofile=user.userprofile, group=group,
                                       status=GroupMembership.MEMBER)
        ok_(group.has_member(user.userprofile))
        group.remove_member(user.userprofile)
        ok_(not group.has_member(user.userprofile))
class GroupAliasBaseTests(TestCase):
    """Tests for GroupAlias slug generation: uniqueness, URL-safety, and
    unicode transliteration.
    """
    def test_auto_slug_field(self):
        group = GroupFactory.create()
        group_alias = group.aliases.all()[0]
        ok_(group_alias.url)
    def test_slug_uniqueness(self):
        # Names that slugify identically must still get distinct slugs.
        group_1 = GroupFactory.create(name='foo-1')
        group_2 = GroupFactory.create(name='foo 1')
        ok_(group_1.url != group_2.url)
    def test_auto_slug_field_urlness(self):
        # The auto slug field comes up with a string that our group URLs will match
        group = GroupFactory.create(name=u'A (ñâme)-with_"s0me" \'screwy\' chars')
        reverse('groups:show_group', args=[group.url])
    def test_auto_slug_field_unicode(self):
        # The auto slug field dumbs down unicode into ASCII rather than just
        # throwing it away
        group = GroupFactory.create(name=u'A (ñâme)-with_ελλάδα "s0me" \'screwy\' chars')
        eq_(u'a-name-with_ellada-s0me-screwy-chars', group.url)
|
from collections import deque
from hashlib import md5
# Advent of Code 2016, day 17 ("Two Steps Forward"): path-finding through a
# 4x4 vault grid whose open doors depend on md5(passcode + path-so-far).
normal_puzzle_input = 'pslxynzg'
# Worked examples from the puzzle statement, with their known shortest paths.
test_case1 = 'ihgpwlah'
test_case2 = 'kglvqrro'
test_case3 = 'ulqzkmiv'
test_case1_answer = 'DDRRRD'
test_case2_answer = 'DDUDRLRRUDRD'
test_case3_answer = 'DRURDRUDDLLDLUURRDULRLDUUDDDRR'
def next_moves(current):
    """Yield the successor search states reachable from *current*.

    *current* is a tuple (puzzle_input, path, position, parent, steps).
    A door is open when the corresponding hex digit of
    md5(puzzle_input + path) is one of 'b'..'f' (digit order: U, D, L, R);
    moves that would leave the 4x4 grid are discarded.
    """
    puzzle_input, path, position, parent, steps = current
    x, y = position
    # BUG FIX: hashlib requires bytes on Python 3; encode the (ASCII) string.
    # str.encode() also exists on Python 2, so behavior there is unchanged.
    door_hash = md5((puzzle_input + path).encode()).hexdigest()[:4]
    directions = ['U', 'D', 'L', 'R']
    door_state = {}
    for index, direction in enumerate(directions):
        door_state[direction] = door_hash[index] in ['b', 'c', 'd', 'e', 'f']
    movement = {
        'U': (0, -1),
        'D': (0, 1),
        'L': (-1, 0),
        'R': (1, 0)
    }
    for direction in directions:
        if door_state[direction]:
            new_pos = movement[direction][0] + x, movement[direction][1] + y
            # Keep the move only if it stays inside the 4x4 grid.
            if 0 <= new_pos[0] < 4 and 0 <= new_pos[1] < 4:
                yield (puzzle_input, path + direction, new_pos, current, steps + 1)
def normal(text):
    """Breadth-first search over all vault paths for passcode *text*.

    Starts at (0, 0) and collects every state that reaches (3, 3);
    successful paths do not continue, so each result ends at the vault.
    Because the search is BFS, results are ordered shortest-first.
    """
    queue = deque([(text, '', (0, 0), None, 0)])
    found = []
    while queue:
        state = queue.popleft()
        if state[2] == (3, 3):
            found.append(state)
        else:
            queue.extend(next_moves(state))
    return found
# Verify the solver against the three worked examples from the puzzle text.
# BUG FIX (portability): parenthesized print works identically under
# Python 2 (prints the single expression) and Python 3 (function call);
# the original bare `print expr` statements were a SyntaxError on Python 3.
print(normal(test_case1)[0][1] == test_case1_answer)
print(normal(test_case2)[0][1] == test_case2_answer)
print(normal(test_case3)[0][1] == test_case3_answer)
# Part 2 asks for the length of the LONGEST successful path; printing every
# result's length (BFS order) leaves the answer on the last line.
for result in normal(normal_puzzle_input):
    print(len(result[1]))
|
"""
Copyright: MAXON Computer GmbH
Author: Maxime Adam
Description:
- Convert a Polygon Object to a Volume and save this volume to a VDB File
- Save a VDB file.
Class/method highlighted:
- maxon.Vector
- maxon.BaseArray
- maxon.frameworks.volume.VolumeRef
- maxon.frameworks.volume.VolumeConversionPolygon
- maxon.frameworks.volume.VolumeToolsInterface.MeshToVolume()
- maxon.frameworks.volume.VolumeToolsInterface.SaveVDBFile()
Compatible:
- Win / Mac
- R20, R21
"""
import c4d
import maxon
from maxon.frameworks import volume
import os
def polygonToVolume(obj):
    """Convert a Cinema 4D polygon object into a maxon VolumeRef.

    The object's world matrix is applied to its points, so the volume is
    built in world space.

    :param obj: c4d.PolygonObject to convert.
    :return: maxon.frameworks.volume.VolumeRef produced by MeshToVolume.
    :raises TypeError: if *obj* is not a polygon object.
    """
    # Checks if the input obj is a PolygonObject
    if not obj.IsInstanceOf(c4d.Opolygon):
        raise TypeError("obj is not a c4d.Opolygon.")
    # Retrieves the world matrices of the object
    matrix = obj.GetMg()
    # Creates a BaseArray (list) of all points position in world space
    vertices = maxon.BaseArray(maxon.Vector)
    vertices.Resize(obj.GetPointCount())
    for i, pt in enumerate(obj.GetAllPoints()):
        vertices[i] = pt * matrix
    # Sets polygons
    polygons = maxon.BaseArray(maxon.frameworks.volume.VolumeConversionPolygon)
    polygons.Resize(obj.GetPolygonCount())
    for i, poly in enumerate(obj.GetAllPolygons()):
        newpoly = maxon.frameworks.volume.VolumeConversionPolygon()
        newpoly.a = poly.a
        newpoly.b = poly.b
        newpoly.c = poly.c
        if poly.IsTriangle():
            newpoly.SetTriangle()
        else:
            newpoly.d = poly.d
        polygons[i] = newpoly
    # Copy the object matrix component-wise into a maxon.Matrix
    polygonObjectMatrix = maxon.Matrix()
    polygonObjectMatrix.off = obj.GetMg().off
    polygonObjectMatrix.v1 = obj.GetMg().v1
    polygonObjectMatrix.v2 = obj.GetMg().v2
    polygonObjectMatrix.v3 = obj.GetMg().v3
    gridSize = 10  # voxel size passed to MeshToVolume
    bandWidthInterior = 1  # presumably SDF band widths in voxels — confirm against the SDK docs
    bandWidthExterior = 1
    thread = maxon.ThreadRef()
    # Before R21: MeshToVolume has no POLYGONCONVERSIONFLAGS parameter
    if c4d.GetC4DVersion() < 21000:
        volumeRef = maxon.frameworks.volume.VolumeToolsInterface.MeshToVolume(vertices,
                                                                              polygons, polygonObjectMatrix,
                                                                              gridSize,
                                                                              bandWidthInterior, bandWidthExterior,
                                                                              thread, None)
    else:
        volumeRef = maxon.frameworks.volume.VolumeToolsInterface.MeshToVolume(vertices,
                                                                              polygons, polygonObjectMatrix,
                                                                              gridSize,
                                                                              bandWidthInterior, bandWidthExterior,
                                                                              thread,
                                                                              maxon.POLYGONCONVERSIONFLAGS.NONE, None)
    return volumeRef
def main():
    """Convert every selected polygon object to a volume and save them all
    into a single "file.vdb" next to this script.

    NOTE(review): `doc` is presumably the active-document global injected by
    C4D's script environment. The `print "..."` statements and `e.message`
    are Python 2 syntax, consistent with the R20/R21 compatibility note in
    the module docstring — confirm before porting to a Python 3 C4D release.
    """
    # Gets selected objects
    objList = doc.GetActiveObjects(c4d.GETACTIVEOBJECTFLAGS_0)
    if not objList:
        raise RuntimeError("Failed to retrieve selected objects")
    # Creates a maxon.BaseArray with all our obj, we want to convert
    volumesArray = maxon.BaseArray(maxon.frameworks.volume.VolumeRef)
    volumesArray.Resize(len(objList))
    for i, obj in enumerate(objList):
        volumesArray[i] = polygonToVolume(obj)
    try:
        # Generates the final file path to save the vdb
        path = maxon.Url(os.path.join(os.path.dirname(__file__), "file.vdb"))
        scale = 1.0
        metaData = maxon.DataDictionary()
        maxon.frameworks.volume.VolumeToolsInterface.SaveVDBFile(path, scale, volumesArray, metaData)
        print "File saved to ", path
    except Exception as e:
        print "SaveVDBFile error {}, {}".format(e.message, e.args)
if __name__ == '__main__':
main() |
# -*- coding: utf-8 -*-
import docutils
from docutils import nodes
from docutils.parsers import rst
from docutils.parsers.rst import directives, roles, Directive
from pelican import signals
from pelican.readers import RstReader, PelicanHTMLTranslator
class SemanticHTMLTranslator(PelicanHTMLTranslator):
    """HTML translator emitting leaner, more semantic markup than docutils'
    default output: <code>/<kbd> for literals, bare <h*> titles without
    wrapper divs, plain <ol>/<ul>/<dl>, <caption> for table titles, and
    Semantic-UI class names on tables and field lists.
    """
    def visit_literal(self, node):
        """Open <code> (the default) or <kbd> (for the kbd role) and strip
        the marker class so it is not repeated as a class attribute."""
        classes = node.get('classes', node.get('class', []))
        if 'code' in classes:
            node['classes'] = [cls for cls in classes if cls != 'code']
            tag = 'code'
        elif 'kbd' in classes:
            node['classes'] = [cls for cls in classes if cls != 'kbd']
            tag = 'kbd'
        else:
            tag = 'code'
        self.body.append(self.starttag(node, tag))
        # BUG FIX: remember which tag was opened. The original re-inspected
        # node['classes'] in depart_literal, but visit_literal had already
        # stripped the marker class, so <kbd> elements were closed with
        # </code>. Push the matching close tag on the context stack instead
        # (the standard docutils visit/depart idiom).
        self.context.append('</%s>\n' % tag)
    def depart_literal(self, node):
        """Close whichever tag visit_literal opened."""
        self.body.append(self.context.pop())
    def visit_section(self, node):
        # Track nesting depth only; no wrapper element is emitted.
        self.section_level += 1
    def depart_section(self, node):
        self.section_level -= 1
    def visit_title(self, node):
        """Emit the appropriate title markup for the node's parent: <p> for
        topics/sidebars/admonitions, <caption> for tables, <h1> for the
        document title, and <h2>..<h*> for sections."""
        check_id = 0  # NOTE(review): unused; kept from the docutils original
        close_tag = '</p>\n'
        if isinstance(node.parent, nodes.topic):
            self.body.append(
                self.starttag(node, 'p', '', CLASS='topic-title first'))
        elif isinstance(node.parent, nodes.sidebar):
            self.body.append(
                self.starttag(node, 'p', '', CLASS='sidebar-title'))
        elif isinstance(node.parent, nodes.Admonition):
            self.body.append(
                self.starttag(node, 'p', '', CLASS='admonition-title'))
        elif isinstance(node.parent, nodes.table):
            self.body.append(
                self.starttag(node, 'caption', ''))
            close_tag = '</caption>\n'
        elif isinstance(node.parent, nodes.document):
            self.body.append(self.starttag(node, 'h1', '', CLASS='title'))
            close_tag = '</h1>\n'
            self.in_document_title = len(self.body)
        else:
            assert isinstance(node.parent, nodes.section)
            h_level = self.section_level + self.initial_header_level - 1
            # Add id for internal links
            if 'ids' in node.parent:
                node['ids'] = node.parent['ids']
            atts = {}
            if (len(node.parent) >= 2 and
                    isinstance(node.parent[1], nodes.subtitle)):
                atts['CLASS'] = 'with-subtitle'
            self.body.append(
                self.starttag(node, 'h%s' % h_level, '', **atts))
            atts = {}
            if node.hasattr('refid'):
                atts['class'] = 'toc-backref'
                atts['href'] = '#' + node['refid']
            if atts:
                self.body.append(self.starttag({}, 'a', '', **atts))
                close_tag = '</a></h%s>\n' % (h_level)
            else:
                close_tag = '</h%s>\n' % (h_level)
        self.context.append(close_tag)
    def visit_enumerated_list(self, node):
        # Plain <ol>, forwarding only an explicit start value.
        atts = {}
        if 'start' in node:
            atts['start'] = node['start']
        self.body.append(self.starttag(node, 'ol', **atts))
    def depart_enumerated_list(self, node):
        self.body.append('</ol>\n')
    def visit_bullet_list(self, node):
        atts = {}
        self.body.append(self.starttag(node, 'ul', **atts))
    def depart_bullet_list(self, node):
        self.body.append('</ul>\n')
    def visit_definition_list(self, node):
        self.body.append(self.starttag(node, 'dl'))
    def visit_transition(self, node):
        self.body.append(self.starttag(node, 'hr'))
    def visit_table(self, node):
        # Default to the Semantic-UI table style unless the document or the
        # settings already provide classes for this table.
        style = ''
        if not node['classes']:
            if self.settings.table_style:
                style = self.settings.table_style.strip()
            else:
                style = 'ui celled table'
        tag = self.starttag(node, 'table', CLASS=style)
        self.body.append(tag)
    def depart_table(self, node):
        self.body.append('</table>\n')
    def visit_tgroup(self, node):
        # No markup for column groups; just collect stub-column bookkeeping.
        node.stubs = []
    def visit_thead(self, node):
        self.body.append(self.starttag(node, 'thead'))
    def visit_tbody(self, node):
        self.body.append(self.starttag(node, 'tbody'))
    def visit_field_list(self, node):
        """Render a field list as a Semantic-UI definition table; decide up
        front whether its bodies are compact (single paragraph) or not."""
        self.context.append((self.compact_field_list, self.compact_p))
        self.compact_p = None
        if 'compact' in node['classes']:
            self.compact_field_list = True
        elif (self.settings.compact_field_lists
              and 'open' not in node['classes']):
            self.compact_field_list = True
        if self.compact_field_list:
            # Downgrade to non-compact if any field body holds more than one
            # visible paragraph/line block.
            for field in node:
                field_body = field[-1]
                assert isinstance(field_body, nodes.field_body)
                children = [n for n in field_body
                            if not isinstance(n, nodes.Invisible)]
                if not (len(children) == 0 or
                        len(children) == 1 and
                        isinstance(children[0],
                                   (nodes.paragraph, nodes.line_block))):
                    self.compact_field_list = False
                    break
        self.body.append(self.starttag(node, 'table',
                                       CLASS='ui definition table'))
        self.body.append('<tbody>\n')
    def visit_field_name(self, node):
        atts = {}
        if self.in_docinfo:
            atts['class'] = 'docinfo-name'
        # Long field names get their own full-width row; the value cell then
        # starts on a fresh row with an empty spacer cell.
        if (self.settings.field_name_limit
                and len(node.astext()) > self.settings.field_name_limit):
            atts['colspan'] = 2
            self.context.append('</tr>\n'
                                + self.starttag(node.parent, 'tr', '',
                                                CLASS='')
                                + '<td> </td>')
        else:
            self.context.append('')
        self.body.append(self.starttag(node, 'td', '', **atts))
    def visit_field_body(self, node):
        self.body.append(self.starttag(node, 'td', '', CLASS=''))
        self.set_class_on_child(node, 'first', 0)
        field = node.parent
        if (self.compact_field_list or
                isinstance(field.parent, nodes.docinfo) or
                field.parent.index(field) == len(field.parent) - 1):
            # If we are in a compact list, the docinfo, or if this is
            # the last field of the field list, do not add vertical
            # space after last element.
            self.set_class_on_child(node, 'last', -1)
    def visit_field(self, node):
        self.body.append(self.starttag(node, 'tr', '', CLASS=''))
    def visit_footnote(self, node):
        self.body.append(self.starttag(node, 'table',
                                       CLASS='docutils footnote'))
        self.body.append('<tbody>\n'
                         '<tr>')
        self.footnote_backrefs(node)
    def visit_citation(self, node):
        self.body.append(self.starttag(node, 'table',
                                       CLASS='docutils citation'))
        self.body.append('<tbody>\n'
                         '<tr>')
        self.footnote_backrefs(node)
class SemanticRSTReader(RstReader):
    """RstReader variant that installs SemanticHTMLTranslator as the HTML
    writer's translator and applies a few docutils settings overrides.
    """
    def _get_publisher(self, source_path):
        # Defaults mirror RstReader's, plus whatever the user supplies via
        # the DOCUTILS_SETTINGS Pelican setting (user values win).
        extra_params = {'initial_header_level': '2',
                        'syntax_highlight': 'short',
                        'input_encoding': 'utf-8',
                        'exit_status_level': 2,
                        'embed_stylesheet': False}
        user_params = self.settings.get('DOCUTILS_SETTINGS')
        if user_params:
            extra_params.update(user_params)
        # NOTE(review): relies on docutils.core/docutils.io being importable
        # as attributes of the already-imported `docutils` package — pelican
        # imports them elsewhere; confirm if this module is used standalone.
        pub = docutils.core.Publisher(
            source_class=self.FileInput,
            destination_class=docutils.io.StringOutput)
        pub.set_components('standalone', 'restructuredtext', 'html')
        pub.writer.translator_class = SemanticHTMLTranslator
        pub.process_programmatic_settings(None, extra_params, None)
        pub.set_source(source_path=source_path)
        pub.publish(enable_exit_status=True)
        return pub
def keyboard_role(name, rawtext, text, lineno, inliner,
                  options=None, content=None):
    """Inline console-input role: overrides docutils' default ``kbd`` role.

    Produces a literal node tagged with the ``kbd`` class (rendered as
    <kbd>, unhighlighted, by SemanticHTMLTranslator.visit_literal).

    Usage: ``:kbd:`<your code>```

    Returns ``([nodes], [system_messages])`` per the docutils role API.
    """
    # BUG FIX (idiom): avoid mutable default arguments ({} / []); callers
    # passing options/content explicitly are unaffected.
    options = options if options is not None else {}
    content = content if content is not None else []
    new_element = nodes.literal(rawtext, text)
    new_element.set_class('kbd')
    return [new_element], []
def register_roles():
    """Register the custom ``kbd`` role with docutils."""
    rst.roles.register_local_role('kbd', keyboard_role)
def add_reader(readers):
    """Pelican ``readers_init`` handler: use SemanticRSTReader for .rst files."""
    readers.reader_classes['rst'] = SemanticRSTReader
def register():
    """Pelican plugin entry point: install the role and the reader hook."""
    register_roles()
    signals.readers_init.connect(add_reader)
import math
import sys
import pytest
from selenium.common.exceptions import NoSuchElementException, \
WebDriverException
from selenium.webdriver import ActionChains
from selenium.webdriver.common.keys import Keys
import qmxgraph.constants
from qmxgraph import constants, js, server
from qmxgraph.api import QmxGraphApi
from qmxgraph.configuration import GraphOptions, GraphStyles
def test_resize_container(graph_cases):
    """
    api.resizeContainer must resize the graph container to exactly the
    requested dimensions.

    :type graph_cases: qmxgraph.tests.conftest.GraphCaseFactory
    """
    graph = graph_cases('empty')
    width, height = graph.get_container_size()
    new_width = width + 20
    new_height = height + 20
    graph.selenium.execute_script(
        "api.resizeContainer({}, {})".format(new_width, new_height))
    width, height = graph.get_container_size()
    assert width == new_width
    assert height == new_height
def test_insert_vertex(graph_cases):
    """
    The '1v' fixture inserts one vertex; it must be present in the DOM.

    :type graph_cases: qmxgraph.tests.conftest.GraphCaseFactory
    """
    graph = graph_cases('1v')
    assert graph.get_vertex() is not None
@pytest.mark.parametrize('dumped,restored', [('1v', '2v'), ('2v', '1v')])
def test_dump_restore(dumped, restored, graph_cases):
    """
    api.restore must bring the graph back to the state captured by api.dump,
    replacing whatever different state was created in between.

    :type dumped: str
    :type restored: str
    :type graph_cases: qmxgraph.tests.conftest.GraphCaseFactory
    """
    graph = graph_cases(dumped)
    dumped_vertices_count = len(graph.get_vertices())
    dump = graph.eval_js_function('api.dump')
    del graph
    # Build a graph with a different vertex count, then restore the dump.
    graph = graph_cases(restored)
    assert dumped_vertices_count != len(graph.get_vertices())
    graph.eval_js_function('api.restore', dump)
    assert dumped_vertices_count == len(graph.get_vertices())
def test_insert_vertex_with_style(graph_cases):
    """
    A vertex inserted with an explicit style must not render with the
    stylesheet's default fill color.

    :type graph_cases: qmxgraph.tests.conftest.GraphCaseFactory
    """
    graph = graph_cases('1v_style')
    vertex = graph.get_vertex()
    # Can't have same color as default vertex style
    default = graph.selenium.execute_script(
        "return graphEditor.graph.getStylesheet().getDefaultVertexStyle()[mxConstants.STYLE_FILLCOLOR]"  # noqa
    )
    default = default.lower()
    assert vertex.get_attribute('fill') != default
@pytest.mark.parametrize(
    'mode',
    [
        'by_code',
        'by_drag_drop',
    ],
)
def test_insert_edge(graph_cases, mode):
    """
    An edge between two vertices must exist whether it was inserted via the
    API or via drag-and-drop in the browser.

    :type graph_cases: qmxgraph.tests.conftest.GraphCaseFactory
    """
    if mode == 'by_code':
        case = '2v_1e'
    else:
        assert mode == 'by_drag_drop'
        case = '2v_1eDD'
    graph = graph_cases(case)
    assert graph.get_edge(*graph.get_vertices()) is not None
def test_get_terminal_points(graph_cases):
    """
    api.getEdgeTerminalPoints must return the source and target coordinates
    of an edge (fixture positions the two vertices at known locations).

    :type graph_cases: qmxgraph.tests.conftest.GraphCaseFactory
    """
    graph = graph_cases('2v_1e')
    edge_id = graph.get_id(graph.get_edge(*graph.get_vertices()))
    terminal_points = graph.eval_js_function("api.getEdgeTerminalPoints", edge_id)
    (source_x, source_y), (target_x, target_y) = terminal_points
    assert source_x == pytest.approx(40.0)
    assert source_y == pytest.approx(25.0)
    assert target_x == pytest.approx(90.0)
    assert target_y == pytest.approx(25.0)
def test_insert_edge_error_endpoint_not_found(graph_cases, selenium_extras):
    """
    api.insertEdge must raise a descriptive error when either endpoint id
    does not exist, for both the source and the target position.

    :type graph_cases: qmxgraph.tests.conftest.GraphCaseFactory
    :type selenium_extras: qmxgraph.tests.conftest.SeleniumExtras
    """
    graph = graph_cases('1v')
    vertex = graph.get_vertices()[0]
    invalid_source_id = invalid_target_id = "999"
    with pytest.raises(WebDriverException) as e:
        graph.eval_js_function(
            "api.insertEdge", invalid_source_id, graph.get_id(vertex))
    assert selenium_extras.get_exception_message(e) == \
        "Unable to find cell with id {}".format(invalid_source_id)
    with pytest.raises(WebDriverException) as e:
        graph.eval_js_function(
            "api.insertEdge", graph.get_id(vertex), invalid_target_id)
    assert selenium_extras.get_exception_message(e) == \
        "Unable to find cell with id {}".format(invalid_target_id)
def test_insert_decoration(graph_cases):
    """
    api.insertDecorationOnEdge must add a second decoration to an edge that
    already carries one.

    :type graph_cases: qmxgraph.tests.conftest.GraphCaseFactory
    """
    graph = graph_cases('2v_1e_1d')
    assert len(graph.get_decorations()) == 1
    graph.eval_js_function('api.insertDecorationOnEdge', graph.edge_id, 0.75,
                           10, 10, 'another decoration', 'purple')
    assert len(graph.get_decorations()) == 2
def test_decoration_position(graph_cases):
    """
    getDecorationPosition must reflect the fixture's initial position (0.4)
    and track a subsequent setDecorationPosition call.

    :type graph_cases: qmxgraph.tests.conftest.GraphCaseFactory
    """
    graph = graph_cases('2v_1e_1d')
    cell_id = graph.get_id(graph.get_decorations()[0])
    position = graph.eval_js_function('api.getDecorationPosition', cell_id)
    assert position == pytest.approx(0.4)
    graph.eval_js_function('api.setDecorationPosition', cell_id, 0.8)
    position = graph.eval_js_function('api.getDecorationPosition', cell_id)
    assert position == pytest.approx(0.8)
def test_get_decoration_parent_cell_id(graph_cases):
    """
    api.getDecorationParentCellId must return the id of the edge hosting
    the decoration ('4' is the id assigned by the fixture's insert order).

    :type graph_cases: qmxgraph.tests.conftest.GraphCaseFactory
    """
    graph = graph_cases('2v_1e_1d')
    cell_id = graph.get_id(graph.get_decorations()[0])
    parent_id = graph.eval_js_function('api.getDecorationParentCellId', cell_id)
    assert parent_id == '4'
def test_delete_vertex(graph_cases):
    """
    Pressing DELETE with a vertex selected must remove it from the graph.

    :type graph_cases: qmxgraph.tests.conftest.GraphCaseFactory
    """
    graph = graph_cases('1v')
    graph.select_vertex(graph.get_vertex())
    actions = ActionChains(graph.selenium)
    actions.key_down(Keys.DELETE)
    actions.key_up(Keys.DELETE)
    actions.perform()
    assert not graph.get_vertex()
def test_delete_edge(graph_cases):
    """
    Pressing DELETE with an edge selected must remove the edge but leave
    its endpoint vertices in place.

    :type graph_cases: qmxgraph.tests.conftest.GraphCaseFactory
    """
    graph = graph_cases('2v_1e')
    graph.select_edge(graph.get_edge(*graph.get_vertices()))
    actions = ActionChains(graph.selenium)
    actions.key_down(Keys.DELETE)
    actions.key_up(Keys.DELETE)
    actions.perform()
    assert not graph.get_edge(*graph.get_vertices())
def test_group(graph_cases):
    """
    api.group must wrap the multi-selected vertices in a group rectangle;
    api.ungroup must remove it again.

    :type graph_cases: qmxgraph.tests.conftest.GraphCaseFactory
    """
    graph = graph_cases('2v')
    # Hold CTRL so the second click extends the selection.
    actions = ActionChains(graph.selenium)
    actions.key_down(Keys.CONTROL)
    actions.perform()
    vertex_foo, vertex_bar = graph.get_vertices()
    graph.select_vertex(vertex_foo)
    graph.select_vertex(vertex_bar)
    actions = ActionChains(graph.selenium)
    actions.key_up(Keys.CONTROL)
    actions.perform()
    # Group selected vertices
    graph.selenium.execute_script("api.group()")
    # The group is identified in the DOM by its configured fill color.
    group_fill = graph.host.styles['group']['fill_color']
    group_selector = 'g>g>rect[fill="{}"]'.format(group_fill)
    group = graph.selenium.find_elements_by_css_selector(group_selector)
    assert len(group) == 1
    # Ungroup selected vertices
    graph.selenium.execute_script("api.ungroup()")
    group = graph.selenium.find_elements_by_css_selector(group_selector)
    assert not group
def test_toggle_outline(selenium, host, wait_graph_page_ready):
    """
    The outline window is created lazily on the first toggle and is merely
    hidden (not destroyed) when toggled off again.

    :type selenium: selenium.webdriver.remote.webdriver.WebDriver
    :type host: qmxgraph.server.Host
    """
    wait_graph_page_ready(host=host)
    # By default, outline starts hidden. Basically this means mxGraph's window
    # component used to shown outline doesn't exist yet.
    with pytest.raises(NoSuchElementException):
        selenium.find_element_by_css_selector('div.mxWindow')
    # Once shown, outline is displayed in a mxGraph's window component
    selenium.execute_script("api.toggleOutline()")
    outline = selenium.find_element_by_css_selector('div.mxWindow')
    assert outline is not None
    # However once toggled back to hidden, it is not destroyed but simply
    # hidden
    selenium.execute_script("api.toggleOutline()")
    assert not outline.is_displayed()
@pytest.mark.parametrize('grid', [True, False])
def test_toggle_grid(selenium, host, grid, wait_graph_page_ready):
    """
    The grid starts visible; toggling it off adds a CSS class that hides
    the grid background of the graph container div.

    :type selenium: selenium.webdriver.remote.webdriver.WebDriver
    :type host: qmxgraph.server.Host
    :type grid: bool
    """
    wait_graph_page_ready(host=host)
    # By default, grid starts visible. To hide grid, a class is
    # added to graph container div.
    if not grid:
        selenium.execute_script("api.toggleGrid()")
    container = selenium.find_element_by_css_selector('div.graph')
    assert container.get_attribute('id') == 'graphContainer'
    # BUGFIX: the conditional expression must be parenthesized. `==` binds
    # tighter than `if`/`else`, so the previous unparenthesized form
    # evaluated to the non-empty string 'graph hide-bg' whenever
    # grid=False, making the assert vacuously pass.
    assert container.get_attribute('class') == \
        ('graph' if grid else 'graph hide-bg')
@pytest.mark.parametrize('snap', [True, False])
def test_toggle_snap(graph_cases, snap):
    """
    With snap enabled a dragged vertex lands on the nearest multiple of the
    grid size; with snap disabled it lands exactly where it is dropped.

    :type graph_cases: qmxgraph.tests.conftest.GraphCaseFactory
    :type snap: bool
    """
    graph = graph_cases('1v')
    selenium = graph.selenium
    # If snap is enabled, it should move to closest grid block (which are
    # always multiples of 10, as 10 is grid size). By default snap is enabled.
    if not snap:
        selenium.execute_script("api.toggleSnap()")
    vertex = graph.get_vertex()
    x, y = graph.get_vertex_position(vertex)
    w, h = graph.get_vertex_size(vertex)
    # Move the pointer to the center of the vertex, then drag it by an
    # offset (66, 66) that is deliberately NOT a multiple of the grid size.
    actions = ActionChains(selenium)
    actions.move_to_element(vertex)
    actions.move_by_offset(w / 2., h / 2.)
    actions.drag_and_drop_by_offset(None, 66, 66)
    actions.perform()
    vertex = graph.get_vertex()
    def expected(v):
        # With snap on, the drop position is rounded up to the next
        # multiple of the grid size (10).
        result = v + 66
        if snap:
            result = math.ceil(result / 10.) * 10
        return result
    # Dragging must not change the vertex size, only its position.
    assert int(vertex.get_attribute('width')) == w
    assert int(vertex.get_attribute('height')) == h
    assert int(vertex.get_attribute('x')) == expected(x)
    assert int(vertex.get_attribute('y')) == expected(y)
def test_get_cell_id_at(graph_cases):
    """
    Ids assigned by mxGraph follow a single global counter shared by every
    cell type; querying a position outside any cell yields ``None``.

    :type graph_cases: qmxgraph.tests.conftest.GraphCaseFactory
    """
    graph = graph_cases('2v_1e_1d_1t')

    # mxGraph shares a global id counter for all cell types. The first
    # non-reserved id is 2, as lower values are used by internal control
    # structures.
    vertices = graph.get_vertices()
    assert [graph.get_id(v) for v in vertices] == ['2', '3']
    assert graph.get_id(graph.get_edge(*vertices)) == '4'
    assert graph.get_id(graph.get_decorations()[0]) == '5'
    assert graph.get_id(graph.get_tables()[0]) == '6'

    class OutsideAnyCell:
        # Mimics a web element positioned far away from every cell.
        def __init__(self):
            self.location = {'x': 999, 'y': 999}

    assert graph.get_id(OutsideAnyCell()) is None
def test_set_visible(graph_cases):
    """
    Every cell type can be hidden and shown again; hidden cells stop being
    reported by the page queries used in these tests.

    :type graph_cases: qmxgraph.tests.conftest.GraphCaseFactory
    """
    graph = graph_cases('2v_1e_1d_1t')
    # Hide then show vertex again
    vertices = graph.get_vertices()
    cell_id = graph.get_id(vertices[0])
    graph.set_visible(cell_id, False)
    assert not graph.is_visible(cell_id)
    assert len(graph.get_vertices()) == len(vertices) - 1
    graph.set_visible(cell_id, True)
    assert graph.is_visible(cell_id)
    assert len(graph.get_vertices()) == len(vertices)
    # hide then show edge again
    # (edges are looked up by their terminal vertices, so a hidden edge is
    # simply not found)
    cell_id = graph.get_id(graph.get_edge(*graph.get_vertices()))
    graph.set_visible(cell_id, False)
    assert graph.get_edge(*graph.get_vertices()) is None
    graph.set_visible(cell_id, True)
    assert graph.get_edge(*graph.get_vertices()) is not None
    # Hide then show decoration again
    cell_id = graph.get_id(graph.get_decorations()[0])
    graph.set_visible(cell_id, False)
    assert len(graph.get_decorations()) == 0
    graph.set_visible(cell_id, True)
    assert len(graph.get_decorations()) == 1
    # Hide then show table again
    cell_id = graph.get_id(graph.get_tables()[0])
    graph.set_visible(cell_id, False)
    assert len(graph.get_tables()) == 0
    graph.set_visible(cell_id, True)
    assert len(graph.get_tables()) == 1
def test_is_and_set_port_visible(graph_cases):
    """
    A port can be hidden and shown again through the API, and its
    visibility can be queried at any time.

    :type graph_cases: qmxgraph.tests.conftest.GraphCaseFactory
    """
    graph = graph_cases('1v_1p')
    vertex_id = graph.get_id(graph.get_vertex())

    def port_visible():
        return graph.eval_js_function('api.isPortVisible', vertex_id, 'foo')

    # Ports start out visible.
    assert port_visible()
    assert graph.get_port() is not None
    # Hide the port.
    graph.eval_js_function('api.setPortVisible', vertex_id, 'foo', False)
    assert not port_visible()
    assert graph.get_port() is None
    # Show it again.
    graph.eval_js_function('api.setPortVisible', vertex_id, 'foo', True)
    assert port_visible()
    assert graph.get_port() is not None
def test_parse_port_id(graph_cases):
    """
    ``mxCell.parsePortId`` splits a qualified port id into its parent id
    and port name components.

    :type graph_cases: qmxgraph.tests.conftest.GraphCaseFactory
    """
    graph = graph_cases('empty')
    parent, port_name = graph.eval_js_function(
        'mxCell.parsePortId', 'qmxgraph-port-PARENT-PORT-NAME')
    assert parent == 'PARENT'
    assert port_name == 'PORT-NAME'
def test_set_visible_error_not_found(graph_cases, selenium_extras):
    """
    Both visibility setter and getter fail with a descriptive message when
    given an unknown cell id.

    :type graph_cases: qmxgraph.tests.conftest.GraphCaseFactory
    :type selenium_extras: qmxgraph.tests.conftest.SeleniumExtras
    """
    graph = graph_cases('2v_1e_1d_1t')
    missing_id = '999'
    expected_message = "Unable to find cell with id {}".format(missing_id)

    with pytest.raises(WebDriverException) as e:
        graph.set_visible(missing_id, False)
    assert selenium_extras.get_exception_message(e) == expected_message

    with pytest.raises(WebDriverException) as e:
        graph.is_visible(missing_id)
    assert selenium_extras.get_exception_message(e) == expected_message
def test_get_geometry_plain(graph_cases):
    """
    Geometry is reported as ``[x, y, width, height]`` for every cell type.

    :type graph_cases: qmxgraph.tests.conftest.GraphCaseFactory
    """
    graph = graph_cases('2v_1e_1d_1t')
    assert graph.get_geometry(graph.get_vertices()[0]) == [10, 10, 30, 30]
    assert graph.get_geometry(graph.get_edge(*graph.get_vertices())) == \
        [40, 25, 50, 1]
    assert graph.get_geometry(graph.get_decorations()[0]) == [55, 20, 10, 10]
    # Table geometry is dependent on how the contents are rendered.
    # Using `pytest.approx` to account for platform differences.
    obtained_table_geometry = graph.get_geometry(graph.get_tables()[0])
    assert pytest.approx(obtained_table_geometry, rel=0.1) == \
        [20, 60, 108, 72]
def test_get_geometry_error_not_found(graph_cases, selenium_extras):
    """
    Querying the geometry of an unknown cell id raises a descriptive error.

    :type graph_cases: qmxgraph.tests.conftest.GraphCaseFactory
    :type selenium_extras: qmxgraph.tests.conftest.SeleniumExtras
    """
    graph = graph_cases('2v_1e_1d_1t')
    missing_id = "999"

    with pytest.raises(WebDriverException) as e:
        graph.get_geometry(missing_id)

    expected_message = "Unable to find cell with id {}".format(missing_id)
    assert selenium_extras.get_exception_message(e) == expected_message
def test_insert_table(graph_cases):
    """
    A graph built from the '1t' case contains exactly one table.

    :type graph_cases: qmxgraph.tests.conftest.GraphCaseFactory
    """
    graph = graph_cases('1t')
    tables = graph.get_tables()
    assert len(tables) == 1
@pytest.mark.parametrize(
    'action, expected_scale',
    [(None, 1.0), ('zoomIn', 1.2), ('zoomOut', 0.83)],
)
def test_insert_child_table(graph_cases, action, expected_scale):
    """
    When supplying `parent_id` the origin (the x and y coordinates) are
    normalized and relative to the parent bounds. Here is used another table
    as parent but any cell is eligible.

    The zoom parametrization checks that the normalized coordinates are
    honored regardless of the current scale/translation of the view.

    :type graph_cases: qmxgraph.tests.conftest.GraphCaseFactory
    :type action: Optional[str]
    :type expected_scale: float
    """
    graph = graph_cases('1t')
    # Applying zoom, so it changes the scale and transformation
    obtained_scale = graph.eval_js_function('api.getZoomScale')
    assert obtained_scale == 1.0
    ini_scale, ini_x, ini_y = graph.eval_js_function('api.getScaleAndTranslation')
    assert (ini_scale, ini_x, ini_y) == (1, 0, 0)
    if action is not None:
        graph.eval_js_function('api.{}'.format(action))
        obtained_scale = graph.eval_js_function('api.getZoomScale')
        assert obtained_scale == expected_scale
        # Zooming must have changed both scale and translation.
        ini_scale, ini_x, ini_y = graph.eval_js_function('api.getScaleAndTranslation')
        assert ini_scale != 1
        assert ini_x != 0
        assert ini_y != 0
    tables = graph.get_tables()
    assert len(tables) == 1
    parent_id = graph.get_id(tables[0])
    # (0.5, 1.5) is normalized relative to the parent: horizontally centered
    # within it and vertically past its bottom edge.
    child_id = graph.eval_js_function(
        'api.insertTable', 0.5, 1.5, 300, {'contents': []}, 'foobar', None,
        None, parent_id)
    tables = graph.get_tables()
    assert len(tables) == 2
    # After resetting the zoom, bounds of the tables must respect the
    # constraints being tested below between parent and child bounds
    graph.eval_js_function('api.resetZoom')
    obtained_scale = graph.eval_js_function('api.getZoomScale')
    assert obtained_scale == 1.0
    def get_bounds(cell_id):
        return graph.eval_js_function('api.getGeometry', cell_id)
    parent_bounds = get_bounds(parent_id)
    child_bounds = get_bounds(child_id)
    # Child starts horizontally inside the parent and below its bottom.
    assert parent_bounds[0] < child_bounds[0] < parent_bounds[2]
    assert parent_bounds[3] < child_bounds[1]
def test_table_with_image(graph_cases):
    """
    Table contents may embed ``img`` elements, which end up rendered as
    actual image tags inside the table cell.

    :type graph_cases: qmxgraph.tests.conftest.GraphCaseFactory
    """
    graph = graph_cases('1t')
    tables = graph.get_tables()
    assert len(tables) == 1
    table_id = graph.get_id(tables[0])
    # Nested structure mirrors the contents description types used by the
    # project (see the per-level comments below).
    contents = {  # graphs.utils.TableRowDescription
        'contents': [
            {  # graphs.utils.TableDataDescription
                'contents': [
                    {  # graphs.utils.TableDataDescription
                        'contents': [
                            'foo ',
                            {
                                'tag': 'img', 'src': 'some-image-path',
                                'width': 16, 'height': 16,
                            },
                        ]
                    }
                ]
            }
        ]
    }
    graph.eval_js_function('api.updateTable', table_id, contents, '')
    image_elements = graph.selenium.find_elements_by_css_selector(
        '.table-cell-contents img')
    assert len(image_elements) == 1
    image = image_elements[0]
    # The browser may absolutize the src; only the tail is stable.
    assert image.get_attribute('src').endswith('some-image-path')
def test_update_table(graph_cases):
    """
    ``api.updateTable`` replaces both the title and the row contents of an
    existing table cell; non-string values are rendered as text.

    :type graph_cases: qmxgraph.tests.conftest.GraphCaseFactory
    """
    graph = graph_cases('1t')
    table_id = graph.get_id(graph.get_tables()[0])
    new_contents = {  # graphs.utils.TableDescription
        'contents': [
            # graphs.utils.TableRowDescription's
            {'contents': ['a', 1]},
            {'contents': ['b', 2]},
        ]
    }
    new_title = 'updated'
    graph.selenium.execute_script(
        js.prepare_js_call('api.updateTable', table_id, new_contents, new_title))
    updated_table = graph.get_tables()[0]
    assert graph.get_table_title(updated_table) == 'updated'
    assert graph.get_table_contents(updated_table) == ['a', '1', 'b', '2']
def test_update_table_error_not_found(graph_cases, selenium_extras):
    """
    Updating a table through an unknown cell id raises a descriptive error.

    :type graph_cases: qmxgraph.tests.conftest.GraphCaseFactory
    :type selenium_extras: qmxgraph.tests.conftest.SeleniumExtras
    """
    graph = graph_cases('1t')
    missing_id = "999"

    with pytest.raises(WebDriverException) as e:
        graph.selenium.execute_script(
            js.prepare_js_call(
                'api.updateTable', missing_id, [], 'will not matter'))

    expected_message = "Unable to find cell with id {}".format(missing_id)
    assert selenium_extras.get_exception_message(e) == expected_message
def test_update_table_error_not_table(graph_cases, selenium_extras):
    """
    ``api.updateTable`` refuses to operate on cells that are not tables.

    :type graph_cases: qmxgraph.tests.conftest.GraphCaseFactory
    :type selenium_extras: qmxgraph.tests.conftest.SeleniumExtras
    """
    graph = graph_cases('2v_1e_1d_1t')
    # Use an edge id, which is a valid cell but not a table.
    edge_id = graph.get_id(graph.get_edge(*graph.get_vertices()))

    with pytest.raises(WebDriverException) as e:
        graph.selenium.execute_script(
            js.prepare_js_call(
                'api.updateTable', edge_id, [], 'will not matter'))

    assert selenium_extras.get_exception_message(e) == "Cell is not a table"
def test_remove_cells(graph_cases):
    """
    Removing a vertex and its edge in one call leaves only the other
    vertex behind.

    :type graph_cases: qmxgraph.tests.conftest.GraphCaseFactory
    """
    graph = graph_cases('2v_1e')
    vertices = graph.get_vertices()
    edge = graph.get_edge(*vertices)
    doomed_ids = [graph.get_id(vertices[0]), graph.get_id(edge)]

    graph.eval_js_function('api.removeCells', doomed_ids)

    assert len(graph.get_vertices()) == 1
    assert graph.get_edge(*vertices) is None
def test_remove_cells_error_not_found(graph_cases, selenium_extras):
    """
    Removing an unknown cell id raises a descriptive error.

    :type graph_cases: qmxgraph.tests.conftest.GraphCaseFactory
    :type selenium_extras: qmxgraph.tests.conftest.SeleniumExtras
    """
    graph = graph_cases('empty')
    missing_id = 999

    with pytest.raises(WebDriverException) as e:
        graph.eval_js_function('api.removeCells', [missing_id])

    expected_message = "Unable to find cell with id {}".format(missing_id)
    assert selenium_extras.get_exception_message(e) == expected_message
def test_on_cells_removed(graph_cases):
    """
    The ``onCellsRemoved`` callback receives the ids of every removed cell.

    :type graph_cases: qmxgraph.tests.conftest.GraphCaseFactory
    """
    graph = graph_cases('2v_1e')
    # Stash the ids handed to the callback in a window global so the test
    # can read them back afterwards.
    graph.selenium.execute_script(
        'callback = function(cellIds) {window.cellIds = cellIds;}')
    graph.eval_js_function('api.onCellsRemoved', js.Variable('callback'))

    vertices = graph.get_vertices()
    removed_ids = [
        graph.get_id(vertices[0]),
        graph.get_id(graph.get_edge(*vertices)),
    ]
    graph.eval_js_function('api.removeCells', removed_ids)

    assert graph.selenium.execute_script('return window.cellIds') == removed_ids
def test_custom_shapes(selenium, port, tmpdir, wait_graph_page_ready):
    """
    A stencil XML file passed to the server host registers a custom shape
    that can then be referenced by a style and used when inserting cells.

    :type selenium: selenium.webdriver.remote.webdriver.WebDriver
    :type port: qmxgraph.tests.conftest.Port
    """
    # Shape found in by https://www.draw.io/stencils/basic.xml
    custom_stencil = '''\
<shapes>
<shape name="Moon" h="103.05" w="77.05" aspect="variable" strokewidth="inherit">
<connections>
<constraint x="0.48" y="0" perimeter="0" name="N"/>
<constraint x="1" y="0.89" perimeter="0" name="SE"/>
</connections>
<background>
<path>
<move x="37.05" y="0"/>
<arc rx="48" ry="48" x-axis-rotation="0" large-arc-flag="1" sweep-flag="0" x="77.05" y="92"/>
<arc rx="60" ry="60" x-axis-rotation="0" large-arc-flag="0" sweep-flag="1" x="37.05" y="0"/>
<close/>
</path>
</background>
<foreground>
<fillstroke/>
</foreground>
</shape>
</shapes>'''  # noqa
    stencil_file = tmpdir.mkdir("stencils").join("custom.xml")
    stencil_file.write(custom_stencil)
    stencils = [str(stencil_file)]
    # Style 'moon' maps to the custom stencil shape; the fill color is used
    # below to locate the rendered shape in the DOM.
    styles = GraphStyles({
        'moon': {
            'shape': 'Moon',
            'fill_color': '#ffff00',
        },
    })
    def has_custom_shape():
        return bool(selenium.find_elements_by_css_selector(
            'g>g>path[fill="#ffff00"]'))
    # Start a dedicated host configured with the custom stencil.
    with server.host(
            port=port.get(), styles=styles, stencils=stencils) as host:
        wait_graph_page_ready(host=host)
        assert not has_custom_shape()
        selenium.execute_script(
            "api.insertVertex(10, 10, 20, 20, 'custom', 'moon')")
        assert has_custom_shape()
@pytest.mark.parametrize(
    'mode',
    [
        'by_code',
        'by_drag_drop',
    ],
)
def test_edge_with_style(port, mode, graph_cases_factory):
    """
    An 'edge' style configured in the host is honored both for edges
    inserted programmatically and for edges created by drag & drop.

    :type port: qmxgraph.tests.conftest.Port
    :type mode: str
    :type graph_cases_factory: callable
    """
    styles = GraphStyles({
        'edge': {
            'stroke_color': '#000000',
        },
    })
    # A dedicated host is needed so the custom styles are loaded.
    with server.host(port=port.get(), styles=styles) as host:
        cases = graph_cases_factory(host)
        # '2v_1eDD' creates the edge via simulated drag & drop.
        graph = cases('2v_1e' if mode == 'by_code' else '2v_1eDD')
        assert graph.get_edge(
            *graph.get_vertices()).get_attribute('stroke') == '#000000'
def test_get_label(graph_cases):
    """
    Labels are plain strings for vertices, edges and decorations; tables
    have an HTML label, which is checked here by extracting its text nodes.

    :type graph_cases: qmxgraph.tests.conftest.GraphCaseFactory
    """
    graph = graph_cases('2v_1e_1d_1t')
    assert graph.get_label(graph.get_vertices()[0]) == 'foo'
    assert graph.get_label(graph.get_vertices()[1]) == 'bar'
    assert graph.get_label(graph.get_edge(*graph.get_vertices())) == 'edge'
    assert graph.get_label(graph.get_decorations()[0]) == 'decoration'
    # Tables use a complex label in HTML
    table_label = graph.get_label(graph.get_tables()[0])
    table_html_data = []
    from html.parser import HTMLParser
    # Collects only text nodes; the exact markup is an implementation
    # detail of table rendering.
    class TableHTMLParser(HTMLParser):
        def handle_starttag(self, tag, attrs):
            # tags used are considered implementation detail, don't care
            # about it
            pass
        def handle_endtag(self, tag):
            # tags used are considered implementation detail, don't care
            # about it
            pass
        def handle_data(self, data):
            table_html_data.append(data)
    parser = TableHTMLParser()
    parser.feed(table_label)
    assert table_html_data == \
        ['Hitchhikers', 'arthur', 'dent', 'ford', 'prefect']
def test_get_label_error_not_found(graph_cases, selenium_extras):
    """
    Querying the label of an unknown cell id raises a descriptive error.

    :type graph_cases: qmxgraph.tests.conftest.GraphCaseFactory
    :type selenium_extras: qmxgraph.tests.conftest.SeleniumExtras
    """
    graph = graph_cases('2v_1e_1d_1t')
    missing_id = "999"

    with pytest.raises(WebDriverException) as e:
        graph.get_label(missing_id)

    expected_message = "Unable to find cell with id {}".format(missing_id)
    assert selenium_extras.get_exception_message(e) == expected_message
def test_has_cell(graph_cases):
    """
    ``api.hasCell`` is true only while the cell exists in the graph.

    :type graph_cases: qmxgraph.tests.conftest.GraphCaseFactory
    """
    graph = graph_cases('empty')
    new_cell_id = graph.insert_vertex(x=10, y=10)
    assert graph.eval_js_function("api.hasCell", new_cell_id)
    graph.remove_cells(new_cell_id)
    assert not graph.eval_js_function("api.hasCell", new_cell_id)
def test_get_cell_type(graph_cases):
    """
    ``api.getCellType`` reports the proper type constant for each kind of
    cell.

    :type graph_cases: qmxgraph.tests.conftest.GraphCaseFactory
    """
    graph = graph_cases('2v_1e_1d_1t')

    def cell_type_of(web_element):
        return graph.eval_js_function(
            'api.getCellType', graph.get_id(web_element))

    vertices = graph.get_vertices()
    assert cell_type_of(vertices[0]) == constants.CELL_TYPE_VERTEX
    assert cell_type_of(graph.get_edge(*vertices)) == constants.CELL_TYPE_EDGE
    assert cell_type_of(graph.get_decorations()[0]) == \
        constants.CELL_TYPE_DECORATION
    assert cell_type_of(graph.get_tables()[0]) == constants.CELL_TYPE_TABLE
def test_get_cell_type_error_not_found(graph_cases, selenium_extras):
    """
    Querying the type of an unknown cell id raises a descriptive error.

    :type graph_cases: qmxgraph.tests.conftest.GraphCaseFactory
    :type selenium_extras: qmxgraph.tests.conftest.SeleniumExtras
    """
    graph = graph_cases('empty')
    missing_id = "999"

    with pytest.raises(WebDriverException) as e:
        graph.eval_js_function("api.getCellType", missing_id)

    expected_message = "Unable to find cell with id {}".format(missing_id)
    assert selenium_extras.get_exception_message(e) == expected_message
@pytest.mark.parametrize(
    'cell_type',
    [
        qmxgraph.constants.CELL_TYPE_VERTEX,
        qmxgraph.constants.CELL_TYPE_EDGE,
        qmxgraph.constants.CELL_TYPE_TABLE,
        qmxgraph.constants.CELL_TYPE_DECORATION,
    ]
)
def test_insert_with_tags(graph_cases, cell_type):
    """
    Tags supplied at insertion time must already be readable from inside
    the on-cells-added event handler, for every cell type.

    :type graph_cases: qmxgraph.tests.conftest.GraphCaseFactory
    :type cell_type: qmxgraph.constants.CELL_TYPE_*
    """
    graph = graph_cases('empty')
    # Listen to on cells added event to be sure tags are already configured
    # as soon as cell is created
    graph.selenium.execute_script(
        'callback = function(cellIds) {'
        '    window.tags = cellIds.map('
        '        function(cellId) {'
        '            return api.hasTag(cellId, "tagTest")? api.getTag(cellId, "tagTest") : null;'  # noqa
        '        }'
        '    );'
        '}')
    graph.eval_js_function('api.onCellsAdded', js.Variable('callback'))
    tags = {'tagTest': '1'}
    cell_id = insert_by_parametrized_type(graph, cell_type, tags=tags)
    # Tag is readable after insertion...
    assert graph.selenium.execute_script(
        "return api.getTag({}, 'tagTest')".format(cell_id)) == tags['tagTest']
    # ...and was already readable inside the added-cells callback.
    assert graph.selenium.execute_script('return window.tags') == \
        [tags['tagTest']]
@pytest.mark.parametrize(
    'cell_type',
    [
        qmxgraph.constants.CELL_TYPE_VERTEX,
        qmxgraph.constants.CELL_TYPE_EDGE,
        qmxgraph.constants.CELL_TYPE_TABLE,
        qmxgraph.constants.CELL_TYPE_DECORATION,
    ]
)
def test_insert_with_tags_error_value_not_string(
        graph_cases, cell_type, selenium_extras):
    """
    Inserting any cell type with a non-string tag value is rejected.

    :type graph_cases: qmxgraph.tests.conftest.GraphCaseFactory
    :type cell_type: qmxgraph.constants.CELL_TYPE_*
    :type selenium_extras: qmxgraph.tests.conftest.SeleniumExtras
    """
    graph = graph_cases('empty')
    bad_tags = {'tagTest': 999}

    with pytest.raises(WebDriverException) as e:
        insert_by_parametrized_type(graph, cell_type, tags=bad_tags)

    assert selenium_extras.get_exception_message(e) == \
        "Tag '{}' is not a string".format("tagTest")
@pytest.mark.parametrize(
    'cell_type',
    [
        qmxgraph.constants.CELL_TYPE_VERTEX,
        qmxgraph.constants.CELL_TYPE_EDGE,
        qmxgraph.constants.CELL_TYPE_TABLE,
        qmxgraph.constants.CELL_TYPE_DECORATION,
    ]
)
def test_set_get_tag(graph_cases, cell_type):
    """
    A tag can be set on and read back from any cell type.

    :type graph_cases: qmxgraph.tests.conftest.GraphCaseFactory
    :type cell_type: qmxgraph.constants.CELL_TYPE_*
    """
    graph = graph_cases('empty')
    cell_id = insert_by_parametrized_type(graph, cell_type)

    def tag_present():
        return graph.eval_js_function("api.hasTag", cell_id, "test")

    # No tag until one is set.
    assert not tag_present()
    graph.eval_js_function("api.setTag", cell_id, "test", "foo")
    assert tag_present()
    assert graph.eval_js_function("api.getTag", cell_id, "test") == "foo"
@pytest.mark.parametrize(
    'cell_type',
    [
        qmxgraph.constants.CELL_TYPE_VERTEX,
        qmxgraph.constants.CELL_TYPE_EDGE,
        qmxgraph.constants.CELL_TYPE_TABLE,
        qmxgraph.constants.CELL_TYPE_DECORATION,
    ]
)
def test_set_get_tag_error_tag_not_found(
        graph_cases, cell_type, selenium_extras):
    """
    Reading a tag that was never set raises a descriptive error.

    :type graph_cases: qmxgraph.tests.conftest.GraphCaseFactory
    :type cell_type: qmxgraph.constants.CELL_TYPE_*
    :type selenium_extras: qmxgraph.tests.conftest.SeleniumExtras
    """
    graph = graph_cases('empty')
    cell_id = insert_by_parametrized_type(graph, cell_type)
    # Sanity check: the tag really isn't there.
    assert not graph.eval_js_function("api.hasTag", cell_id, "test")

    with pytest.raises(WebDriverException) as e:
        graph.eval_js_function("api.getTag", cell_id, "test")

    assert selenium_extras.get_exception_message(e) == \
        "Tag '{}' not found in cell with id {}".format("test", cell_id)
@pytest.mark.parametrize(
    'cell_type',
    [
        qmxgraph.constants.CELL_TYPE_VERTEX,
        qmxgraph.constants.CELL_TYPE_EDGE,
        qmxgraph.constants.CELL_TYPE_TABLE,
        qmxgraph.constants.CELL_TYPE_DECORATION,
    ]
)
def test_set_get_tag_error_value_not_string(
        graph_cases, cell_type, selenium_extras):
    """
    Setting a tag to a non-string value is rejected for every cell type.

    :type graph_cases: qmxgraph.tests.conftest.GraphCaseFactory
    :type cell_type: qmxgraph.constants.CELL_TYPE_*
    :type selenium_extras: qmxgraph.tests.conftest.SeleniumExtras
    """
    graph = graph_cases('empty')
    cell_id = insert_by_parametrized_type(graph, cell_type)

    with pytest.raises(WebDriverException) as e:
        graph.eval_js_function("api.setTag", cell_id, "test", 999)

    assert selenium_extras.get_exception_message(e) == \
        "Tag '{}' is not a string".format("test")
@pytest.mark.parametrize(
    'cell_type',
    [
        qmxgraph.constants.CELL_TYPE_VERTEX,
        qmxgraph.constants.CELL_TYPE_EDGE,
        qmxgraph.constants.CELL_TYPE_TABLE,
        qmxgraph.constants.CELL_TYPE_DECORATION,
    ]
)
def test_set_get_tag_doesnt_overwrite_protected_tags(graph_cases, cell_type):
    """
    User tags live in their own namespace: setting a tag named 'label'
    must not clobber the cell's actual label.

    :type graph_cases: qmxgraph.tests.conftest.GraphCaseFactory
    :type cell_type: qmxgraph.constants.CELL_TYPE_*
    """
    graph = graph_cases('empty')
    cell_id = insert_by_parametrized_type(graph, cell_type)

    def label_tag():
        return graph.eval_js_function("api.getTag", cell_id, "label")

    assert not graph.eval_js_function("api.hasTag", cell_id, "label")
    graph.eval_js_function("api.setTag", cell_id, "label", "test")
    assert graph.eval_js_function("api.hasTag", cell_id, "label")
    assert label_tag() == "test"
    # The real label is untouched by the user tag of the same name.
    assert label_tag() != graph.get_label(cell_id)
def test_set_get_tag_error_cell_not_found(graph_cases, selenium_extras):
    """
    Every tag API entry point fails the same way for an unknown cell id.

    :type graph_cases: qmxgraph.tests.conftest.GraphCaseFactory
    :type selenium_extras: qmxgraph.tests.conftest.SeleniumExtras
    """
    graph = graph_cases('empty')
    missing_id = "999"
    expected_message = "Unable to find cell with id {}".format(missing_id)

    tag_api_calls = [
        lambda: graph.eval_js_function(
            "api.setTag", missing_id, "test", "foo"),
        lambda: graph.eval_js_function("api.getTag", missing_id, "test"),
        lambda: graph.eval_js_function("api.hasTag", missing_id, "test"),
    ]
    for tag_api_call in tag_api_calls:
        with pytest.raises(WebDriverException) as e:
            tag_api_call()
        assert selenium_extras.get_exception_message(e) == expected_message
def test_set_get_tag_without_initial_tag_support(graph_cases):
    """
    Cells created directly by mxGraph code (e.g. edges drawn by drag&drop)
    have no tag support at creation; the tag API must set that support up
    lazily so such cells can still be tagged.

    :type graph_cases: qmxgraph.tests.conftest.GraphCaseFactory
    """
    graph = graph_cases('2v')
    vertices = graph.get_vertices()
    graph.insert_edge_by_drag_drop(*vertices)
    dragged_edge = graph.get_edge(*vertices)
    edge_id = graph.get_id(dragged_edge)

    assert not graph.eval_js_function("api.hasTag", edge_id, "test")
    graph.eval_js_function("api.setTag", edge_id, "test", "foo")
    assert graph.eval_js_function("api.hasTag", edge_id, "test")
    assert graph.eval_js_function("api.getTag", edge_id, "test") == "foo"
def test_on_cells_added(graph_cases):
    """
    Cells reported by the added-cells hook match the creation order of the
    '2v_1e_1d_1t' case.

    :type graph_cases: qmxgraph.tests.conftest.GraphCaseFactory
    """
    graph = graph_cases('2v_1e_1d_1t')
    vertices = graph.get_vertices()
    expected_ids = [
        graph.get_id(vertices[0]),
        graph.get_id(vertices[1]),
        graph.get_id(graph.get_edge(*vertices)),
        graph.get_id(graph.get_decorations()[0]),
        graph.get_id(graph.get_tables()[0]),
    ]
    assert graph.get_added_cell_ids() == expected_ids
def test_on_label_changed(graph_cases):
    """
    Editing a label in the browser fires a label-changed event with old and
    new values, and must not wipe out custom tags stored on the cell.

    :type graph_cases: qmxgraph.tests.conftest.GraphCaseFactory
    """
    graph = graph_cases('1v')
    vertex_id = graph.get_id(graph.get_vertex())
    # Sanity check: custom tags are internally stored in same node element as
    # label. This is to make sure tags aren't lost when label is changed by
    # mistakenly overwriting whole node element instead of just label.
    graph.eval_js_function('api.setTag', vertex_id, 'test', 'test')
    label = graph.get_label(graph.get_vertex())
    label_element = graph.get_label_element(graph.get_vertex())
    # Double click enters in-place edit mode; typing replaces the label.
    actions = ActionChains(graph.selenium)
    actions.double_click(label_element)
    actions.send_keys('foo')
    actions.click(graph.get_container())  # to lose focus and confirm
    actions.perform()
    assert graph.get_label(graph.get_vertex()) == 'foo'
    label_changes = graph.get_label_changes()
    assert label_changes == \
        [{
            'cellId': vertex_id,
            'newLabel': 'foo',
            'oldLabel': label,
        }]
    # The tag set before the edit must have survived it.
    assert graph.eval_js_function('api.getTag', vertex_id, 'test') == 'test'
def test_set_label(graph_cases):
    """
    ``api.setLabel`` changes the label and fires a label-changed event
    carrying both old and new values.

    :type graph_cases: qmxgraph.tests.conftest.GraphCaseFactory
    """
    graph = graph_cases('1v')
    vertex_id = graph.get_id(graph.get_vertex())
    previous_label = graph.get_label(graph.get_vertex())

    graph.eval_js_function('api.setLabel', vertex_id, 'foo')

    assert graph.get_label(graph.get_vertex()) == 'foo'
    expected_change = {
        'cellId': vertex_id,
        'newLabel': 'foo',
        'oldLabel': previous_label,
    }
    assert graph.get_label_changes() == [expected_change]
def test_set_label_error_not_found(graph_cases, selenium_extras):
    """
    Setting the label of an unknown cell id raises a descriptive error.

    :type graph_cases: qmxgraph.tests.conftest.GraphCaseFactory
    :type selenium_extras: qmxgraph.tests.conftest.SeleniumExtras
    """
    graph = graph_cases("empty")
    missing_id = "999"

    with pytest.raises(WebDriverException) as e:
        graph.eval_js_function('api.setLabel', missing_id, 'foo')

    expected_message = "Unable to find cell with id {}".format(missing_id)
    assert selenium_extras.get_exception_message(e) == expected_message
def test_set_double_click_handler(graph_cases):
    """
    A registered double click handler receives the id of the cell that was
    double clicked.

    :type graph_cases: qmxgraph.tests.conftest.GraphCaseFactory
    """
    graph = graph_cases('1v')
    vertex_id = graph.get_id(graph.get_vertex())
    # Accumulate clicked cell ids in a window global so the test can read
    # them back.
    graph.selenium.execute_script(
        'callback = function(cellId) {'
        '    if (!window.__dblClick__) {'
        '        window.__dblClick__ = [];'
        '    }'
        '    window.__dblClick__.push(cellId);'
        '}')
    graph.eval_js_function(
        'api.setDoubleClickHandler', qmxgraph.js.Variable('callback'))
    actions = ActionChains(graph.selenium)
    actions.double_click(graph.get_vertex())
    actions.perform()
    assert graph.selenium.execute_script('return window.__dblClick__') == \
        [vertex_id]
def test_add_selection_change_handler(graph_cases):
    """
    The selection handler fires once per selection change — whether driven
    by user clicks or by `setSelectedCells` — with the full list of
    currently selected cell ids.

    :type graph_cases: qmxgraph.tests.conftest.GraphCaseFactory
    """
    graph = graph_cases('2v_1e')
    source, target = graph.get_vertices()
    edge = graph.get_edge(source, target)
    # Accumulate each selection event's cell id list in a window global.
    graph.selenium.execute_script(
        'callback = function(cellIds) {'
        '    if (!window.__selectionChange__) {'
        '        window.__selectionChange__ = [];'
        '    }'
        '    window.__selectionChange__.push(cellIds);'
        '}')
    graph.eval_js_function(
        'api.onSelectionChanged', qmxgraph.js.Variable('callback'))
    # Select all cells.
    # CTRL-clicking extends the selection one cell at a time.
    actions = ActionChains(graph.selenium)
    actions.key_down(Keys.CONTROL)
    actions.click(source)
    actions.click(target)
    actions.click(edge)
    actions.key_up(Keys.CONTROL)
    actions.perform()
    fired_selection_events = graph.selenium.execute_script(
        'return window.__selectionChange__')
    # Each click produces one event; most recently selected id comes first.
    assert fired_selection_events == [
        ['2'],
        ['3', '2'],
        ['4', '3', '2'],
    ]
    assert graph.eval_js_function('api.getSelectedCells') == ['4', '3', '2']
    # Programmatically select one cell.
    graph.eval_js_function('api.setSelectedCells', ['3'])
    # Clear selection.
    graph.eval_js_function('api.setSelectedCells', [])
    fired_selection_events = graph.selenium.execute_script(
        'return window.__selectionChange__')
    assert fired_selection_events == [
        ['2'],
        ['3', '2'],
        ['4', '3', '2'],
        ['3'],
        [],
    ]
def test_set_popup_menu_handler(graph_cases):
    """
    A registered popup menu handler receives the right-clicked cell id and
    the click coordinates.

    :type graph_cases: qmxgraph.tests.conftest.GraphCaseFactory
    """
    graph = graph_cases('1v')
    vertex_id = graph.get_id(graph.get_vertex())
    # Accumulate (cellId, x, y) triples in a window global so the test can
    # read them back.
    graph.selenium.execute_script(
        'callback = function(cellId, x, y) {'
        '    if (!window.__popupMenu__) {'
        '        window.__popupMenu__ = [];'
        '    }'
        '    window.__popupMenu__.push([cellId, x, y]);'
        '}')
    graph.eval_js_function(
        'api.setPopupMenuHandler', qmxgraph.js.Variable('callback'))
    vertex_label_el = graph.get_label_element(graph.get_vertex())
    actions = ActionChains(graph.selenium)
    actions.context_click(vertex_label_el)
    actions.perform()
    # context_click targets the center of the element, so the reported
    # coordinates are the element's center point.
    x = vertex_label_el.location['x'] + vertex_label_el.size['width'] // 2
    y = vertex_label_el.location['y'] + vertex_label_el.size['height'] // 2
    assert graph.selenium.execute_script('return window.__popupMenu__') == \
        [[vertex_id, x, y]]
@pytest.mark.parametrize(
    'action, expected_scale',
    [('zoomIn', 1.2), ('zoomOut', 0.83)],
)
def test_zoom(graph_cases, action, expected_scale):
    """
    Zooming in/out updates the zoom scale accordingly and ``resetZoom``
    restores the default scale of 1.

    :type graph_cases: qmxgraph.tests.conftest.GraphCaseFactory
    :type action: str
    :type expected_scale: float
    """
    graph = graph_cases('2v_1e')

    def current_scale():
        return graph.eval_js_function('api.getZoomScale')

    assert current_scale() == 1.0
    graph.eval_js_function('api.{}'.format(action))
    assert current_scale() == expected_scale
    graph.eval_js_function('api.resetZoom')
    assert current_scale() == 1.0
@pytest.mark.xfail(
    'sys.platform != "win32"',
    reason='need investigate differences between linux and windows',
)
def test_set_scale_and_translation(graph_cases):
    """
    A scale/translation pair obtained from `api.getScaleAndTranslation` can
    later be fed to `api.setScaleAndTranslation` to restore that exact view.

    :type graph_cases: qmxgraph.tests.conftest.GraphCaseFactory
    """
    graph = graph_cases('1v')
    ini_scale, ini_x, ini_y = graph.eval_js_function(
        'api.getScaleAndTranslation')
    assert (ini_scale, ini_x, ini_y) == (1, 0, 0)
    from selenium.webdriver.common.actions.mouse_button import MouseButton
    from selenium.webdriver.remote.command import Command
    # ActionChains has no right-button press/release primitives, so extend
    # it: right-button dragging is what pans (translates) the mxGraph
    # canvas, which this test needs to exercise.
    class MyActionChains(ActionChains):
        def click_and_hold_right(self, on_element=None):
            # Press and hold the right mouse button, optionally over a
            # given element; handles both w3c and legacy drivers.
            if self._driver.w3c:
                if on_element:
                    self.w3c_actions.pointer_action.move_to(on_element)
                self.w3c_actions.pointer_action.pointer_down(
                    MouseButton.RIGHT)
                self.w3c_actions.key_action.pause()
                if on_element:
                    self.w3c_actions.key_action.pause()
            else:
                if on_element:
                    self.move_to_element(on_element)
                self._actions.append(lambda: self._driver.execute(
                    Command.MOUSE_DOWN, {'button': 2}))
            return self
        def release_right(self, on_element=None):
            # Release the right mouse button, optionally over an element.
            if on_element:
                self.move_to_element(on_element)
            if self._driver.w3c:
                self.w3c_actions.pointer_action.pointer_up(MouseButton.RIGHT)
                self.w3c_actions.key_action.pause()
            else:
                self._actions.append(lambda: self._driver.execute(
                    Command.MOUSE_UP, {'button': 2}))
            return self
    vertex = graph.get_vertex()
    w, h = graph.get_vertex_size(vertex)
    def ScaleAndTranslateGraph():
        # Zoom, pan by right-drag, zoom again — changes both scale and
        # translation in one step.
        graph.eval_js_function('api.zoomIn')
        actions = MyActionChains(graph.selenium)
        actions.move_to_element_with_offset(vertex, w * 2, h * 2)
        actions.click_and_hold_right()
        actions.move_by_offset(30, 100)
        actions.release_right()  # mxgraph does some extra work on release.
        actions.perform()
        graph.eval_js_function('api.zoomIn')
    ScaleAndTranslateGraph()
    saved_scale, saved_x, saved_y = graph.eval_js_function(
        'api.getScaleAndTranslation')
    # NOTE(review): abs=2 is a very loose tolerance for values of this
    # magnitude (e.g. it accepts -0.56..3.44 around 1.44) — presumably
    # meant to absorb platform rendering differences; confirm it is not a
    # typo for abs=2e-2.
    assert saved_scale == pytest.approx(1.44, abs=2)
    assert saved_x == pytest.approx(-36.11, abs=2)
    assert saved_y == pytest.approx(60.42, abs=2)
    # A second round moves the view further away from the saved state.
    ScaleAndTranslateGraph()
    new_scale, new_x, new_y = graph.eval_js_function(
        'api.getScaleAndTranslation')
    assert new_scale == pytest.approx(2.08, abs=2)
    assert new_x == pytest.approx(-61.50, abs=2)
    assert new_y == pytest.approx(97.28, abs=2)
    # Restoring the saved state must reproduce it exactly.
    graph.eval_js_function(
        'api.setScaleAndTranslation', saved_scale, saved_x, saved_y)
    scale, x, y = graph.eval_js_function('api.getScaleAndTranslation')
    assert (scale, x, y) == (saved_scale, saved_x, saved_y)
@pytest.mark.parametrize('action', [None, 'zoomIn', 'zoomOut'])
def test_fit(graph_cases, action):
    """
    `api.fit` rescales the graph to fill the view, regardless of the zoom
    level it starts from.

    :type graph_cases: qmxgraph.tests.conftest.GraphCaseFactory
    :type action: Optional[str]
    """
    graph = graph_cases('2v_1e')
    obtained_scale = graph.eval_js_function('api.getZoomScale')
    assert obtained_scale == 1.0
    # Optionally change the zoom first so fit is exercised from several
    # starting scales.
    if action is not None:
        graph.eval_js_function('api.{}'.format(action))
    graph.eval_js_function('api.fit')
    obtained_scale = graph.eval_js_function('api.getZoomScale')
    # NOTE(review): abs=2 is a very wide tolerance around 3.14 (it accepts
    # 1.14..5.14) — presumably meant to absorb platform rendering
    # differences; confirm it is not a typo for abs=2e-2.
    assert obtained_scale == pytest.approx(3.14, abs=2)
def test_get_edge_terminals(graph_cases):
    """
    The terminals reported for an edge are the ids of its source and
    target vertices, in that order.

    :type graph_cases: qmxgraph.tests.conftest.GraphCaseFactory
    """
    graph = graph_cases('2v_1e')
    source, target = graph.get_vertices()
    edge = graph.get_edge(source, target)
    terminals = graph.eval_js_function(
        'api.getEdgeTerminals', graph.get_id(edge))
    assert terminals == [graph.get_id(source), graph.get_id(target)]
@pytest.mark.parametrize('terminal_type', ['source', 'target'])
def test_set_edge_terminals(graph_cases, terminal_type):
    """
    Re-pointing one terminal of an edge to a third vertex leaves the other
    terminal untouched.

    :type graph_cases: qmxgraph.tests.conftest.GraphCaseFactory
    :type terminal_type: str
    """
    graph = graph_cases('3v_1e')
    graph.eval_js_function(
        'api.setEdgeTerminal', graph.edge_id, terminal_type, graph.vertex3_id)
    expected = {
        'source': [graph.vertex3_id, graph.target_id],
        'target': [graph.source_id, graph.vertex3_id],
    }[terminal_type]
    terminals = graph.eval_js_function('api.getEdgeTerminals', graph.edge_id)
    assert terminals == expected
def test_set_get_style(graph_cases):
    """
    Styles round-trip through setStyle/getStyle; unknown ids are rejected.

    :type graph_cases: qmxgraph.tests.conftest.GraphCaseFactory
    """
    graph = graph_cases('1v')
    cells = graph.get_vertices()
    assert len(cells) == 1
    cell_id = graph.get_id(cells[0])

    # A freshly inserted vertex carries no style.
    assert graph.eval_js_function('api.getStyle', cell_id) is None

    graph.eval_js_function('api.setStyle', cell_id, 'foo')
    assert graph.eval_js_function('api.getStyle', cell_id) == 'foo'

    with pytest.raises(WebDriverException) as excinfo:
        graph.eval_js_function('api.getStyle', 'nonexistent')
    assert 'Unable to find cell with id nonexistent' in str(excinfo.value)
def test_set_get_connectable(graph_cases):
    """
    Connectable state of a vertex can be toggled off and back on.

    :type graph_cases: qmxgraph.tests.conftest.GraphCaseFactory
    """
    graph = graph_cases('1v')
    cells = graph.get_vertices()
    assert len(cells) == 1
    cell_id = graph.get_id(cells[0])

    # Vertices start out connectable.
    assert graph.eval_js_function('api.isConnectable', cell_id)

    graph.eval_js_function('api.setConnectable', cell_id, False)
    assert not graph.eval_js_function('api.isConnectable', cell_id)

    graph.eval_js_function('api.setConnectable', cell_id, True)
    assert graph.eval_js_function('api.isConnectable', cell_id)
def test_get_edge_terminals_error_edge_not_found(graph_cases, selenium_extras):
    """
    Asking for the terminals of an id that doesn't exist raises.

    :type graph_cases: qmxgraph.tests.conftest.GraphCaseFactory
    :type selenium_extras: qmxgraph.tests.conftest.SeleniumExtras
    """
    graph = graph_cases('empty')
    missing_edge_id = "999"
    with pytest.raises(WebDriverException) as e:
        graph.eval_js_function('api.getEdgeTerminals', missing_edge_id)
    expected = "Unable to find edge with id {}".format(missing_edge_id)
    assert selenium_extras.get_exception_message(e) == expected
def test_get_edge_terminals_error_not_an_edge(graph_cases, selenium_extras):
    """
    Asking for the terminals of a cell that isn't an edge raises.

    :type graph_cases: qmxgraph.tests.conftest.GraphCaseFactory
    :type selenium_extras: qmxgraph.tests.conftest.SeleniumExtras
    """
    graph = graph_cases('1v')
    vertex_id = graph.get_id(graph.get_vertices()[0])
    with pytest.raises(WebDriverException) as e:
        graph.eval_js_function('api.getEdgeTerminals', vertex_id)
    expected = "Cell with id {} is not an edge".format(vertex_id)
    assert selenium_extras.get_exception_message(e) == expected
def test_custom_font_family(graph_cases_factory, port):
    """
    A custom font family configured in GraphOptions ends up in the DOM.

    :type graph_cases_factory: callable
    :type port: qmxgraph.tests.conftest.Port
    """
    options = GraphOptions(font_family=('Helvetica',))
    with server.host(port=port.get(), options=options) as host:
        graph = graph_cases_factory(host)("1v")
        selector = 'div[style*="font-family:"][style*="Helvetica"]'
        match = graph.selenium.find_elements_by_css_selector(selector)
        assert len(match) == 1
def test_ports(graph_cases):
    """
    Exercise insertPort/removePort and edges connected through ports.

    :type graph_cases: qmxgraph.tests.conftest.GraphCaseFactory
    """
    graph = graph_cases('2v')
    vertex_a, vertex_b = graph.get_vertices()
    port_x_name, port_y_name = 'X', 'Y'
    vertex_a_id = graph.get_id(vertex_a)
    vertex_b_id = graph.get_id(vertex_b)

    # Inserting a port works once; a duplicate name on the same cell fails.
    graph.eval_js_function('api.insertPort', vertex_a_id, port_x_name, 0, 0, 9, 9)
    with pytest.raises(WebDriverException) as e:
        graph.eval_js_function('api.insertPort', vertex_a_id, port_x_name, 1, 1, 9, 9)
    assert 'The cell {} already have a port named {}'.format(
        vertex_a_id, port_x_name) in str(e.value)

    # Removing a port works once; removing it again fails.
    graph.eval_js_function('api.removePort', vertex_a_id, port_x_name)
    with pytest.raises(WebDriverException) as e:
        graph.eval_js_function('api.removePort', vertex_a_id, port_x_name)
    assert 'The cell {} does not have a port named {}'.format(
        vertex_a_id, port_x_name) in str(e.value)

    # Edges may be attached through ports on both terminals.
    graph.eval_js_function('api.insertPort', vertex_a_id, port_x_name, 0, 0, 9, 9)
    graph.eval_js_function('api.insertPort', vertex_b_id, port_y_name, 0, 0, 9, 9)
    edge_id = graph.eval_js_function(
        'api.insertEdge', vertex_a_id, vertex_b_id, None, None, None,
        port_x_name, port_y_name)
    assert graph.eval_js_function('api.hasCell', edge_id)
    assert graph.eval_js_function('api.getEdgeTerminals', edge_id) == [
        vertex_a_id, vertex_b_id]
    assert graph.eval_js_function(
        'api.getEdgeTerminalsWithPorts', edge_id) == [
        [vertex_a_id, port_x_name], [vertex_b_id, port_y_name]]

    # Removing a port removes the edges connected through it.
    graph.eval_js_function('api.removePort', vertex_b_id, port_y_name)
    assert not graph.eval_js_function('api.hasCell', edge_id)
def insert_by_parametrized_type(graph, cell_type, tags=None):
    """Insert a cell of ``cell_type`` into ``graph`` and return its id."""
    if cell_type == qmxgraph.constants.CELL_TYPE_VERTEX:
        return graph.insert_vertex(tags=tags)
    if cell_type == qmxgraph.constants.CELL_TYPE_TABLE:
        return graph.insert_table(tags=tags)
    if cell_type in (
            qmxgraph.constants.CELL_TYPE_EDGE,
            qmxgraph.constants.CELL_TYPE_DECORATION):
        # Edges (and decorations, which sit on an edge) need two vertices.
        graph.insert_vertex(x=10, y=10)
        graph.insert_vertex(x=90, y=10)
        cell_id = graph.insert_edge(*graph.get_vertices(), tags=tags)
        if cell_type == qmxgraph.constants.CELL_TYPE_DECORATION:
            cell_id = graph.insert_decoration(x=50, y=25, tags=tags)
        return cell_id
    assert False, "Unexpected cell type: {}".format(cell_type)
@pytest.mark.parametrize(
    'layout_name',
    [
        QmxGraphApi.LAYOUT_ORGANIC,
        QmxGraphApi.LAYOUT_COMPACT,
        QmxGraphApi.LAYOUT_CIRCLE,
        QmxGraphApi.LAYOUT_COMPACT_TREE,
        QmxGraphApi.LAYOUT_EDGE_LABEL,
        QmxGraphApi.LAYOUT_PARALLEL_EDGE,
        QmxGraphApi.LAYOUT_PARTITION,
        QmxGraphApi.LAYOUT_RADIAL_TREE,
        QmxGraphApi.LAYOUT_STACK,
    ],
)
def test_run_all_layouts(layout_name, graph_cases):
    """Smoke test: running any supported layout must not raise."""
    graph_cases('3v_1e').eval_js_function('api.runLayout', layout_name)
def test_run_organic_layout(graph_cases):
    """
    Running the organic layout must move every vertex.

    We don't know the exact target positions, only that each vertex's
    position should change.
    """
    graph = graph_cases('3v_3e')
    # Snapshot positions keyed by vertex label before and after the layout.
    before = {
        graph.get_label(v): graph.get_vertex_position(v)
        for v in graph.get_vertices()
    }
    graph.eval_js_function('api.runLayout', QmxGraphApi.LAYOUT_ORGANIC)
    after = {
        graph.get_label(v): graph.get_vertex_position(v)
        for v in graph.get_vertices()
    }
    for label, old_position in before.items():
        new_position = after[label]
        # BUG FIX: the failure message used to wrap each position in a
        # one-element set literal (``{position_data['before']}``), printing
        # ``{(x, y)}`` instead of ``(x, y)``.
        assert not pytest.approx(old_position) == new_position, (
            "Expected position different from %s, but got %s"
            % (old_position, new_position)
        )
def test_run_invalid_layout(graph_cases):
    """Running an unknown layout name must raise on the JS side."""
    graph = graph_cases('3v_1e')
    with pytest.raises(WebDriverException):
        graph.eval_js_function('api.runLayout', 'invalid_layout_name')
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pytest
import numpy as np
from simulariumio.mcell import McellConverter, McellData
from simulariumio import DisplayData, MetaData
from simulariumio.constants import (
DEFAULT_CAMERA_SETTINGS,
CURRENT_VERSION,
DISPLAY_TYPE,
)
@pytest.mark.parametrize(
"trajectory, expected_data",
[
# truncated data from organelle model example
(
McellData(
path_to_data_model_json="simulariumio/tests/data/mcell/"
"organelle_model_viz_output/Scene.data_model.00.json",
path_to_binary_files="simulariumio/tests/data/mcell/"
"organelle_model_viz_output",
meta_data=MetaData(box_size=np.array([50.0, 50.0, 50.0])),
display_data={
"a": DisplayData(
name="Kinesin",
radius=0.03,
display_type=DISPLAY_TYPE.PDB,
url="https://files.rcsb.org/download/3KIN.pdb",
color="#0080ff",
),
"t2": DisplayData(
name="Transporter",
color="#ff1493",
),
},
surface_mol_rotation_angle=0.0,
),
{
"trajectoryInfo": {
"version": CURRENT_VERSION.TRAJECTORY_INFO,
"timeUnits": {
"magnitude": 1.0,
"name": "µs",
},
"timeStepSize": 1.0,
"totalSteps": 3,
"spatialUnits": {
"magnitude": 1.0,
"name": "µm",
},
"size": {"x": 50.0, "y": 50.0, "z": 50.0},
"cameraDefault": {
"position": {
"x": DEFAULT_CAMERA_SETTINGS.CAMERA_POSITION[0],
"y": DEFAULT_CAMERA_SETTINGS.CAMERA_POSITION[1],
"z": DEFAULT_CAMERA_SETTINGS.CAMERA_POSITION[2],
},
"lookAtPosition": {
"x": DEFAULT_CAMERA_SETTINGS.LOOK_AT_POSITION[0],
"y": DEFAULT_CAMERA_SETTINGS.LOOK_AT_POSITION[1],
"z": DEFAULT_CAMERA_SETTINGS.LOOK_AT_POSITION[2],
},
"upVector": {
"x": DEFAULT_CAMERA_SETTINGS.UP_VECTOR[0],
"y": DEFAULT_CAMERA_SETTINGS.UP_VECTOR[1],
"z": DEFAULT_CAMERA_SETTINGS.UP_VECTOR[2],
},
"fovDegrees": DEFAULT_CAMERA_SETTINGS.FOV_DEGREES,
},
"typeMapping": {
"0": {
"name": "b",
"geometry": {
"displayType": "SPHERE",
},
},
"1": {
"name": "Transporter",
"geometry": {
"displayType": "SPHERE",
"color": "#ff1493",
},
},
"2": {
"name": "Kinesin",
"geometry": {
"displayType": "PDB",
"url": "https://files.rcsb.org/download/3KIN.pdb",
"color": "#0080ff",
},
},
},
},
"spatialData": {
"version": CURRENT_VERSION.SPATIAL_DATA,
"msgType": 1,
"bundleStart": 0,
"bundleSize": 3,
"bundleData": [
{
"frameNumber": 0,
"time": 0.0,
"data": [
1000.0,
0.0,
0.0,
0.12416012585163116,
-0.1974048614501953,
-0.10042950510978699,
0.0,
0.0,
0.0,
0.005,
0.0,
1000.0,
1.0,
1.0,
-0.027653440833091736,
0.1265464723110199,
-0.07352104783058167,
-160.8765121025542,
0.0,
-9.231996800714258,
0.005,
0.0,
1000.0,
2.0,
2.0,
0.3647538423538208,
0.1595117300748825,
0.3979622721672058,
0.0,
0.0,
0.0,
0.00015,
0.0,
],
},
{
"frameNumber": 1,
"time": 1.0,
"data": [
1000.0,
0.0,
0.0,
0.10336990654468536,
-0.20304752886295319,
-0.08513453602790833,
0.0,
0.0,
0.0,
0.005,
0.0,
1000.0,
1.0,
1.0,
-0.0269027017056942,
0.12665313482284546,
-0.07417202740907669,
-160.8765121025542,
0.0,
-9.231996800714258,
0.005,
0.0,
1000.0,
2.0,
2.0,
0.38411179184913635,
0.17711672186851501,
0.4012693464756012,
0.0,
0.0,
0.0,
0.00015,
0.0,
],
},
{
"frameNumber": 2,
"time": 2.0,
"data": [
1000.0,
0.0,
0.0,
0.11451153457164764,
-0.1880205273628235,
-0.08175600320100784,
0.0,
0.0,
0.0,
0.005,
0.0,
1000.0,
1.0,
1.0,
-0.024035021662712097,
0.12565766274929047,
-0.07266511768102646,
-160.8765121025542,
0.0,
-9.231996800714258,
0.005,
0.0,
1000.0,
2.0,
2.0,
0.39510709047317505,
0.17876243591308594,
0.3935079276561737,
0.0,
0.0,
0.0,
0.00015,
0.0,
],
},
],
},
"plotData": {"version": CURRENT_VERSION.PLOT_DATA, "data": []},
},
),
],
)
def test_mcell_converter(trajectory, expected_data):
    """Convert an MCell trajectory and check the generated buffer data."""
    converter = McellConverter(trajectory)
    buffer_data = converter._read_trajectory_data(converter._data)
    # BUG FIX: this comparison was a bare expression whose result was
    # discarded (``expected_data == buffer_data``), so the test could never
    # fail on a data mismatch.  Assert it.
    assert expected_data == buffer_data
    assert converter._check_agent_ids_are_unique_per_frame(buffer_data)
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Library for useful metrics not provided by tf.metrics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow_model_analysis import types
from tensorflow_model_analysis.types_compat import Optional, Tuple
def total(
    values):
    """Metric computing the running sum of every value seen so far.

    Args:
        values: Tensor of values to accumulate.

    Returns:
        (value_op, update_op) pair following the tf.metrics convention.
    """
    with tf.variable_scope('total', values):
        values = tf.cast(values, tf.float64)
        # Scalar accumulator registered as a (local) metric variable so it
        # is reset together with the other metrics.
        accumulator = tf.Variable(
            initial_value=0.0,
            dtype=tf.float64,
            trainable=False,
            collections=[
                tf.GraphKeys.METRIC_VARIABLES, tf.GraphKeys.LOCAL_VARIABLES
            ],
            validate_shape=True,
            name='total')
        update_op = tf.assign_add(accumulator, tf.reduce_sum(values))
        value_op = tf.identity(accumulator)
        return value_op, update_op
def calibration_plot(predictions,
                     labels,
                     left,
                     right,
                     num_buckets,
                     weights = None
                     ):
    """Calibration plot for predictions in [left, right].

    A calibration plot contains multiple buckets, based on the prediction.
    Each bucket contains:
      (i) the weighted sum of predictions that fall within that bucket
     (ii) the weighted sum of labels associated with those predictions
    (iii) the sum of weights of the associated examples

    Note that the calibration plot also contains enough information to build
    the prediction histogram (which doesn't need the information about the
    labels).

    Args:
      predictions: Predictions to compute calibration plot for.
      labels: Labels associated with the corresponding predictions.
      left: Left-most bucket boundary.
      right: Right-most bucket boundary.
      num_buckets: Number of buckets to divide [left, right] into.
      weights: Optional weights for each of the predictions/labels. If None,
        each of the predictions/labels will be assumed to have a weight of 1.0.

    left=1.0, right=2.0, num_buckets=2 yields buckets:
      bucket 0: (-inf, 1.0)
      bucket 1: [1.0, 1.5)
      bucket 2: [1.5, 2.0)
      bucket 3: [2.0, inf)

    The value_op will return a matrix with num_buckets + 2 rows and 3 columns:
    [ bucket 0 weighted prediction sum, weighted label sum, sum of weights ]
    [ bucket 1 weighted prediction sum, weighted label sum, sum of weights ]
    [ :                              :                    :               ]
    [ :                              :                    :               ]
    [ bucket k weighted prediction sum, weighted label sum, sum of weights ]
    where k = num_buckets + 1

    Returns:
      (value_op, update_op) for the calibration plot.
    """
    with tf.variable_scope('calibration_plot', [predictions, labels]):
        predictions_f64 = tf.cast(predictions, tf.float64)
        labels_f64 = tf.cast(labels, tf.float64)
        # Ensure that we don't mistakenly use the non-casted versions.
        del predictions, labels
        # One accumulator slot per bucket, plus two overflow slots: bucket 0
        # for predictions below ``left`` and the last bucket for predictions
        # at or above ``right``.
        prediction_bucket_counts = tf.Variable(
            initial_value=[0.0] * (num_buckets + 2),
            dtype=tf.float64,
            trainable=False,
            collections=[
                tf.GraphKeys.METRIC_VARIABLES, tf.GraphKeys.LOCAL_VARIABLES
            ],
            validate_shape=True,
            name='prediction_bucket_counts')
        label_bucket_counts = tf.Variable(
            initial_value=[0.0] * (num_buckets + 2),
            dtype=tf.float64,
            trainable=False,
            collections=[
                tf.GraphKeys.METRIC_VARIABLES, tf.GraphKeys.LOCAL_VARIABLES
            ],
            validate_shape=True,
            name='label_bucket_counts')
        weight_bucket_counts = tf.Variable(
            initial_value=[0.0] * (num_buckets + 2),
            dtype=tf.float64,
            trainable=False,
            collections=[
                tf.GraphKeys.METRIC_VARIABLES, tf.GraphKeys.LOCAL_VARIABLES
            ],
            validate_shape=True,
            name='weight_bucket_counts')
        bucket_width = (right - left) / num_buckets
        # Bucket index per prediction: floor((p - left) / width), clipped to
        # [-1, num_buckets], then shifted by 1 so underflow lands in bucket 0
        # and overflow in bucket num_buckets + 1.
        indices = tf.cast(
            tf.clip_by_value(
                tf.floor((predictions_f64 - left) / bucket_width),
                clip_value_min=-1,
                clip_value_max=num_buckets) + 1, tf.int32)
        if weights is not None:
            weights_f64 = tf.cast(weights, tf.float64)
        else:
            # Unweighted: every example contributes 1.0.
            weights_f64 = tf.ones_like(indices, dtype=tf.float64)
        update_prediction_buckets_op = tf.scatter_add(
            prediction_bucket_counts, indices, predictions_f64 * weights_f64)
        update_label_buckets_op = tf.scatter_add(label_bucket_counts, indices,
                                                 labels_f64 * weights_f64)
        update_weight_buckets_op = tf.scatter_add(weight_bucket_counts, indices,
                                                  weights_f64)
        update_op = tf.group(update_prediction_buckets_op, update_label_buckets_op,
                             update_weight_buckets_op)
        # Rows = buckets, columns = (prediction sum, label sum, weight sum).
        value_op = tf.transpose(
            tf.stack([
                prediction_bucket_counts, label_bucket_counts, weight_bucket_counts
            ]))
        return value_op, update_op
|
# Simple TMX library
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import xml.etree.ElementTree as ET
from . import local
from .ImageLayer import ImageLayer
from .Layer import Layer
from .ObjectGroup import ObjectGroup
from .Property import Property
class GroupLayer:
    """
    A group of layers, read from / written to a TMX ``<group>`` element.

    .. attribute:: name

       The name of the group layer.

    .. attribute:: offsetx

       Rendering offset for the group layer in pixels.

    .. attribute:: offsety

       Rendering offset for the group layer in pixels.

    .. attribute:: opacity

       The opacity of the group layer as a value from 0 to 1.

    .. attribute:: visible

       Whether or not the group layer is visible.

    .. attribute:: properties

       A list of :class:`Property` objects indicating the group layer's
       properties.

    .. attribute:: layers

       A list of :class:`Layer`, :class:`ObjectGroup`,
       :class:`GroupLayer`, and :class:`ImageLayer` objects indicating
       the map's tile layers, object groups, group layers, and image
       layers, respectively. Those that appear in this list first are
       rendered first (i.e. furthest in the back).
    """

    def __init__(self, name, offsetx=0, offsety=0, opacity=1, visible=True,
                 properties=None, layers=None):
        self.name = name
        self.offsetx = offsetx
        self.offsety = offsety
        self.opacity = opacity
        self.visible = visible
        self.properties = properties or []
        self.layers = layers or []

    @classmethod
    def read_elem(cls, elem, fd):
        """
        Read XML element ``elem`` and return an object of this class.

        This is a low-level method used internally by this library; you
        don't typically need to use it.
        """
        attrib = elem.attrib
        name = attrib.get("name")
        offsetx = int(attrib.get("offsetx", 0))
        offsety = int(attrib.get("offsety", 0))
        opacity = float(attrib.get("opacity", 1))
        visible = bool(int(attrib.get("visible", True)))
        properties = []
        layers = []

        for child in elem:
            tag = child.tag
            if tag == "properties":
                properties.extend(local.read_list_elem(
                    child, "property", Property, fd))
            elif tag == "layer":
                layers.append(Layer.read_elem(child, fd))
            elif tag == "objectgroup":
                layers.append(ObjectGroup.read_elem(child, fd))
            elif tag == "imagelayer":
                layers.append(ImageLayer.read_elem(child, fd))
            elif tag == "group":
                layers.append(GroupLayer.read_elem(child, fd))

        return cls(name, offsetx, offsety, opacity, visible, properties,
                   layers)

    def get_elem(self, fd, encoding, compression, compressionlevel):
        """
        Return an XML element for the object.

        This is a low-level method used internally by this library; you
        don't typically need to use it.
        """
        attr = {
            "name": self.name,
            "offsetx": self.offsetx,
            "offsety": self.offsety,
        }
        # Only serialize non-default visibility/opacity, per TMX convention.
        if not self.visible:
            attr["visible"] = 0
        if self.opacity != 1:
            attr["opacity"] = self.opacity

        elem = ET.Element("group", attrib=local.clean_dict(attr))
        if self.properties:
            elem.append(local.get_list_elem(
                self.properties, "properties", fd, encoding, compression,
                compressionlevel))
        for child_layer in self.layers:
            elem.append(child_layer.get_elem(
                fd, encoding, compression, compressionlevel))
        return elem
|
#!/usr/bin/env python3
import numpy as np
import spacy
from spacy.lang.en import English
import torch
from infersent.models import InferSent
MODEL_VERSION = 1
MODEL_PATH = "infersent/encoder/infersent%s.pkl" % MODEL_VERSION
MODEL_PARAMS = {'bsize': 64, 'word_emb_dim': 300, 'enc_lstm_dim': 2048,
'pool_type': 'max', 'dpout_model': 0.0, 'version': MODEL_VERSION}
W2V_PATH = 'infersent/GloVe/glove.840B.300d.txt'
class InfersentSimilarityUtils:
    """Sentence-level semantic similarity helpers backed by InferSent."""

    def __init__(self):
        # Load the pre-trained InferSent encoder and attach its word vectors.
        model = InferSent(MODEL_PARAMS)
        model.load_state_dict(torch.load(MODEL_PATH))
        model.set_w2v_path(W2V_PATH)
        model.build_vocab_k_words(K=100000)
        self.model = model

    def sentencize(self, input_string):
        """Produces a list of sentences"""
        nlp = English()
        nlp.add_pipe(nlp.create_pipe('sentencizer'))
        doc = nlp(input_string)
        stripped = (sent.text.strip() for sent in doc.sents)
        return [text for text in stripped if text != '']

    def cosine(self, u, v):
        # Cosine similarity of two vectors.
        denominator = np.linalg.norm(u) * np.linalg.norm(v)
        return np.dot(u, v) / denominator

    def get_similarity(self, sentence1, sentence2):
        # Encode each sentence (first row of the batch of one) and compare.
        vector1 = self.model.encode([sentence1])[0]
        vector2 = self.model.encode([sentence2])[0]
        return self.cosine(vector1, vector2)
import discord
from discord.ext import tasks, commands
import asyncio
import socketio
import threading
import subprocess
import time
from queue import Queue, Empty
from threading import Thread
from requests import get
import os
import re
import boto3
import utils
client = boto3.client('ec2')
chat_reg = re.compile("<[^ ]+>")
active_players = set()
class SpinupThread(threading.Thread):
    """Thread that runs the Discord bot client until it exits."""

    def __init__(self):
        super().__init__()

    def run(self):
        bot = Spinup()
        bot.run(os.environ['DISCORD_TOKEN'])
class ServerThread(threading.Thread):
    """Thread that runs the Minecraft server process."""

    def __init__(self):
        super().__init__()

    def run(self):
        # NOTE(review): ``run_minecraft`` is not defined in this module —
        # presumably provided elsewhere; confirm before relying on it.
        run_minecraft([])
class Spinup(discord.Client):
    """Discord bot that manages voting for / spinning up the EC2-hosted
    Minecraft server, bridges chat with it, and plays meme sound clips.
    """

    # Discord user id allowed to bypass voting and (un)lock the server.
    ADMIN_ID = 279456734773510145

    # Chat command -> audio clip played in the author's voice channel.
    # (Was six copy-pasted handlers; see the old "TODO: Put these in a
    # dictionary or smth".)
    SOUND_CLIPS = {
        "!clipthat": "./wardell_clipthat.mp3",
        "!yessir": "./wardell_yessir.mp3",
        "!yooo": "./csgo_niceknife.mp3",
        "!bwaaa": "./victory.mp3",
        "!bwaa": "./imposter_victory.mp3",
        "!delib": "./naruto_deliberation.mp3",
    }

    def __init__(self):
        super().__init__()
        self.voting = False          # is a spin-up vote in progress?
        self.voted = set()           # members who voted !yes
        self.running = False         # is the EC2 instance up?
        self.upsince = 0             # epoch seconds of last spin-up
        self.voteStarted = 0         # epoch seconds when the vote began
        self.voteChannel = None      # channel where the vote was started
        self.locked = False          # admin lock preventing spin-up
        self.dimensional_rift = None  # chat-bridge channel (set in on_ready)
        self.server_status = None    # status channel (set in on_ready)
        self.ip = None               # last known public IP of the instance
        self.vc = None               # active voice client, if any
        self.sock = None             # socketio client to the Minecraft host
        self.sock_connected = False
        # BUG FIX: the module-level name ``client`` is rebound from the boto3
        # EC2 client to this Spinup instance at import time, so the old
        # ``client.describe_instances()`` calls failed at runtime.  Keep a
        # private EC2 client on the instance instead.
        self._ec2 = boto3.client('ec2')

    async def on_ready(self):
        """Cache the channels used for chat bridging and status updates."""
        print('Logged on as {0}!'.format(self.user))
        self.dimensional_rift = discord.utils.get(
            self.get_all_channels(), name="dimensional-rift")
        self.server_status = discord.utils.get(
            self.get_all_channels(), name="server-status")

    def _public_ip(self):
        """Return the current public IP of the (single) EC2 instance."""
        # why: the IP changes on every spin-up, so always re-query.
        reservations = self._ec2.describe_instances()['Reservations']
        interface = reservations[0]['Instances'][0]['NetworkInterfaces'][0]
        return interface['Association']['PublicIp']

    async def _play_clip(self, message, source):
        """Join the author's voice channel, play ``source``, then leave."""
        try:
            self.vc = await message.author.voice.channel.connect()
            self.vc.play(
                discord.FFmpegPCMAudio(
                    executable="C:/ffmpeg/bin/ffmpeg.exe", source=source)
            )
            while self.vc and self.vc.is_playing():
                await asyncio.sleep(.1)
            await self.vc.disconnect()
        except discord.errors.ClientException:
            await message.channel.send(
                "opurtbot is already playing a clip"
            )

    async def _purge(self, message):
        """Delete the last N (capped at 10) messages, posting a summary."""
        summary = {}
        num = int(message.content.split(" ")[1])
        if num > 10:
            num = 10
        num += 1  # include the '#purge' command message itself
        if 'admin' in [r.name for r in message.author.roles]:
            history = await message.channel.history(limit=100).flatten()
            for m in history[:num]:
                author = m.author.display_name
                summary[author] = summary.get(author, 0) + 1
            summary_msg = ">>> "
            for n in summary:
                summary_msg += n + ": " + str(summary[n]) + "\n"
            await message.channel.delete_messages(history[:num])
            await message.channel.send(summary_msg)

    async def on_message(self, message):
        """Dispatch chat-bridge traffic and all ``!``/``#`` commands."""
        print(message.author.id, message.channel.name, message.channel.id)
        if message.channel.name == 'dimensional-rift':
            # Messages the bot relayed from Minecraft are code-fenced;
            # don't echo those back into the game.
            # BUG FIX: compared against the module-level ``client`` (rebound
            # at import time) instead of this instance.
            if (message.author == self.user) and message.content.startswith("```"):
                return
            await self.sock.emit('discord-chat', {
                "task": 'message-minecraft',
                "message": message.content,
                # NOTE(review): ``author.nick`` can be None for users
                # without a nickname — confirm the receiver tolerates it.
                "user": message.author.nick
            })

        if message.content.startswith('#purge'):
            await self._purge(message)

        content = message.content
        if content in self.SOUND_CLIPS:
            await self._play_clip(message, self.SOUND_CLIPS[content])
        elif content == '!!delib':
            if self.vc:
                await self.vc.disconnect()
                self.vc = None

        if content.startswith("!spinup"):
            if self.voting:
                await message.channel.send("you mf clown there's already an active vote")
            elif self.running:
                await message.channel.send("the server is already up u fool")
            elif self.locked:
                await message.channel.send("the server is locked! nathan's probably playing valorant...")
            else:
                # The admin may skip the vote unless explicitly opting out.
                if (message.author.id == self.ADMIN_ID) and not content.endswith("nosudo"):
                    await self.spinup(message)
                else:
                    await message.channel.send("starting vote, need 5 people to confirm. you have 3 MINUTES to vote [type `!yes` to vote, `!no` to cancel your existing vote]")
                    self.voteChannel = message.channel
                    self.voteStarted = time.time()
                    self.voting = True
                    self.voted = set()
        elif content.startswith("!whois"):
            if len(active_players):
                res = "players currently on: \n```"
                for p in active_players:
                    res += " - " + p + "\n"
                await message.channel.send(res + "```")
            else:
                await message.channel.send("no one is on, hop in!")
        elif content.startswith("!lock"):
            if message.author.id == self.ADMIN_ID:
                await message.channel.send("the server is locked and cannot be spun up")
                self.locked = True
                if self.voting:
                    await message.channel.send("the active vote has been cancelled")
                    self.voting = False
                    self.voted = set()
        elif content.startswith("!unlock"):
            if message.author.id == self.ADMIN_ID:
                # BUG FIX: user-facing typo "can can" -> "and can".
                await message.channel.send("the server is unlocked and can be spun up")
                self.locked = False
        elif content.startswith("!help"):
            await message.channel.send("""
`!spinup` - starts a vote to spin up the minecraft server, type `!yes` to vote, `!no` to cancel
`!spindown` - spins down the minecraft server, there is NO voting process
`!ip` - returns the IP address of the server
`!isup` - checks if the server is currently up/starting up
`!uptime` - returns the uptime of the server in seconds
""")
        elif content.startswith("!yes"):
            if message.author not in self.voted and self.voting:
                self.voted.add(message.author)
                await message.channel.send("%s out of 5 votes recorded" % len(self.voted))
                if len(self.voted) == 5:
                    # Quorum reached: spin up the minecraft server.
                    await self.spinup(message)
        elif content.startswith("!no"):
            if message.author in self.voted and self.voting:
                self.voted.remove(message.author)
                await message.channel.send("%s out of 5 votes recorded" % len(self.voted))
        elif content.startswith("!spindown"):
            await message.channel.send("spinning down the minecraft server")
            # Tell the minecraft server to gracefully shut down...
            await self.sock.emit("quit")
            # ...drop the websocket connection...
            await self.sock.disconnect()
            # ...then power down the EC2 instance.
            utils.alter_instance(os.environ['EC2_INSTANCE_ID'], state='OFF')
            self.running = False
        elif content.startswith("!isup"):
            if self.running:
                await message.channel.send("the server IS up")
            else:
                await message.channel.send("the server is NOT up")
        elif content.startswith("!uptime"):
            if self.running:
                await message.channel.send("the server has been up for %s seconds" % ((time.time() - self.upsince)))
            else:
                await message.channel.send("the server is not currently up")
        elif content.startswith("!ip"):
            self.ip = self._public_ip()
            await message.channel.send("`%s:25565`" % (self.ip))

    async def spinup(self, message):
        """Announce and start the EC2 instance (no-op while running)."""
        self.ip = self._public_ip()
        await message.channel.send("vote succeeded, spinning up minecraft @ %s:25565" % self.ip)
        self.voting = False
        self.voted = set()
        if not self.running:
            utils.alter_instance(os.environ['EC2_INSTANCE_ID'], state='ON')
            self.running = True
            self.upsince = time.time()
# NOTE(review): this rebinds the module-level name ``client`` — previously
# the boto3 EC2 client created near the top of the file — to the Discord
# bot instance, so any later ``client.describe_instances()`` call will fail.
# TODO: rename one of the two bindings.
client = Spinup()
c = 0  # unused at module level; check_messages keeps its own counter
async def check_messages(ctx):
    """Background task for the bot.

    Keeps the websocket to the Minecraft host alive, mirrors the server
    status into the status channel, and times out / reminds active votes.
    Ticks every 0.1 seconds.

    :param ctx: the running :class:`Spinup` client.
    """
    await ctx.wait_until_ready()
    # BUG FIX: the module-level ``client`` name is rebound to the Spinup
    # instance at import time, shadowing the boto3 EC2 client; use a
    # dedicated EC2 client here instead.
    ec2 = boto3.client('ec2')
    sock = socketio.AsyncClient(logger=True, reconnection_attempts=1)

    @sock.event
    def connect():
        ctx.sock_connected = True
        print("I'm connected!")

    @sock.event
    async def connect_error():
        print("The connection failed!")

    @sock.event
    def disconnect():
        ctx.sock_connected = False
        print("I'm disconnected!")

    @sock.on("joinleave")
    async def joinleave(data):
        # Relay join/leave notifications from Minecraft into Discord.
        if data['task'] == 'message-discord-joinleave':
            user = data['user']
            message = data['message']
            await ctx.dimensional_rift.send(message)

    @sock.on('minecraft-chat')
    async def chat(data):
        # Relay in-game chat into Discord, skipping disconnect noise.
        if data['task'] == 'message-discord':
            if not data['message'].endswith("Disconnected"):
                await ctx.dimensional_rift.send("```diff\n+ <%s> %s```" % (data['user'], data['message']))

    last_message = None
    prev_topic = ""
    c = 0
    while True:
        c += 1
        # (Re)establish the websocket once the instance has had ~100s to
        # boot; retry roughly every 2 seconds (the loop ticks every 0.1s).
        if ctx.running and (time.time() - ctx.upsince) > 100 and not ctx.sock_connected and c % 20 == 0:
            try:
                instances = ec2.describe_instances()
                ip_addr = instances['Reservations'][0]['Instances'][0]['NetworkInterfaces'][0]['Association']['PublicIp']
                await sock.connect(url='https://{}:5000'.format(ip_addr))
            except Exception as exc:
                # BUG FIX: was a bare ``except:`` which also swallowed
                # asyncio.CancelledError, preventing clean task shutdown.
                print("attempted to connect and failed.", exc)
        ctx.sock = sock

        if ctx.dimensional_rift and ctx.server_status:
            if not last_message:
                last_message = ctx.server_status.last_message_id
            # Build the one-line status topic for the status channel.
            statuses = []
            statuses.append("ON @ %s" % ctx.ip if ctx.running else "OFF")
            statuses.append("LOCKED" if ctx.locked else "UNLOCKED")
            if ctx.voting:
                statuses.append("VOTING")
            topic = "SERVER: " + ", ".join(statuses)
            if len(active_players) and ctx.running:
                topic += " | " + ", ".join(active_players)
            elif len(active_players) == 0 and ctx.running:
                topic += " | no one is on, hop on!"

            if topic != prev_topic:
                print("EDITING TOPIC: %s, %s" % (prev_topic, topic))
                # Replace the previous status message so the channel only
                # ever shows the latest state.
                if last_message:
                    try:
                        if isinstance(last_message, int):
                            # First pass: we only know the id, fetch first.
                            msg = await ctx.server_status.fetch_message(last_message)
                            await msg.delete()
                        else:
                            await last_message.delete()
                    except Exception as e:
                        print(e)
                last_message = await ctx.server_status.send(topic)
                prev_topic = topic

        if (time.time() - ctx.voteStarted) > 180 and ctx.voting:
            ctx.voting = False
            ctx.voted = set()
            await ctx.voteChannel.send("sorry! the vote has ended, type `!spinup` to start another vote")
        elif int(time.time() - ctx.voteStarted) == 120 and ctx.voting:
            # why: nudge voteStarted back one second so this one-shot
            # reminder is not re-sent on the next 0.1s tick.
            ctx.voteStarted -= 1
            await ctx.voteChannel.send("the vote will end in 1 MINUTE")
        elif int(time.time() - ctx.voteStarted) == 60 and ctx.voting:
            ctx.voteStarted -= 1
            await ctx.voteChannel.send("the vote will end in 2 MINUTES")
        await asyncio.sleep(0.1)
async def main():
    """Unused placeholder coroutine (the bot is actually started via client.run)."""
    pass
if __name__ == '__main__':
    # Schedule the periodic housekeeping coroutine (defined above) on the
    # client's event loop, then block in the Discord client until shutdown.
    client.loop.create_task(check_messages(client))
    client.run(os.environ['DISCORD_TOKEN'])
    # Alternative manual event-loop management kept for reference:
    #loop = asyncio.get_event_loop()
    #loop.run_until_complete(client.start(os.environ['DISCORD_TOKEN']))
    #loop.close()
    #print("closed")
    #asyncio.run(main())
|
import tensorflow as tf
import argparse
import os
import re
from PIL import Image
import numpy as np
def _int64_feature(value):
    """Wrap a single integer in a tf.train.Feature holding an Int64List."""
    int_list = tf.train.Int64List(value=[value])
    return tf.train.Feature(int64_list=int_list)
def _bytes_feature(value):
    """Wrap a single bytes object in a tf.train.Feature holding a BytesList."""
    byte_list = tf.train.BytesList(value=[value])
    return tf.train.Feature(bytes_list=byte_list)
def parse_args():
    """Parse command-line options for the ImageNet -> TFRecord converter."""
    cli = argparse.ArgumentParser()
    cli.add_argument(
        "input_dir",
        type=str,
        help="directory which contains all images in ImageNet.",
    )
    cli.add_argument(
        "output_filename",
        type=str,
        help="tfrecord path",
    )
    # Side length of the square output images (pixels).
    cli.add_argument("--image_size", type=int, default=128)
    return cli.parse_args()
def get_image(path, height, width):
    """Load an image file and center-crop/pad it to (height, width).

    Returns a float ndarray of shape (height, width, channels) for
    3-channel images, or None for grayscale/paletted (2-D) images so the
    caller can skip them.
    """
    # BUGFIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin float is the documented replacement (same float64 dtype).
    image = np.array(Image.open(path), dtype=float)
    if len(image.shape) == 3:
        return center(image, height, width)
    else:
        # raise ValueError("Not a rgb picture: {}".format(path))
        return None
def center(image, height, width):
    """Center-crop an (H, W, C) image to (height, width, C).

    If the image is smaller than the target along an axis, it is
    zero-padded symmetrically on that axis instead of cropped.
    """
    orig_h, orig_w, _ = image.shape
    # Vertical axis: pad symmetrically when too short, else crop the middle rows.
    if orig_h < height:
        pad_top = (height - orig_h) // 2
        pad_bottom = height - orig_h - pad_top
        image = np.pad(
            image, ((pad_top, pad_bottom), (0, 0), (0, 0)),
            mode="constant", constant_values=0
        )
    else:
        row_start = (orig_h - height) // 2
        image = image[row_start:row_start + height]
    # Horizontal axis: same treatment on the width dimension.
    if orig_w < width:
        pad_left = (width - orig_w) // 2
        pad_right = width - orig_w - pad_left
        image = np.pad(
            image, ((0, 0), (pad_left, pad_right), (0, 0)),
            mode="constant", constant_values=0
        )
    else:
        col_start = (orig_w - width) // 2
        image = image[:, col_start:col_start + width]
    return image
def make_example(height, width, depth, image_raw, label):
    """Assemble one tf.train.Example with raw image bytes and an int label."""
    feature_map = {
        "height": _int64_feature(height),
        "width": _int64_feature(width),
        "depth": _int64_feature(depth),
        "image_raw": _bytes_feature(image_raw),
        "label": _int64_feature(label),
    }
    return tf.train.Example(features=tf.train.Features(feature=feature_map))
def main():
    """Walk input_dir and write every RGB *.JPEG into a TFRecord file.

    The class label is derived from the ImageNet-style file name
    <synset>_<index>.JPEG; labels are assigned dense integer ids in
    first-seen order.
    """
    args = parse_args()
    class_labels = {}
    outfile = "{}_{}".format(args.image_size, args.output_filename)
    writer = tf.python_io.TFRecordWriter(outfile)
    count = 0
    for root, _, files in os.walk(args.input_dir):
        for file in files:
            # Group 1 of <synset>_<index>.JPEG is the class identifier.
            r = re.search("(.+)_(.+).JPEG", file, re.IGNORECASE)
            if r:
                class_label = r.group(1)
                file_path = os.path.abspath(os.path.join(root, file))
                image = get_image(file_path, args.image_size, args.image_size)
                if image is None:
                    # Non-RGB image; skip it.
                    continue
                # NOTE(review): this rescaling assumes pixel values in [-1, 1],
                # but get_image returns raw PIL values — confirm the intended
                # input range against the downstream training pipeline.
                image += 1
                image *= 255.0 / 2.0
                image = image.astype("uint8")
                # BUGFIX: `if not class_labels.get(class_label)` treated the
                # first class's id (0, falsy) as missing and re-assigned it,
                # colliding with the second class. Test membership instead.
                if class_label not in class_labels:
                    class_labels[class_label] = len(class_labels)
                example = make_example(
                    args.image_size,
                    args.image_size,
                    3,
                    # BUGFIX: ndarray.tostring() was removed in NumPy 1.24;
                    # tobytes() is the identical, supported replacement.
                    image.tobytes(),
                    class_labels[class_label],
                )
                writer.write(example.SerializeToString())
                count += 1
                print("Processed {}".format(file_path))
    writer.close()
    print("total samples: {}".format(count))
if __name__ == "__main__":
    # Script entry point: convert an ImageNet directory into one TFRecord file.
    main()
|
from enum import Enum
from collections import defaultdict
import multiprocessing
import os
import sys
import pickle
import random
import time
import warnings
from ilm.string_util import check_exists_and_return
import numpy as np
import torch
import torch.nn.functional as F
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler, TensorDataset)
from tqdm import tqdm
from transformers import GPT2Config, GPT2LMHeadModel, AdamW, CONFIG_NAME, WEIGHTS_NAME
try:
import wandb
except:
pass
import ilm.constants
import ilm.mask
import ilm.mask.util
import ilm.tokenize_util
from src.xl_wrapper import RuGPT3XL
import deepspeed.ops.sparse_attention.sparse_attn_op
# Suppress all library warnings for the duration of the run.
warnings.filterwarnings("ignore")
# Flag presumably consumed by the deepspeed/xl_wrapper import machinery to
# enable DeepSpeed sparse-attention kernels — TODO confirm against src.xl_wrapper.
os.environ["USE_DEEPSPEED"] = "1"
class Task(Enum):
    """Training objective variant; selects how (context, answers) are serialized."""
    # Example: She ate <?> for <?><S>cereal<E>breakfast<E>
    ILM = 0
    # Example: <S>cereal<E>breakfast<E>
    NO_CONTEXT_ILM = 1
    # Example: She ate <?> for <?><S>She ate cereal for breakfast<E>
    NAIVE = 2
    # Example: <S>She ate cereal for breakfast<E>
    LM = 3
    # Example: <S>breakfast for cereal ate She<E>
    REVERSE_LM = 4
    # TODO: NAIVE with no stopwords?
class TargetType(Enum):
    """Per-token role label stored alongside inputs; used to build loss masks."""
    PAD = 0                 # padding beyond the example's actual length
    CONTEXT = 1             # ordinary (masked) context token
    CONTEXT_SPECIAL = 2     # special token (mask placeholder) inside the context
    CONTEXT_INFILL_SEP = 3  # the <S> separator between context and answers
    INFILL = 4              # answer token the model must infill
    INFILL_SPECIAL = 5      # special tokens (<E> terminators) in the answer region
    INFILL_REDUNDANT = 6    # answer tokens repeated from context (NAIVE/LM tasks)
def set_random_seed(seed):
    """Seed every RNG the training run relies on (stdlib, NumPy, torch CPU + GPUs)."""
    for seeder in (random.seed,
                   np.random.seed,
                   torch.manual_seed,
                   torch.cuda.manual_seed_all):
        seeder(seed)
# NOTE: Multiprocessing pickle/closure issue workaround
# Closures cannot be pickled for pool workers, so the per-run worker callable
# is stashed in this module-level global, which forked workers inherit.
_GLOBAL_WORKER_TARGET = None
def _worker_target(doc):
    # Trampoline: forwards to whatever callable worker_target_factory installed.
    return _GLOBAL_WORKER_TARGET(doc)
def worker_target_factory(
        tokenizer,
        start_infill_id,
        end_infill_id,
        mask_type_to_id,
        sequence_length,
        task,
        skip_naive_incomplete):
    """Build the per-document pool worker.

    The returned callable takes a (doc, char_masks) pair and produces the
    (inputs, tts) arrays for that document, or None if processing fails.
    """
    def fn(doc_and_char_masks):
        doc, char_masks = doc_and_char_masks
        try:
            return doc_and_char_masks_to_input_and_tt(
                doc,
                char_masks,
                tokenizer,
                start_infill_id,
                end_infill_id,
                mask_type_to_id,
                task,
                sequence_length,
                skip_naive_incomplete)
        except Exception as e:
            # One bad document must not kill the pool: report and signal
            # failure with None so the caller can drop it.
            print(e)
            return None
    return fn
def doc_and_char_masks_to_input_and_tt(
        doc,
        char_masks,
        tokenizer,
        start_infill_id,
        end_infill_id,
        mask_type_to_id,
        task,
        sequence_length,
        skip_naive_incomplete):
    """Convert one document plus its character-level masks into training arrays.

    Returns (inputs, tts): inputs is a (num_examples, sequence_length) uint16
    array of token ids and tts a matching uint8 array of TargetType values.
    One example is produced per mask that tokenizes/aligns/applies cleanly.
    """
    # Tokenize document
    try:
        doc_tokens = ilm.tokenize_util.tokenize(doc, tokenizer=tokenizer)
        # NOTE(review): these prints look like leftover debugging; they run in
        # every pool worker and will be extremely noisy on real datasets.
        print("doc_tokens: ", doc_tokens)
        print("-"*75)
        doc_tokens_ids = ilm.tokenize_util.tokens_to_ids(doc_tokens, tokenizer=tokenizer)
    except:
        # Tokenization failure: fall through with doc_tokens=None so that no
        # examples are produced for this document.
        doc_tokens = None
        print("Failed to tokenize document!", sys.exc_info()[0])
        #error_to_count['Failed to tokenize document'] += len(char_masks)
    # Align character masks to tokens
    tok_masks = []
    if doc_tokens is not None:
        for char_mask in char_masks:
            try:
                tok_mask = ilm.mask.util.align_char_mask_to_tokens(doc, doc_tokens, char_mask)
            except:
                print("Failed to align character-level mask to tokens", sys.exc_info()[0])
                #error_to_count['Failed to align character-level mask to tokens'] += 1
                continue
            tok_masks.append(tok_mask)
    # Apply masks
    contexts_and_answers = []
    for tok_mask in tok_masks:
        try:
            # ca is presumably a (context, answers) pair — see usage below.
            ca = ilm.mask.util.apply_masked_spans(
                doc_tokens_ids,
                tok_mask,
                mask_type_to_id)
        except:
            #error_to_count['Failed to apply mask'] += 1
            continue
        contexts_and_answers.append((tok_mask, ca))
    # Skip examples that would be incomplete for Task.NAIVE (typically the longest task)
    if skip_naive_incomplete:
        contexts_and_answers = [(m, (c, a)) for m, (c, a) in contexts_and_answers if (len(c) + 1 + len(doc_tokens_ids) + 1) <= sequence_length]
    special_ids = set([start_infill_id, end_infill_id] + list(mask_type_to_id.values()))
    inputs = np.zeros((len(contexts_and_answers), sequence_length), dtype=np.uint16)
    tts = np.full((len(contexts_and_answers), sequence_length), TargetType.PAD.value, dtype=np.uint8)
    for i, (mask, (context, answers)) in enumerate(contexts_and_answers):
        # Create example
        example = []
        # (Masked) Context
        if task in [Task.ILM, Task.NAIVE]:
            # Example: She ate <?> for <?>
            example += context
        # Context / answer separator
        context_len = len(example)
        # Example: <S>
        example += [start_infill_id]
        # Answers
        if task in [Task.ILM, Task.NO_CONTEXT_ILM]:
            # Example: cereal<E>breakfast<E>
            for mask_type, answer in answers:
                example += answer
                example += [end_infill_id]
        elif task in [Task.NAIVE, Task.LM]:
            # Example: She ate cereal for breakfast<E>
            example += doc_tokens_ids
            example += [end_infill_id]
        elif task == Task.REVERSE_LM:
            # Example: breakfast for cereal ate She<E>
            example += doc_tokens_ids[::-1]
            example += [end_infill_id]
        else:
            assert False
        if len(example) > sequence_length:
            # Truncate (rather than drop) over-long examples; the answer tail is lost.
            example = example[:sequence_length]
            #warning_to_count['Example longer than sequence length'] += 1
        # Find special tokens
        context_special_idxs = [l for l, t in enumerate(example) if l < context_len and t in special_ids]
        infill_special_idxs = [l for l, t in enumerate(example) if l > context_len and t in special_ids]
        # Store example in output array
        # Guard against token ids that don't fit in uint16 storage.
        if len(example) > 0 and (min(example) < np.iinfo(inputs.dtype).min or max(example) > np.iinfo(inputs.dtype).max):
            raise ValueError('Example cannot be stored in numpy array')
        inputs[i, :len(example)] = example
        # Store target types in output array
        tts[i, :context_len] = TargetType.CONTEXT.value
        for l in context_special_idxs:
            tts[i, l] = TargetType.CONTEXT_SPECIAL.value
        tts[i, context_len:context_len+1] = TargetType.CONTEXT_INFILL_SEP.value
        if task in [Task.NAIVE, Task.LM, Task.REVERSE_LM]:
            # The whole answer region repeats the document; mark it redundant,
            # then overwrite the originally-masked spans as true INFILL targets.
            tts[i, context_len+1:len(example)] = TargetType.INFILL_REDUNDANT.value
            if task == Task.REVERSE_LM:
                mask = mask[::-1]
            for (_, tok_off, tok_len) in mask:
                if task == Task.REVERSE_LM:
                    # Mirror the span offset into the reversed token order.
                    tok_off = (len(doc_tokens_ids) - 1) - (tok_off + tok_len - 1)
                tts[i, context_len+1+tok_off:context_len+1+tok_off+tok_len] = TargetType.INFILL.value
                tts[i, context_len+1+tok_off+tok_len:context_len+1+tok_off+tok_len+1] = TargetType.INFILL_SPECIAL.value
        else:
            tts[i, context_len+1:len(example)] = TargetType.INFILL.value
            for l in infill_special_idxs:
                tts[i, l] = TargetType.INFILL_SPECIAL.value
    return inputs, tts
def masked_dataset_to_inputs_and_tts(
        split,
        tokenizer,
        start_infill_id,
        end_infill_id,
        mask_type_to_id,
        args):
    """Load the pickled masked dataset for `split` and build training arrays.

    Tokenizes and masks every document in a multiprocessing pool and returns
    (inputs, tts, num_docs), where inputs/tts are the per-example arrays
    stacked across documents and num_docs is the raw document count.
    """
    assert split in ['train', 'eval']
    # Pick the split-specific hyperparameters off the args namespace.
    if split == 'train':
        examples_tag = args.train_examples_tag
        sequence_length = args.train_sequence_length
        max_num_examples = args.train_max_num_examples
        skip_naive_incomplete = args.train_skip_naive_incomplete
    else:
        examples_tag = args.eval_examples_tag
        sequence_length = args.eval_sequence_length
        max_num_examples = args.eval_max_num_examples
        skip_naive_incomplete = args.eval_skip_naive_incomplete
    with open(os.path.join(args.examples_dir, '{}.pkl'.format(examples_tag)), 'rb') as f:
        dataset = pickle.load(f)
    num_docs = len(dataset)
    # Mask and tokenize documents
    global _GLOBAL_WORKER_TARGET
    _GLOBAL_WORKER_TARGET = worker_target_factory(
        tokenizer,
        start_infill_id,
        end_infill_id,
        mask_type_to_id,
        sequence_length,
        Task[args.task.upper()],
        skip_naive_incomplete)
    with multiprocessing.Pool(args.data_loader_num_workers) as p:
        docs_inputs_and_tts = list(tqdm(
            p.imap(_worker_target, dataset),
            total=len(dataset)))
    # BUGFIX: the pool worker returns None for documents that raise (see
    # worker_target_factory); previously a single failed document crashed the
    # unpacking comprehensions below with a TypeError. Drop failures instead.
    docs_inputs_and_tts = [r for r in docs_inputs_and_tts if r is not None]
    inputs = np.concatenate([i for i, _ in docs_inputs_and_tts], axis=0)
    tts = np.concatenate([t for _, t in docs_inputs_and_tts], axis=0)
    # TODO: Don't bother doing all the work if we're not going to use it
    if max_num_examples is not None:
        # Deterministic subsample so repeated runs with the same seed agree.
        set_random_seed(args.seed)
        example_ids = random.sample(list(range(inputs.shape[0])), max_num_examples)
        inputs = np.take(inputs, example_ids, axis=0)
        tts = np.take(tts, example_ids, axis=0)
    return inputs, tts, num_docs
def tts_to_labels(inputs, tts, label_tts):
    """Keep input ids whose target type is in label_tts; replace the rest with -1.

    -1 matches the ignore_index used by the cross-entropy losses, so tokens
    outside the selected TargetTypes contribute nothing to the loss.
    """
    keep = torch.zeros_like(inputs, dtype=torch.bool)
    for target_type in label_tts:
        keep = keep | (tts == target_type.value)
    ignored = torch.full_like(inputs, -1)
    return torch.where(keep, inputs, ignored)
def train(args):
    """Fine-tune (or evaluate) a GPT-2-style LM on the infilling objective.

    Builds the tokenizer with added infill special tokens, loads (optionally
    cached) train/eval arrays, instantiates the model (GPT-2 family or
    RuGPT3XL), then either runs a single evaluation pass (--eval_only) or the
    training loop with time-based evaluation, checkpointing and summaries.
    """
    # Init device
    n_gpu = torch.cuda.device_count()
    if n_gpu == 0:
        warnings.warn('No GPU detected. Training on CPU will be very slow')
    elif n_gpu > 1:
        warnings.warn('This codebase is not optimized for multi GPU usage')
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # Lambda for filenames
    example_tag_to_fp = lambda tag: os.path.join(args.examples_dir, '{}.pkl'.format(tag))
    out_fn_to_fp = lambda fn: os.path.join(args.train_dir, fn)
    # Create training dir
    os.makedirs(args.train_dir, exist_ok=True)
    # A saved step.pkl marks a previous run in train_dir to resume from.
    resuming = os.path.exists(out_fn_to_fp('step.pkl'))
    # Create tokenizer
    tokenizer = ilm.tokenize_util.Tokenizer[args.tokenizer_name.upper()]
    if tokenizer == ilm.tokenize_util.Tokenizer.CUSTOM:
        ilm.tokenize_util.set_custom_vocab_fp(args.tokenizer_custom_vocab_fp)
    # Update tokenizer: append <S>/<E> infill delimiters and one token per
    # mask type directly after the base vocabulary.
    base_vocab_size = ilm.tokenize_util.vocab_size(tokenizer)
    start_infill_id = base_vocab_size + 0
    end_infill_id = base_vocab_size + 1
    additional_ids_to_tokens = {
        start_infill_id: '<|startofinfill|>',
        end_infill_id: '<|endofinfill|>'
    }
    mask_cls = ilm.mask.util.mask_cls_str_to_type(args.mask_cls)
    mask_types = mask_cls.mask_types()
    mask_type_to_id = {}
    for i, t in enumerate(mask_types):
        t_id = base_vocab_size + 2 + i
        t_tok = '<|infill_{}|>'.format(mask_cls.mask_type_serialize(t))
        additional_ids_to_tokens[t_id] = t_tok
        mask_type_to_id[t] = t_id
    print(additional_ids_to_tokens)
    vocab_size = ilm.tokenize_util.update_tokenizer(additional_ids_to_tokens, tokenizer)
    # Persist the id->token mapping so inference can rebuild the tokenizer.
    with open(out_fn_to_fp('additional_ids_to_tokens.pkl'), 'wb') as f:
        pickle.dump(additional_ids_to_tokens, f)
    # Load training data
    if not args.eval_only:
        print('Loading training data')
        loaded_from_cache = False
        if args.data_cache:
            print('loading from cache')
            try:
                train_inputs = np.load(out_fn_to_fp('train_inp.npy'))
                train_tts = np.load(out_fn_to_fp('train_tts.npy'))
                with open(out_fn_to_fp('train_num_docs.pkl'), 'rb') as f:
                    train_num_docs = pickle.load(f)
                loaded_from_cache = True
            except:
                # Cache missing/corrupt: fall through to a full rebuild below.
                pass
        if not loaded_from_cache:
            print('not loading from cache')
            train_inputs, train_tts, train_num_docs = masked_dataset_to_inputs_and_tts(
                'train',
                tokenizer,
                start_infill_id,
                end_infill_id,
                mask_type_to_id,
                args)
            if args.data_cache:
                np.save(out_fn_to_fp('train_inp.npy'), train_inputs)
                np.save(out_fn_to_fp('train_tts.npy'), train_tts)
                with open(out_fn_to_fp('train_num_docs.pkl'), 'wb') as f:
                    pickle.dump(train_num_docs, f)
        train_tt_to_count = {TargetType(k):v for k, v in zip(*np.unique(train_tts, return_counts=True))}
        print("train_tt_to_count", train_tt_to_count)
        num_unmasked = train_tt_to_count.get(TargetType.CONTEXT, 0)
        num_masked = train_tt_to_count.get(TargetType.INFILL, 0)
        print('Mask rate (tokens): {:.4f}'.format(num_masked / (num_unmasked + num_masked)))
        print('{} documents, {} examples'.format(train_num_docs, train_inputs.shape[0]))
        print(train_inputs.shape, train_inputs.dtype, train_tts.shape, train_tts.dtype)
        train_data = TensorDataset(
            torch.from_numpy(train_inputs.astype(np.int64)),
            torch.from_numpy(train_tts))
        # Free the (potentially large) numpy copies now that tensors exist.
        del train_inputs
        del train_tts
    # Load eval data
    print('Loading eval data')
    loaded_from_cache = False
    if args.data_cache:
        try:
            eval_inputs = np.load(out_fn_to_fp('eval_inp.npy'))
            eval_tts = np.load(out_fn_to_fp('eval_tts.npy'))
            with open(out_fn_to_fp('eval_num_docs.pkl'), 'rb') as f:
                eval_num_docs = pickle.load(f)
            loaded_from_cache = True
        except:
            pass
    if not loaded_from_cache:
        eval_inputs, eval_tts, eval_num_docs = masked_dataset_to_inputs_and_tts(
            'eval',
            tokenizer,
            start_infill_id,
            end_infill_id,
            mask_type_to_id,
            args)
        if args.data_cache:
            np.save(out_fn_to_fp('eval_inp.npy'), eval_inputs)
            np.save(out_fn_to_fp('eval_tts.npy'), eval_tts)
            with open(out_fn_to_fp('eval_num_docs.pkl'), 'wb') as f:
                pickle.dump(eval_num_docs, f)
    eval_tt_to_count = {TargetType(k):v for k, v in zip(*np.unique(eval_tts, return_counts=True))}
    print(eval_tt_to_count)
    num_unmasked = eval_tt_to_count.get(TargetType.CONTEXT, 0)
    num_masked = eval_tt_to_count.get(TargetType.INFILL, 0)
    print('Mask rate (tokens): {:.4f}'.format(num_masked / (num_unmasked + num_masked)))
    print('{} documents, {} examples'.format(eval_num_docs, eval_inputs.shape[0]))
    print(eval_inputs.shape, eval_inputs.dtype, eval_tts.shape, eval_tts.dtype)
    eval_data = TensorDataset(
        torch.from_numpy(eval_inputs.astype(np.int64)),
        torch.from_numpy(eval_tts))
    del eval_inputs
    del eval_tts
    # Calculate number of steps to train for (return if we're just pre-cacheing data)
    if args.train_num_epochs is not None:
        train_num_batches = int(float(train_num_docs * args.train_num_epochs) / args.train_batch_size)
        if train_num_batches == 0:
            return
        print('Maximum number of training steps: {}'.format(train_num_batches / args.train_batch_accumulation))
    # Create data iterators
    print('Creating datasets')
    if not args.eval_only:
        train_sampler = RandomSampler(train_data)
        train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size, drop_last=True)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size, drop_last=True)
    # Load model
    print('Initializing model...')
    set_random_seed(args.seed)
    # NOTE(review): model_type stays unbound if model_name is not a GPT-2 name
    # and --rugpt3xl is not set — a NameError would follow below; confirm
    # whether that combination should be rejected earlier.
    if args.model_name in ilm.constants.GPT2_MODEL_NAMES:
        model_type = GPT2LMHeadModel
        cfg_type = GPT2Config
    elif args.rugpt3xl:
        model_type = RuGPT3XL
    if resuming:
        print('from saved checkpoint (resuming)')
        model = model_type.from_pretrained(args.train_dir)
    else:
        if args.train_from_scratch:
            if args.rugpt3xl:
                raise NotImplementedError("Training from scratch with rugpt3xl is not implemented.")
            print('from scratch')
            cfg = cfg_type.from_pretrained(args.model_name)
            model = model_type(cfg)
        else:
            print('from pretrained checkpoint')
            if args.rugpt3xl:
                # RuGPT3XL needs explicit weight and DeepSpeed config paths.
                if args.rugpt3xl_weights:
                    weights = check_exists_and_return(args.rugpt3xl_weights)
                else:
                    raise ValueError("Argument `rugpt3xl_weights` is missing.")
                if args.rugpt3xl_deepspeed_path:
                    deepspeed_config_path = check_exists_and_return(args.rugpt3xl_deepspeed_path)
                else:
                    raise ValueError("Argument `rugpt3xl_deepspeed_path` is missing.")
                model = model_type.from_pretrained(args.model_name, weights_path=weights, deepspeed_config_path=deepspeed_config_path)
            else:
                model = model_type.from_pretrained(args.model_name)
    # Grow the embedding matrix to cover the added infill special tokens.
    model.resize_token_embeddings(vocab_size)
    model.to(device)
    model.train()
    # Reset random seed in case model init triggered RNG
    # Initialize optimizers
    if not args.eval_only:
        params = list(model.named_parameters())
        # No weight decay for biases and layer-norm parameters.
        no_decay = ['bias', 'ln']
        optimizer_grouped_parameters = [
            {
                'params': [p for n, p in params if not any(nd in n for nd in no_decay)],
                'weight_decay': args.train_weight_decay
            },
            {
                'params': [p for n, p in params if any(nd in n for nd in no_decay)],
                'weight_decay': 0.0
            }
        ]
        optimizer = AdamW(
            optimizer_grouped_parameters,
            lr=args.train_learning_rate,
            eps=args.train_adam_epsilon)
        if resuming:
            optimizer.load_state_dict(torch.load(out_fn_to_fp('optimizer.pt')))
    # Create global step
    if resuming:
        try:
            with open(out_fn_to_fp('step.pkl'), 'rb') as f:
                step = pickle.load(f)
        except Exception as e:
            if args.eval_only:
                # Eval-only runs can proceed without a recorded step.
                step = None
            else:
                raise e
    else:
        step = 0
    if args.eval_only:
        # ---- Single evaluation pass over the eval set ----
        print('Evaluating')
        model.eval()
        eval_start = time.time()
        eval_token_counts = defaultdict(int)
        eval_token_loss_sums = defaultdict(float)
        for i, eval_batch in enumerate(eval_dataloader):
            with torch.no_grad():
                eval_inputs, eval_tts = tuple(t.to(device) for t in eval_batch)
                eval_logits, _ = model(eval_inputs)
                # Shift by one: logits at position t predict the token at t+1.
                eval_logits_relevant = eval_logits[:, :-1].contiguous().view(-1, eval_logits.shape[-1])
                for tag, tts in [
                        ('context', [TargetType.CONTEXT]),
                        ('infill', [TargetType.INFILL, TargetType.INFILL_SPECIAL]),
                        ('infill_textonly', [TargetType.INFILL])]:
                    eval_labels = tts_to_labels(eval_inputs, eval_tts, tts)
                    eval_labels_relevant = eval_labels[:, 1:]
                    eval_labels_relevant_count = (eval_labels_relevant != -1).long().sum().item()
                    eval_labels_loss = F.cross_entropy(
                        eval_logits_relevant,
                        eval_labels_relevant.contiguous().view(-1),
                        ignore_index=-1).item()
                    # Token-weighted mean: accumulate loss sums and counts.
                    eval_token_counts[tag] += eval_labels_relevant_count
                    eval_token_loss_sums[tag] += eval_labels_loss * eval_labels_relevant_count
        eval_dict = {}
        for tag, count in eval_token_counts.items():
            loss = eval_token_loss_sums[tag]
            if count > 0:
                loss /= count
            eval_dict['eval_{}_count'.format(tag)] = count
            eval_dict['eval_{}_loss'.format(tag)] = loss
            eval_dict['eval_{}_ppl'.format(tag)] = np.exp(loss)
        eval_dict['eval_time'] = time.time() - eval_start
        print('-' * 80)
        if step is not None:
            print('(Step {}) Eval'.format(step))
        for k, v in eval_dict.items():
            print('{}: {}'.format(k, v))
        if args.wandb:
            wandb.log(eval_dict, step=step)
    else:
        # ---- Training loop with time-based eval/checkpoint/summary ----
        print('Training')
        set_random_seed(args.seed)
        best_eval_loss = None
        num_save = -1
        num_summary = -1
        num_batches_complete = step * args.train_batch_accumulation
        start = time.time()
        while True:
            if args.train_num_epochs is not None and num_batches_complete >= train_num_batches:
                break
            for batch in train_dataloader:
                if args.train_num_epochs is not None and num_batches_complete >= train_num_batches:
                    break
                elapsed = time.time() - start
                # Evaluate (once every train_eval_secs seconds of wall time)
                if int(elapsed / args.train_eval_secs) > num_save:
                    num_save = int(elapsed / args.train_eval_secs)
                    model.eval()
                    eval_start = time.time()
                    eval_token_counts = defaultdict(int)
                    eval_token_loss_sums = defaultdict(float)
                    for i, eval_batch in enumerate(eval_dataloader):
                        with torch.no_grad():
                            eval_inputs, eval_tts = tuple(t.to(device) for t in eval_batch)
                            eval_logits, _ = model(eval_inputs)
                            eval_logits_relevant = eval_logits[:, :-1].contiguous().view(-1, eval_logits.shape[-1])
                            for tag, tts in [
                                    ('context', [TargetType.CONTEXT]),
                                    ('infill', [TargetType.INFILL, TargetType.INFILL_SPECIAL]),
                                    ('infill_textonly', [TargetType.INFILL])]:
                                eval_labels = tts_to_labels(eval_inputs, eval_tts, tts)
                                eval_labels_relevant = eval_labels[:, 1:]
                                eval_labels_relevant_count = (eval_labels_relevant != -1).long().sum().item()
                                eval_labels_loss = F.cross_entropy(
                                    eval_logits_relevant,
                                    eval_labels_relevant.contiguous().view(-1),
                                    ignore_index=-1).item()
                                eval_token_counts[tag] += eval_labels_relevant_count
                                eval_token_loss_sums[tag] += eval_labels_loss * eval_labels_relevant_count
                    eval_dict = {}
                    for tag, count in eval_token_counts.items():
                        loss = eval_token_loss_sums[tag]
                        if count > 0:
                            loss /= count
                        eval_dict['eval_{}_count'.format(tag)] = count
                        eval_dict['eval_{}_loss'.format(tag)] = loss
                    eval_dict['eval_time'] = time.time() - eval_start
                    print('-' * 80)
                    print('(Step {}) Eval'.format(step))
                    for k, v in eval_dict.items():
                        print('{}: {}'.format(k, v))
                    if args.wandb:
                        wandb.log(eval_dict, step=step)
                    # Checkpoint only when the infill loss improves.
                    if best_eval_loss is None or eval_dict['eval_infill_loss'] < best_eval_loss:
                        print('Saving')
                        model_to_save = model.module if hasattr(model, 'module') else model
                        model_to_save.config.to_json_file(out_fn_to_fp(CONFIG_NAME))
                        torch.save(model_to_save.state_dict(), out_fn_to_fp(WEIGHTS_NAME))
                        torch.save(optimizer.state_dict(), out_fn_to_fp('optimizer.pt'))
                        with open(out_fn_to_fp('step.pkl'), 'wb') as f:
                            pickle.dump(step, f)
                        best_eval_loss = eval_dict['eval_infill_loss']
                    model.train()
                # Train
                # Inputs are token_ids [34, 578, 9002, ...]
                inputs, tts = tuple(t.to(device) for t in batch)
                # TODO: Option to train on CONTEXT_SPECIAL?
                labels_context = tts_to_labels(inputs, tts, [TargetType.CONTEXT])
                # TODO: Option to skip training on INFILL_REDUNDANT?
                # NOTE: This would give Task.NAIVE/Task.LM less supervision overall but put them more in line with the supervision that Task.ILM and Task.NO_CONTEXT_ILM receive
                labels_infill = tts_to_labels(inputs, tts, [TargetType.INFILL, TargetType.INFILL_SPECIAL, TargetType.INFILL_REDUNDANT])
                logits, _ = model(inputs)
                logits_relevant = logits[:, :-1].contiguous().view(-1, logits.shape[-1])
                loss_context = F.cross_entropy(
                    logits_relevant,
                    labels_context[:, 1:].contiguous().view(-1),
                    ignore_index=-1)
                loss_infill = F.cross_entropy(
                    logits_relevant,
                    labels_infill[:, 1:].contiguous().view(-1),
                    ignore_index=-1)
                loss_context_item = loss_context.item()
                loss_infill_item = loss_infill.item()
                loss = loss_infill
                if args.train_context:
                    loss += loss_context
                if args.train_batch_accumulation != 1:
                    # Normalize so accumulated gradients match a larger batch.
                    loss /= float(args.train_batch_accumulation)
                loss.backward()
                # Summarize (once every train_summary_secs seconds of wall time)
                if int(elapsed / args.train_summary_secs) > num_summary:
                    num_summary = int(elapsed / args.train_summary_secs)
                    print('-' * 80)
                    print('(Step {}) Summary'.format(step))
                    print(loss_context_item)
                    print(loss_infill_item)
                    with torch.no_grad():
                        # Dump the first example of the batch, raw and decoded.
                        for t in inputs, labels_context, labels_infill:
                            t0 = list(t[0].cpu().numpy())
                            print('-' * 40)
                            print(t0)
                        for t in inputs, labels_context, labels_infill:
                            t0 = list(t[0].cpu().numpy())
                            print('-' * 40)
                            print(ilm.tokenize_util.decode([0 if t == -1 else t for t in t0], tokenizer))
                    if args.wandb:
                        wandb.log({
                            'loss_context': loss_context_item,
                            'loss_infill': loss_infill_item,
                        }, step=step)
                # Step the optimizer once per train_batch_accumulation batches.
                if ((num_batches_complete + 1) % args.train_batch_accumulation) == 0:
                    torch.nn.utils.clip_grad_norm_(model.parameters(), args.train_max_grad_norm)
                    optimizer.step()
                    optimizer.zero_grad()
                    step += 1
                num_batches_complete += 1
if __name__ == '__main__':
    # CLI entry point: parse hyperparameters, optionally set up wandb, and train.
    from argparse import ArgumentParser
    parser = ArgumentParser()
    parser.add_argument('experiment_name', type=str)
    parser.add_argument('train_dir', type=str)
    parser.add_argument('examples_dir', type=str)
    parser.add_argument('--seed', type=int)
    parser.add_argument('--wandb', action='store_true', dest='wandb')
    parser.add_argument('--wandb_project_name', type=str)
    mask_args = parser.add_argument_group('Mask')
    mask_args.add_argument('--mask_cls', type=str)
    tokenizer_args = parser.add_argument_group('Tokenizer')
    tokenizer_args.add_argument('--tokenizer_name', type=str, choices=[t.name.lower() for t in ilm.tokenize_util.Tokenizer])
    tokenizer_args.add_argument('--tokenizer_custom_vocab_fp', type=str)
    task_args = parser.add_argument_group('Task')
    task_args.add_argument('--task', type=str, choices=[t.name.lower() for t in Task])
    data_args = parser.add_argument_group('Data')
    data_args.add_argument('--data_no_cache', action='store_false', dest='data_cache')
    data_args.add_argument('--data_loader_num_workers', type=int)
    model_args = parser.add_argument_group('Model')
    model_args.add_argument('--model_name', type=str) #, choices=ilm.constants.GPT2_MODEL_NAMES)
    model_args.add_argument('--rugpt3xl', action='store_true', dest='rugpt3xl')
    model_args.add_argument('--rugpt3xl_weights', type=str)
    model_args.add_argument('--rugpt3xl_deepspeed_path', type=str)
    train_args = parser.add_argument_group('Train')
    train_args.add_argument('--train_examples_tag', type=str)
    train_args.add_argument('--train_max_num_examples', type=int)
    train_args.add_argument('--train_num_epochs', type=int)
    train_args.add_argument('--train_from_scratch', action='store_true', dest='train_from_scratch')
    train_args.add_argument('--train_batch_size', type=int)
    train_args.add_argument('--train_batch_accumulation', type=int)
    train_args.add_argument('--train_sequence_length', type=int)
    train_args.add_argument('--train_skip_naive_incomplete', action='store_true', dest='train_skip_naive_incomplete')
    train_args.add_argument('--train_eval_secs', type=float)
    train_args.add_argument('--train_summary_secs', type=float)
    train_args.add_argument('--train_minimal_supervision', action='store_false', dest='train_context')
    train_args.add_argument('--train_learning_rate', type=float)
    train_args.add_argument('--train_weight_decay', type=float)
    train_args.add_argument('--train_adam_epsilon', type=float)
    train_args.add_argument('--train_max_grad_norm', type=float)
    eval_args = parser.add_argument_group('Eval')
    eval_args.add_argument('--eval_only', action='store_true', dest='eval_only')
    eval_args.add_argument('--eval_examples_tag', type=str)
    eval_args.add_argument('--eval_max_num_examples', type=int)
    eval_args.add_argument('--eval_batch_size', type=int)
    eval_args.add_argument('--eval_sequence_length', type=int)
    eval_args.add_argument('--eval_skip_naive_incomplete', action='store_true', dest='eval_skip_naive_incomplete')
    parser.set_defaults(
        seed=None,
        wandb=False,
        wandb_project_name='ilm',
        mask_cls='ilm.mask.hierarchical.MaskHierarchical',
        tokenizer_name='gpt2',
        tokenizer_custom_vocab_fp=None,
        task='ilm',
        data_cache=True,
        data_loader_num_workers=4,
        model_name='gpt2',
        rugpt3xl=False,
        rugpt3xl_weights=None,
        train_examples_tag='train',
        train_max_num_examples=None,
        train_num_epochs=None,
        train_from_scratch=False,
        train_batch_size=8,
        train_batch_accumulation=3,
        train_sequence_length=256,
        train_skip_naive_incomplete=False,
        train_eval_secs=360,
        train_summary_secs=360,
        train_context=True,
        train_learning_rate=5e-5,
        train_weight_decay=0.,
        train_adam_epsilon=1e-8,
        train_max_grad_norm=1.,
        eval_only=False,
        eval_examples_tag='valid',
        eval_max_num_examples=None,
        eval_batch_size=8,
        eval_sequence_length=256,
        eval_skip_naive_incomplete=False)
    args = parser.parse_args()
    if args.wandb:
        wandb.init(
            project=args.wandb_project_name,
            name=args.experiment_name)
        wandb.config.update(args)
    if args.seed is None:
        # BUGFIX: random.randint requires integer bounds; the original passed
        # the float 1e6, which raises TypeError on Python 3.12+ (deprecated
        # since 3.10). 10**6 is the equivalent int.
        args.seed = random.randint(0, 10**6)
    print('Random seed {}'.format(args.seed))
    train(args)
|
import numpy as np
from ignite.metrics import Metric
class IntersectionOverUnion(Metric):
    """Computes the mean intersection over union (IoU) across classes.

    based on: https://github.com/wkentaro/pytorch-fcn/blob/master/torchfcn/utils.py

    - `update` must receive output of the form `(y_pred, y)`, where both are
      CPU tensors of integer class labels (one label map per sample).
    """
    def __init__(self, num_classes=10, ignore_index=255, output_transform=lambda x: x):
        # ignore_index: label value excluded from the statistics (e.g. void
        # pixels in segmentation ground truth).
        self.num_classes = num_classes
        self.ignore_index = ignore_index
        self.confusion_matrix = np.zeros((num_classes, num_classes))
        super(IntersectionOverUnion, self).__init__(output_transform=output_transform)

    def _fast_hist(self, label_true, label_pred):
        """Confusion-matrix contribution of one flattened (true, pred) pair."""
        # mask = (label_true >= 0) & (label_true < self.num_classes)
        mask = label_true != self.ignore_index
        # BUGFIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin int preserves the original semantics.
        hist = np.bincount(self.num_classes * label_true[mask].astype(int) + label_pred[mask],
                           minlength=self.num_classes ** 2).reshape(self.num_classes, self.num_classes)
        return hist

    def reset(self):
        """Clear the accumulated confusion matrix (called by ignite per epoch)."""
        self.confusion_matrix = np.zeros((self.num_classes, self.num_classes))

    def update(self, output):
        """Accumulate a batch: output is a (y_pred, y) pair of label tensors."""
        y_pred, y = output
        for label_true, label_pred in zip(y.numpy(), y_pred.numpy()):
            self.confusion_matrix += self._fast_hist(label_true.flatten(), label_pred.flatten())

    def compute(self):
        """Return mean IoU; never-seen classes produce NaN and are excluded."""
        hist = self.confusion_matrix
        with np.errstate(divide='ignore', invalid='ignore'):
            iu = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist))
        return np.nanmean(iu)
|
'''
Sample colours from selected image and overlay selected colours along bottom edge of image.
For use in Pythonista
UI selector borrowed directly from Ole Zorn @olemoritz: https://gist.github.com/omz/ae96874dfbda54ed2771
'''
import photos
import Image
import random
import ui
import console
import clipboard
colourcount = 25  # Number of selectable samples
samplesize = 400  # Size of downsampled image. Reduce this for a smaller sample set. Results in colours being lost through pixelation.
selected_image = photos.pick_image()  # Get image. Comment this line and uncomment the next line to use camera instead.
# selected_image = photos.capture_image()
select_width, select_height = selected_image.size  # These will be used for final colour chip placement.
sample = selected_image.resize((samplesize,samplesize))  # Reduce available colours by reducing a copy of and pixelating the image.
colors = sorted(sample.getcolors(samplesize*samplesize))  # getcolors() yields one (count, colour) pair per distinct colour; sorting orders them by frequency.
colors = random.sample(colors, colourcount)  # Get random selection of colours from sample image.
selected_colors = []
def tapped(sender):
    """Toggle a swatch button's selection state and keep selected_colors in sync."""
    red, green, blue, alpha = sender.background_color
    chosen = (int(red * 255), int(green * 255), int(blue * 255))
    # A green border marks an already-selected swatch.
    active_border = (0.0, 1.0, 0.0, 1.0)
    if sender.border_color == active_border:
        # Deselect: clear the highlight and drop the colour from the queue.
        sender.border_width = 0
        sender.border_color = '#000000'
        selected_colors.remove(chosen)
    else:
        # Select: draw the green border and queue the colour.
        sender.border_width = 15
        sender.border_color = '#00ff00'
        selected_colors.append(chosen)
    # Tell us how many colours are selected.
    console.hud_alert(str(len(selected_colors)) + ' in queue')
def save_action(sender):
    """Paint the selected colours as chips along the image's bottom edge and save."""
    scroll_view.close()  # Close the view. Is this really the best place for this?
    # BUGFIX: with nothing selected, the division below raised ZeroDivisionError.
    if not selected_colors:
        console.hud_alert('No colours selected')
        return
    chipsize = select_width / len(selected_colors)
    for i, c in enumerate(selected_colors):
        # BUGFIX: under Python 3, '/' yields floats and PIL's paste() requires
        # integer box coordinates, so cast each corner explicitly.
        bar = (int(chipsize * i), int(select_height - chipsize),
               int(chipsize * i + chipsize), select_height)
        selected_image.paste(c, bar)
    selected_image.show()
    saveit = photos.save_image(selected_image)
    if saveit is True:
        console.hud_alert('Sampled image has been saved')
    elif saveit is False:
        console.hud_alert('Uh oh, not saved')
#Add buttons for all the colors to a scroll view:
scroll_view = ui.ScrollView(frame=(0, 0, 400, 400))
scroll_view.content_size = (0, len(colors) * 80)
for i, c in enumerate(colors):
    r, g, b = c[1]
    # ui colours are 0-1 floats; the sampled channel values are 0-255 ints.
    color = (float(r/255.0),float(g/255.0),float(b/255.0))
    swatch = ui.Button(frame=(0, i*80, 400, 80), background_color=color)
    swatch.title = str(c[1])
    swatch.flex = 'w'
    swatch.action = tapped
    scroll_view.add_subview(swatch)
scroll_view.name = 'Random Color Picker'
save_button = ui.ButtonItem()
save_button.title = 'Save'
save_button.action = save_action
save_button.tint_color = 'red'
scroll_view.right_button_items = [save_button]
scroll_view.present('sheet')
|
# GitHub hostnames to resolve and pin in the local hosts file.
need_domains = ['github.com',
                'gist.github.com',
                'assets-cdn.github.com',
                'raw.githubusercontent.com',
                'gist.githubusercontent.com',
                'cloud.githubusercontent.com',
                'camo.githubusercontent.com',
                'avatars0.githubusercontent.com',
                'avatars1.githubusercontent.com',
                'avatars2.githubusercontent.com',
                'avatars3.githubusercontent.com',
                'avatars4.githubusercontent.com',
                'avatars5.githubusercontent.com',
                'avatars6.githubusercontent.com',
                'avatars7.githubusercontent.com',
                'avatars8.githubusercontent.com',
                'api.github.com',  # entries from here on were added later
                'documentcloud.github.com',
                'help.github.com',
                'nodeload.github.com',
                'codeload.github.com',
                'raw.github.com',
                'status.github.com',
                'training.github.com',
                'github.global.ssl.fastly.net',
                ]
# 利用海外的机器进行相关网站的DNS查询, 本地机器不行
# https://github.com/ButterAndButterfly/GithubHost
def haiwai_output_hosts(domains):
    """Resolve each domain via local DNS and write '<ip> <domain>' lines to hosts.txt.

    Intended to run on a machine outside the GFW so lookups return clean
    records. A domain that fails to resolve is reported and skipped instead
    of aborting the run and leaving a truncated hosts.txt (the old behaviour:
    socket.gethostbyname raises socket.gaierror on failure).
    """
    import socket
    with open('hosts.txt', 'w') as f:
        f.write('```\n')
        f.write('# GitHub Start \n')
        for domain in domains:
            print('Querying ip for domain %s' % domain)
            try:
                ip = socket.gethostbyname(domain)
            except socket.gaierror as exc:
                # Best-effort: skip unresolvable domains, keep the file usable.
                print('Failed to resolve %s: %s' % (domain, exc))
                continue
            print(ip)
            f.write('%s %s\n' % (ip, domain))
        f.write('# GitHub End \n')
        f.write('```\n')
# 根据ipaddress.com 或者 https://site.ip138.com/ 搜索域名IP,然后在本机ping ip看下哪个最快,确定最终ip
def ip138_output_hosts(domains):
    """Query site.ip138.com for each domain's records and print the raw JSON.

    Exploratory helper: results are printed, not written to the hosts file.
    (Removed an unused `import json` and a dangling `...` no-op statement.)
    """
    import requests
    import time
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36'}
    for domain in domains:
        # The read.do endpoint expects a millisecond timestamp next to the domain.
        resp = requests.get(url='https://site.ip138.com/domain/read.do', headers=headers,
                            params={'domain': domain, 'time': int(round(time.time() * 1000))})
        if resp.ok:
            print(resp.url)
            print(resp.json())
# 获取token: https://site.ip138.com/avatars1.githubusercontent.com/
# https://site.ip138.com/domain/write.do?input=avatars1.githubusercontent.com&token=1865a1de58c2228b88b5d5c8c45b9863
# https://site.ip138.com/domain/read.do?domain=raw.githubusercontent.com&time=1598249072506 #毫秒级时间戳,13位
# ipaddress.com (java 版本:https://gitee.com/bryan31/githubhost/)
def ipaddress_output_hosts(domains):
    """Resolve each domain through ipaddress.com and print hosts-file lines.

    When a domain has several candidate IPs, each one is pinged and the IP
    with the lowest average round-trip time wins.

    Bug fix: the "no IP found" message used to format `curr_ip` (always None
    on that path) instead of the domain; it now reports the domain. Unused
    `os`/`json` imports and dead commented-out ping code were removed.
    """
    import sys
    import requests
    import tldextract  # install tldextract
    from bs4 import BeautifulSoup  # install beautifulsoup4
    from pythonping import ping  # install pythonping
    # https://github.com/alessandromaggio/pythonping (multi-ping is an alternative)
    print("# windows: ipconfig /flushdns \n# linux:sudo killall -HUP mDNSResponder")
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36'}
    # Pages look like https://githubusercontent.com.ipaddress.com/avatars3.githubusercontent.com
    for domain in domains:
        result = tldextract.extract(domain)
        if result.registered_domain == domain:
            url = "https://{}.ipaddress.com".format(domain)
        else:
            url = "https://{}.ipaddress.com/{}".format(result.registered_domain, domain)
        resp = requests.get(url=url, headers=headers)
        if resp.ok:
            try:
                soup = BeautifulSoup(resp.text, 'html.parser')
                has_ip_table = soup.select('table[class="panel-item table table-stripes table-v"]')[0]
                ips = has_ip_table.select('tr')[-1].select('td ul li')
                curr_ip = None
                curr_ms = sys.maxsize
                if len(ips) == 1:
                    curr_ip = ips[0].text
                else:
                    # Keep the lowest-latency candidate; seed with the first.
                    for ip in ips:
                        if not curr_ip:
                            curr_ip = ip.text
                        p_r = ping(ip.text, timeout=5, count=10, verbose=False)
                        if p_r.rtt_avg_ms < curr_ms:
                            curr_ip = ip.text
                            curr_ms = p_r.rtt_avg_ms
                if not curr_ip:
                    # Reachable only when the page listed no IPs at all.
                    print("# {} 没有找到IP。".format(domain))
                else:
                    print("{} {}".format(curr_ip, domain))
            except Exception:
                print("# domain:{}, 没有找到IP.".format(domain))
if __name__ == '__main__':
# haiwai_output_hosts(need_domains)
ipaddress_output_hosts(need_domains)
|
from __future__ import absolute_import
from __future__ import unicode_literals
import os
import logging
from buckshot import errors
from buckshot import signals
from buckshot import tasks
from buckshot import threads
LOG = logging.getLogger(__name__)
class Suicide(Exception):
    """Raised internally when a TaskWorker shuts itself down on StopProcessing."""
class TaskWorker(object):
    """Listens for tasks on an input queue, passes the task to the worker
    function, and returns the results on the output queue.

    If we receive a signals.StopProcessing object, we send back our process
    id (wrapped in signals.Stopped) and die.

    If a task times out, send back an errors.TaskTimeout object.
    """
    def __init__(self, func, input_queue, output_queue, timeout=None):
        # `func` runs on an isolated daemon thread so a hung task can be
        # abandoned after `timeout` seconds instead of blocking the worker.
        self._input_queue = input_queue
        self._output_queue = output_queue
        self._thread_func = threads.isolated(
            target=func,
            daemon=True,
            timeout=timeout
        )
    def _recv(self):
        """Get a message off of the input queue. Block until something is
        received.

        If a signals.StopProcessing message is received, die.
        """
        task = self._input_queue.get()
        if task is signals.StopProcessing:
            self._die()
        return task
    def _send(self, result):
        """Put `result` on the output queue."""
        LOG.debug("Sending result: %s", os.getpid())
        self._output_queue.put(result)
    def _die(self):
        """Send a signals.Stopped message across the output queue and raise
        a Suicide exception.
        """
        LOG.debug("Received StopProcessing")
        self._send(signals.Stopped(os.getpid()))
        raise Suicide()
    def _process_task(self, task):
        # Returns (success, tasks.Result); a timed-out task yields an
        # errors.TaskTimeout payload with success=False.
        try:
            LOG.info("%s starting task %s", os.getpid(), task.id)
            success, result = True, self._thread_func(*task.args)
        except threads.ThreadTimeout:
            LOG.error("Task %s timed out", task.id)
            success, result = False, errors.TaskTimeout(task)
        return success, tasks.Result(task.id, result)
    def __call__(self, *args):
        """Listen for values on the input queue, hand them off to the worker
        function, and send results across the output queue.
        """
        continue_ = True
        while continue_:
            try:
                task = self._recv()
            except Suicide:
                # _die() already sent the Stopped message; just exit.
                return
            except Exception as ex:
                # Queue failure: report it to the parent but keep serving.
                retval = errors.SubprocessError(ex)
            else:
                # NOTE(review): success=False (a timeout) also ends the loop
                # because it becomes continue_ — confirm that is intended.
                continue_, retval = self._process_task(task)
            self._send(retval)
|
'''
The client manager manages data in client table
'''
from datetime import datetime
import asyncpg
import pytz
from .const import connection_url
class ClientManager(object):
    """CRUD helper for rows in the ``client`` table.

    Each method opens a fresh asyncpg connection and now closes it when done;
    the previous version leaked one connection per call.
    """

    async def init(self):
        """Create the client table and its indexes if they do not exist yet."""
        conn = await asyncpg.connect(connection_url())
        try:
            await conn.execute('''
                CREATE TABLE IF NOT EXISTS client (
                    fd INTEGER,
                    identifier TEXT,
                    connected_on TIMESTAMPTZ,
                    active BOOLEAN DEFAULT TRUE
                );
                CREATE INDEX ON client (active);
                CREATE INDEX ON client (identifier);
            ''')
        finally:
            # Always release the connection, even if the DDL fails.
            await conn.close()
        self._initialized = True

    async def create(self, fd, identifier):
        """Insert a new client row stamped with the current UTC time."""
        conn = await asyncpg.connect(connection_url())
        try:
            await conn.execute(
                'INSERT INTO client (fd, identifier, connected_on) values ($1, $2, $3)',
                fd, identifier, datetime.utcnow().replace(tzinfo=pytz.utc))
        finally:
            await conn.close()

    async def deactivate(self, identifier):
        """Mark the client row(s) with `identifier` as inactive."""
        conn = await asyncpg.connect(connection_url())
        try:
            await conn.execute('UPDATE client SET active = FALSE where identifier = $1', identifier)
        finally:
            await conn.close()

    async def deactivate_all(self):
        """Mark every client row as inactive."""
        conn = await asyncpg.connect(connection_url())
        try:
            await conn.execute('UPDATE client SET active = FALSE')
        finally:
            await conn.close()

    async def count(self):
        """Return (True, <number of active clients>)."""
        conn = await asyncpg.connect(connection_url())
        try:
            c = await conn.fetchval('select count(*) from client where active = TRUE')
        finally:
            await conn.close()
        return (True, c)
|
#! /usr/bin/python
"""
dispatch_async.py
By Paul Malmsten, 2010
pmalmsten@gmail.com
This example continuously reads the serial port and dispatches packets
which arrive to appropriate methods for processing in a separate thread.
"""
from xbee.thread import XBee
from xbee.helpers.dispatch import Dispatch
import time
import serial
PORT = '/dev/ttyUSB0'
BAUD_RATE = 9600
# Open serial port
ser = serial.Serial(PORT, BAUD_RATE)
# Create handlers for various packet types
def status_handler(name, packet):
    # Print the new modem status; `name` is the dispatch registration name.
    # (Python 2 print statement.)
    print "Status Update - Status is now: ", packet['status']
def io_sample_handler(name, packet):
    # Print the I/O samples carried by an rx_io_data packet.
    print "Samples Received: ", packet['samples']
# When a Dispatch is created with a serial port, it will automatically
# create an XBee object on your behalf for accessing the device.
# If you wish, you may explicitly provide your own XBee:
#
# xbee = XBee(ser)
# dispatch = Dispatch(xbee=xbee)
#
# Functionally, these are the same.
dispatch = Dispatch(ser)
# Register the packet handlers with the dispatch:
# The string name allows one to distinguish between mutiple registrations
# for a single callback function
# The second argument is the function to call
# The third argument is a function which determines whether to call its
# associated callback when a packet arrives. It should return a boolean.
dispatch.register(
"status",
status_handler,
lambda packet: packet['id']=='status'
)
dispatch.register(
"io_data",
io_sample_handler,
lambda packet: packet['id']=='rx_io_data'
)
# Create API object, which spawns a new thread
# Point the asyncronous callback at Dispatch.dispatch()
# This method will dispatch a single XBee data packet when called
xbee = XBee(ser, callback=dispatch.dispatch)
# Keep the main thread alive; the XBee callback thread does the real work.
while True:
    try:
        time.sleep(.1)
    except KeyboardInterrupt:
        break
# halt() must be called before closing the serial
# port in order to ensure proper thread shutdown
xbee.halt()
ser.close()
|
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 23 08:15:36 2019
@author: 42895538859
"""
import numpy as np
# Kronecker product demo: K[i*p:(i+1)*p, j*q:(j+1)*q] = A[i, j] * B.
A = np.array([[1, 2, 3], [4, 5, 6]])
B = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
m, n = A.shape
p, q = B.shape
# np.kron builds exactly the block matrix the old explicit double loop filled
# in; cast to float so the printed output matches the np.zeros-based result.
K = np.kron(A, B).astype(float)
print('o produto de kronecker é')
print(K)
def read_next(*args):
    """Yield every element of every argument, one iterable after another."""
    for iterable in args:
        yield from iterable
# Prints "string2dIct" on one line (dicts iterate keys in insertion order).
for item in read_next('string', (2,), {'d': 1, 'I': 2, 'c': 3, 't': 4}):
    print(item, end='')
# Prints each element on its own line: N, e, e, d, 2, 3, words, .
for i in read_next('Need', (2, 3), ['words','.']):
    print(i)
import re
from functools import partial
from marshmallow import Schema
# Matches an underscore that follows a word character, capturing the next one.
snake_case = re.compile(r"(?<=\w)_(\w)")
# snake_case -> camelCase: replace "_x" with "X" (m[1] is the captured letter).
camel_case = partial(snake_case.sub, lambda m: m[1].upper())
class CamelCasedSchema(Schema):
    """Schema that serialises snake_case field names under camelCase data keys."""
    def on_bind_field(self, field_name, field_obj, _cc=camel_case):
        # `_cc` binds the converter at definition time; lower() first so
        # upper-cased names are treated as snake_case too.
        field_obj.data_key = _cc(field_name.lower())
|
import click
from datetime import datetime as dt
from secrets import token_urlsafe
from flask.cli import FlaskGroup
from {{cookiecutter.app_name}} import create_app
@click.group(cls=FlaskGroup, create_app=create_app)
def cli():
    """Root CLI group; FlaskGroup supplies the app context to subcommands."""
    pass
@cli.command('init')
def init():
    """Create all database tables and seed the initial admin user."""
    from {{cookiecutter.app_name}}.extensions import db
    from {{cookiecutter.app_name}}.models import User
    click.echo("Create DB")
    db.create_all()
    # Seed the admin account only once.
    if not User.query.filter_by(username='admin').first():
        click.echo("create user")
        now = dt.now().replace(second=0, microsecond=0)
        user = User(
            username="admin",
            email="admin@gmail.com",
            created=now,
            token=token_urlsafe(),
            # NOTE(review): the expiration is set to "now", i.e. the token is
            # already expired at creation time — confirm this is intentional.
            token_expiration=dt.now()
        )
        user.set_password("admin")
        db.session.add(user)
        db.session.commit()
        click.echo("created user admin")
    else:
        click.echo("Admin user is already created")
if __name__ == "__main__":
cli()
|
from django.conf import settings
from django.db import models
# from django.core.urlresolvers import reverse
from django_hosts.resolvers import reverse
from .validators import validators_url, validators_dot_com
from .utils import code_generator, create_shortcode
SHORTCODE_MAX = getattr(settings, "SHORTCODE_MAX", 15)
# Create your models here.
class shortenerManager(models.Manager):
    """Manager that hides inactive links and can regenerate shortcodes."""
    def all(self , *args , **kwargs):
        # NOTE(review): overriding all() to filter is unconventional for a
        # Django manager; callers who need inactive rows must bypass it.
        qs_main = super(shortenerManager, self).all(*args, **kwargs)
        qs = qs_main.filter(active=True)
        return qs
    def refresh_shortcode(self, items=None):
        """Regenerate shortcodes for all rows, or only the newest `items` rows."""
        qs = shortener.objects.filter(id__gte=1)
        if items is not None and isinstance(items, int):
            qs = qs.order_by('-id')[:items]
        new_codes = 0
        for q in qs:
            q.shortcode = create_shortcode(q)
            print(q.id)
            q.save()
            new_codes += 1
        return "New codes made: {i}".format(i=new_codes)
class shortener(models.Model):
    """A shortened URL: the target `url` plus its generated `shortcode`."""
    url = models.CharField(max_length=220, validators=[validators_url, validators_dot_com])
    # Auto-generated on first save when left blank (see save()).
    shortcode = models.CharField(max_length=SHORTCODE_MAX, unique=True, blank=True)
    updated = models.DateTimeField(auto_now=True)
    timestamp = models.DateTimeField(auto_now_add=True)
    # Inactive rows are hidden by shortenerManager.all().
    active = models.BooleanField(default=True)
    #empty_datetime = models.DateTimeField(auto_now=False, auto_now_add=False)
    objects = shortenerManager()
    #some_random = shortenerManager()
    def save(self, *args, **kwargs):
        # Generate a shortcode on first save if none was supplied.
        if self.shortcode is None or self.shortcode == "":
            self.shortcode = create_shortcode(self)
        super(shortener, self).save(*args , **kwargs)
    def __str__(self):
        return str(self.url)
    def __unicode__(self):
        # Python 2 compatibility; mirrors __str__.
        return str(self.url)
    def get_short_url(self):
        """Absolute short URL on the 'www' host (django-hosts reverse)."""
        url_path = reverse("scode", kwargs={'shortcode': self.shortcode}, host='www', scheme='http')
        return url_path
|
#!/usr/bin/env python
# This is a script intended to be ran only when there are updates to the item
# database. The results are dumped into a file as JSON to be read by the app.
#
# This script requires an installed (and updated) copy of Eve Online. This
# requires Reverence, a tool which looks at the game cache to get up-to-date
# data. URL: https://github.com/ntt/reverence/
import json
from reverence import blue
if __name__ == '__main__':
    # EVEPATH = '/Applications/EVE Online.app/Contents/Resources/EVE Online.app/Contents/Resources/transgaming/c_drive/Program Files/CCP/EVE'
    # Raw string: "\P", "\C", "\E" are invalid escape sequences in a plain literal.
    EVEPATH = r"C:\Program Files (x86)\CCP\EVE"
    eve = blue.EVE(EVEPATH)
    cfg = eve.getconfigmgr()
    all_types = {}
    for (typeID, groupID, typeName, marketGroupID, volume, capacity) in \
            cfg.invtypes.Select('typeID', 'groupID', 'typeName',
                                'marketGroupID', 'volume', 'capacity'):
        hasMarket = marketGroupID is not None
        if not hasMarket:
            # since the app only deals with market items, do not include things like suns
            continue
        print("Populating info for: %s" % typeName)
        d = {
            'typeID': typeID,
            'groupID': groupID,
            'typeName': typeName,
            'volume': volume,
            'capacity': capacity,
            'market': hasMarket,
        }
        # Fitting slot (high/med/low/rig/subsystem) comes from the dogma effects.
        for row in cfg.dgmtypeeffects[typeID]:
            if row.effectID in [11, 12, 13, 2663, 3772]:
                d['slot'] = cfg.dgmeffects.Get(row.effectID).effectName
        # super, carrier, titan, dread
        if groupID in [659, 547, 30, 485] and typeID in cfg.invtypematerials:
            components = []
            # Distinct loop name so the outer typeID is NOT clobbered — the
            # old code shadowed it, and it is reused for the blueprint-copy
            # stub below.
            for parentTypeID, materialTypeID, component_quantity in cfg.invtypematerials[typeID]:
                components.append({
                    'typeID': parentTypeID,
                    'materialTypeID': materialTypeID,
                    'quantity': component_quantity,
                })
            d['components'] = components
        name_lower = typeName.lower()
        all_types[name_lower] = d
        # Create a stub for blueprint copies
        if name_lower.endswith(' blueprint'):
            copy_name = typeName + ' (Copy)'
            all_types[copy_name.lower()] = {
                'typeID': typeID,
                'groupID': groupID,
                'typeName': copy_name,
                'volume': volume,
                'capacity': capacity,
                'market': False,
            }
    with open('types.json', 'w') as f:
        f.write(json.dumps(all_types, indent=2))
|
import logging
# Package-wide logger; note basicConfig() runs at import time and sets the
# root logger to DEBUG for the whole process.
logger = logging.getLogger("naucse")
logging.basicConfig(level=logging.DEBUG)
|
import argparse
import platform
import subprocess
from edgetpu.detection.engine import DetectionEngine
import socket
import io
import time
import numpy as np
import json
from lib import read_label_file
from PIL import Image
from PIL import ImageDraw
# UDP_IP = '192.168.2.183'
UDP_IP = '127.0.0.1'
TCP_IP = UDP_IP
# TCP_IP = '10.0.0.1'
UDP_RECEIVE_PORT = 9100
# UDP_SEND_PORT = 9101
TCP_PORT = 9101
# BUFFER_SIZE = 1024
def main():
    """Receive JPEG frames over UDP, run Edge TPU object detection on each,
    and stream the results as '|'-delimited JSON messages to a TCP client.

    Blocks until a TCP client connects, then loops forever; reconnects when
    the client drops.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--model', help='Path of the detection model.', required=True)
    parser.add_argument(
        '--draw', help='If to draw the results.', default=True)
    parser.add_argument(
        '--label', help='Path of the labels file.')
    args = parser.parse_args()
    renderer = None
    # Initialize engine.
    engine = DetectionEngine(args.model)
    labels = read_label_file(args.label) if args.label else None
    shown = False
    frames = 0
    start_seconds = time.time()
    print('opening socket.')
    # s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Frames arrive on the UDP socket; results leave on the TCP socket.
    receiveSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    # senderSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.bind((TCP_IP, TCP_PORT))
    s.listen(1)
    # senderSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    receiveSocket.bind((UDP_IP, UDP_RECEIVE_PORT))
    # senderSocket.bind((UDP_IP, UDP_SEND_PORT))
    print('listening...')
    _, width, height, channels = engine.get_input_tensor_shape()
    imageSize = width*height*3
    print('waiting for client')
    conn, addr = s.accept()
    print('Connection address:', addr)
    # Open image.
    while 1:
        print('waiting for packet')
        # 66507 is close to the maximum UDP payload; one datagram = one JPEG.
        data, addr = receiveSocket.recvfrom(66507)
        # print('got packet of length', len(data))
        if (len(data) > 0):
            start_s = time.time()
            # print('processing image')
            try:
                image = Image.open(io.BytesIO(data)).convert('RGB')
            except OSError:
                # Truncated/corrupt datagram: drop the frame and wait for the next.
                print('Could not read image')
                continue
            input = np.frombuffer(image.tobytes(), dtype=np.uint8)
            results = engine.DetectWithInputTensor(input, threshold=0.25,
                                                   top_k=10)
            print('time to process image', (time.time() - start_s) * 1000)
            output = to_output(results, image.size, labels)
            # '|' terminates each JSON message so the client can split the stream.
            message = json.dumps({'results': output}) + '|'
            # print('sending', message)
            try:
                conn.send(message.encode('utf-8'))
            except ConnectionResetError:
                # Client went away: block until a new one connects.
                print('Socket disconnected...waiting for client')
                conn, addr = s.accept()
            # receiveSocket.sendto(message.encode('utf-8'), addr)
            # senderSocket.sendto(message.encode('utf-8'), (UDP_IP, UDP_SEND_PORT))
            # receivedBytes=bytearray()
            # start_s = time.time()
            # Run inference.
            # results = engine.DetectWithInputTensor(input, threshold=0.25,
            #                                        top_k=10)
            # elapsed_s = time.time() - start_s
    # conn.close()
def to_output(results, full_size, labels):
    """Build a JSON-serializable dict (pixel box + label text) per detection."""
    output = []
    for result in results:
        label = labels[result.label_id] if labels is not None else None
        output.append({
            'box': scale_box(result.bounding_box, full_size),
            'label': label,
        })
    return output
def scale_boxes(results, full_size):
    """Scale every detection's bounding box to pixel coordinates.

    Bug fix: scale_box() already returns a plain list (it ends in .tolist()),
    so the previous extra .tolist() raised AttributeError on every call.
    """
    return [scale_box(result.bounding_box, full_size) for result in results]
def to_label_texts(results, labels):
    """Map each detection to its label text; None when no label file was given."""
    if labels is None:
        return None
    return [labels[result.label_id] for result in results]
def scale_box(box, full_size):
    """Scale a normalized (N, 2) box array by (width, height); return a flat list."""
    width, height = full_size[0], full_size[1]
    scaled = box * (width, height)
    return scaled.flatten().tolist()
if __name__ == '__main__':
main()
|
import sys
import os
import re
import OsTools as ot
import GMX as gmx
def put_grofile_in_memory(ifile):
    """Read a .gro file into a list of per-line field lists.

    Returned layout:
      [0]      title line, whitespace-split
      [1]      atom-count line, whitespace-split
      [2:-1]   one list per atom: [resnr, restype, atomtype, atomnr,
               x, y, z, (vx, vy, vz when present)]
      [-1]     box-vector line, whitespace-split
    Atom lines are cut at the fixed columns of the .gro format (5 chars per
    id field, 8 chars per coordinate), not whitespace-split.
    """
    fi = open(ifile,"r")
    fi_mem = fi.readlines()
    fi.close()
    length = len(fi_mem)
    # Title and atom-count lines are free-format.
    for i in range(2):
        l = fi_mem[i].strip().split()
        fi_mem[i] = l
    for i in range(2,length-1):
        l = fi_mem[i]
        ll = []
        # .gro format:
        # First 4 fields of five columns of .gro format
        # Residue number, Residue type, Atom type, Atom number
        count = 0
        for field in range(4):
            start = field*5
            end = start+5
            count = end
            ll.append(l[start:end].strip())
        # Fields 4 to 7 of 8 columns of .gro format
        # x-, y-, z-coordinate
        startcount = count
        for field in range(3):
            start = startcount + field*8
            end = start + 8
            count = end
            ll.append(l[start:end].strip())
        # Fields 7 to 10 of .gro format
        # x-, y-, z-velocities (optional: only when the line is long enough)
        if (count < len(l)-2):
            start = count
            end = len(l)-1
            ll.extend(l[start:end].strip().split())
        fi_mem[i] = ll
        del ll
    # Box vectors live on the final line.
    for i in range(length-1,length):
        l = fi_mem[i].strip().split()
        fi_mem[i] = l
    return fi_mem
def to_gro(pdb_in):
    # Convert a .pdb file (str) or several (list) to .gro via GROMACS editconf.
    # Returns the output filename(s); exits the process if an input is missing.
    # NOTE(review): '.pdb' is used as a regex, so the dot matches any char —
    # harmless for conventional filenames, but worth confirming.
    if (type(pdb_in)==str):
        if (not ot.checkfile(os.getcwd(),pdb_in)):
            print 'ERROR (to_gro): checkfile() failed for args \"'+os.getcwd()+\
                '\" and \"'+pdb_in+'\"'
            sys.exit(1)
        gro_out = re.sub('.pdb','.gro',pdb_in)
        ifacelist = [ '-f '+pdb_in,
                      '-o '+gro_out
                    ]
        gmx.g_editconf(ifacelist, log='editconf.err')
        del ifacelist
        return gro_out
    elif (type(pdb_in)==list):
        gro_out_list = []
        for file in pdb_in:
            if (not ot.checkfile(os.getcwd(),file)):
                print 'ERROR (to_gro): checkfile() failed for args \"'+os.getcwd()+\
                    '\" and \"'+file+'\"'
                sys.exit(1)
            gro_out = re.sub('.pdb','.gro',file)
            ifacelist = [ '-f '+file,
                          '-o '+gro_out
                        ]
            gmx.g_editconf(ifacelist, log='editconf.err')
            del ifacelist
            gro_out_list.append(gro_out)
        return gro_out_list
def strip_gro(gro_mem):
    """Drop the two header lines and the trailing box line, keeping atom lines."""
    return gro_mem[2:-1]
def write_gro(ofile,new_coords_mem):
    """Write an in-memory .gro structure (as built by put_grofile_in_memory)
    back to disk, restoring the fixed-column .gro formatting.

    Consumes `new_coords_mem` destructively (it is reversed and popped).
    """
    f = open(ofile,"w")
    new_coords_mem.reverse()
    # Title line.
    f.write("%s\n" % ''.join(new_coords_mem.pop()))
    natom = ''.join(new_coords_mem.pop())
    f.write("%s\n" % natom)
    natom = int(natom)
    for i in range(0,natom):
        l = new_coords_mem.pop()
        # NOTE(review): atoms beyond index 9999 are silently skipped — the
        # wide-format branch below is commented out. Confirm this is intended.
        if (i<9999):
            rx = float(l[4])
            ry = float(l[5])
            rz = float(l[6])
            if(len(l)>7):
                vx = float(l[7])
                vy = float(l[8])
                vz = float(l[9])
                f.write("%5s%5s%5s%5s%8.3f%8.3f%8.3f%8.3f%8.3f%8.3f\n" % \
                    (l[0],l[1],l[2],l[3],rx,ry,rz,vx, vy, vz))
            else:
                f.write("%5s%5s%5s%5s%8.3f%8.3f%8.3f\n" % \
                    (l[0],l[1],l[2],l[3],rx,ry,rz))
        # else:
        #     rx = float(lsplit[2])
        #     ry = float(lsplit[3])
        #     rz = float(lsplit[4])
        #     if(len(lsplit)>5):
        #         vx = float(lsplit[5])
        #         vy = float(lsplit[6])
        #         vz = float(lsplit[7])
        #         f.write("%8s %11s %7.3f %7.3f %7.3f %7.4f %7.4f %7.4f\n" % \
        #             (lsplit[0],lsplit[1],rx,ry,rz,vx, vy, vz))
        #     else:
        #         f.write("%8s %11s %7.3f %7.3f %7.3f\n" % \
        #             (lsplit[0],lsplit[1],rx,ry,rz))
    # Box vector line.
    f.write(" %s\n" % ' '.join(new_coords_mem.pop()))
    f.close()
    del new_coords_mem
def merge_gro(fname,gro_cgpair,nat_cgpair,nrs_cgpair):
    """Concatenate a pair of .gro files into `fname`.

    Residue and atom numbers of later files are offset by the counts of the
    previous file so numbering stays continuous.

    fname       -- output filename (its stem becomes the title line)
    gro_cgpair  -- list of input .gro filenames
    nat_cgpair  -- atom counts of the inputs, in the same order
    nrs_cgpair  -- residue counts of the inputs, in the same order
    """
    # use only with protein residues
    gro_merged_mem = []
    title = fname.split('.')[0]
    gro_merged_mem.append([title])
    nat_tot = 0
    for i in nat_cgpair:
        nat_tot += i
    gro_merged_mem.append([str(nat_tot)])
    box = ""
    for file in range(len(gro_cgpair)):
        tmp_mem = put_grofile_in_memory(gro_cgpair[file])
        gro_mem = strip_gro(tmp_mem)
        # The last file's box line is kept for the merged output.
        box = tmp_mem[-1]
        del tmp_mem
        res_offset = 0
        at_offset = 0
        if (file>0):
            res_offset = nrs_cgpair[file-1]
            at_offset = nat_cgpair[file-1]
        for line in gro_mem:
            # line[0] is the residue number, line[3] the atom number.
            line[0] = str(int(line[0])+res_offset)
            line[3] = str(int(line[3])+at_offset)
            gro_merged_mem.append(line)
        del gro_mem
    gro_merged_mem.append(box)
    write_gro(fname,gro_merged_mem)
    del gro_merged_mem
|
"""
false_triples.py
Creates false training triples under CWA.
"""
from random import choice
from collections import defaultdict
import numpy as np
from false_generator import FalseGenerator
def get_domains(subjects, predicates):
    """Map each predicate to the set of subjects it co-occurs with."""
    domains = defaultdict(set)
    for subject, predicate in zip(subjects, predicates):
        domains[predicate].add(subject)
    return domains
def remove_existing(triples, all_triples):
    """Return `triples` minus any triple already present in `all_triples`."""
    candidate_set = set(map(tuple, triples))
    known_set = set(map(tuple, all_triples))
    return list(candidate_set - known_set)
def inverse_domain(domains, all_entities):
    """Replace each predicate's domain with its complement (mutates in place).

    An empty complement falls back to the full entity set so later sampling
    never draws from an empty pool. Returns the same dict for chaining.
    """
    for predicate, members in domains.items():
        complement = set(all_entities) - set(members)
        domains[predicate] = complement if complement else set(all_entities)
    return domains
def get_false_triple(subjects, objects, predicate):
    """Sample one (subject, object) pair and attach the given predicate."""
    corrupted_subject = choice(subjects)
    corrupted_object = choice(objects)
    return (corrupted_subject, corrupted_object, predicate)
def corrupt_triples(true_subjects, true_predicates, true_objects, check = False, mode = 'random', false_gen = None):
    """
    Create false triples.
    args:
        true_subjects :: list
            all subjects
        true_objects :: list
            all objects
        true_predicates :: list
            all predicates
        check :: bool
            remove existing true triples from created false triples.
        mode :: string
            random :: assume closed world, perturb subject/object with randomly selected from set of all entities.
            domain :: keep domain and range consistent across false and true triples.
            range :: reverse domain and range for relation.
            compliment_domain :: use all entities minus the true domain/range as domain/range.
            compliment_range :: use all entities minus the true domain/range as range/domain.
            ontology :: use class assertions, domain/range statements etc. to construct open world
                false triples; requires `false_gen` (a FalseGenerator).
    Returns (false_subjects, false_predicates, false_objects), each with the
    same length as the true inputs.
    Raises ValueError when `check` filtering removes every candidate triple.
    """
    true_subjects = list(np.ndarray.flatten(true_subjects))
    true_objects = list(np.ndarray.flatten(true_objects))
    true_predicates = list(np.ndarray.flatten(true_predicates))
    all_entities = list(set(true_subjects).union(set(true_objects)))
    false_triples = []
    if mode == 'random':
        for p in true_predicates:
            false_triples.append(get_false_triple(all_entities, all_entities, p))
    elif mode == 'domain':
        # Domains and ranges used with predicate in KF.
        domains = get_domains(true_subjects, true_predicates)
        ranges = get_domains(true_objects, true_predicates)
        for p in true_predicates:
            false_triples.append(get_false_triple(list(domains[p]),list(ranges[p]),p))
    elif mode == 'range':
        # Domains and ranges used with predicate in KF.
        domains = get_domains(true_objects, true_predicates)
        ranges = get_domains(true_subjects, true_predicates)
        for p in true_predicates:
            false_triples.append(get_false_triple(list(domains[p]),list(ranges[p]),p))
    elif mode == 'compliment_domain':
        # newD = Compliment(D),newR = Compliment(R)
        domains = get_domains(true_subjects, true_predicates)
        ranges = get_domains(true_objects, true_predicates)
        domains = inverse_domain(domains, all_entities)
        ranges = inverse_domain(ranges, all_entities)
        for p in true_predicates:
            false_triples.append(get_false_triple(list(domains[p]),list(ranges[p]),p))
    elif mode == 'compliment_range':
        # newD = Compliment(R), newR = Compliment(D)
        domains = get_domains(true_objects, true_predicates)
        ranges = get_domains(true_subjects, true_predicates)
        domains = inverse_domain(domains, all_entities)
        ranges = inverse_domain(ranges, all_entities)
        for p in true_predicates:
            false_triples.append(get_false_triple(list(domains[p]),list(ranges[p]),p))
    elif mode == 'ontology':
        if not isinstance(false_gen, FalseGenerator):
            raise TypeError(mode, 'requires a FalseGenerator object as input.')
        for s,p,o in zip(true_subjects,true_predicates,true_objects):
            method = choice(['range','domain','disjoint'])
            false_triples.append(false_gen.corrupt((s,p,o),method=method))
    else:
        raise NotImplementedError(mode + " not implemented")
    # Check if false triples already exists in KG.
    if check:
        false_triples = remove_existing(false_triples, list(zip(true_subjects,true_objects,true_predicates)))
    # Guard: without it, the doubling loop below would spin forever on an
    # empty list (extending [] with [] never grows it).
    if not false_triples:
        raise ValueError("No false triples remain after removing existing ones.")
    # Extend to correct size and save.
    while len(true_subjects) > len(false_triples):
        false_triples.extend(false_triples)
    false_triples = false_triples[:len(true_subjects)]
    false_subjects, false_objects, false_predicates = zip(*false_triples)
    return false_subjects, false_predicates, false_objects
|
import math
import operator
import numpy as np
import cv2
from util.long import typeVal
class ArithmeticList:
    """A fixed-length list of numbers supporting element-wise arithmetic."""

    def __init__(self, *args):
        self.coords = list(args)

    def __add__(self, other):
        return self._mapOperation(other, operator.add)

    def __sub__(self, other):
        return self._mapOperation(other, operator.sub)

    def __mul__(self, scalar):
        return self._mapOperation(ArithmeticList(*([scalar] * len(self.coords))), operator.mul)

    def __truediv__(self, scalar):
        # Python 3 `/` dispatches to __truediv__; the original only defined
        # the Python 2 __div__, so `/` raised TypeError under Python 3.
        return self._mapOperation(ArithmeticList(*([scalar] * len(self.coords))), operator.truediv)

    # Keep the old name as an alias for any caller invoking it directly.
    __div__ = __truediv__

    def _mapOperation(self, other, operation):
        """Apply `operation` pairwise; both operands must have equal length."""
        assert len(self.coords) == len(other.coords)
        # BUG FIX: the generator must be unpacked with *, otherwise coords
        # became a one-element list holding the generator object, which broke
        # iteration, chaining and len() on every arithmetic result.
        return ArithmeticList(*(operation(x, y) for (x, y) in zip(self.coords, other.coords)))

    def __iter__(self):
        return self.coords.__iter__()
class Vector(ArithmeticList):
    """ArithmeticList with named x/y/z access to the first three coordinates.

    Construct either positionally (Vector(1, 2)) or by axis name
    (Vector(x=1, y=2)), but not both at once.
    """
    def __init__(self, *args, **kwargs):
        # Positional and keyword initialisation are mutually exclusive.
        assert not args or not kwargs
        super(Vector, self).__init__(*args)
        for key, val in kwargs.items():
            index = self._indexFromName(key)
            assert index is not None
            l = len(self.coords)
            if index >= l:
                # Grow coords with None placeholders up to the target axis.
                self.coords[l:] = [None] * (1 + index - l)
            self.coords[index] = val
        # Every axis up to the highest one given must have been filled in.
        assert None not in self.coords
    def __getattr__(self, name):
        # Route v.x / v.y / v.z to coords[0..2]; other names behave normally.
        index = self._indexFromName(name)
        if index is not None:
            return self.coords[index]
        else:
            return object.__getattribute__(self, name)
    def __setattr__(self, name, val):
        # Symmetric write path for the x/y/z pseudo-attributes.
        index = self._indexFromName(name)
        if index is not None:
            self.coords[index] = val
        else:
            object.__setattr__(self, name, val)
    def quadnorm(self):
        """Squared Euclidean norm (avoids the sqrt when comparing distances)."""
        return sum(v ** 2 for v in self.coords)
    def norm(self):
        """Euclidean norm."""
        return math.sqrt(self.quadnorm())
    @staticmethod
    def _indexFromName(name):
        # "x"/"y"/"z" -> 0/1/2; any other name -> None.
        try:
            return {"x": 0, "y": 1, "z": 2}[name]
        except KeyError:
            return None
# More geometric objects
class Point(Vector):
    """A position in space; behaviourally identical to Vector."""
    pass
# OpenCV-like keypoint
class Keypoint:
    """Minimal OpenCV-keypoint stand-in: a location `pt` and a `size`."""
    __slots__ = ("pt", "size")
class Circle(Point):
    pi = math.pi

    def __init__(self, x, y, r):
        """
        A Circle is a point with a size associated -- here r is the radius.
        """
        super().__init__(x, y)
        self.r = r

    @classmethod
    def fromBbox(cls, bbox):
        """Circle with the same area as `bbox`, centered on it.

        Bug fix: the area conversion used a hard-coded pi = 3.125 instead of
        the class's math.pi, skewing every derived radius.
        """
        radius = math.sqrt(bbox.area / cls.pi)
        x, y = bbox.center
        return cls(x, y, radius)

    @classmethod
    def fromKeypoint(cls, keypoint):
        """Circle from an OpenCV-style keypoint (size is a diameter)."""
        radius = keypoint.size / 2
        x = keypoint.pt[0]
        y = keypoint.pt[1]
        return cls(x, y, radius)

    def __contains__(self, point):
        # Bug fix: compare the squared distance against r**2, not r.
        # Computed directly from coordinates: the inherited vector subtraction
        # returned an object without quadnorm(), so the old code also crashed.
        dx = self.x - point.x
        dy = self.y - point.y
        return dx * dx + dy * dy < self.r ** 2

    def isInside(self, otherCircle):
        """True when this circle lies entirely within `otherCircle`.

        Bug fix: the original returned the raw coordinate difference object
        (always truthy) instead of a containment test.
        """
        dx = self.x - otherCircle.x
        dy = self.y - otherCircle.y
        return math.sqrt(dx * dx + dy * dy) + self.r <= otherCircle.r

    @property
    def center(self):
        return Point(self.x, self.y)

    @property
    def area(self):
        return self.pi * self.r ** 2
class MvBbox:
    """Axis-aligned bounding box whose top-left corner is (x, y).

    All four values are stored as ints. Convenience properties: `bbox`
    (plain tuple), `area`, `center`, `right`, `bottom`.
    """
    __slots__ = "x y width height".split()

    def __init__(self, x, y, width, height):
        self.x = int(x)
        self.y = int(y)
        self.width = int(width)
        self.height = int(height)

    @staticmethod
    def fromCircle(circle, width_on_height_ratio):
        """Return the (x, y, w, h) tuple of a box with the circle's area.

        NOTE(review): despite the constructor-style name this returns a plain
        tuple, not an MvBbox — confirm callers expect a tuple before changing.
        """
        area = circle.area
        width = math.sqrt(width_on_height_ratio * area)
        height = area / width
        x = circle.x - width / 2
        y = circle.y - height / 2
        bbox = (int(x), int(y), int(width), int(height))
        return bbox

    def __contains__(self, point):
        """True when `point` (needs .x / .y) lies in the box, edges included."""
        dx = point.x - self.x
        dy = point.y - self.y
        return 0 <= dx <= self.width and 0 <= dy <= self.height

    def isInside(self, otherBbox):
        """True when `otherBbox` lies entirely within this bbox (edges allowed)."""
        left = self.x <= otherBbox.x
        top = self.y <= otherBbox.y
        right = self.x + self.width >= otherBbox.x + otherBbox.width
        bottom = self.y + self.height >= otherBbox.y + otherBbox.height
        return all([left, top, right, bottom])

    def extractFrom(self, frame, fillerColor=None):
        """
        Accept a frame and extract the region corresponding to this mvbbox.
        If the region falls outside the frame, the result is as if the frame
        were surrounded by `fillerColor`.
        :param frame: The frame from which to extract the region.
        :param fillerColor:
            The color used to fill areas that fall outside the frame.
            Black (a zero pixel of the frame's dtype) by default.
        """
        if len(frame.shape) < 2 or frame.shape[0] <= 0 or frame.shape[1] <= 0:
            raise ValueError(f"Given frame's dimension are nul. shape: {frame.shape}")
        if fillerColor is not None:
            fillerValue = fillerColor
        else:
            # Build a zero pixel of the right shape/dtype WITHOUT touching the
            # caller's data. BUG FIX: the old code did frame[0, 0].fill(0),
            # which zeroed the frame's own top-left pixel (frame[0, 0] is a
            # view into the frame, not a copy).
            fillerValue = np.zeros_like(frame[0, 0])
        fh, fw = frame.shape[:2]
        destShape = [self.height, self.width, *frame.shape[2:]]
        destination = np.empty(shape=destShape, dtype=frame.dtype)
        destination[:, :] = fillerValue
        # d: destination area where the part of the frame will be copied.
        dLeft, dTop = 0, 0
        dRight, dBottom = self.width, self.height
        # o: origin area of the frame which will be copied.
        oLeft, oTop = self.x, self.y
        oRight, oBottom = self.right, self.bottom
        if self.x < 0:
            dLeft -= self.x
            oLeft -= self.x  # thus oLeft == 0
        if self.y < 0:
            dTop -= self.y
            oTop -= self.y  # thus oTop == 0
        if fw - self.right < 0:
            dRight += fw - self.right
            oRight += fw - self.right
        if oBottom > fh:
            dBottom += fh - self.bottom
            oBottom += fh - self.bottom
        destination[dTop:dBottom, dLeft:dRight] = frame[oTop:oBottom, oLeft:oRight]
        return destination

    def draw(self, frame, color, *args, **kwargs):
        """Draw the box on `frame` with cv2.rectangle (thickness 6 by default)."""
        if len(args) == 0 and "thickness" not in kwargs:
            kwargs["thickness"] = 6
        cv2.rectangle(frame, (self.x, self.y), (self.right, self.bottom), color, *args, **kwargs)

    @property
    def bbox(self):
        # Plain-tuple view of the box; the setter accepts any 4-sequence.
        return (self.x, self.y, self.width, self.height)

    @bbox.setter
    def bbox(self, val):
        self.x, self.y, self.width, self.height = map(int, val)

    @property
    def area(self):
        return self.width * self.height

    @property
    def center(self):
        return Point(self.x + self.width // 2, self.y + self.height // 2)

    @center.setter
    def center(self, newCenter):
        cx, cy = newCenter
        self.x = int(cx - self.width // 2)
        self.y = int(cy - self.height // 2)

    @property
    def right(self):
        return self.x + self.width

    @property
    def bottom(self):
        assert type(self.y) == int, typeVal(self.y)
        assert type(self.height) == int
        return self.y + self.height
# Import the Dog class from the local Dog module and create one instance.
from Dog import Dog

fido = Dog()  # relies on Dog's no-argument constructor
# This file is part of ranger, the console file manager.
# License: GNU GPL version 3, see the file "AUTHORS" for details.
"""Shared objects contain singletons for shared use."""
from ranger.ext.lazy_property import lazy_property
class FileManagerAware(object):
    """Subclass this to gain access to the global "FM" object."""

    @staticmethod
    def _setup(fm):
        # Attach the fm instance to the class itself so every subclass and
        # every instance shares the same global object.
        setattr(FileManagerAware, "fm", fm)
class SettingsAware(object):
    """Subclass this to gain access to the global "SettingObject" object."""

    @staticmethod
    def _setup(settings):
        # Store the settings object on the class so it is shared globally.
        setattr(SettingsAware, "settings", settings)
|
def test_jenkins_is_installled(host):
    """The jenkins package must be installed at some 2.2x version."""
    pkg = host.package("jenkins")
    assert pkg.is_installed
    assert pkg.version.startswith("2.2")
def test_jenkins_running_and_enabled(host):
    """The jenkins service must be active now and enabled at boot."""
    svc = host.service("jenkins")
    assert svc.is_running
    assert svc.is_enabled
def test_ansible_is_intalled(host):
    """The ansible package must be installed at some 2.9x version."""
    pkg = host.package("ansible")
    assert pkg.is_installed
    assert pkg.version.startswith("2.9")
def test_packer_in_path(host):
    """The packer binary must be resolvable on the remote host's PATH.

    ``host.exists`` already returns a bool, so it is asserted directly
    instead of the ``== True`` comparison anti-pattern.
    """
    assert host.exists("packer")
|
import argparse
import logging
import os
import random
import socket
import sys
from sklearn.utils import shuffle
import numpy as np
import psutil
import setproctitle
import torch
sys.path.insert(0, os.path.abspath(os.path.join(os.getcwd(), "../../../")))
from fedml_api.model.finance.vfl_classifier import VFLClassifier
from fedml_api.model.finance.vfl_feature_extractor import VFLFeatureExtractor
from fedml_api.data_preprocessing.lending_club_loan.lending_club_dataset import loan_load_three_party_data
from fedml_api.data_preprocessing.NUS_WIDE.nus_wide_dataset import NUS_WIDE_load_three_party_data
from fedml_api.distributed.classical_vertical_fl.vfl_api import FedML_VFL_distributed
from fedml_api.distributed.fedavg.FedAvgAPI import FedML_init
def add_args(parser):
    """Register the training CLI options on *parser* and parse them.

    parser : argparse.ArgumentParser
    Returns the parsed ``argparse.Namespace`` with the options required by
    the federated-training fit loop.
    """
    # Dataset and cluster topology.
    parser.add_argument('--dataset', type=str, default='lending_club_loan', metavar='N',
                        help='dataset used for training')
    parser.add_argument('--client_number', type=int, default=2, metavar='NN',
                        help='number of workers in a distributed cluster')
    # Training schedule and optimizer settings.
    parser.add_argument('--comm_round', type=int, default=100,
                        help='how many round of communications we shoud use')
    parser.add_argument('--batch_size', type=int, default=256, metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
                        help='learning rate (default: 0.001)')
    parser.add_argument('--frequency_of_the_test', type=int, default=30,
                        help='the frequency of the algorithms')
    return parser.parse_args()
def init_training_device(process_ID, fl_worker_num, gpu_num_per_machine):
    """Map an MPI process onto a torch device.

    Process 0 (the server) gets ``cuda:0`` when CUDA is available; worker
    ``i`` (process ``i + 1``) gets GPU ``i % gpu_num_per_machine``. Falls
    back to the CPU when CUDA is unavailable.
    """
    use_cuda = torch.cuda.is_available()
    if process_ID == 0:
        return torch.device("cuda:0" if use_cuda else "cpu")
    # Round-robin assignment of worker indices onto this machine's GPUs.
    gpu_by_worker = {worker: worker % gpu_num_per_machine
                     for worker in range(fl_worker_num)}
    logging.info(gpu_by_worker)
    device = torch.device("cuda:" + str(gpu_by_worker[process_ID - 1]) if use_cuda else "cpu")
    logging.info(device)
    return device
if __name__ == "__main__":
    # Initialize distributed computing (MPI): returns the communicator,
    # this process's rank and the total process count.
    comm, process_id, worker_number = FedML_init()

    # Parse python script input parameters.
    parser = argparse.ArgumentParser()
    args = add_args(parser)

    # Customize the process name so workers are identifiable in ps/top.
    str_process_name = "Federated Learning:" + str(process_id)
    setproctitle.setproctitle(str_process_name)

    # Customize the log format; every record is prefixed with the rank.
    logging.basicConfig(level=logging.INFO,
                        format=str(
                            process_id) + ' - %(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
                        datefmt='%a, %d %b %Y %H:%M:%S')
    hostname = socket.gethostname()
    logging.info("#############process ID = " + str(process_id) +
                 ", host name = " + hostname + "########" +
                 ", process ID = " + str(os.getpid()) +
                 ", process Name = " + str(psutil.Process(os.getpid())))

    # Set the random seed. The np.random seed determines the dataset partition.
    # The torch_manual_seed determines the initial weight.
    # We fix these two, so that we can reproduce the result.
    seed = 0
    np.random.seed(seed)
    # BUGFIX: this previously seeded torch with `worker_number`, which
    # contradicts the reproducibility comment above; use the fixed seed.
    torch.manual_seed(seed)
    random.seed(0)

    # GPU management: assign this rank a device (rank 0 = server).
    logging.info("process_id = %d, size = %d" % (process_id, worker_number))
    device = init_training_device(process_id, worker_number - 1, 4)

    # Load data for the three vertically-partitioned parties.
    print("################################ Prepare Data ############################")
    if args.dataset == "lending_club_loan":
        data_dir = "../../../data/lending_club_loan/"
        train, test = loan_load_three_party_data(data_dir)
    elif args.dataset == "NUS_WIDE":
        data_dir = "../../../data/NUS_WIDE"
        class_lbls = ['person', 'animal']
        train, test = NUS_WIDE_load_three_party_data(data_dir, class_lbls, neg_label=0)
    else:
        # Unknown dataset name: fall back to the lending-club data.
        data_dir = "../../../data/lending_club_loan/"
        train, test = loan_load_three_party_data(data_dir)

    Xa_train, Xb_train, Xc_train, y_train = train
    Xa_test, Xb_test, Xc_test, y_test = test

    # Shuffle all party-aligned arrays together so row correspondence
    # between parties (and labels) is preserved.
    Xa_train, Xb_train, Xc_train, y_train = shuffle(Xa_train, Xb_train, Xc_train, y_train)
    Xa_test, Xb_test, Xc_test, y_test = shuffle(Xa_test, Xb_test, Xc_test, y_test)
    train = [Xa_train, Xb_train, Xc_train, y_train]
    test = [Xa_test, Xb_test, Xc_test, y_test]

    # The guest (rank 0) holds party A's features plus the labels; hosts
    # (ranks 1 and 2) hold the feature-only parties B and C.
    guest_data = [Xa_train, y_train, Xa_test, y_test]
    host_data = None
    if process_id == 1:
        host_data = [Xb_train, Xb_test]
    elif process_id == 2:
        host_data = [Xc_train, Xc_test]

    # Create models for each worker: a feature extractor per party, and a
    # classifier head on top of the extractor's output.
    if process_id == 0:
        guest_feature_extractor = VFLFeatureExtractor(input_dim=Xa_train.shape[1], output_dim=10).to(device)
        guest_classifier = VFLClassifier(guest_feature_extractor.get_output_dim(), 1).to(device)
        guest_model = [guest_feature_extractor, guest_classifier]
        host_model = [None, None]
    elif process_id == 1:
        host_feature_extractor = VFLFeatureExtractor(input_dim=Xb_train.shape[1], output_dim=10).to(device)
        host_classifier = VFLClassifier(host_feature_extractor.get_output_dim(), 1).to(device)
        host_model = [host_feature_extractor, host_classifier]
        guest_model = [None, None]
    elif process_id == 2:
        host_feature_extractor = VFLFeatureExtractor(input_dim=Xc_train.shape[1], output_dim=10).to(device)
        host_classifier = VFLClassifier(host_feature_extractor.get_output_dim(), 1).to(device)
        host_model = [host_feature_extractor, host_classifier]
        guest_model = [None, None]
    else:
        guest_model = [None, None]
        host_model = [None, None]

    # Hand everything to the distributed vertical-FL training loop.
    FedML_VFL_distributed(process_id, worker_number, comm, args, device, guest_data, guest_model, host_data, host_model)
|
#!/usr/bin/env python
import sys
import sqlite3
import random
def _main(args):
if len(args) < 6:
print "usage: patser_hit_window_xls.py <patser_hit_db> <matrix_name> <winsize> <xls> <nrandom> <xls-type; 0=(name\tchr\tpeak);1=MACS>"
sys.exit(1)
conn = sqlite3.connect(args[0])
conn.row_factory = sqlite3.Row
cur = conn.cursor()
xls_type = int(args[5])
winsize = int(args[2])
xls_lines = []
for line in open(args[3]):
spl_line = tuple(line[:-1].split("\t"))
xls_lines.append(spl_line)
outhits = open(args[3] + "_" + args[1] + "_win" + args[2],"w")
outrand = open(args[1] + "_" + "_randwin" + args[2],"w")
cur.execute("""SELECT matrix_key from matrix where name = ?""",(args[1],))
matrix_key = cur.fetchall()[0][0]
chrs = {}
count = 0
cur.execute("""SELECT DISTINCT chr FROM patser_hit""")
chr_names = cur.fetchall()
for ch in chr_names:
chr = ch[0]
cur.execute("""SELECT max(start) from patser_hit where chr = ?""",(chr,))
max = int(cur.fetchall()[0][0])
chrs[(count,count+max)] = {'name' : chr,
'max' : max }
count = count + max
xls_counts = [0] * winsize
xls_total = len(xls_lines)
hit_count = 0
for x in xls_lines:
peak_loc = None
chr = None
if (xls_type == 1):
peak_loc = int(x[1]) + int(x[4])
chr = x[0]
else:
peak_loc = int(x[2])
chr = x[1]
temp_count = [0] * winsize
print (chr,peak_loc - (winsize/2),peak_loc + (winsize/2))
cur.execute("""SELECT * from patser_hit where ((chr = ?) and (start between ? and ?) and (matrix_key = ?))""",(chr,peak_loc - (winsize/2),peak_loc + (winsize/2),matrix_key))
hits = cur.fetchall()
print hits
for h in hits:
hit_count += 1
win_loc = ((peak_loc - h['start']) + winsize / 2) - 1
print win_loc
xls_counts[win_loc] += 1
temp_count[win_loc] += 1
print " ".join(map(str,temp_count))
print >> outhits, "\t".join(map(str,temp_count))
rand_counts = [0] * winsize
rand_hit_count = 0
print xrange(0,10)
for x in xrange(0,int(args[4])):
print "random: " + str(x)
rand = random.randint(0,count)
sel_chr = None
range = None
for c in chrs.keys():
if rand >= c[0] and rand < c[1]:
sel_chr = chrs[c]
else:
continue
temp_count = [0] * winsize
chr_rand = random.randint(0,sel_chr['max'])
cur.execute("""SELECT * from patser_hit where ((chr = ?) and (start between ? and ?) and (matrix_key = ?))""",(chr,chr_rand - (winsize/2),chr_rand + (winsize/2),matrix_key))
hits = cur.fetchall()
for h in hits:
rand_hit_count += 1
win_loc = ((chr_rand - h['start']) + winsize / 2) - 1
print win_loc
rand_counts[win_loc] += 1
temp_count[win_loc] += 1
print " ".join(map(str,temp_count))
print >> outrand, "\t".join(map(str,temp_count))
countout = open(args[1] + "_win" + args[2] + "counts.txt","w")
rateout = open(args[1] + "_win" + args[2] + "rates.txt","w")
randrateout = open(args[1] + "_win" + args[2] + "_rep" + args[4] + "rates.txt","w")
print "peak windows with at least one hit: %i / %i" % (hit_count,xls_total)
print "random windows with at least one hit: %i / %s" % (rand_hit_count,args[4])
print >> countout, " ".join(map(str,xls_counts))
print >> rateout, " ".join([str(x/float(xls_total)) for x in xls_counts])
print >> randrateout, " ".join([str(x/float(args[4])) for x in rand_counts])
# Script entry point: forward CLI arguments (sans program name) to _main.
if __name__ == "__main__":
    _main(sys.argv[1:])
|
'''OpenGL extension EXT.shared_texture_palette
Overview (from the spec)
EXT_shared_texture_palette defines a shared texture palette which may be
used in place of the texture object palettes provided by
EXT_paletted_texture. This is useful for rapidly changing a palette
common to many textures, rather than having to reload the new palette
for each texture. The extension acts as a switch, causing all lookups
that would normally be done on the texture's palette to instead use the
shared palette.
The official definition of this extension is available here:
http://oss.sgi.com/projects/ogl-sample/registry/EXT/shared_texture_palette.txt
Automatically generated by the get_gl_extensions script, do not edit!
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions
from OpenGL.GL import glget
import ctypes
# Canonical extension-string name used for runtime availability checks.
EXTENSION_NAME = 'GL_EXT_shared_texture_palette'
# GLenum value for the shared-palette toggle, as defined by the extension spec.
GL_SHARED_TEXTURE_PALETTE_EXT = constant.Constant( 'GL_SHARED_TEXTURE_PALETTE_EXT', 0x81FB )
def glInitSharedTexturePaletteEXT():
    '''Return boolean indicating whether this extension is available'''
    available = extensions.hasGLExtension(EXTENSION_NAME)
    return available
|
#!/usr/bin/env python
import pygame
from mob_class import Mob
#-------- This is the method I wanted to test, but trying to import it just launches the game..... so I brought it here for proof of concept.
all_sprites = pygame.sprite.Group()
mobs = pygame.sprite.Group()
def newmob(health):  # Evan
    """Spawn mobs into the global sprite groups.

    Spawns one Mob and returns 1, unless ``health`` equals 100, in which
    case two mobs are spawned and 2 is returned.
    """
    spawn_count = 2 if health == 100 else 1
    for _ in range(spawn_count):
        mob_element = Mob()
        all_sprites.add(mob_element)
        mobs.add(mob_element)
    return spawn_count
#----------- Start black box testing
# Black-box test driver: feed each health value from the input file to
# newmob() and record pass/fail according to the expected spawn count
# (input "100" should yield 2 spawned mobs; any other value should yield 1).
with open("spaceshooter/evan_test_input.txt", "r") as input_file:
    tests = input_file.readlines()
    # you may also want to remove whitespace characters like `\n` at the end of each line
    tests = [x.strip() for x in tests]
with open("spaceshooter/evan_test_output.txt", "w+") as output:
    for test in tests:
        result = newmob(int(test))
        # First chain handles the result == 2 outcomes.
        if test == '100' and result == 2:
            output.write("Tested: " + str(test).ljust(4) + " in newmob(). " "Result: " + str(result).ljust(4) + " -Passed" + "\n")
            continue
        elif test != '100' and result == 2:
            output.write("Tested: " + str(test).ljust(4) + " in newmob(). " "Result: " + str(result).ljust(4) + " -Failed" + "\n")
            continue
        # Second chain handles the result == 1 outcomes.
        if test != '100' and result == 1:
            output.write("Tested: " + str(test).ljust(4) + " in newmob(). " "Result: " + str(result).ljust(4) + " -Passed" + "\n")
            continue
        elif test == '100' and result == 1:
            output.write("Tested: " + str(test).ljust(4) + " in newmob(). " "Result: " + str(result).ljust(4) + " -Failed" + "\n")
            continue
print("Done Testing")
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: chia_tea/protobuf/generated/hardware.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='chia_tea/protobuf/generated/hardware.proto',
package='chia_tea.protobuf.generated.hardware_pb2',
syntax='proto3',
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n*chia_tea/protobuf/generated/hardware.proto\x12(chia_tea.protobuf.generated.hardware_pb2\"^\n\x03\x43pu\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x13\n\x0b\x63lock_speed\x18\x02 \x01(\x01\x12\r\n\x05usage\x18\x03 \x01(\x01\x12\x13\n\x0btemperature\x18\x04 \x01(\x01\x12\x10\n\x08n_vcores\x18\x05 \x01(\x05\"Q\n\x03Ram\x12\x11\n\ttotal_ram\x18\x01 \x01(\x03\x12\x10\n\x08used_ram\x18\x02 \x01(\x03\x12\x12\n\ntotal_swap\x18\x03 \x01(\x03\x12\x11\n\tused_swap\x18\x04 \x01(\x03\"\xb2\x02\n\x04\x44isk\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x13\n\x0btotal_space\x18\x03 \x01(\x01\x12\x12\n\nused_space\x18\x04 \x01(\x01\x12\x0e\n\x06\x64\x65vice\x18\x0c \x01(\t\x12\x12\n\nmountpoint\x18\r \x01(\t\x12\x0e\n\x06\x66stype\x18\x0e \x01(\t\x12\x15\n\rmount_options\x18\x0f \x01(\t\x12\x13\n\x0btemperature\x18\x05 \x01(\x01\x12\x15\n\rread_activity\x18\x06 \x01(\x01\x12\x16\n\x0ewrite_activity\x18\x07 \x01(\x01\x12\x12\n\nread_speed\x18\x08 \x01(\x01\x12\x13\n\x0bwrite_speed\x18\t \x01(\x01\x12\x16\n\x0eread_total_tbw\x18\n \x01(\x01\x12\x17\n\x0fwrite_total_tbw\x18\x0b \x01(\x01\x62\x06proto3'
)
_CPU = _descriptor.Descriptor(
name='Cpu',
full_name='chia_tea.protobuf.generated.hardware_pb2.Cpu',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='chia_tea.protobuf.generated.hardware_pb2.Cpu.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='clock_speed', full_name='chia_tea.protobuf.generated.hardware_pb2.Cpu.clock_speed', index=1,
number=2, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='usage', full_name='chia_tea.protobuf.generated.hardware_pb2.Cpu.usage', index=2,
number=3, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='temperature', full_name='chia_tea.protobuf.generated.hardware_pb2.Cpu.temperature', index=3,
number=4, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='n_vcores', full_name='chia_tea.protobuf.generated.hardware_pb2.Cpu.n_vcores', index=4,
number=5, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=88,
serialized_end=182,
)
_RAM = _descriptor.Descriptor(
name='Ram',
full_name='chia_tea.protobuf.generated.hardware_pb2.Ram',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='total_ram', full_name='chia_tea.protobuf.generated.hardware_pb2.Ram.total_ram', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='used_ram', full_name='chia_tea.protobuf.generated.hardware_pb2.Ram.used_ram', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='total_swap', full_name='chia_tea.protobuf.generated.hardware_pb2.Ram.total_swap', index=2,
number=3, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='used_swap', full_name='chia_tea.protobuf.generated.hardware_pb2.Ram.used_swap', index=3,
number=4, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=184,
serialized_end=265,
)
_DISK = _descriptor.Descriptor(
name='Disk',
full_name='chia_tea.protobuf.generated.hardware_pb2.Disk',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='chia_tea.protobuf.generated.hardware_pb2.Disk.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='chia_tea.protobuf.generated.hardware_pb2.Disk.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='total_space', full_name='chia_tea.protobuf.generated.hardware_pb2.Disk.total_space', index=2,
number=3, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='used_space', full_name='chia_tea.protobuf.generated.hardware_pb2.Disk.used_space', index=3,
number=4, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='device', full_name='chia_tea.protobuf.generated.hardware_pb2.Disk.device', index=4,
number=12, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='mountpoint', full_name='chia_tea.protobuf.generated.hardware_pb2.Disk.mountpoint', index=5,
number=13, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='fstype', full_name='chia_tea.protobuf.generated.hardware_pb2.Disk.fstype', index=6,
number=14, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='mount_options', full_name='chia_tea.protobuf.generated.hardware_pb2.Disk.mount_options', index=7,
number=15, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='temperature', full_name='chia_tea.protobuf.generated.hardware_pb2.Disk.temperature', index=8,
number=5, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='read_activity', full_name='chia_tea.protobuf.generated.hardware_pb2.Disk.read_activity', index=9,
number=6, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='write_activity', full_name='chia_tea.protobuf.generated.hardware_pb2.Disk.write_activity', index=10,
number=7, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='read_speed', full_name='chia_tea.protobuf.generated.hardware_pb2.Disk.read_speed', index=11,
number=8, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='write_speed', full_name='chia_tea.protobuf.generated.hardware_pb2.Disk.write_speed', index=12,
number=9, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='read_total_tbw', full_name='chia_tea.protobuf.generated.hardware_pb2.Disk.read_total_tbw', index=13,
number=10, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='write_total_tbw', full_name='chia_tea.protobuf.generated.hardware_pb2.Disk.write_total_tbw', index=14,
number=11, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=268,
serialized_end=574,
)
DESCRIPTOR.message_types_by_name['Cpu'] = _CPU
DESCRIPTOR.message_types_by_name['Ram'] = _RAM
DESCRIPTOR.message_types_by_name['Disk'] = _DISK
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Cpu = _reflection.GeneratedProtocolMessageType('Cpu', (_message.Message,), {
'DESCRIPTOR' : _CPU,
'__module__' : 'chia_tea.protobuf.generated.hardware_pb2'
# @@protoc_insertion_point(class_scope:chia_tea.protobuf.generated.hardware_pb2.Cpu)
})
_sym_db.RegisterMessage(Cpu)
Ram = _reflection.GeneratedProtocolMessageType('Ram', (_message.Message,), {
'DESCRIPTOR' : _RAM,
'__module__' : 'chia_tea.protobuf.generated.hardware_pb2'
# @@protoc_insertion_point(class_scope:chia_tea.protobuf.generated.hardware_pb2.Ram)
})
_sym_db.RegisterMessage(Ram)
Disk = _reflection.GeneratedProtocolMessageType('Disk', (_message.Message,), {
'DESCRIPTOR' : _DISK,
'__module__' : 'chia_tea.protobuf.generated.hardware_pb2'
# @@protoc_insertion_point(class_scope:chia_tea.protobuf.generated.hardware_pb2.Disk)
})
_sym_db.RegisterMessage(Disk)
# @@protoc_insertion_point(module_scope)
|
# -*- coding: utf-8 -*-
# Author: jlopes@usp.br
import os
import numpy as np
import pickle
class KitData(object):
def __init__(self,workspace,corpus_name,language):
self.workspace = workspace
self.corpus_name = corpus_name
self.language = language
def info_get(self):
if os.path.exists(self.workspace + self.corpus_name + '/data/idx/info.pickle'):
with open (self.workspace + self.corpus_name + '/data/idx/info.pickle', 'rb') as fh:
info = pickle.load(fh)
return info
else:
return None
def tokens_get(self):
info = self.info_get()
return info[0]
def types_get(self):
info = self.info_get()
return info[1]
def ttr_get(self):
info = self.info_get()
return info[2]
def hapax_get(self):
info = self.info_get()
return info[3]
def textfiles_get_names(self):
with open(self.workspace + self.corpus_name + '/data/idx/filenames.pickle','rb') as fh:
d = pickle.load(fh)
for k in d:
yield k[0]
def fileids_get(self):
with open(self.workspace + self.corpus_name + '/data/idx/filenames.pickle','rb') as fh:
d = pickle.load(fh)
for k in d:
yield k[1]
def words_get(self,fileids=None):
with open(self.workspace + self.corpus_name + '/data/idx/words.pickle','rb') as fh:
dict_words = pickle.load(fh)
with open(self.workspace + self.corpus_name + '/data/idx/filenames.pickle','rb') as fh:
dict_filenames = pickle.load(fh)
if fileids == None:
for k in dict_filenames:
arr = np.load(self.workspace + self.corpus_name + '/data/npy/' + k[0] + '.npy')
for i in range(0,arr.shape[0]):
yield dict_words[arr[i,0]]
else:
if type(fileids) != list:
fileids = [fileids]
for k in dict_filenames:
if k[1] in fileids:
arr = np.load(self.workspace + self.corpus_name + '/data/npy/' + k[0] + '.npy')
for i in range(0,arr.shape[0]):
yield dict_words[arr[i,0]]
def tagged_words_get(self,fileids=None):
with open(self.workspace + self.corpus_name + '/data/idx/words.pickle','rb') as fh:
dict_words = pickle.load(fh)
with open(self.workspace + self.corpus_name + '/data/idx/tags.pickle','rb') as fh:
dict_tags = pickle.load(fh)
with open(self.workspace + self.corpus_name + '/data/idx/filenames.pickle','rb') as fh:
dict_filenames = pickle.load(fh)
if fileids == None:
for k in dict_filenames:
arr = np.load(self.workspace + self.corpus_name + '/data/npy/' + k[0] + '.npy')
for i in range(0,arr.shape[0]):
yield (dict_words[arr[i,0]],dict_tags[arr[i,1]])
else:
if type(fileids) != list:
fileids = [fileids]
for k in dict_filenames:
if k[1] in fileids:
arr = np.load(self.workspace + self.corpus_name + '/data/npy/' + k[0] + '.npy')
for i in range(0,arr.shape[0]):
yield (dict_words[arr[i,0]],dict_tags[arr[i,1]])
def sents_get(self,fileids=None):
with open(self.workspace + self.corpus_name + '/data/idx/words.pickle','rb') as fh:
dict_words = pickle.load(fh)
with open(self.workspace + self.corpus_name + '/data/idx/filenames.pickle','rb') as fh:
dict_filenames = pickle.load(fh)
if fileids == None:
for k in dict_filenames:
arr = np.load(self.workspace + self.corpus_name + '/data/npy/' + k[0] + '.npy')
arr = np.split(arr[:, 0], np.cumsum(np.unique(arr[:, 2], return_counts=True)[1])[:-1])
for a in arr:
sent = []
for i in a:
sent.append(dict_words[i])
yield tuple(sent)
else:
if type(fileids) != list:
fileids = [fileids]
for k in dict_filenames:
if k[1] in fileids:
arr = np.load(self.workspace + self.corpus_name + '/data/npy/' + k[0] + '.npy')
arr = np.split(arr[:, 0], np.cumsum(np.unique(arr[:, 2], return_counts=True)[1])[:-1])
for a in arr:
sent = []
for i in a:
sent.append(dict_words[i])
yield tuple(sent)
def tagged_sents_get(self,fileids=None):
with open(self.workspace + self.corpus_name + '/data/idx/words.pickle','rb') as fh:
dict_words = pickle.load(fh)
with open(self.workspace + self.corpus_name + '/data/idx/filenames.pickle','rb') as fh:
dict_filenames = pickle.load(fh)
with open(self.workspace + self.corpus_name + '/data/idx/tags.pickle','rb') as fh:
dict_tags = pickle.load(fh)
if fileids == None:
for k in dict_filenames:
arr = np.load(self.workspace + self.corpus_name + '/data/npy/' + k[0] + '.npy')
arr_t = np.split(arr[:, 1], np.cumsum(np.unique(arr[:, 2], return_counts=True)[1])[:-1])
arr = np.split(arr[:, 0], np.cumsum(np.unique(arr[:, 2], return_counts=True)[1])[:-1])
for j in range(0,len(arr)):
sent = []
for i in range(0,arr[j].shape[0]):
sent.append((dict_words[arr[j][i]],dict_tags[arr_t[j][i]]))
yield sent
else:
if type(fileids) != list:
fileids = [fileids]
for k in dict_filenames:
if k[1] in fileids:
arr = np.load(self.workspace + self.corpus_name + '/data/npy/' + k[0] + '.npy')
arr_t = np.split(arr[:, 1], np.cumsum(np.unique(arr[:, 2], return_counts=True)[1])[:-1])
arr = np.split(arr[:, 0], np.cumsum(np.unique(arr[:, 2], return_counts=True)[1])[:-1])
for j in range(0,len(arr)):
sent = []
for i in range(0,arr[j].shape[0]):
sent.append((dict_words[arr[j][i]],dict_tags[arr_t[j][i]]))
yield sent
def ndarrays_get(self,fileids=None):
with open(self.workspace + self.corpus_name + '/data/idx/filenames.pickle','rb') as fh:
dict_filenames = pickle.load(fh)
if fileids == None:
for k in dict_filenames:
arr = np.load(self.workspace + self.corpus_name + '/data/npy/' + k[0] + '.npy')
yield arr
else:
if type(fileids) != list:
fileids = [fileids]
for k in dict_filenames:
if k[1] in fileids:
arr = np.load(self.workspace + self.corpus_name + '/data/npy/' + k[0] + '.npy')
yield arr
def ndarrays_filenames_get(self, fileids=None):
    """Yield the on-disk paths of the stored .npy files.

    Args:
        fileids: None for all files, a single fileid, or a list of fileids.

    Yields:
        str path for each matching .npy file (file existence is not checked).
    """
    with open(self.workspace + self.corpus_name + '/data/idx/filenames.pickle', 'rb') as fh:
        dict_filenames = pickle.load(fh)
    # Normalize a single fileid to a list; None means "all files".  The two
    # previously duplicated branches collapse into one filtered loop.
    if fileids is not None and type(fileids) != list:
        fileids = [fileids]
    for k in dict_filenames:
        if fileids is not None and k[1] not in fileids:
            continue
        yield str(self.workspace + self.corpus_name + '/data/npy/' + k[0] + '.npy')
def dict_words_get(self):
    """Return the word-index dictionary loaded from the words pickle."""
    index_path = self.workspace + self.corpus_name + '/data/idx/words.pickle'
    with open(index_path, 'rb') as handle:
        return pickle.load(handle)
def dict_tags_get(self):
    """Return the tag-index dictionary loaded from the tags pickle."""
    index_path = self.workspace + self.corpus_name + '/data/idx/tags.pickle'
    with open(index_path, 'rb') as handle:
        return pickle.load(handle)
|
import pygame

pygame.init()

win = pygame.display.set_mode((500, 500))
pygame.display.set_caption("First Game")

# Player rectangle state and movement speed (pixels per frame).
x = 50
y = 50
width = 40
height = 60
vel = 5

run = True
while run:
    # Throttle the loop to roughly 10 iterations per second.
    pygame.time.delay(100)

    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            run = False

    keys = pygame.key.get_pressed()
    if keys[pygame.K_LEFT]:
        print('left')
        x -= vel
    if keys[pygame.K_RIGHT]:
        print('right')
        x += vel
    if keys[pygame.K_UP]:
        print('up')
        y -= vel
    if keys[pygame.K_DOWN]:
        print('down')
        y += vel

    # BUG FIX: clear the previous frame before drawing; without this the
    # rectangle smears a trail across the window as it moves.
    win.fill((0, 0, 0))
    pygame.draw.rect(win, (255, 0, 0), (x, y, width, height))
    pygame.display.update()

pygame.quit()
# Copyright 2015 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
"""Manages PubSub topic that receives notifications about AuthDB changes.
The topic is hosted in auth_service Cloud Project and auth_service manages its
IAM policies.
All service accounts listed in 'auth-trusted-services' group are entitled for
a subscription to AuthDB change notifications, so that they can pull AuthDB
snapshots as soon as they are available.
Members of 'auth-trusted-services' can create as many subscriptions as they like.
They have 'pubsub.topics.attachSubscription' permission on the topic and can
create subscriptions belonging to Cloud Projects they own.
"""
import base64
import logging
from google.appengine.api import app_identity
from components import auth
from components import pubsub
from components import utils
from components.auth import signature
from components.auth.proto import replication_pb2
import acl
# Fatal errors raised by this module. Reuse pubsub.Error to avoid catching and
# raising an exception again all the time.
Error = pubsub.Error
def topic_name():
  """Full name of PubSub topic that receives AuthDB change notifications."""
  app_id = app_identity.get_application_id()
  return pubsub.full_topic_name(app_id, 'auth-db-changed')
def _email_to_iam_ident(email):
"""Given email returns 'user:...' or 'serviceAccount:...'."""
if email.endswith('.gserviceaccount.com'):
return 'serviceAccount:' + email
return 'user:' + email
def _iam_ident_to_email(ident):
"""Given IAM identity returns email address or None."""
for p in ('user:', 'serviceAccount:'):
if ident.startswith(p):
return ident[len(p):]
return None
def is_authorized_subscriber(email):
  """True if given user can attach subscriptions to the topic."""
  iam_ident = _email_to_iam_ident(email)
  with pubsub.iam_policy(topic_name()) as policy:
    return iam_ident in policy.members('roles/pubsub.subscriber')
def authorize_subscriber(email):
  """Allows given user to attach subscriptions to the topic."""
  iam_ident = _email_to_iam_ident(email)
  with pubsub.iam_policy(topic_name()) as policy:
    policy.add_member('roles/pubsub.subscriber', iam_ident)
def deauthorize_subscriber(email):
  """Revokes authorization to attach subscriptions to the topic."""
  iam_ident = _email_to_iam_ident(email)
  with pubsub.iam_policy(topic_name()) as policy:
    policy.remove_member('roles/pubsub.subscriber', iam_ident)
def revoke_stale_authorization():
  """Removes pubsub.subscriber role from accounts that no longer have access."""
  try:
    with pubsub.iam_policy(topic_name()) as policy:
      for iam_ident in policy.members('roles/pubsub.subscriber'):
        email = _iam_ident_to_email(iam_ident)
        # Skip identities that are not user/serviceAccount emails.
        if not email:
          continue
        ident = auth.Identity.from_bytes('user:' + email)
        if acl.is_trusted_service(ident):
          continue
        # Still on the topic's policy, but no longer trusted -> drop it.
        logging.warning('Removing "%s" from subscribers list', iam_ident)
        policy.remove_member('roles/pubsub.subscriber', iam_ident)
  except Error as e:
    logging.warning('Failed to revoke stale users: %s', e)
def publish_authdb_change(state):
  """Publishes AuthDB change notification to the topic.

  Args:
    state: AuthReplicationState with version info.
  """
  # There is no real PubSub on the local dev server; skip silently.
  if utils.is_local_dev_server():
    return
  # Build the notification payload: primary app id, AuthDB revision and its
  # modification timestamp.
  msg = replication_pb2.ReplicationPushRequest()
  msg.revision.primary_id = app_identity.get_application_id()
  msg.revision.auth_db_rev = state.auth_db_rev
  msg.revision.modified_ts = utils.datetime_to_timestamp(state.modified_ts)
  blob = msg.SerializeToString()
  # Sign the serialized message so subscribers can verify its origin; the key
  # name and base64 signature travel as PubSub message attributes.
  key_name, sig = signature.sign_blob(blob)
  pubsub.publish(topic_name(), blob, {
    'X-AuthDB-SigKey-v1': key_name,
    'X-AuthDB-SigVal-v1': base64.b64encode(sig),
  })
|
from direct.interval.IntervalGlobal import *
from direct.distributed.ClockDelta import *
from toontown.toonbase import ToontownGlobals
from direct.directnotify import DirectNotifyGlobal
from direct.fsm import ClassicFSM
import DistributedCogHQDoor
from toontown.hood import ZoneUtil
from BossLobbyGui import BossLobbyGui
class DistributedCogHQExteriorDoor(DistributedCogHQDoor.DistributedCogHQDoor):
    """Client-side exterior CogHQ door that shows a boss-lobby selection GUI
    before confirming entrance with the server."""

    def __init__(self, cr):
        DistributedCogHQDoor.DistributedCogHQDoor.__init__(self, cr)
        # BossLobbyGui instance; None while no selection is in progress.
        self.lobbyGui = None

    def selectLobby(self, avId):
        # Open the lobby-selection GUI for the given avatar id; the GUI calls
        # back into sendConfirmation when the player finishes.
        print("********\nCreating Lobby GUI...\n********")
        self.lobbyGui = BossLobbyGui(self.sendConfirmation, avId)
        self.lobbyGui.loadFrame(0)

    def sendConfirmation(self, avId, status):
        # Tear down the GUI and forward the player's choice to the server via
        # the 'confirmEntrance' distributed update.
        self.lobbyGui.destroy()
        self.lobbyGui = None
        print("********\nGUI Complete.\nSending Confirmation...\n********")
        self.sendUpdate('confirmEntrance', [avId, status])
|
import pandas as pd
from Model.resnet18 import ResNet
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers, Sequential, optimizers
from LoadData.LoadData import InputTrainImg, InputTestImg
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, roc_auc_score
import numpy as np
import matplotlib.pyplot as plt
from itertools import cycle
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
from scipy import interp
from sklearn.metrics import roc_auc_score
from tensorflow.keras.applications.inception_v3 import InceptionV3
from tensorflow.keras.applications.densenet import DenseNet121
import numpy as np
import tensorflow as tf
from tensorflow import keras
# Display
from IPython.display import Image, display
import matplotlib.pyplot as plt
import matplotlib.cm as cm
# ---- Model construction -------------------------------------------------
# Three-class liver tumour classification (CHC / HCC / ICC per the commented
# plot labels further below); all models take width x width RGB inputs.
num_task = 3
width = 128

# Custom ResNet-18 ([2, 2, 2, 2] residual stages) from the project's module.
model = ResNet([2, 2, 2, 2], num_task)
model.build(input_shape = (None, width, width, 3))
optimizer = optimizers.Adam(learning_rate=1e-8)  # learning rate
model.compile(loss='sparse_categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])

# MobileNet backbone with frozen ImageNet weights + small dense head.
base_model2 = tf.keras.applications.MobileNet(weights='imagenet', include_top=False, input_shape=(width,width,3))
base_model2.trainable = False
x = base_model2.output
x = tf.keras.layers.GlobalAveragePooling2D()(x)
x = tf.keras.layers.Dropout(0.3)(x)
x = tf.keras.layers.Dense(8,activation='relu')(x)
x = tf.keras.layers.Dense(3,activation="softmax")(x)
model2 = tf.keras.Model(inputs=base_model2.input, outputs=x)
model2.compile(optimizer='adam',loss='sparse_categorical_crossentropy',metrics=['accuracy'])

# DenseNet121 backbone, same frozen-base + head recipe.
base_model4 = DenseNet121(weights='imagenet', include_top=False, input_shape=(width, width, 3))
base_model4.trainable = False
x = base_model4.output
x = tf.keras.layers.GlobalAveragePooling2D()(x)
x = tf.keras.layers.Dropout(0.3)(x)
x = tf.keras.layers.Dense(8,activation='relu')(x)
x = tf.keras.layers.Dense(3,activation="softmax")(x)
model4 = tf.keras.Model(inputs=base_model4.input, outputs=x)
model4.compile(optimizer='adam',loss='sparse_categorical_crossentropy',metrics=['accuracy'])

# InceptionV3 backbone, same frozen-base + head recipe.
base_model7 = InceptionV3(weights='imagenet', include_top=False, input_shape=(width, width, 3))
base_model7.trainable = False
x = base_model7.output
x = tf.keras.layers.GlobalAveragePooling2D()(x)
x = tf.keras.layers.Dropout(0.3)(x)
x = tf.keras.layers.Dense(8,activation='relu')(x)
x = tf.keras.layers.Dense(3,activation="softmax")(x)
model7 = tf.keras.Model(inputs=base_model7.input, outputs=x)
model7.compile(optimizer='adam',loss='sparse_categorical_crossentropy',metrics=['accuracy'])
# ---- Load trained weights and evaluate on the held-out test set ---------
model.load_weights('Liverre_949_0.75')
model2.load_weights('Liver2_100_0.95')
model4.load_weights('Liver4_69_0.60')
model7.load_weights('Liver7_1_0.39')

dat = InputTestImg('D:/projects/Liver/test', width)
test_x, test_y = dat.load_test_data()

# Keras evaluate per model; `score` is overwritten each time and unused later.
score = model.evaluate(test_x, test_y)
score = model2.evaluate(test_x, test_y)
score = model4.evaluate(test_x, test_y)
score = model7.evaluate(test_x, test_y)

# Hard class predictions (argmax over the softmax outputs).
pre_y = np.argmax(model.predict(test_x), axis=1)
pre_y2 = np.argmax(model2.predict(test_x), axis=1)
pre_y4 = np.argmax(model4.predict(test_x), axis=1)
pre_y7 = np.argmax(model7.predict(test_x), axis=1)

# Confusion matrix for each model.
c = confusion_matrix(test_y, pre_y)
c2 = confusion_matrix(test_y, pre_y2)
c4 = confusion_matrix(test_y, pre_y4)
c7 = confusion_matrix(test_y, pre_y7)
print(c)
print(c2)
print(c4)
print(c7)
# plt.figure()
# classes = ['CHC', 'HCC', 'ICC']
#
# plt.imshow(c, interpolation='nearest', cmap=plt.cm.Oranges)  # show the matrix as an image
# plt.title('Confusion Matrix of Liver Tumour')
# plt.colorbar()
# tick_marks = np.arange(len(classes))
# plt.xticks(tick_marks, classes, rotation=-45)
# plt.yticks(tick_marks, classes)
#
# thresh = c.max() / 2.
# iters = np.reshape([[[i, j] for j in range(3)] for i in range(3)], (c.size, 2))
# for i, j in iters:
#     plt.text(j, i, format(c[i, j]))  # overlay the cell counts
#
# plt.ylabel('Real label')
# plt.xlabel('Prediction')
# plt.tight_layout()
# plt.savefig('CHCHCCICC.svg', format='svg')
# plt.show()

# Macro-averaged precision / recall / F1 for each model.
print(precision_score(test_y, pre_y, average='macro'))
print(recall_score(test_y, pre_y, average='macro'))
print(f1_score(test_y, pre_y, average='macro'))
print(precision_score(test_y, pre_y2, average='macro'))
print(recall_score(test_y, pre_y2, average='macro'))
print(f1_score(test_y, pre_y2, average='macro'))
print(precision_score(test_y, pre_y4, average='macro'))
print(recall_score(test_y, pre_y4, average='macro'))
print(f1_score(test_y, pre_y4, average='macro'))
print(precision_score(test_y, pre_y7, average='macro'))
print(recall_score(test_y, pre_y7, average='macro'))
print(f1_score(test_y, pre_y7, average='macro'))
# print(test_y)
# print(pre_y)

# One-vs-one and one-vs-rest macro AUCs (note: predict() is re-run per call).
print(roc_auc_score(test_y, model.predict(test_x), multi_class='ovo'))
print(roc_auc_score(test_y, model.predict(test_x), multi_class='ovr'))
print(roc_auc_score(test_y, model2.predict(test_x), multi_class='ovo'))
print(roc_auc_score(test_y, model2.predict(test_x), multi_class='ovr'))
print(roc_auc_score(test_y, model4.predict(test_x), multi_class='ovo'))
print(roc_auc_score(test_y, model4.predict(test_x), multi_class='ovr'))
print(roc_auc_score(test_y, model7.predict(test_x), multi_class='ovo'))
print(roc_auc_score(test_y, model7.predict(test_x), multi_class='ovr'))
# print(train_x.shape)
# print(final_pre)
# print(test_y)
# print(test_z[test_y == 0])
# print("test_score: ", score)
# print(test_z[final_pre != test_y])
# print(test_y[final_pre != test_y])
# print(test_z[final_pre != test_y].shape)
# First aggregate all false positive rates
# ---- Per-class ROC curves and macro-average ROC for each model ----------
y = label_binarize(test_y, classes=[0, 1, 2])
n_classes = y.shape[1]

# Soft predictions (class probabilities) used for the ROC curves.
pred_y = model.predict(test_x)
pred_y2 = model2.predict(test_x)
pred_y4 = model4.predict(test_x)
pred_y7 = model7.predict(test_x)

# Add noisy features to make the problem harder
# NOTE(review): leftover from the sklearn ROC example — random_state is never
# used below.
random_state = np.random.RandomState(0)

# Compute ROC curve and ROC area for each class, one dict set per model.
fpr = dict()
tpr = dict()
roc_auc = dict()
fpr2 = dict()
tpr2 = dict()
roc_auc2 = dict()
fpr4 = dict()
tpr4 = dict()
roc_auc4 = dict()
fpr7 = dict()
tpr7 = dict()
roc_auc7 = dict()
for i in range(n_classes):
    fpr[i], tpr[i], _ = roc_curve(y[:, i], pred_y[:, i])
    fpr2[i], tpr2[i], _ = roc_curve(y[:, i], pred_y2[:, i])
    fpr4[i], tpr4[i], _ = roc_curve(y[:, i], pred_y4[:, i])
    fpr7[i], tpr7[i], _ = roc_curve(y[:, i], pred_y7[:, i])
    roc_auc[i] = auc(fpr[i], tpr[i])
    roc_auc2[i] = auc(fpr2[i], tpr2[i])
    roc_auc4[i] = auc(fpr4[i], tpr4[i])
    roc_auc7[i] = auc(fpr7[i], tpr7[i])

# Compute micro-average ROC curve and ROC area
# fpr["micro"], tpr["micro"], _ = roc_curve(y.ravel(), pred_y.ravel())
# roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])

# Union of all per-class FPR grid points, per model.
all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))
all_fpr2 = np.unique(np.concatenate([fpr2[i] for i in range(n_classes)]))
all_fpr4 = np.unique(np.concatenate([fpr4[i] for i in range(n_classes)]))
all_fpr7 = np.unique(np.concatenate([fpr7[i] for i in range(n_classes)]))

# Then interpolate all ROC curves at this points
mean_tpr = np.zeros_like(all_fpr)
mean_tpr2 = np.zeros_like(all_fpr2)
mean_tpr4 = np.zeros_like(all_fpr4)
mean_tpr7 = np.zeros_like(all_fpr7)
for i in range(n_classes):
    mean_tpr += interp(all_fpr, fpr[i], tpr[i])
    mean_tpr2 += interp(all_fpr2, fpr2[i], tpr2[i])
    mean_tpr4 += interp(all_fpr4, fpr4[i], tpr4[i])
    mean_tpr7 += interp(all_fpr7, fpr7[i], tpr7[i])

# Finally average it and compute AUC
mean_tpr /= n_classes
mean_tpr2 /= n_classes
mean_tpr4 /= n_classes
mean_tpr7 /= n_classes

# Store the macro-average curve under the "macro" key, matching sklearn's
# multi-class ROC example layout.
fpr["macro"] = all_fpr
tpr["macro"] = mean_tpr
roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
fpr2["macro"] = all_fpr2
tpr2["macro"] = mean_tpr2
roc_auc2["macro"] = auc(fpr2["macro"], tpr2["macro"])
fpr4["macro"] = all_fpr4
tpr4["macro"] = mean_tpr4
roc_auc4["macro"] = auc(fpr4["macro"], tpr4["macro"])
fpr7["macro"] = all_fpr7
tpr7["macro"] = mean_tpr7
roc_auc7["macro"] = auc(fpr7["macro"], tpr7["macro"])
# Plot all ROC curves
# ---- Plot the four macro-average ROC curves on one figure ---------------
# plt.figure()
# plt.plot(
#     fpr["micro"],
#     tpr["micro"],
#     label="micro-average ROC curve (area = {0:0.2f})".format(roc_auc["micro"]),
#     color="deeppink",
#     linestyle=":",
#     linewidth=4,
# )
plt.plot(
    fpr["macro"],
    tpr["macro"],
    label="ROC curve of Our Method(area = {0:0.2f})".format(roc_auc["macro"]),
    color="navy",
    linestyle="-",
    linewidth=3,
)
plt.plot(
    fpr2["macro"],
    tpr2["macro"],
    label="ROC curve of MobileNet(area = {0:0.2f})".format(roc_auc2["macro"]),
    color="aqua",
    linestyle="-",
    linewidth=3,
)
plt.plot(
    fpr4["macro"],
    tpr4["macro"],
    label="ROC curve of DenseNet121(area = {0:0.2f})".format(roc_auc4["macro"]),
    color="darkorange",
    linestyle="-",
    linewidth=3,
)
plt.plot(
    fpr7["macro"],
    tpr7["macro"],
    label="ROC curve of InceptionV3(area = {0:0.2f})".format(roc_auc7["macro"]),
    color="deeppink",
    linestyle="-",
    linewidth=3,
)
lw = 2
# colors = cycle(["aqua", "darkorange", "cornflowerblue"])
# for i, color in zip(range(n_classes), colors):
#     plt.plot(
#         fpr[i],
#         tpr[i],
#         color=color,
#         lw=lw,
#         label="ROC curve of class {0} (area = {1:0.2f})".format(i, roc_auc[i]),
#     )
# Diagonal chance line, axis limits, labels, and the saved figure.
plt.plot([0, 1], [0, 1], "k--", lw=lw)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate")
plt.title("ROC of Liver Cancer")
plt.legend(loc="lower right")
plt.savefig('ROCfinal.svg',format='svg')
plt.show()
def bootstrap_auc(y, pred, classes, bootstraps = 10000, fold_size = 1000):
    """Estimate per-class AUC distributions via stratified bootstrap resampling.

    Args:
        y: (n_samples, n_classes) binarized ground-truth labels (0/1).
        pred: (n_samples, n_classes) predicted scores.
        classes: sequence of class names; only its length is used.
        bootstraps: number of bootstrap replicates per class.
        fold_size: number of samples drawn per replicate.

    Returns:
        np.ndarray of shape (len(classes), bootstraps) with AUC scores.
    """
    statistics = np.zeros((len(classes), bootstraps))
    for c in range(len(classes)):
        # BUG FIX: build the frame directly from the data.  The original
        # assigned columns into an *empty* DataFrame via `df.loc[:, 'y'] = ...`,
        # which leaves the frame empty / errors on modern pandas.
        df = pd.DataFrame({'y': y[:, c], 'pred': pred[:, c]})
        df_pos = df[df.y == 1]
        df_neg = df[df.y == 0]
        # Preserve the class prevalence of the data in every replicate.
        prevalence = len(df_pos) / len(df)
        for i in range(bootstraps):
            pos_sample = df_pos.sample(n = int(fold_size * prevalence), replace=True)
            neg_sample = df_neg.sample(n = int(fold_size * (1-prevalence)), replace=True)
            y_sample = np.concatenate([pos_sample.y.values, neg_sample.y.values])
            pred_sample = np.concatenate([pos_sample.pred.values, neg_sample.pred.values])
            score = roc_auc_score(y_sample, pred_sample)
            statistics[c][i] = score
    return statistics
# Binarized hard predictions (kept for the commented bootstrap-AUC experiment).
pre_yy = label_binarize(pre_y, classes=[0, 1, 2])
pre_yy2 = label_binarize(pre_y2, classes=[0, 1, 2])
pre_yy4 = label_binarize(pre_y4, classes=[0, 1, 2])
pre_yy7 = label_binarize(pre_y7, classes=[0, 1, 2])
# statistics = bootstrap_auc(y, pre_yy, ['CHC', 'HCC', 'ICC'])
# print(statistics.shape)
# print(statistics)
# print(statistics.mean(axis=1))
# print(statistics.mean(axis=1)+statistics.std(axis=1))
# print(statistics.mean(axis=1)-statistics.std(axis=1))

# Print hard labels and dump each model's class probabilities to CSV.
print(np.argmax(pred_y,axis=1))
print(np.argmax(pred_y2,axis=1))
print(np.argmax(pred_y4,axis=1))
print(np.argmax(pred_y7,axis=1))
pd.DataFrame(pred_y).to_csv('p1.csv')
pd.DataFrame(pred_y2).to_csv('p2.csv')
pd.DataFrame(pred_y4).to_csv('p4.csv')
pd.DataFrame(pred_y7).to_csv('p7.csv')
pd.DataFrame(y).to_csv('y.csv')
print(y)

# ---- Grad-CAM configuration ---------------------------------------------
# img_path = 'D:/projects/Liver/train/HCC/SHI HONGMEICEUS LIVER20171120153627591.JPG'
model_builder = model2
img_size = (128, 128)
# NOTE(review): this value is overwritten with "conv_pw_13_relu" before the
# Grad-CAM call further below.
last_conv_layer_name = "conv_pw_5"
def get_img_array(img_path, size):
    """Load an image from disk and return it as a (1, 128, 128, 3) float batch.

    Note: `size` only affects the initial load; the image is then cropped to
    the fixed (0, 50, 950, 850) box and resized to 128x128, so the effective
    output shape is always (1, 128, 128, 3).
    """
    # `img` is a PIL image of size 299x299
    img = keras.preprocessing.image.load_img(img_path, target_size=size)
    # Crop a fixed region — presumably to strip scanner overlay/borders from
    # the ultrasound frames; TODO confirm against the source images.
    box = (0, 50, 950, 850)
    img = img.crop(box)
    img = img.resize((128, 128))
    img = img.convert('RGB')
    # img = image.img_to_array(img,dtype='float32')
    # `array` is a float32 Numpy array of shape (299, 299, 3)
    array = keras.preprocessing.image.img_to_array(img)
    # We add a dimension to transform our array into a "batch"
    # of size (1, 299, 299, 3)
    array = np.expand_dims(array, axis=0)
    return array
def make_gradcam_heatmap(img_array, model, last_conv_layer_name, pred_index=None):
    """Compute a Grad-CAM heatmap for one input batch.

    Args:
        img_array: preprocessed batch, shape (1, H, W, 3).
        model: Keras model (its final softmax should be removed beforehand).
        last_conv_layer_name: name of the conv layer to explain.
        pred_index: class index to explain; defaults to the top prediction.

    Returns:
        2-D numpy array normalized to [0, 1].
    """
    # First, we create a model that maps the input image to the activations
    # of the last conv layer as well as the output predictions.
    grad_model = tf.keras.models.Model(
        [model.inputs], [model.get_layer(last_conv_layer_name).output, model.output]
    )
    # Then, we compute the gradient of the top predicted class for our input
    # image with respect to the activations of the last conv layer.
    with tf.GradientTape() as tape:
        last_conv_layer_output, preds = grad_model(img_array)
        # If pred_index was not given, use the index of the largest value in preds[0].
        if pred_index is None:
            pred_index = tf.argmax(preds[0])
        class_channel = preds[:, pred_index]
    # This is the gradient of the output neuron (top predicted or chosen)
    # with regard to the output feature map of the last conv layer.
    # grads.shape == (1, h, w, channels)
    grads = tape.gradient(class_channel, last_conv_layer_output)
    # This is a vector where each entry is the mean intensity of the gradient
    # over a specific feature map channel; pooled_grads.shape == (channels,)
    pooled_grads = tf.reduce_mean(grads, axis=(0, 1, 2))
    # We multiply each channel in the feature map array
    # by "how important this channel is" with regard to the top predicted class
    # then sum all the channels to obtain the heatmap class activation.
    # last_conv_layer_output[0] drops the batch dim -> (h, w, channels).
    last_conv_layer_output = last_conv_layer_output[0]
    # (h, w, channels) @ (channels, 1) -> (h, w, 1) weighted channel sum.
    heatmap = last_conv_layer_output @ pooled_grads[..., tf.newaxis]
    # tf.squeeze removes the trailing singleton dimension -> (h, w).
    heatmap = tf.squeeze(heatmap)
    # For visualization purpose, we also normalize the heatmap between 0 & 1:
    # tf.maximum(heatmap, 0) acts as a ReLU, then dividing by the max scales to [0, 1].
    heatmap = tf.maximum(heatmap, 0) / tf.math.reduce_max(heatmap)
    return heatmap.numpy()
model2.summary()

# Load one ICC training image; note get_img_array always outputs 128x128
# regardless of the size argument passed here.
img_array = get_img_array('D:/projects/Liver/train/ICC/20180510083645785.JPG', size=(224,224))

# Remove the final softmax so Grad-CAM gradients flow from raw class scores.
model2.layers[-1].activation = None
# Print what the top predicted class is
preds = model2.predict(img_array)
# print("Predicted:", decode_predictions(preds, top=1)[0])  # [0] = first sample of the batch
last_conv_layer_name = "conv_pw_13_relu"
# Generate class activation heatmapconv2_block3_outconv3_block8_out
heatmap = make_gradcam_heatmap(img_array, model2, last_conv_layer_name)
# Display heatmap
plt.matshow(heatmap)
# plt.colorbar()
plt.xticks([])  # hide x-axis tick labels
plt.yticks([])  # hide y-axis tick labels
# plt.show()
# plt.colorbar()
plt.savefig('CHC11.svg')
def save_and_display_gradcam(img_path, heatmap, cam_path="cam.jpg", alpha=0.4):
    """Overlay a Grad-CAM heatmap on the source image and save the result.

    Args:
        img_path: path of the original image (cropped/resized like training data).
        heatmap: 2-D array in [0, 1] from make_gradcam_heatmap.
        cam_path: where to save the superimposed JPEG.
        alpha: heatmap blending weight.

    Returns:
        The path the overlay was saved to (== cam_path).
    """
    # Load and preprocess the original image the same way as get_img_array.
    img = keras.preprocessing.image.load_img(img_path)
    box = (0, 50, 950, 850)
    img = img.crop(box)
    img = img.resize((128, 128))
    img = img.convert('RGB')
    img = keras.preprocessing.image.img_to_array(img)
    # Rescale heatmap to 0-255 so it can index the 256-entry colormap table.
    heatmap = np.uint8(255 * heatmap)
    # Use jet colormap to colorize heatmap (RGB values only, drop alpha).
    jet = cm.get_cmap("jet")
    jet_colors = jet(np.arange(256))[:, :3]
    jet_heatmap = jet_colors[heatmap]
    # Create an image with RGB colorized heatmap, resized to the base image.
    jet_heatmap = keras.preprocessing.image.array_to_img(jet_heatmap)
    jet_heatmap = jet_heatmap.resize((img.shape[1], img.shape[0]))
    jet_heatmap = keras.preprocessing.image.img_to_array(jet_heatmap)
    # Superimpose the heatmap on the original image.
    superimposed_img = jet_heatmap * alpha + img
    superimposed_img = keras.preprocessing.image.array_to_img(superimposed_img)
    # Save the superimposed image
    superimposed_img.save(cam_path)
    # Display Grad CAM
    # display(Image(cam_path))
    # Simplified: return the path directly instead of aliasing it through a
    # throwaway local variable.
    return cam_path
# Build the overlay, then re-load the saved file and show it with a colorbar.
a = save_and_display_gradcam('D:/projects/Liver/train/ICC/20180510083645785.JPG', heatmap)
# display(Image(a))
a = plt.imread(a)
# plt.colorbar()
plt.imshow(a)
plt.colorbar()
# plt.xticks([])  # hide x-axis tick labels
# plt.yticks([])  # hide y-axis tick labels
plt.title('ICC')
plt.savefig('ICC.svg')
|
#!/usr/bin/env python
# coding=utf-8
__author__ = 'zhaoliang'
__email__ = 'zhaoliang@iflytek.com'
__created__ = '15/12/19'
import os

# CSRF protection flag — presumably consumed by Flask-WTF; confirm usage.
CSRF_ENABLED = True
# Secret key used for session signing.
SECRET_KEY = 'guess what you can and try it'
# MySQL connection parameters for the 'vansel' database.
DATABASE_URI = {
    'host': 'localhost',
    'user': 'root',
    'passwd': 'root',
    'port': 3306,
    'db': 'vansel',
    'charset': 'utf8'
}
# Filesystem layout: project root, prototype cache, and upload directory.
ROOT_PATH = os.path.abspath(os.path.dirname(__file__))
PROTYPE_PATH = os.path.join(ROOT_PATH, 'cache/protype')
UPLOAD_FOLDER = os.path.join(ROOT_PATH, 'cache/data/')
# Public base URL under which uploaded files are served.
UPLOAD_BASE_URL = 'http://127.0.0.1/vansel/'
# Region used by the fake identity document below.
FAKE_REGION = 'eu-west-1'
# Canned EC2 instance-identity document (JSON) whose "region" field matches
# FAKE_REGION; the availabilityZone carries an "-ignored" suffix on purpose.
FAKE_IDENTITY_DOCUMENT_STRING = """{
"accountId" : "000011112222",
"architecture" : "x86_64",
"availabilityZone" : "eu-west-1a-ignored",
"billingProducts" : null,
"devpayProductCodes" : null,
"marketplaceProductCodes" : null,
"imageId" : "ami-00001111222233334",
"instanceId" : "i-00001111222233334",
"instanceType" : "m5.xlarge",
"kernelId" : null,
"pendingTime" : "2019-12-17T20:38:23Z",
"privateIp" : "10.1.2.3",
"ramdiskId" : null,
"region" : "eu-west-1",
"version" : "2017-09-30"
}
"""
|
from mopidy_tunein import tunein
# Full ASX 3.0 playlist: each ENTRY carries its URI in a nested <REF href>.
ASX = b"""<ASX version="3.0">
<TITLE>Example</TITLE>
<ENTRY>
<TITLE>Sample Title</TITLE>
<REF href="file:///tmp/foo" />
</ENTRY>
<ENTRY>
<TITLE>Example title</TITLE>
<REF href="file:///tmp/bar" />
</ENTRY>
<ENTRY>
<TITLE>Other title</TITLE>
<REF href="file:///tmp/baz" />
</ENTRY>
</ASX>
"""
# Simplified ASX variant: the href sits directly on the ENTRY element.
SIMPLE_ASX = b"""<ASX version="3.0">
<ENTRY href="file:///tmp/foo" />
<ENTRY href="file:///tmp/bar" />
<ENTRY href="file:///tmp/baz" />
</ASX>
"""
# Legacy INI-style [Reference] playlist.
OLD_ASX = b"""[Reference]
Ref1=file:///tmp/foo
Ref2=file:///tmp/bar
Ref3=file:///tmp/baz
"""
# ASF variant: entries flagged with MSWMExt=.asf (any case) plus a plain URL.
ASF_ASX = b"""[Reference]
Ref1=http://tmp.com/foo-mbr?MSWMExt=.asf
Ref2=mms://tmp.com:80/bar-mbr?mswmext=.asf
Ref3=http://tmp.com/baz
"""
class BasePlaylistAsx:
    """Shared test base: subclasses set `valid` to an ASX variant; the single
    test checks tunein.parse_asx extracts the three expected file URIs."""

    # ASX payload under test; overridden by each subclass.
    valid = None
    # staticmethod() so the parser is not bound as an instance method.
    parse = staticmethod(tunein.parse_asx)

    def test_parse_valid_playlist(self):
        uris = list(self.parse(self.valid))
        expected = ["file:///tmp/foo", "file:///tmp/bar", "file:///tmp/baz"]
        assert uris == expected
class AsxPlaylistTest(BasePlaylistAsx):
    # Exercises the full ASX 3.0 format with nested <REF href> entries.
    valid = ASX
class AsxSimplePlaylistTest(BasePlaylistAsx):
    # Exercises the simplified ASX format with href directly on ENTRY.
    valid = SIMPLE_ASX
class AsxOldPlaylistTest(BasePlaylistAsx):
    # Exercises the legacy INI-style [Reference] playlist format.
    valid = OLD_ASX
class TestPlaylist:
    """Checks ASF [Reference] playlists: per the expected list, http entries
    marked with MSWMExt=.asf come back as mms URIs (lower-cased query, port
    stripped for Ref1), while plain http entries pass through unchanged."""

    parse = staticmethod(tunein.parse_asx)

    def test_parse_asf_playlist(self):
        uris = list(self.parse(ASF_ASX))
        expected = [
            "mms://tmp.com/foo-mbr?mswmext=.asf",
            "mms://tmp.com:80/bar-mbr?mswmext=.asf",
            "http://tmp.com/baz",
        ]
        assert uris == expected
|
def rectangle(length, width):
    """Compute the area of a rectangle and print a summary line.

    Args:
        length: positive side length.
        width: positive side length.

    Returns:
        dict with key "area" holding length * width.

    Raises:
        ValueError: if either side is zero or negative.
    """
    if length <= 0 or width <= 0:
        raise ValueError("The side length cannot be negative or zero!")
    measures = {"area": length * width}
    # Fix: the value was wrapped in a redundant format() call inside the
    # f-string; plain interpolation prints the identical text.
    print(f'The rectangle has area {measures["area"]}')
    return measures
|
# Day3 of my 100DaysOfCode Challenge
# Instructions
# Congratulations, you've got a job at Python Pizza. Your first job is to build an automatic pizza order program.
# Based on a user's order, work out their final bill.
# Small Pizza: $15
# Medium Pizza: $20
# Large Pizza: $25
# Pepperoni for Small Pizza: +$2
# Pepperoni for Medium or Large Pizza: +$3
# Extra cheese for any size pizza: + $1
# 🚨 Don't change the code below 👇
print("Welcome to Python Pizza Deliveries!")
size = input("What size pizza do you want? S, M, or L ")
add_pepperoni = input("Do you want pepperoni? Y or N ")
extra_cheese = input("Do you want extra cheese? Y or N ")
# 🚨 Don't change the code above 👆
#Write your code below this line 👇
# Base price by size; anything other than S or M is billed as a large ($25),
# matching the original if/elif/else fall-through.
base_prices = {"S": 15, "M": 20}
bill = base_prices.get(size, 25)
# Pepperoni surcharge: $2 on a small, $3 on any other size.
if add_pepperoni == "Y":
    bill += 2 if size == "S" else 3
# Extra cheese is a flat $1 regardless of size.
if extra_cheese == "Y":
    bill += 1
print(f"Your final bill is: ${bill}.")
import numpy as np
def camera_matrix_to_intrinsics(K):
    '''
    Extract focal lengths and principal point from a 3x3 camera matrix:
        | fx 0  cx |
    K = | 0  fy cy |
        | 0  0  1  |
    :param K: 3x3 camera matrix
    :return: (fx, fy), (cx, cy)
    '''
    focal = (K[0, 0], K[1, 1])
    principal_point = (K[0, 2], K[1, 2])
    return focal, principal_point
def align_to_depth(D, Kd, Ko, scale_d, R, t):
    '''
    Align some other modality to depth.
    :param D: depth frame
    :param Kd: depth's modality camera matrix
    :param Ko: other's modality camera matrix
    :param scale_d: a scaling factor to convert depth values into meters
    :param R: other-to-depth rotation matrix
    :param t: other-to-depth translation vector
    :return: map_x and map_y that can be used in OpenCV's cv2.remap(...)
    '''
    Kd32 = Kd.astype(np.float32)
    Ko32 = Ko.astype(np.float32)
    # Unpack intrinsics inline: fx=K[0,0], fy=K[1,1], cx=K[0,2], cy=K[1,2].
    fx_d, fy_d, cx_d, cy_d = Kd32[0, 0], Kd32[1, 1], Kd32[0, 2], Kd32[1, 2]
    fx_o, fy_o, cx_o, cy_o = Ko32[0, 0], Ko32[1, 1], Ko32[0, 2], Ko32[1, 2]
    rows, cols = D.shape
    # Flattened row/column pixel coordinates in row-major order, matching D.
    i = np.repeat(np.linspace(0, rows - 1, rows, dtype=np.float32), cols)
    j = np.tile(np.linspace(0, cols - 1, cols, dtype=np.float32), rows)
    # Depth in meters per pixel.
    z = np.reshape(D, [np.prod(D.shape), ]).astype(np.float32) * scale_d
    # Back-project each depth pixel into 3-D camera coordinates.
    x = ((j - cx_d) * z) / fx_d
    y = ((i - cy_d) * z) / fy_d
    P = np.concatenate([x[np.newaxis, :], y[np.newaxis, :], z[np.newaxis, :]], axis=0)
    # Rigid transform into the other camera's frame, then pinhole-project.
    P_t = R.astype(np.float32) @ P + t.astype(np.float32)
    map_x = np.reshape(P_t[0, :] * fx_o / P_t[2, :] + cx_o, D.shape)
    map_y = np.reshape(P_t[1, :] * fy_o / P_t[2, :] + cy_o, D.shape)
    return map_x, map_y
"""
"""
import numpy as np
import tensorflow as tf
# from libs.sn import spectral_normed_weight
def embed_y(inputs, vocab_size=1000, embedding_dim=300, word2vec_file=None,
            spectral_normed=False, update_collection=None, reuse=False):
    """Look up label embeddings inside a TF1 variable scope.

    Args:
        inputs: int, (batch size, 1).
        vocab_size: int, for cifar-10, it is 10.
        embedding_dim: int, embedding dim (ignored when word2vec_file is set).
        word2vec_file: optional pre-built embedding matrix; when given it is
            used as a frozen (non-trainable) embedding table.
        spectral_normed: unused in this body — presumably tied to the
            commented-out spectral-norm import at the top of the file; confirm.
        update_collection: unused in this body.
        reuse: unused in this body; reuse is governed by tf.variable_scope.

    Returns:
        tensor of shape (batch size, embedding_dim)
    """
    # with tf.name_scope(name) as scope:
    with tf.variable_scope("Embedding.Label"):
        def uniform(size):
            # Small uniform initializer in [-0.08, 0.08], cast to float32.
            return np.random.uniform(
                low=-0.08,
                high=0.08,
                size=size
            ).astype('float32')
        if word2vec_file is None:
            # Randomly initialized, trainable embedding table.
            filter_values = uniform(
                (vocab_size, embedding_dim)
            )
            embedding_map = tf.get_variable(name='embedding_map',
                                            dtype=tf.float32,
                                            initializer=filter_values,
                                            trainable=True)
        else:
            # Frozen table initialized from the provided word2vec matrix.
            filter_values = word2vec_file
            embedding_map = tf.get_variable(name='embedding_map',
                                            dtype=tf.float32,
                                            initializer=filter_values,
                                            trainable=False)
    return tf.nn.embedding_lookup(embedding_map, inputs)
|
from sklearn.manifold import TSNE
import pandas as pd
from . import scale_and_pca
from .predictor import optimal_bayes, optimal_K, gaussian_clustering
from .utils import group_samples
def quick_TSNE(X, random_state=420, dim=2):
    """
    Quick and easy method of visually validating predictions
    X: 2d array of data; note, will work better after scaling and applying PCA
    random_state: random state of the TSNE algorithm, keep the same for similar results, default=420
    dim: output dimensions; if plotting on a 2-d graph leave as 2, else change to 3, default=2
    returns a list of `dim` 1-dimensional arrays (one per embedded axis)
    """
    embedded = TSNE(n_components=dim, random_state=random_state).fit_transform(X)
    return [embedded[:, axis] for axis in range(dim)]
def group_from_dataset_path(d_path, a_path, title, destination='audio', optimizer='bayes', K=None, max_=10, add_labels=True, keep_original=True):
    """Cluster samples from a feature CSV and group the matching audio files.

    Args:
        d_path: path to the feature CSV (must have 'filename' and 'label' columns).
        a_path: path to the audio files referenced by the CSV.
        title: name for the grouped output.
        destination: output subdirectory for grouped samples.
        optimizer: 'bayes' or 'kmeans' — how to pick K when it is not given.
        K: number of clusters; inferred via the optimizer when falsy.
        max_: upper bound for the K search.
        add_labels: when True, write the cluster ids back into the CSV.
        keep_original: forwarded to group_samples.

    Returns:
        whatever group_samples returns (the grouping destination).
    """
    sound_data = pd.read_csv(d_path)
    X_pca = scale_and_pca(sound_data.drop(['filename', 'label'], axis=1))
    if not K:
        if optimizer == 'bayes':
            K = optimal_bayes(X_pca, max_=max_)
        elif optimizer == 'kmeans':
            K = optimal_K(X_pca, max_=max_)
    y = gaussian_clustering(X_pca, K)
    des = group_samples(sound_data.filename, y, title, a_path, destination=destination, keep_original=keep_original)
    if add_labels:
        # BUG FIX: the original used np.array(y), but numpy is never imported
        # in this module, so add_labels=True raised NameError.  list(y) fills
        # the existing 'label' column identically without needing numpy.
        sound_data['label'] = list(y)
        sound_data.to_csv(d_path, index=False)
    return des
'''
Клавиатура (Keyboard): for each key, report whether it was pressed more times
than it is guaranteed to withstand.
'''
def isBroken(a, k):
    """Print one line per key: 'YES' if the key was pressed more times than
    its guaranteed press count a[i], otherwise 'NO'.

    Args:
        a: per-key guaranteed press counts (index 0 = key 1).
        k: sequence of pressed key numbers (1-based).
    """
    presses = [0] * len(a)
    for key_number in k:
        presses[key_number - 1] += 1
    for idx, guaranteed in enumerate(a):
        print('YES' if guaranteed < presses[idx] else 'NO', end='\n')
# Input format: number of keys, per-key guaranteed press counts, number of
# presses, then the pressed key numbers (1-based).  NOTE(review): n and k are
# read to consume the input lines but never used — the list lengths suffice.
n = int(input())
a = [int(j) for j in input().split()]
k = int(input())
p = [int(j) for j in input().split()]
isBroken(a, p)
|
import os
import re
from datetime import timedelta
from .._exceptions import ConfigError
from .default import NAMELIST_DEFAULTS
from .._asset_list import config_to_asset_list
from ..filesystem import get_fs
def get_n_processes(config):
    """Return the total number of MPI ranks: ntiles * layout[0] * layout[1]."""
    fv_core = config["namelist"]["fv_core_nml"]
    n_tiles = fv_core.get("ntiles", NAMELIST_DEFAULTS["ntiles"])
    layout = fv_core.get("layout", NAMELIST_DEFAULTS["layout"])
    return n_tiles * layout[0] * layout[1]
def get_run_duration(config):
    """Return a timedelta indicating the duration of the run.

    Args:
        config (dict): a configuration dictionary

    Returns:
        duration (timedelta): the duration of the run

    Raises:
        ValueError: if the namelist contains a non-zero value for "months"
    """
    coupler_nml = config["namelist"].get("coupler_nml", {})
    months = coupler_nml.get("months", 0)
    # Months have no fixed length and therefore cannot become a timedelta.
    if months != 0:
        raise ValueError(f"namelist contains non-zero value {months} for months")
    units = ("seconds", "minutes", "hours", "days")
    return timedelta(**{unit: coupler_nml.get(unit, 0) for unit in units})
def get_current_date(config):
    """Return current_date from configuration dictionary. This function may read from
    the remote initial_conditions path in the given configuration dictionary.

    Args:
        config (dict): a configuration dictionary

    Returns:
        list: current_date as list of ints [year, month, day, hour, min, sec]
    """
    coupler_nml = config["namelist"]["coupler_nml"]
    namelist_date = coupler_nml.get("current_date", [0, 0, 0, 0, 0, 0])
    # Replicate the logic the fv3gfs model itself uses to pick the start date.
    if coupler_nml.get("force_date_from_namelist", False):
        return namelist_date
    coupler_res_filename = _get_coupler_res_filename(config)
    if coupler_res_filename is None:
        return namelist_date
    return _get_current_date_from_coupler_res(coupler_res_filename)
def _get_current_date_from_coupler_res(coupler_res_filename):
    """Return current_date specified in coupler.res file

    Args:
        coupler_res_filename (str): a coupler.res filename

    Returns:
        list: current_date as list of ints [year, month, day, hour, min, sec]
    """
    fs = get_fs(coupler_res_filename)
    with fs.open(coupler_res_filename, mode="r") as f:
        third_line = f.readlines()[2]
    # The model date is the run of integers on the file's third line.
    date_fields = [int(token) for token in re.findall(r"\d+", third_line)]
    if len(date_fields) != 6:
        raise ConfigError(
            f"{coupler_res_filename} does not have a valid current model time (need six integers on third line)"
        )
    return date_fields
def _get_coupler_res_filename(config):
    """Return source path for coupler.res file, if it exists in config assets."""
    source_path = None
    # Scan every asset; a later matching entry overrides an earlier one.
    for item in config_to_asset_list(config):
        target = os.path.join(item["target_location"], item["target_name"])
        if target != "INPUT/coupler.res":
            continue
        if "bytes" in item:
            raise NotImplementedError(
                "Using a bytes dict to represent a coupler.res file is not "
                "implemented yet. Use a standard asset dict for this item."
            )
        source_path = os.path.join(item["source_location"], item["source_name"])
    return source_path
def get_timestep(config):
    """Get the model timestep from a configuration dictionary.

    Args:
        config (dict): a configuration dictionary

    Returns:
        datetime.timedelta: the model timestep
    """
    dt_atmos = config["namelist"]["coupler_nml"]["dt_atmos"]
    return timedelta(seconds=dt_atmos)
|
from RecAPI import get_recognitions
def rec_etl(file, client_key, start, end):
    """ETL entry point for recognition data — currently a stub.

    NOTE(review): the body only calls quit(), which terminates the
    interpreter on any invocation; presumably the extract/transform/load
    steps using get_recognitions are still to be written — confirm before
    wiring this into anything.
    """
    quit()
|
from django.contrib import admin
from .models import URLScraped,ColumnData
# Expose the scraper models in the Django admin with default ModelAdmin options.
admin.site.register(URLScraped)
admin.site.register(ColumnData)
import uuid
from django.db import models
class Currency(models.Model):
    """A currency a transaction can be denominated in (e.g. USD / '$')."""
    # Human-readable currency name, unique across rows.
    name = models.CharField(max_length=120, null=False,
                            blank=False, unique=True)
    # Three-letter code; length 3 suggests ISO 4217 — TODO confirm.
    code = models.CharField(max_length=3, null=False, blank=False, unique=True)
    # Display symbol shown next to amounts; defaults to '$'.
    symbol = models.CharField(max_length=5, null=False,
                              blank=False, default='$')

    def __str__(self) -> str:
        return self.code
class Transaction(models.Model):
    """A payment transaction with payer contact details and a currency."""
    # Public identifier; note __str__ and link use the auto `id`, not this uid.
    uid = models.UUIDField(default=uuid.uuid4, editable=False)
    name = models.CharField(max_length=50, null=False, blank=False)
    email = models.EmailField(max_length=50, null=False, blank=False)
    creation_date = models.DateTimeField(auto_now_add=True)
    # PROTECT: a currency referenced by any transaction cannot be deleted.
    currency = models.ForeignKey(
        Currency, null=False, blank=False, on_delete=models.PROTECT)
    # Presumably a Stripe PaymentIntent id — confirm against payment code.
    payment_intent_id = models.CharField(
        max_length=100, null=True, blank=False, default=None)
    message = models.TextField(null=True, blank=True)

    def __str__(self) -> str:
        return f"{self.name} - {self.id} : {self.currency}"

    @property
    def link(self):
        """
        Link to a payment form for the transaction

        NOTE(review): host is hard-coded to the local dev server; should
        presumably be built from settings or request host in production.
        """
        return f'http://127.0.0.1:8000/payment/{str(self.id)}'
|
import sys # using local mdml_client
sys.path.insert(1, '../')
import mdml_client as mdml
import time
# MDML streaming smoke test: connect, publish one vector, then idle until
# interrupted, cleaning up the experiment on exit.
exp = mdml.experiment("TEST", "test", "testtest", "merfpoc.egs.anl.gov")
exp.add_config('dev_config.json')
exp.send_config()
time.sleep(1)  # give the broker a moment to apply the configuration
curr_time = mdml.unix_time(True) # True for integer return instead of string
print(curr_time)
# Data type of dict required for the first .publish_data call when using auto configurations.
data = [
    [300, 400, 500, 600],
    [1200, 1500, 2100, 1750]
]
# Timestamp is shifted 2s into the future — presumably so the point lands
# after config propagation; TODO confirm against MDML docs.
exp.publish_vector_data(device_id = "SMPS_VECTOR", data = data, timestamp = curr_time+2)
try:
    # Keep the connection alive until the operator hits Ctrl-C.
    while True:
        time.sleep(10)
except KeyboardInterrupt:
    print("Quitting")
finally:
    # Always reset and disconnect so the experiment is not left dangling.
    exp.reset()
    time.sleep(1)
    exp.disconnect()
|
from launch import LaunchDescription
from launch_ros.actions import Node
from launch.actions import ExecuteProcess
def generate_launch_description():
    """Launch the stamp_example node alongside a Micro XRCE-DDS agent.

    Returns:
        LaunchDescription: the example node plus the agent process.
    """
    # Agent listens for micro-ROS client connections over UDP port 2018.
    cmd = ['MicroXRCEAgent', 'udp', '-p 2018']
    # Force the agent to load the locally built Fast RTPS / Fast CDR libraries.
    env = {'LD_PRELOAD': "/usr/local/lib/libfastrtps.so.1 /usr/local/lib/libfastcdr.so.1"}
    return LaunchDescription([
        Node(
            package='m5stack_example',
            # NOTE(review): 'node_executable' is the pre-Eloquent argument
            # name; newer launch_ros expects 'executable' — confirm the
            # targeted ROS 2 distribution before changing.
            node_executable='stamp_example',
            output='screen'),
        ExecuteProcess(
            cmd=cmd,
            additional_env=env,
            output='screen'
        )
    ])
|
class QueueTests:
    """FIFO behavior checks, meant to be mixed into a TestCase that
    provides ``self.Queue`` (the queue class under test) and assertEqual."""

    def test_init(self):
        """Constructing an empty queue must not raise."""
        queue = self.Queue()

    def test_add_and_remove_oneitem(self):
        """A single enqueued value comes straight back out."""
        queue = self.Queue()
        queue.enqueue(3)
        self.assertEqual(queue.dequeue(), 3)

    def test_alternating_addremove(self):
        """Interleaved enqueue/dequeue always yields the value just added."""
        queue = self.Queue()
        for value in range(1000):
            queue.enqueue(value)
            self.assertEqual(queue.dequeue(), value)

    def test_manyoperations(self):
        """Values drain in exactly the order they were added."""
        queue = self.Queue()
        for value in range(1000):
            queue.enqueue(2 * value + 3)
        for value in range(1000):
            self.assertEqual(queue.dequeue(), 2 * value + 3)

    def test_length(self):
        """len() tracks enqueues and dequeues."""
        queue = self.Queue()
        self.assertEqual(len(queue), 0)
        for value in range(10):
            queue.enqueue(value)
        self.assertEqual(len(queue), 10)
        for value in range(10):
            queue.enqueue(value)
        self.assertEqual(len(queue), 20)
        for _ in range(15):
            queue.dequeue()
        self.assertEqual(len(queue), 5)
|
import discord
import asyncio
import random
import requests
from bs4 import BeautifulSoup
from discord.ext import commands
class Background_Tasks_Cog(commands.Cog):
    """Background loops: hourly status heartbeat plus three daily scraped posts."""

    def __init__(self, bot):
        self.bot = bot
        # NOTE: each assignment overwrites self.bg_task; the earlier tasks
        # keep running (the event loop holds them) but only the last task
        # reference is retained on the cog.
        self.bg_task = self.bot.loop.create_task(self.status())
        self.bg_task = self.bot.loop.create_task(self.daily_affirmation())
        self.bg_task = self.bot.loop.create_task(self.daily_bible())
        self.bg_task = self.bot.loop.create_task(self.daily_ariana_quote())

    @commands.Cog.listener()
    async def on_ready(self):
        print('Background_Tasks: Online')

    async def status(self):
        """Post an hourly heartbeat with guild and user counts."""
        await self.bot.wait_until_ready()
        channel = self.bot.get_channel(630633322686578689)
        while not self.bot.is_closed():
            await channel.send('**Status:** Online')
            await channel.send(f'{len(self.bot.guilds)} | {len(self.bot.users)}')
            await asyncio.sleep(3600)

    # Sends a daily affirmation into #the-spa every 24h
    async def daily_affirmation(self):
        await self.bot.wait_until_ready()
        channel = self.bot.get_channel(627970443361648685)
        while not self.bot.is_closed():
            url = 'https://www.developgoodhabits.com/positive-affirmations/'
            user_agent = {'User-Agent': 'Mozilla/5.0'}
            response = requests.get(url, headers = user_agent)
            soup = BeautifulSoup(response.text, 'html.parser')
            # Affirmations on this page are wrapped in <em> tags.
            affirms = list(soup.find_all('em'))
            text = list(map(lambda x: x.text, affirms))
            await channel.send(f'{random.choice(text)}')
            await asyncio.sleep(86400)

    # Sends a daily bible quote into #the-spa every 24h
    async def daily_bible(self):
        await self.bot.wait_until_ready()
        channel = self.bot.get_channel(627970443361648685)
        while not self.bot.is_closed():
            user_agent = {'User-Agent': 'Mozilla/5.0'}
            # BUG FIX: the header dict was previously passed positionally,
            # which requests.get() interprets as the `params` query-string
            # argument — the User-Agent header was never actually sent.
            response = requests.get('https://www.verseoftheday.com/', headers = user_agent)
            soup = BeautifulSoup(response.text, 'html.parser')
            verse = soup.find('div', {'class': 'bilingual-left'})
            # Put the attribution ("—Author") on its own bulleted line.
            # (The previous ''.join(...) around this was a no-op on a str.)
            formatted = verse.text.replace('—', '\n- ')
            await channel.send(f'**Daily Bible Verse:**\n{formatted}')
            await asyncio.sleep(86400)

    # Sends a daily Ariana Grande quote into #ariana-chat every 24h
    # As per request from Aricord
    async def daily_ariana_quote(self):
        await self.bot.wait_until_ready()
        channel = self.bot.get_channel(778771519362957342)
        while not self.bot.is_closed():
            url = 'https://quotes.thefamouspeople.com/ariana-grande-15637.php'
            user_agent = {'User-Agent': 'Mozilla/5.0'}
            response = requests.get(url, headers = user_agent)
            soup = BeautifulSoup(response.text, 'html.parser')
            quotes = soup.find_all('div', {'class': 'quote'})
            random_quote = random.choice(quotes)
            processed = random_quote.find('q').text
            await channel.send(f'**Daily Ariana Quote:**\n{processed}')
            await asyncio.sleep(86400)
def setup(bot):
    """discord.py extension entry point: attach the background-tasks cog."""
    bot.add_cog(Background_Tasks_Cog(bot))
|
#!/usr/bin/env python
from __future__ import print_function
import argparse
import json
import multiprocessing
import os
import sys
import time
import pprint
import hyperopt
from hyperopt import hp
def run_vgg19(batch_size=64):
    """Benchmark one VGG19 inference batch on upscaled CIFAR-10 images.

    Args:
        batch_size (int): number of training images kept and predicted on.

    Returns:
        PlaidML's "post_scan_time" performance counter value for the run.
    """
    # Imports are local because this runs inside a child process that must
    # install the PlaidML Keras backend before keras is first imported.
    import numpy as np
    import keras
    import plaidml
    # cifar10 data is 1/7th the size vgg19 needs in the spatial dimensions,
    # but if we upscale we can use it
    dataset = keras.datasets.cifar10
    # Load the dataset
    print("Loading the data")
    (x_train, y_train_cats), (x_test, y_test_cats) = dataset.load_data()
    # Get rid of all the data except the training images (for now)
    y_train_cats = None
    x_test = None
    y_test_cats = None
    # truncate number of images
    x_train = x_train[:batch_size]
    # Upscale image size by a factor of 7 (32x32 -> 224x224, VGG19's input)
    print("Upscaling the data")
    x_train = np.repeat(np.repeat(x_train, 7, axis=1), 7, axis=2)
    # Load the model
    print("Loading the model")
    model = keras.applications.VGG19()
    # Prep the model and run an initial un-timed batch
    print("Compiling")
    model.compile(optimizer='sgd', loss='categorical_crossentropy', metrics=['accuracy'])
    print("Running initial batch")
    y = model.predict(x=x_train, batch_size=batch_size)
    return plaidml.get_perf_counter("post_scan_time")
def run_inner(args, config, elapsed):
    """Child-process entry: install the PlaidML Keras backend and benchmark.

    Args:
        args: parsed CLI namespace (uses .verbose and .batch_size).
        config (str): JSON-serialized PlaidML platform configuration.
        elapsed (multiprocessing.Value): out-parameter receiving the timing.
    """
    import plaidml.keras
    if args.verbose:
        plaidml._internal_set_vlog(args.verbose)
    # Must happen before keras is imported anywhere in this process.
    plaidml.keras.install_backend()
    import plaidml.keras.backend
    plaidml.keras.backend.set_config(config)
    elapsed.value = run_vgg19(args.batch_size)
def run_outer(args, config):
    """Run one benchmark configuration in a child process with a hard timeout.

    Args:
        args: parsed CLI namespace (uses .timeout).
        config (str): JSON-serialized PlaidML platform configuration.

    Returns:
        float: elapsed time reported by the child, or 0.0 on timeout/error
        (0.0 is later mapped to hyperopt.STATUS_FAIL by the caller).
    """
    try:
        # Shared float so the child can report its timing back to us.
        elapsed = multiprocessing.Value('f', 0.0)
        proc = multiprocessing.Process(target=run_inner, args=(args, config, elapsed))
        proc.start()
        proc.join(args.timeout)
        if proc.is_alive():
            # Child hung or is too slow for this config: kill and mark failed.
            proc.terminate()
            print('Timeout')
            proc.join(3)
            return 0.0
        print('Elapsed: %s' % elapsed.value)
        return elapsed.value
    except Exception as ex:
        print('Exception: %s' % ex)
        return 0.0
def make_settings(params):
    """Translate a hyperopt parameter vector into PlaidML settings overrides.

    Args:
        params (sequence): six tuned values, in order: threads, mem_width,
            max_mem, max_regs, goal_groups, goal_flops_per_byte.

    Returns:
        dict: mapping of setting name to ``{'value': ...}``; ``vec_size`` is
        pinned to 1 rather than tuned.
    """
    raw_values = {
        'threads': params[0],
        'vec_size': 1,  # fixed, not part of the search space
        'mem_width': params[1],
        'max_mem': params[2],
        'max_regs': params[3],
        'goal_groups': params[4],
        'goal_flops_per_byte': params[5],
    }
    return {name: {'value': value} for name, value in raw_values.items()}
def main():
    """Tune PlaidML per-device settings for VGG19 with hyperopt TPE search."""
    parser = argparse.ArgumentParser()
    # Regex matched against the OpenCL device name to target the overrides.
    parser.add_argument('name')
    parser.add_argument('-v', '--verbose', type=int, nargs='?', const=1)
    parser.add_argument('--result', default='/tmp/result.json')
    parser.add_argument('--max_evals', type=int, default=1)
    parser.add_argument('--batch_size', type=int, default=64)
    parser.add_argument('--timeout', type=int, default=500)
    args = parser.parse_args()
    print(args)
    name = args.name
    # Search space; order must match make_settings() indexing.
    space = [
        hp.choice('threads', [1 << i for i in range(6, 10)]),
        hp.choice('mem_width', [1 << i for i in range(2, 10)]),
        hp.choice('max_mem', [i * 1024 for i in range(1, 48)]),
        hp.choice('max_regs', [i * 1024 for i in range(1, 48)]),
        hp.quniform('goal_groups', 1, 32, 1),
        hp.quniform('goal_fpb', 1, 50, 1),
    ]
    # Mutable cell so the closure below can count iterations.
    context = {'count': 0}

    def objective(params):
        """Hyperopt objective: benchmark one settings vector, loss = runtime."""
        context['count'] += 1
        print('-' * 20)
        print('Iteration: %d' % context['count'])
        print('-' * 20)
        settings = make_settings(params)
        config = {
            'platform': {
                '@type': 'type.vertex.ai/vertexai.tile.local_machine.proto.Platform',
                'hals': [{
                    '@type': 'type.vertex.ai/vertexai.tile.hal.opencl.proto.Driver',
                }],
                'settings_overrides': [{
                    'sel': {
                        'name_regex': name
                    },
                    'settings': settings
                }]
            }
        }
        #if settings['mem_width']['value'] < settings['vec_size']['value']:
        #    return {'status': hyperopt.STATUS_FAIL}
        elapsed = run_outer(args, json.dumps(config))
        # 0.0 is run_outer's sentinel for timeout/crash.
        if elapsed == 0.0:
            status = hyperopt.STATUS_FAIL
        else:
            status = hyperopt.STATUS_OK
        return {'status': status, 'loss': elapsed}

    trials = hyperopt.Trials()
    best = hyperopt.fmin(objective, space, hyperopt.tpe.suggest, args.max_evals, trials=trials)
    print('=' * 20)
    print('Best elapsed: %s' % trials.best_trial['result']['loss'])
    print('Best settings:')
    # Map hyperopt's choice indices back to the concrete parameter values.
    result = hyperopt.space_eval(space, best)
    pprint.pprint(make_settings(result))


if __name__ == '__main__':
    main()
|
import base64
import json
import typing
from datetime import datetime, timedelta
from Crypto.Cipher import AES
from Crypto.Util import Padding
from starlette.responses import Response
class PrettyJSONResponse(Response):
    """Starlette JSON response rendered with 4-space indentation."""
    media_type = "application/json"

    def render(self, content: typing.Any) -> bytes:
        """Serialize *content* to pretty-printed UTF-8 JSON bytes.

        allow_nan=False makes NaN/Infinity raise instead of emitting
        non-standard JSON.
        """
        return json.dumps(
            content,
            ensure_ascii=False,
            allow_nan=False,
            indent=4,
            separators=(", ", ": "),
        ).encode("utf-8")
def int_to_datestr(value):
    """Convert a YYYYMMDD-prefixed integer (or numeric string) to 'DD/MM/YYYY'."""
    digits = str(value)
    parsed = datetime(year=int(digits[:4]), month=int(digits[4:6]), day=int(digits[6:8]))
    return parsed.strftime("%d/%m/%Y")
def encrypt_param(param, variant="ruc"):
    """AES-CBC-encrypt a single parameter and return it base64-encoded.

    Args:
        param: value to encrypt; wrapped as JSON ``{variant: param}``.
        variant (str): JSON key under which the value is wrapped
            (default "ruc").

    Returns:
        str: base64-encoded ciphertext.
    """
    # SECURITY(review): the key and IV are hard-coded and the IV is static,
    # so identical inputs always yield identical ciphertext. Presumably this
    # mirrors a remote service's fixed scheme — confirm before changing.
    encryption_key = base64.b64decode("aCIbjMuVGtwF8nlSKoPydE==")
    text = json.dumps({variant: param}).encode()
    # Pad to the AES block size (PKCS#7 via Crypto.Util.Padding default).
    text_padded = Padding.pad(text, AES.block_size)
    iv = base64.b64decode("JAwlt7SNbYLycmPRqeDFou==")
    cipher = AES.new(encryption_key, AES.MODE_CBC, iv)
    cipher_enc = cipher.encrypt(text_padded)
    return base64.b64encode(cipher_enc).decode()
|
from py_counter.py_counter import py_counter |
#
# PySNMP MIB module CLAB-ANI-DEV-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CLAB-ANI-DEV-MIB
# Produced by pysmi-0.3.4 at Wed May 1 12:24:50 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# Machine-generated by pysmi; `mibBuilder` is injected by the pySNMP loader
# at execfile time, not imported. Edit the ASN.1 source, not this file.
# --- Symbol resolution through the MIB builder (not Python imports) ---
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, SingleValueConstraint, ValueRangeConstraint, ConstraintsIntersection, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "SingleValueConstraint", "ValueRangeConstraint", "ConstraintsIntersection", "ConstraintsUnion")
clabCommonMibs, = mibBuilder.importSymbols("CLAB-DEF-MIB", "clabCommonMibs")
SnmpAdminString, = mibBuilder.importSymbols("SNMP-FRAMEWORK-MIB", "SnmpAdminString")
ModuleCompliance, ObjectGroup, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "ObjectGroup", "NotificationGroup")
Counter64, iso, ObjectIdentity, ModuleIdentity, NotificationType, IpAddress, MibScalar, MibTable, MibTableRow, MibTableColumn, TimeTicks, Counter32, Bits, MibIdentifier, Unsigned32, Gauge32, Integer32 = mibBuilder.importSymbols("SNMPv2-SMI", "Counter64", "iso", "ObjectIdentity", "ModuleIdentity", "NotificationType", "IpAddress", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "TimeTicks", "Counter32", "Bits", "MibIdentifier", "Unsigned32", "Gauge32", "Integer32")
TruthValue, DateAndTime, DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "TruthValue", "DateAndTime", "DisplayString", "TextualConvention")
# --- Module identity and revision history ---
clabAniDevMib = ModuleIdentity((1, 3, 6, 1, 4, 1, 4491, 4, 7))
clabAniDevMib.setRevisions(('2017-04-27 00:00', '2017-02-21 00:00', '2016-05-19 00:00', '2016-03-17 00:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    if mibBuilder.loadTexts: clabAniDevMib.setRevisionsDescriptions(('Modified per ECN CLAB-ANI-DEV-MIB-N-17.0160-1.', 'Corrected capitalization of one instance of AniDevSysLoggingGrpCtrl. Logging was inadvertently lower case. No EC required.', 'Added System Event logging objects per sRouter-N-16.0147-2.', 'Initial version, published as part of the CableLabs Standalone Router Specification, CL-SP-sRouter-I01-160317. Copyright 2016 Cable Television Laboratories, Inc. All rights reserved.',))
if mibBuilder.loadTexts: clabAniDevMib.setLastUpdated('201704270000Z')
if mibBuilder.loadTexts: clabAniDevMib.setOrganization('Cable Television Laboratories, Inc.')
if mibBuilder.loadTexts: clabAniDevMib.setContactInfo('Broadband Network Services Cable Television Laboratories, Inc. 858 Coal Creek Circle, Louisville, CO 80027, USA Phone: +1 303-661-9100 Email: mibs@cablelabs.com')
if mibBuilder.loadTexts: clabAniDevMib.setDescription('This MIB module contains the management objects for sRouter devices under the CableLabs Access Network Independent (ANI) project. Copyright 1999-2017 Cable Television Laboratories, Inc. All rights reserved.')
# --- MIB objects: device reset and system-event logging controls ---
clabAniDevObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 4491, 4, 7, 1))
aniDevResetNow = MibScalar((1, 3, 6, 1, 4, 1, 4491, 4, 7, 1, 1), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: aniDevResetNow.setStatus('current')
if mibBuilder.loadTexts: aniDevResetNow.setDescription('Setting this object to true(1) causes the device to reset. Reading this object always returns false(2).')
clabAniDevSysLoggingObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 4491, 4, 7, 1, 2))
aniDevLoggingCtrlReset = MibScalar((1, 3, 6, 1, 4, 1, 4491, 4, 7, 1, 2, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("resetLog", 1), ("pauseLog", 2), ("startLog", 3), ("useDefaultReporting", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: aniDevLoggingCtrlReset.setStatus('current')
if mibBuilder.loadTexts: aniDevLoggingCtrlReset.setDescription('Setting this object to resetLog(1) empties the logging table. Setting this object to pauseLog(2) causes the ANI device to stop writing events to the logging table. Setting this object to startLog(3) causes the ANI device to start writing events to the logging table. Reading this object returns either pauseLog(2) or startLog(3). The default is startLog(3). Log event pausing is independent of any other log filtering mechanisms, e.g., levels. Setting it to useDefaultReporting(4) resets the log (i.e., empties) and returns all event levels to the factory-default reporting.')
aniDevSysLoggingSize = MibScalar((1, 3, 6, 1, 4, 1, 4491, 4, 7, 1, 2, 2), Unsigned32()).setUnits('bytes').setMaxAccess("readwrite")
if mibBuilder.loadTexts: aniDevSysLoggingSize.setStatus('current')
if mibBuilder.loadTexts: aniDevSysLoggingSize.setDescription('Setting this object modifies the size of the system logging table. When set to zero, all entries are removed and new entries are not allowed to be added. When set less than the current size, the oldest entries are removed until the new size is reached.')
aniDevSysLoggingLevelCtrl = MibScalar((1, 3, 6, 1, 4, 1, 4491, 4, 7, 1, 2, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8))).clone(namedValues=NamedValues(("all", 1), ("trace", 2), ("debug", 3), ("info", 4), ("warn", 5), ("error", 6), ("fatal", 7), ("off", 8)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: aniDevSysLoggingLevelCtrl.setStatus('current')
if mibBuilder.loadTexts: aniDevSysLoggingLevelCtrl.setDescription('The current logging priority level for system messages. Setting a level sets all levels from the level specified to the highest severity level except for off. The level all indicates all levels. The log levels are derived from the log4j class. all(1) The all level has the lowest possible rank and is intended to turn on all logging. trace(2) The trace level designates finer-grained informational events than the DEBUG (see log4j class for further definition). debug(3) The debug level designates fine-grained informational events that are most useful to debug an application. info(4) The info level designates informational messages that highlight the progress of the application at coarse-grained level. warn(5) The warn level designates potentially harmful situations. error(6) The error level designates error events that might still allow the application to continue running. fatal(7) The fatal level designates very severe error events that will presumably lead the application to abort. off(8) The off has the highest possible rank and is intended to turn off logging.')
aniDevSysLoggingGroupCtrl = MibScalar((1, 3, 6, 1, 4, 1, 4491, 4, 7, 1, 2, 4), Bits().clone(namedValues=NamedValues(("none", 0), ("all", 1), ("group1", 2), ("group2", 3), ("group3", 4), ("group4", 5), ("group5", 6))).clone(namedValues=NamedValues(("none", 0)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: aniDevSysLoggingGroupCtrl.setStatus('current')
if mibBuilder.loadTexts: aniDevSysLoggingGroupCtrl.setDescription("The system 'group' or 'groups' to be logged. If all(1) is set, then other bits are ignored. If set to 'none(0)', messages that do not include a 'group' are candidates to be logged. Logging Groups for Event Logs are defined in the controlling Access Network Independent device specification for equipment implementing this MIB. Returns WrongValue error if an attempt is made to set the bit for a Logging Group not supported by the device.")
# --- Cyclic event-log table ---
aniDevSysLoggingEventTable = MibTable((1, 3, 6, 1, 4, 1, 4491, 4, 7, 1, 2, 5), )
if mibBuilder.loadTexts: aniDevSysLoggingEventTable.setStatus('current')
if mibBuilder.loadTexts: aniDevSysLoggingEventTable.setDescription('This table is a cyclic table of events.')
aniDevSysLoggingEventEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4491, 4, 7, 1, 2, 5, 1), ).setIndexNames((0, "CLAB-ANI-DEV-MIB", "aniDevSysLoggingEventIndex"))
if mibBuilder.loadTexts: aniDevSysLoggingEventEntry.setStatus('current')
if mibBuilder.loadTexts: aniDevSysLoggingEventEntry.setDescription('A logging event entry.')
aniDevSysLoggingEventIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 4, 7, 1, 2, 5, 1, 1), Unsigned32())
if mibBuilder.loadTexts: aniDevSysLoggingEventIndex.setStatus('current')
if mibBuilder.loadTexts: aniDevSysLoggingEventIndex.setDescription('Provides relative ordering of the objects in the event log. This object will always increase except when (a) the log is reset via aniDevSysLoggingEventCtrlReset or (b) the device reboots and does not implement non-volatile storage for this log. The next entry for all the above cases is 1.')
aniDevSysLoggingEventTimeStamp = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 4, 7, 1, 2, 5, 1, 2), DateAndTime()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aniDevSysLoggingEventTimeStamp.setStatus('current')
if mibBuilder.loadTexts: aniDevSysLoggingEventTimeStamp.setDescription('The time stamp of this event logging entry. The timestamp is established when the event occurs.')
aniDevSysLoggingEventMessage = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 4, 7, 1, 2, 5, 1, 3), SnmpAdminString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aniDevSysLoggingEventMessage.setStatus('current')
if mibBuilder.loadTexts: aniDevSysLoggingEventMessage.setDescription('The event message.')
# --- Conformance statements ---
clabAniDevConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 4491, 4, 7, 2))
clabAniDevCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 4491, 4, 7, 2, 1))
clabAniDevGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 4491, 4, 7, 2, 2))
clabAniDevCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 4491, 4, 7, 2, 1, 1)).setObjects(("CLAB-ANI-DEV-MIB", "clabAniDevGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    clabAniDevCompliance = clabAniDevCompliance.setStatus('current')
if mibBuilder.loadTexts: clabAniDevCompliance.setDescription('The compliance statement for CableLabs ANI devices.')
clabAniDevGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 4491, 4, 7, 2, 2, 1)).setObjects(("CLAB-ANI-DEV-MIB", "aniDevResetNow"), ("CLAB-ANI-DEV-MIB", "aniDevLoggingCtrlReset"), ("CLAB-ANI-DEV-MIB", "aniDevSysLoggingSize"), ("CLAB-ANI-DEV-MIB", "aniDevSysLoggingLevelCtrl"), ("CLAB-ANI-DEV-MIB", "aniDevSysLoggingGroupCtrl"), ("CLAB-ANI-DEV-MIB", "aniDevSysLoggingEventTimeStamp"), ("CLAB-ANI-DEV-MIB", "aniDevSysLoggingEventMessage"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    clabAniDevGroup = clabAniDevGroup.setStatus('current')
if mibBuilder.loadTexts: clabAniDevGroup.setDescription('Object(s) implemented for the management of ANI devices.')
# --- Export everything for consumers of this MIB module ---
mibBuilder.exportSymbols("CLAB-ANI-DEV-MIB", aniDevResetNow=aniDevResetNow, aniDevSysLoggingEventTimeStamp=aniDevSysLoggingEventTimeStamp, aniDevSysLoggingLevelCtrl=aniDevSysLoggingLevelCtrl, aniDevSysLoggingEventTable=aniDevSysLoggingEventTable, clabAniDevSysLoggingObjects=clabAniDevSysLoggingObjects, clabAniDevGroups=clabAniDevGroups, aniDevSysLoggingEventIndex=aniDevSysLoggingEventIndex, aniDevSysLoggingSize=aniDevSysLoggingSize, clabAniDevCompliance=clabAniDevCompliance, clabAniDevGroup=clabAniDevGroup, clabAniDevCompliances=clabAniDevCompliances, clabAniDevConformance=clabAniDevConformance, aniDevSysLoggingGroupCtrl=aniDevSysLoggingGroupCtrl, PYSNMP_MODULE_ID=clabAniDevMib, clabAniDevObjects=clabAniDevObjects, aniDevLoggingCtrlReset=aniDevLoggingCtrlReset, aniDevSysLoggingEventEntry=aniDevSysLoggingEventEntry, clabAniDevMib=clabAniDevMib, aniDevSysLoggingEventMessage=aniDevSysLoggingEventMessage)
|
import numpy as np
def sigmoid(X):
    """Element-wise logistic function 1 / (1 + e^-x); accepts scalars or arrays."""
    denominator = 1 + np.exp(-X)
    return 1 / denominator
def softmax(X):
    """Softmax over the last axis of X.

    Handles 1-D vectors and 2-D batches (row-wise) exactly as before, and
    generalizes to higher ranks (the previous version silently returned
    None for ndim > 2).

    Args:
        X: array-like of real-valued scores.

    Returns:
        np.ndarray: probabilities of the same shape, summing to 1 along the
        last axis.
    """
    X = np.asarray(X)
    # Subtract the per-row maximum before exponentiating to avoid overflow
    # for large logits; softmax is invariant to a constant shift, so the
    # result is mathematically unchanged.
    shifted = X - np.max(X, axis=-1, keepdims=True)
    exp = np.exp(shifted)
    return exp / np.sum(exp, axis=-1, keepdims=True)
def cross_entropy(Y, T):
    """Mean categorical cross-entropy between predictions and one-hot targets.

    Args:
        Y: predicted probabilities, shape (batch, classes).
        T: one-hot target labels, same shape as Y.

    Returns:
        float: mean over the batch of -sum(T * log(Y + eps)).
    """
    EPSILON = 1e-8  # guards against log(0); fixes the "EPSION" typo
    return np.mean(np.sum(-np.log(Y + EPSILON) * T, axis=1))
if __name__ == '__main__':
    # Ad-hoc smoke checks for the functions above; commented lines kept as
    # a record of earlier manual spot checks.
    # print(sigmoid(3))
    # print(sigmoid(0))
    # print(sigmoid(-2))
    # print(sigmoid(-30))
    # print(sigmoid(np.array([-3, -0.1, 0, 0.8, 4])))
    one_dim = np.array([-0.5, 0.1, 0.3, 1])
    two_dim = np.array([
        [-0.5, 0.1, 0.3, 1],
        [3.0, 8.0, 0.4, 3.0]
    ])
    # print(softmax(one_dim))
    # print(softmax(two_dim))
    # Note: these are probabilities (softmax output), fed in as predictions.
    logits = softmax(two_dim)
    labels = np.array([
        [1, 0, 0, 0],
        [0, 1, 0, 0]
    ])
    print(cross_entropy(logits, labels))
|
######## Snakemake header ########
import sys; sys.path.append("/home/cmb-panasas2/skchoudh/software_frozen/anaconda27/envs/riboraptor/lib/python3.5/site-packages"); import pickle; snakemake = pickle.loads(b'\x80\x03csnakemake.script\nSnakemake\nq\x00)\x81q\x01}q\x02(X\x06\x00\x00\x00configq\x03}q\x04X\x0b\x00\x00\x00config_pathq\x05X\x19\x00\x00\x00configs/hg38_SRP029589.pyq\x06sX\t\x00\x00\x00resourcesq\x07csnakemake.io\nResources\nq\x08)\x81q\t(K\x01K\x01e}q\n(X\x06\x00\x00\x00_coresq\x0bK\x01X\x06\x00\x00\x00_nodesq\x0cK\x01X\x06\x00\x00\x00_namesq\r}q\x0e(h\x0bK\x00N\x86q\x0fh\x0cK\x01N\x86q\x10uubX\x04\x00\x00\x00ruleq\x11X\x10\x00\x00\x00perform_trimmingq\x12X\x03\x00\x00\x00logq\x13csnakemake.io\nLog\nq\x14)\x81q\x15}q\x16h\r}q\x17sbX\x06\x00\x00\x00outputq\x18csnakemake.io\nOutputFiles\nq\x19)\x81q\x1a(X,\x00\x00\x00preprocessed/SRR970593_trimmed_trimmed.fq.gzq\x1bX*\x00\x00\x00preprocessed_step1/SRR970593_trimmed.fq.gzq\x1cX9\x00\x00\x00preprocessed_step1/SRR970593.fastq.gz_trimming_report.txtq\x1de}q\x1e(X\x08\x00\x00\x00pass2_fqq\x1fh\x1bX\x08\x00\x00\x00pass1_fqq h\x1cX\x0f\x00\x00\x00pass1_fq_reportq!h\x1dh\r}q"(h\x1fK\x00N\x86q#h K\x01N\x86q$h!K\x02N\x86q%uubX\x06\x00\x00\x00paramsq&csnakemake.io\nParams\nq\')\x81q((K&X\x12\x00\x00\x00preprocessed_step1q)K\x12X\x07\x00\x00\x00defaultq*X\x0c\x00\x00\x00preprocessedq+K\x05e}q,(X\n\x00\x00\x00max_lengthq-K&X\t\x00\x00\x00pass1_dirq.h)X\n\x00\x00\x00min_lengthq/K\x12X\x07\x00\x00\x00adapterq0h*X\t\x00\x00\x00pass2_dirq1h+X\x0c\x00\x00\x00phred_cutoffq2K\x05h\r}q3(h-K\x00N\x86q4h.K\x01N\x86q5h/K\x02N\x86q6h0K\x03N\x86q7h1K\x04N\x86q8h2K\x05N\x86q9uubX\x05\x00\x00\x00inputq:csnakemake.io\nInputFiles\nq;)\x81q<X\x1d\x00\x00\x00sratofastq/SRR970593.fastq.gzq=a}q>(X\x02\x00\x00\x00R1q?h=h\r}q@h?K\x00N\x86qAsubX\x07\x00\x00\x00threadsqBK\x01X\t\x00\x00\x00wildcardsqCcsnakemake.io\nWildcards\nqD)\x81qEX\t\x00\x00\x00SRR970593qFa}qG(X\x06\x00\x00\x00sampleqHhFh\r}qIX\x06\x00\x00\x00sampleqJK\x00N\x86qKsubub.'); from snakemake.logging import 
logger; logger.printshellcmds = True
######## Original script #########
from snakemake.shell import shell
from riboraptor.kmer import fastq_kmer_histogram
import operator
# CTG is a common one
# AGATCG.. is a Truseq ribo 3' illumina
# TGGAAT.. is Truseq small rna
# Known adapter sequences, preferred over frequency alone when spotted.
PREFERRED_KMERS = [
    'CTGTAGGCACCATCAAT', 'AGATCGGAAGAGCACACGTCT', 'TGGAATTCTCGGGTGCCAAGG',
    'CTGTAGGCAC'
]


def get_top_kmer(kmer_series_dict):
    """Return an over-represented kmer, or None if nothing stands out.

    Walks kmer lengths from longest to shortest. For each length, first
    checks whether one of the five most frequent kmers is a known adapter;
    otherwise falls back to any kmer at or above 30% frequency.
    """
    ordered = sorted(kmer_series_dict.items(),
                     key=operator.itemgetter(0), reverse=True)
    for _length, kmer_series in ordered:
        # Is one of the five most frequent kmers a known adapter?
        for candidate in kmer_series.index.tolist()[:5]:
            if candidate in PREFERRED_KMERS:
                return candidate
        # 30 is an arbitrarily chosen threshold, but it works in *most* cases.
        frequent = kmer_series[kmer_series >= 30]
        if len(frequent) >= 1:
            return frequent.index.tolist()[0]
    return None
# Pull trimming inputs/outputs/parameters out of the snakemake job object.
params = snakemake.params
pass1_dir = snakemake.params.pass1_dir
pass2_dir = snakemake.params.pass2_dir
output_1 = snakemake.output.pass1_fq
output_2 = snakemake.output.pass2_fq
adapter = snakemake.params.adapter
# trim_galore names its report after the input plus this fixed suffix.
output_1_report = output_1 + '_trimming_report.txt'
output_2_report = output_2 + '_trimming_report.txt'
# Do first pass
if adapter is None or adapter == 'default':
    # No adapter supplied: let trim_galore auto-detect on the first pass.
    shell(r'''trim_galore -o {pass1_dir} --length {params.min_length} \
        -q {params.phred_cutoff} {snakemake.input.R1}''')
    # Are there any over-represented sequences?
    # If yes, do a second pass
    # since no adapter was provided
    histogram = fastq_kmer_histogram(output_1)
    adapter2 = get_top_kmer(histogram)
    if adapter2 is None:
        # Else just copy so downstream rules always see a pass-2 file.
        shell(r'''cp -r {output_1} {output_2}''')
        shell(
            r'''echo "No adapter found in second pass" > {output_2_report}''')
    else:
        # Second pass trims the kmer we detected in the first-pass output.
        shell(r'''trim_galore -o {pass2_dir} --length {params.min_length} \
            -a {adapter2} \
            -q {params.phred_cutoff} {output_1}''')
else:
    # Explicit adapter provided: one trimming pass, then copy to pass-2 path.
    shell(r'''trim_galore -o {pass1_dir} --length {params.min_length} \
        -a {adapter} \
        -q {params.phred_cutoff} {snakemake.input.R1}''')
    shell(r'''cp -r {output_1} {output_2}''')
    shell(
        r'''echo "Used user provided adapter for one pass only (no second pass)" > {output_2_report}'''
    )
|
from requests import put, delete
# Drop and recreate the Elasticsearch "customer" index on a local node.
# NOTE(review): on a fresh cluster the DELETE returns a 404 error body;
# this script just prints whatever JSON comes back without checking status.
response = delete('http://localhost:9200/customer')
print(response.json())
response = put('http://localhost:9200/customer?pretty')
print(response.json())
|
from builtins import dict
from django.db import models
from django.contrib.auth.models import AbstractBaseUser
from django.contrib.auth.models import PermissionsMixin
from django.contrib.auth.models import BaseUserManager
from django.conf import settings
from django.core.validators import MaxValueValidator, MinValueValidator
import uuid
class QubitMeasurementItem(models.Model):
    """
    Represents the Qubit Measurement Settings and contains values for
    projection onto the computational basis |0> and |1>:
    |psi> = cos(theta)|0> + exp(i*phi)|1>
    """
    # Qubit Index (1-based)
    encodedQubitIndex = models.PositiveIntegerField(
        validators=[
            MinValueValidator(1),
        ],
        blank=True,
        null=True,
    )
    # angle theta, in degrees (0-360), two decimal places
    theta = models.DecimalField(
        validators=[
            MinValueValidator(0),
            MaxValueValidator(360),
        ],
        decimal_places=2,
        max_digits=5,
        blank=True,
        null=True,
    )
    # angle phi, in degrees (0-360), two decimal places
    phi = models.DecimalField(
        validators=[
            MinValueValidator(0),
            MaxValueValidator(360),
        ],
        decimal_places=2,
        max_digits=5,
        blank=True,
        null=True,
    )
    # QubitMeasurementItem is related to ComputeSettings.
    # A ComputeSettings object aggregates QubitMeasurementItems through the
    # reverse relation named encodedQubitMeasurements.
    ComputeSettings = models.ForeignKey(
        "ComputeSettings",
        # Not CASCADE: ComputeSettings shouldn't be deleted when an
        # encodedQubitMeasurements item is deleted.
        # SET_NULL sets the ForeignKey null; only possible because null=True.
        on_delete=models.SET_NULL,
        null=True,
        # related_name specifies the name of the reverse relation from the
        # ComputeSettings model back to QubitMeasurementItem model,
        # i.e. specifies how this field is addressed from ComputeSettings
        related_name="encodedQubitMeasurements",
    )
class CircuitConfigurationItem(models.Model):
    """
    Contains names and values for abstract circuit gate operations
    R_z(circuitAngleName) = circuitAngleValue
    """
    # specifies the name of the angle, e.g. "alpha"
    circuitAngleName = models.CharField(max_length=255)
    # specifies the value of the angle, in degrees (0-360)
    circuitAngleValue = models.DecimalField(
        validators=[
            MinValueValidator(0),
            MaxValueValidator(360),
        ],
        decimal_places=3,
        max_digits=6,
    )
    # Relates gate operations to the qubitComputing model, which aggregates
    # CircuitConfigurationItems through the reverse relation "circuitAngles"
    # alongside the circuit configuration implied by the cluster shape
    # ("horseshoe" etc.).
    qubitComputing = models.ForeignKey(
        "qubitComputing",
        on_delete=models.SET_NULL,
        null=True,
        related_name="circuitAngles",
    )
class clusterState(models.Model):
    """
    Defines the number of physical qubits and shape of the cluster
    """
    # Number of physical qubits; this backend supports 1-4.
    amountQubits = models.PositiveIntegerField(
        validators=[
            MinValueValidator(1),
            MaxValueValidator(4),
        ]
    )
    # defines the cluster state e.g. "linear" or "ghz"; validated against
    # choices in the serializer, not here at the model level
    presetSettings = models.CharField(max_length=255)
class qubitComputing(models.Model):
    """
    Combines all abstract circuit settings and has one field:
    circuitAngles: reverse relation holding CircuitConfigurationItem objects,
        populated by the qubitComputingSerializer (no columns of its own)
    """
class ComputeSettings(models.Model):
    """
    Combines all parameters relevant for computation and has three fields:
    clusterState: foreign key to the clusterState model below
    qubitComputing: foreign key to the qubitComputing model below
    encodedQubitMeasurements: reverse relation holding QubitMeasurementItem
        objects, populated by the ComputeSettingsSerializer
    """
    # defined by clusterState model; kept (null) if the cluster is deleted
    clusterState = models.ForeignKey(
        "clusterState",
        on_delete=models.SET_NULL,
        null=True,
    )
    # consists of enum field and array which are handled in ComputeSettingsSerializer
    qubitComputing = models.ForeignKey(
        "qubitComputing",
        on_delete=models.SET_NULL,
        null=True,
    )
class ExperimentBase(models.Model):
    """
    Contains all fields of an Experiment that can be specified by the user.
    """

    # The user can give a name to the experiment.
    experimentName = models.CharField(max_length=255)
    # The circuit ID is defined by the choice of the geometry of the cluster
    # and identifies the corresponding circuit in the backend (1-22).
    circuitId = models.PositiveIntegerField(
        validators=[
            MinValueValidator(1),
            MaxValueValidator(22),
        ],
    )
    # The user can associate a project ID with the experiment (optional).
    projectId = models.CharField(
        max_length=255,
        blank=True,
        null=True,
    )
    # Users can specify how long they will use the system (1-120;
    # presumably seconds, matching the default below - confirm unit).
    maxRuntime = models.PositiveIntegerField(
        validators=[
            MinValueValidator(1),
            MaxValueValidator(120),
        ],
        blank=True,
        null=True,
        # set default Runtime to 5 seconds
        default=5,
    )
    # ExperimentBase contains an entire ComputeSettings object; SET_NULL
    # keeps the experiment if the settings row is deleted.
    ComputeSettings = models.ForeignKey(
        "ComputeSettings",
        on_delete=models.SET_NULL,
        null=True,
    )
class Experiment(ExperimentBase):
    """
    Defines additional fields set by the server, with ExperimentBase
    as parent class.
    """

    # Possible values for the status field: (stored value, human label).
    statusChoices = (
        ("INITIAL", "Initial"),
        ("IN QUEUE", "In Queue"),
        ("RUNNING", "Running"),
        ("FAILED", "Failed"),
        ("DONE", "Done"),
    )
    # The experiment ID is generated by the server and never editable.
    experimentId = models.UUIDField(
        primary_key=True, default=uuid.uuid4, editable=False
    )
    # Creation timestamp, set automatically on first save.
    created = models.DateTimeField(auto_now_add=True)
    # Lifecycle state; constrained to statusChoices.
    status = models.CharField(
        max_length=255, choices=statusChoices, null=True, blank=True
    )
    # Relates an Experiment object to a user, who can have multiple
    # Experiments. Currently this is only implemented in Experiment and not
    # in Results, as a Result is related to an Experiment (this can be
    # changed later to assign Results to users if needed).
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.SET_NULL,
        null=True,
    )
class ExperimentResult(models.Model):
    """
    This model defines a Result for a corresponding Experiment.
    """

    # TODO: is this time stamp needed? A time stamp is already available in
    # the Experiment model ("created").
    startTime = models.DateTimeField(auto_now_add=True)
    totalCounts = models.PositiveIntegerField()
    # Number of detectors used in the run (1-8).
    numberOfDetectors = models.PositiveIntegerField(
        validators=[
            MinValueValidator(1),
            MaxValueValidator(8),
        ]
    )
    # Measured single-photon rate, two decimal places.
    singlePhotonRate = models.DecimalField(
        decimal_places=2,
        max_digits=8,
    )
    totalTime = models.PositiveIntegerField()
    # The experiment this result belongs to; SET_NULL preserves the result
    # if the experiment row is deleted.
    experiment = models.ForeignKey(
        "Experiment",
        on_delete=models.SET_NULL,
        blank=True,
        null=True,
    )
    # Raw measurement data backing this result.
    experimentData = models.ForeignKey(
        "ExperimentData", on_delete=models.SET_NULL, blank=True, null=True
    )
class Countrates(models.Model):
    """
    This model stores the countrates from the TimeTagger, one field per
    detector channel (d1-d8). All channels are optional so partial
    detector setups can be recorded.
    """

    d1 = models.PositiveIntegerField(null=True, blank=True)
    d2 = models.PositiveIntegerField(null=True, blank=True)
    d3 = models.PositiveIntegerField(null=True, blank=True)
    d4 = models.PositiveIntegerField(null=True, blank=True)
    d5 = models.PositiveIntegerField(null=True, blank=True)
    d6 = models.PositiveIntegerField(null=True, blank=True)
    d7 = models.PositiveIntegerField(null=True, blank=True)
    d8 = models.PositiveIntegerField(null=True, blank=True)
class ExperimentData(models.Model):
    """
    This model stores the experimental data.
    """

    # Per-detector count rates; SET_NULL keeps the data row if the
    # Countrates row is deleted.
    countratePerDetector = models.ForeignKey(
        "Countrates", on_delete=models.SET_NULL, blank=True, null=True
    )
    # No brackets on the dict callable - we do not call it here; Django
    # calls it per row so every record gets its own fresh empty dict.
    coincidenceCounts = models.JSONField(default=dict)
# The user manager class tells Django how to work with the customized
# user model in the CLI. By default, when a user is created, Django expects
# a username and a password field, but the username field has been
# replaced with an email field, so a custom user manager is needed.
class UserProfileManager(BaseUserManager):
    """
    Manager for user profiles, built on BaseUserManager.

    Django's CLI and auth machinery create accounts through the manager,
    so this class teaches them how to build users keyed by email.
    """

    def create_user(self, email, name, password=None):
        """Create a new user profile"""
        # An email address is mandatory for every account; reject empty
        # strings and None alike.
        if not email:
            raise ValueError("Users must have an email address")
        # self.model is bound to whichever model this manager is attached to.
        account = self.model(email=self.normalize_email(email), name=name)
        # set_password stores only the password hash, never the raw value.
        account.set_password(password)
        account.save(using=self._db)
        return account

    def create_superuser(self, email, name, password):
        """Create and save a new superuser with given details"""
        # Build a regular account first, then promote it.
        account = self.create_user(email, name, password)
        # is_superuser comes from PermissionsMixin on the user model.
        account.is_superuser = True
        account.is_staff = True
        account.save(using=self._db)
        return account
class UserProfile(AbstractBaseUser, PermissionsMixin):
    """Database model for users in the system, keyed by email address."""

    # Random UUID primary key instead of a sequential integer id.
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    # Login identifier; must be unique across all users.
    email = models.EmailField(max_length=255, unique=True)
    name = models.CharField(max_length=255)
    # Permission flags used by Django's auth system.
    is_active = models.BooleanField(default=True)
    # Determines access to the Django admin site.
    is_staff = models.BooleanField(default=False)
    # Custom model manager: required because this custom user model is
    # used with the Django CLI.
    objects = UserProfileManager()
    # Overwrite the default USERNAME_FIELD (normally the username) with the
    # email field: users authenticate with email address and password.
    USERNAME_FIELD = "email"
    # Additional fields prompted for when creating a user via the CLI.
    REQUIRED_FIELDS = ["name"]

    def get_full_name(self):
        """Retrieve full name of user"""
        return self.name

    def get_short_name(self):
        """Retrieve short name of user"""
        return self.name

    # Converting a user profile object into a string.
    def __str__(self):
        """Return the string representation: the user's email address."""
        return self.email
|
from flask import Flask, g, redirect, request
from app.core import db, jwt, sg, configure_app, mail_services, gc_services
from flask_cors import CORS, cross_origin
from models import User
from app.helpers.response import response_error
from app.routes import init_routes
from datetime import datetime
import time
import decimal
import flask.json
import logging, logging.config, yaml
class MyJSONEncoder(flask.json.JSONEncoder):
    """JSON encoder that serializes decimal.Decimal values as strings."""

    def default(self, obj):
        # Anything that is not a Decimal falls through to the base class.
        if not isinstance(obj, decimal.Decimal):
            return super(MyJSONEncoder, self).default(obj)
        # Round-trip through float, then stringify.
        # NOTE(review): the float conversion can lose precision for
        # high-precision decimals - presumably acceptable here; confirm.
        return str(float(obj))
# Application bootstrap: create the Flask app, configure logging, then
# wire every extension against the single app instance.
app = Flask(__name__)

# Load the logging configuration.
# FIX: the original used yaml.load(open('logging.conf')), which leaks the
# file handle and (on PyYAML >= 5.1) is deprecated/unsafe without an
# explicit Loader. A logging dictConfig is a plain mapping, so safe_load
# is sufficient and avoids executing arbitrary YAML tags.
with open('logging.conf') as _logging_conf:
    logging.config.dictConfig(yaml.safe_load(_logging_conf))
logfile = logging.getLogger('file')

# Add the JSON encoder that handles the Decimal type.
app.json_encoder = MyJSONEncoder
# Disable strict_slashes so /route and /route/ hit the same endpoint.
app.url_map.strict_slashes = False
# Apply application configuration.
configure_app(app)
# Accept CORS.
CORS(app)
# Initialize extensions/services: database, JWT, SendGrid, mail service,
# and the Google Cloud service.
db.init_app(app)
jwt.init_app(app)
sg.init_app(app)
mail_services.init_app(app)
gc_services.init_app(app)
@app.before_request
def before_request():
    """Canonicalize trailing slashes and stash per-request settings on g."""
    path = request.path
    # Redirect /foo/ to /foo; the root path "/" is left untouched.
    if path.endswith('/') and path != '/':
        return redirect(path[:-1])
    # Copy frequently used config values onto the request context so
    # handlers can read them from g.
    for key in ('MAIL_SERVICE', 'EMAIL', 'PASSPHASE', 'ENV', 'UPLOAD_DIR',
                'BASE_URL', 'GC_STORAGE_BUCKET', 'GC_STORAGE_FOLDER'):
        setattr(g, key, app.config.get(key))
    # Record the start time and URL so after_request can log the duration.
    g.start = [time.time(), request.base_url]
    g.reported_time = app.config.get('REPORTED_TIME')
@app.after_request
def after_request(response):
    """Log the request's elapsed wall-clock time, then return the response."""
    # g.start is only set when before_request completed without redirecting.
    if 'start' in g:
        started_at, url = g.start
        elapsed = time.time() - float(started_at)
        logfile.debug("API -> {}, time -> {}".format(url, str(elapsed)))
    return response
# Register all application routes on the app.
init_routes(app)


def jwt_error_handler(message):
    """Shared JWT failure handler: wrap the error message in a response."""
    return response_error(message)


def needs_fresh_token_callback():
    """Handler for endpoints that require a freshly issued token."""
    return response_error('Only fresh tokens are allowed')


def revoked_token_callback():
    """Handler invoked when a revoked token is presented."""
    return response_error('Token has been revoked')


def expired_token_callback():
    """Handler invoked when an expired token is presented (HTTP 401)."""
    return response_error('Token has expired', 401)


# Register the handlers with the JWT extension.
# NOTE(review): token_in_blacklist_loader normally expects a callback that
# returns a boolean (is this token blacklisted?), not an error responder -
# confirm this registration is intended.
jwt.unauthorized_loader(jwt_error_handler)
jwt.invalid_token_loader(jwt_error_handler)
jwt.claims_verification_loader(jwt_error_handler)
jwt.token_in_blacklist_loader(jwt_error_handler)
jwt.user_loader_error_loader(jwt_error_handler)
jwt.claims_verification_failed_loader(jwt_error_handler)
jwt.expired_token_loader(expired_token_callback)
jwt.needs_fresh_token_loader(needs_fresh_token_callback)
jwt.revoked_token_loader(revoked_token_callback)
@app.errorhandler(Exception)
def error_handler(err):
    """Top-level catch-all: turn any unhandled exception into an error response.

    FIX: the original returned response_error(err.message), but Python 3
    exceptions have no .message attribute, so the handler itself raised
    AttributeError. str(err) yields the exception's message portably.
    """
    return response_error(str(err))
# @app.errorhandler(404)
# def error_handler400(err):
# return response_error(err.message);
#
# @app.errorhandler(500)
# def error_handler500(err):
# return response_error(err.message);
#
# @app.error_handler_all(Exception)
# def errorhandler(err):
# return response_error(err.message);
|
"""Lists"""
from functions.lists import no_language_tag_list
from functions.lists import nosplit_bnode_list
"""Imported Functions"""
from functions.formatting_functions import create_bnode_name
from functions.logical_source_functions import generate_constant_logical_source
from functions.logical_source_functions import generate_dissertation_logical_source
from functions.logical_source_functions import generate_IRI_logical_source
from functions.logical_source_functions import generate_lang_logical_source
from functions.logical_source_functions import generate_lang_nosplit_logical_source
from functions.logical_source_functions import generate_neutral_literal_logical_source
from functions.logical_source_functions import generate_not_lang_logical_source
from functions.logical_source_functions import generate_title_logical_source
from functions.po_map_functions import generate_bnode_po_map
from functions.po_map_functions import generate_constant_IRI
from functions.po_map_functions import generate_constant_literal
from functions.po_map_functions import generate_IRI_nosplit_po_map
from functions.po_map_functions import generate_IRI_po_map
from functions.po_map_functions import generate_IRI_split_po_map
from functions.po_map_functions import generate_lang_literal_split_po_map
from functions.po_map_functions import generate_langnotlang_literal_po_map
from functions.po_map_functions import generate_neutral_literal_nosplit_po_map
from functions.po_map_functions import generate_neutral_literal_po_map
from functions.po_map_functions import generate_neutral_literal_split_po_map
from functions.po_map_functions import generate_not_lang_literal_split_po_map
from functions.subject_map_functions import generate_bnode_subject_map
def generate_RML_for_IRI(RML_graph, default_map_name, map_name, node, prop_num, print_check=False):
    """Add the RML predicate-object map for an IRI-valued property.

    Dispatches on where the property lives: the main map, a 'no split'
    blank node, or an ordinary (split) blank node. Returns RML_graph.
    """
    if map_name == default_map_name:
        # Property goes in the main map.
        generate_IRI_po_map(RML_graph, map_name, node, prop_num)
        message = "\t\tgenerating IRI predicate object map"
    elif map_name in nosplit_bnode_list:
        # Property is in a 'no split' blank node.
        # see explanation here https://github.com/uwlib-cams/rml/tree/master/generateRML#no-split-blank-nodes # ! update link after updating readme
        generate_IRI_nosplit_po_map(RML_graph, map_name, node, prop_num)
        message = "\t\tgenerating IRI no split predicate object map"
    else:
        # Property goes into an ordinary blank node.
        generate_IRI_split_po_map(RML_graph, map_name, node)
        message = "\t\tgenerating IRI split predicate object map"
    if print_check:
        print(message)
    return RML_graph
def generate_RML_for_constant(RML_graph, map_name, node, print_check=False):
    """Add the RML predicate-object map for a constant-valued property.

    A constant containing "<" is treated as an IRI, anything else as a
    literal. For language-tagged maps ("Lang" in the name) an equivalent
    'no lang' (Not_*) map is generated as well. Returns RML_graph.
    """
    if "<" in node:
        # Constant value is an IRI.
        generate_constant_IRI(RML_graph, map_name, node)
        if print_check == True:
            print("\t\tgenerating constant predicate object map")
            print(f"\t\t\tmap name: {map_name}")
    else:
        # Constant value is a literal.
        generate_constant_literal(RML_graph, map_name, node)
        if print_check == True:
            print("\t\tgenerating constant predicate object map")
            print(f"\t\t\tmap name: {map_name}")
    if "Lang" in map_name:
        # Generate RML code for the equivalent 'no lang' blank node.
        # ! add something to readme and link to it here
        not_lang_map_name = f"Not_{map_name}"
        if "<" in node:
            generate_constant_IRI(RML_graph, not_lang_map_name, node)
            # BUG FIX: the original called generate_constant_IRI a second
            # time inside the print_check branch, which duplicated the
            # constant map whenever verbose output was enabled. Print the
            # progress message instead.
            if print_check == True:
                print("\t\tgenerating constant predicate object map, not lang")
        else:
            generate_constant_literal(RML_graph, not_lang_map_name, node)
            if print_check == True:
                print("\t\tgenerating constant predicate object map, not lang")
    return RML_graph
def generate_RML_for_bnode(RML_graph, bnode_po_dict, logsource_subject_list, entity, prop_num, value_type, mapping, node_list, num, map_name, print_check=False):
    """Generate the RML maps needed to route a property through a blank node.

    Emits up to three pieces of RML, each at most once per blank node map:
      1. a predicate-object map linking ``map_name`` to the blank node map,
      2. the blank node map's logical source,
      3. the blank node map's subject map.
    For language-tagged maps ("Lang" in the name), equivalent 'no lang'
    (``Not_*``) maps are generated alongside.

    Returns (RML_graph, bnode_po_dict, logsource_subject_list, map_name),
    where map_name is the blank node map's name so the caller can continue
    walking ``node_list`` inside the blank node.
    """
    # Predicate is the previous item in the node list, class is the next item.
    predicate_name = node_list[num-1]
    class_name = node_list[num+1]
    if print_check == True:
        print(f"\t\tblank node class: {class_name}")
    # Generate a name for the new blank node RML map.
    bnode_map_name = create_bnode_name(predicate_name, class_name, prop_num, value_type, mapping, node_list)
    if print_check == True:
        print(f"\t\tbnode_map_name: {bnode_map_name}\n")
    # Default boolean values; flipped below when new RML must be emitted.
    generate_new_bnode_po_map = False
    generate_new_logical_source = False
    generate_new_subject_map = False
    # Add the parentTriplesMap link only once per (map_name, bnode_map_name).
    if map_name in bnode_po_dict.keys():
        if bnode_map_name not in bnode_po_dict[map_name]:
            generate_new_bnode_po_map = True
    else:
        generate_new_bnode_po_map = True
    # Each blank node map gets a logical source and subject map exactly once.
    if bnode_map_name not in logsource_subject_list:
        generate_new_logical_source = True
        generate_new_subject_map = True
    # Generate RML based on the boolean values.
    if generate_new_bnode_po_map == True:
        generate_bnode_po_map(RML_graph, map_name, bnode_map_name, predicate_name)
        if print_check == True:
            print(f"\t\tgenerating blank node predicate object map ({map_name} > {bnode_map_name}) 109")
        if "Lang" in map_name:
            # Generate RML code for the equivalent 'no lang' parent map.
            not_lang_map_name = f"Not_{map_name}"
            generate_bnode_po_map(RML_graph, not_lang_map_name, bnode_map_name, predicate_name)
            if print_check == True:
                print(f"\t\tgenerating blank node predicate object map ({not_lang_map_name} > {bnode_map_name}) 116")
        if "Lang" in bnode_map_name:
            # Generate RML code for the equivalent 'no lang' blank node(s).
            not_lang_bnode_map_name = f"Not_{bnode_map_name}"
            generate_bnode_po_map(RML_graph, map_name, not_lang_bnode_map_name, predicate_name)
            if print_check == True:
                print(f"\t\tgenerating blank node predicate object map ({map_name} > {not_lang_bnode_map_name}) 123")
            if "Lang" in map_name:
                not_lang_map_name = f"Not_{map_name}"
                generate_bnode_po_map(RML_graph, not_lang_map_name, not_lang_bnode_map_name, predicate_name)
                if print_check == True:
                    print(f"\t\tgenerating blank node predicate object map ({not_lang_map_name} > {not_lang_bnode_map_name}) 129")
        # Record the link so it is not generated again.
        if map_name not in bnode_po_dict.keys():
            bnode_po_dict[map_name] = []
        bnode_po_dict[map_name].append(bnode_map_name)
    if generate_new_logical_source == True:
        # Check for 'no split' blank nodes first.
        # see explanation here https://github.com/uwlib-cams/rml/tree/master/generateRML#no-split-blank-nodes
        # ! update link after updating readme
        if bnode_map_name == "Dissertation_":
            generate_dissertation_logical_source(RML_graph, bnode_map_name)
            if print_check == True:
                print("\t\tgenerating dissertation logical source")
        elif bnode_map_name == "Title_":
            generate_title_logical_source(RML_graph, entity, bnode_map_name)
            if print_check == True:
                print("\t\tgenerating title logical source")
        elif bnode_map_name in nosplit_bnode_list:
            generate_lang_nosplit_logical_source(RML_graph, entity, bnode_map_name, prop_num)
            if print_check == True:
                print("\t\tgenerating logical source, no split")
            if "Lang" in bnode_map_name:
                # Generate RML code for the equivalent 'no lang' blank node.
                not_lang_bnode_map_name = f"Not_{bnode_map_name}"
                # NOTE(review): generate_not_lang_nosplit_logical_source is
                # not in this module's visible import list - confirm it is
                # imported, otherwise this branch raises NameError at runtime.
                generate_not_lang_nosplit_logical_source(RML_graph, entity, not_lang_bnode_map_name, prop_num)
                if print_check == True:
                    print("\t\tgenerating logical source, no split")
        # If not, check what kind of value the blank node takes.
        elif "Constant_" in bnode_map_name:
            # The value is a constant.
            generate_constant_logical_source(RML_graph, entity, bnode_map_name, prop_num)
            if print_check == True:
                print("\t\tgenerating constant logical source")
        elif "*" in mapping:
            # The value is an IRI.
            generate_IRI_logical_source(RML_graph, entity, bnode_map_name, prop_num)
            if print_check == True:
                print("\t\tgenerating IRI logical source")
        elif prop_num in no_language_tag_list:
            # The value is a literal, and we do not record a language tag.
            generate_neutral_literal_logical_source(RML_graph, entity, bnode_map_name, prop_num)
            if print_check == True:
                print("\t\tgenerating logical source")
        else:
            # The value is a literal, and we DO record a language tag if
            # one exists.
            generate_lang_logical_source(RML_graph, entity, bnode_map_name, prop_num)
            if print_check == True:
                print(f"\t\tgenerating logical source ({bnode_map_name})")
            if "Lang" in bnode_map_name:
                not_lang_bnode_map_name = f"Not_{bnode_map_name}"
                generate_not_lang_logical_source(RML_graph, entity, not_lang_bnode_map_name, prop_num)
                if print_check == True:
                    print(f"\t\tgenerating logical source ({not_lang_bnode_map_name})")
        logsource_subject_list.append(bnode_map_name)
    if generate_new_subject_map == True:
        generate_bnode_subject_map(RML_graph, bnode_map_name, class_name)
        if print_check == True:
            print(f"\t\tgenerating blank node subject map ({bnode_map_name})")
        if "Lang" in bnode_map_name:
            # Generate RML code for the equivalent 'no lang' blank node.
            # BUG FIX: the original assigned to 'not_lang_bnode_map' but then
            # used 'not_lang_bnode_map_name', which raised NameError (or
            # silently reused a stale value bound by an earlier branch).
            not_lang_bnode_map_name = f"Not_{bnode_map_name}"
            generate_bnode_subject_map(RML_graph, not_lang_bnode_map_name, class_name)
            if print_check == True:
                print(f"\t\tgenerating blank node subject map ({not_lang_bnode_map_name})")
    # Descend into the blank node map for subsequent properties.
    map_name = bnode_map_name
    return RML_graph, bnode_po_dict, logsource_subject_list, map_name
def generate_RML_for_literal(RML_graph, default_map_name, map_name, prop_num, node, print_check=False):
    """Add the RML predicate-object map for a literal-valued property.

    Dispatches on where the literal lives (main map, title blank node,
    'no split' blank node, or an ordinary split blank node) and on whether
    language tags should be recorded (prop_num in no_language_tag_list).
    Returns the updated RML_graph.
    """
    if map_name == default_map_name:
        if print_check == True:
            print(f"map name: {map_name}")
            print(f"default map name: {default_map_name}")
        """The literal is in the main map, not a blank node"""
        if prop_num in no_language_tag_list:
            """We don't want the language tag(s)"""
            generate_neutral_literal_po_map(RML_graph, map_name, node, prop_num)
            if print_check == True:
                print("\t\tgenerating literal predicate object map (ignore lang tags)")
        else:
            """We DO want the language tag(s) if they exist"""
            generate_langnotlang_literal_po_map(RML_graph, map_name, node, prop_num)
            if print_check == True:
                print("\t\tgenerating literal predicate object map (record lang tags)")
    # Otherwise, the literal is going into a blank node; check to see what kind
    elif map_name == "Title_":
        """The literal goes into the blank node for title properties"""
        generate_langnotlang_literal_po_map(RML_graph, map_name, node, prop_num)
        if print_check == True:
            print("\t\tgenerating title predicate object map (record lang tags)")
    elif map_name in nosplit_bnode_list:
        """The literal goes into a 'no split' blank node"""
        # see explanation here https://github.com/uwlib-cams/rml/tree/master/generateRML#no-split-blank-nodes
        # ! update link after updating readme
        generate_neutral_literal_nosplit_po_map(RML_graph, map_name, node, prop_num)
        if print_check == True:
            print("\t\tgenerating literal predicate object map, no split (ignore lang tags)")
    elif prop_num in no_language_tag_list:
        """We don't want the language tag(s) for this literal"""
        generate_neutral_literal_split_po_map(RML_graph, map_name, node)
        if print_check == True:
            print("\t\tgenerating literal predicate object map, yes split (ignore lang tags)")
    else:
        """We DO want the language tag(s) for this literal if they exist"""
        # Pass only the local part of prop_num (text after the last ':').
        generate_lang_literal_split_po_map(RML_graph, map_name, node, prop_num.split(':')[-1])
        if print_check == True:
            print(f"\t\tgenerating literal predicate object map, ({map_name})")
        if "Lang" in map_name:
            """Generate RML code for equivalent 'no lang' blank node"""
            not_lang_map_name = f"Not_{map_name}"
            generate_not_lang_literal_split_po_map(RML_graph, not_lang_map_name, node, prop_num.split(':')[-1])
            if print_check == True:
                print(f"\t\tgenerating literal predicate object map, yes split ({not_lang_map_name})")
    return RML_graph
|
# -*- coding: utf-8 -*-
"""Filesystem functionality."""
import logging
import os
import magic
from sqlalchemy.exc import OperationalError
from esis.db import Database
logger = logging.getLogger(__name__)
class TreeExplorer(object):
    """Look for sqlite files in a tree and return the valid ones.

    :param directory: Base directory for the tree to be explored.
    :type directory: str
    :param blacklist: List of relative directories to skip
    :type blacklist: list(str)
    """

    def __init__(self, directory, blacklist=None):
        """Initialize tree explorer."""
        self.directory = directory
        self.blacklist = [] if blacklist is None else blacklist

    def paths(self):
        """Return paths to valid databases found under directory.

        :return: Paths to valid databases
        :rtype: list(str)
        """
        candidates = self._explore()
        logger.debug(
            '%d database paths found under %s:\n%s',
            len(candidates),
            self.directory,
            '\n'.join(os.path.relpath(candidate, self.directory)
                      for candidate in candidates))
        # Keep only files that can be opened AND pass sqlite's quick check.
        valid_paths = []
        for candidate in candidates:
            try:
                with Database(candidate) as database:
                    if database.run_quick_check():
                        valid_paths.append(candidate)
            except OperationalError:
                logger.warning('Unable to open: %s', candidate)
        logger.debug(
            '%d database paths passed the integrity check:\n%s',
            len(valid_paths),
            '\n'.join(os.path.relpath(valid_path, self.directory)
                      for valid_path in valid_paths))
        return valid_paths

    def _explore(self):
        """Walk from base directory and return files that match pattern.

        :returns: SQLite files found under directory
        :rtype: list(str)
        """
        db_paths = []
        for dirpath, dirnames, filenames in os.walk(self.directory):
            logger.debug('Exploring %s...', dirpath)
            # Subdirectories whose path relative to the root is blacklisted.
            pruned = [
                dirname
                for dirname in dirnames
                if os.path.relpath(
                    os.path.join(dirpath, dirname),
                    self.directory,
                ) in self.blacklist
            ]
            if pruned:
                logger.debug('Subdirectories blacklisted: %s', pruned)
                # Update dirnames in place: os.walk then recurses only into
                # the remaining directories.
                dirnames[:] = [d for d in dirnames if d not in pruned]
            # Collect files whose content libmagic identifies as SQLite.
            for filename in filenames:
                db_path = os.path.join(dirpath, filename)
                # Skip missing files like broken symbolic links.
                if not os.path.isfile(db_path):
                    logger.warning('Unable to access file: %r', db_path)
                    continue
                if 'SQLite' in magic.from_file(db_path):
                    db_paths.append(db_path)
        return db_paths
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import requests
import re
import time
from settings import URL, applet, key
def get_data(URL):
    """Scrape the live score text from the page at ``URL``.

    NOTE(review): this depends on the page's exact markup - the
    "tsp-mr4"/"tsp-vm" class names and the surrounding bracket layout -
    and will silently break when that markup changes.
    """
    resp = requests.get(URL)
    # Lookaround regex: capture everything between the score-table markers
    # without including the markers themselves.
    pat = r"(?<=\[\[\[\"tsp-mr4\",\"tsp-vm\"]).+(?=,\[null,\[\[\"tsp-mr4\")"
    x = re.findall(pat, resp.text)
    # str() of the findall list keeps bracket/quote characters, which the
    # substitutions below then strip away.
    x = str(x)
    scores = re.sub(r'\[|\]|null|"|tsp-mr4|tsp-vm|,', ' ', x)
    # Reshape the flattened text: '比' joins the two scores ("vs"), and the
    # remaining spaces become line breaks.
    clean = scores.replace('- 0 ', '\n').replace(' ', ' ')
    clean = clean.replace('\n', '比').replace(' ', '\n')
    return clean.strip()
# Poll the score page and forward each update to the IFTTT webhook until
# the game ends (an empty scrape). The endpoint is shared by both
# branches, so build it once.
# FIX: the original interpolated the raw score text straight into the URL;
# spaces, newlines and CJK characters were not URL-encoded, producing a
# malformed query string. requests' params= handles the encoding.
IFTTT_ENDPOINT = f'https://maker.ifttt.com/trigger/{applet}/with/key/{key}'
while True:
    value1 = get_data(URL)
    finished = len(value1) < 1
    if finished:
        # Empty scrape means the game is over: send the final message.
        value1 = '比賽結束!'
    requests.get(IFTTT_ENDPOINT, params={'value1': value1})
    if finished:
        break
    # Wait before polling again to avoid hammering the page.
    time.sleep(15)
|
from flask import json, jsonify
from datetime import datetime
from os import path
GARASTAVOKLIS = "garastavoklis.txt"
def pieraksti_garastavokli(dati):
    """Record a user's mood, replacing any previous entry for the same name.

    ``dati`` is a dict with "vards" (name) and "garastavoklis" (mood) keys;
    entries are persisted in GARASTAVOKLIS as one JSON object per line.
    """
    # First read all existing moods from the file into a list.
    faila_esosie_garastavokli = []
    # Flag starts False because we have not found the user yet.
    lietotajam_jau_eksiste_garastavoklis = False
    if path.exists(GARASTAVOKLIS):
        with open(GARASTAVOKLIS, "r", encoding="utf-8") as faila_rindas:
            for rinda in faila_rindas:
                ielasita_rinda_json_formata = json.loads(rinda)
                # If we find an entry with the same user name ...
                if ielasita_rinda_json_formata["vards"] == dati["vards"]:
                    # ... replace the mood with the new value ...
                    ielasita_rinda_json_formata["garastavoklis"] = dati["garastavoklis"]
                    # ... and flag that the user already existed.
                    lietotajam_jau_eksiste_garastavoklis = True
                faila_esosie_garastavokli.append(ielasita_rinda_json_formata)
    # Unknown user: append the submitted data at the end of the list.
    if not lietotajam_jau_eksiste_garastavoklis:
        faila_esosie_garastavokli.append(dati)
    # Rewrite the file from scratch with the up-to-date mood list.
    # BUG FIX: write with explicit utf-8 - the file is read as utf-8 above,
    # but the original wrote with the locale default encoding, which can
    # raise UnicodeEncodeError or corrupt non-ASCII text (ensure_ascii=False
    # emits raw Unicode characters).
    with open(GARASTAVOKLIS, 'w', encoding="utf-8") as file:
        for line in faila_esosie_garastavokli:
            file.write(json.dumps(line, ensure_ascii=False) + "\n")
def lasi_garastavokli():
    """Return every stored mood entry as a list of dicts ([] if no file)."""
    if not path.exists(GARASTAVOKLIS):
        return []
    # One JSON object per line; decode each line individually.
    with open(GARASTAVOKLIS, "r", encoding="utf-8") as f:
        return [json.loads(rinda) for rinda in f]
|
from .api import SchoologyApi
from .objects import *
import click_log
from cached_property import cached_property
log = click_log.basic_config('lms')
class Schoology:
    """Facade over the Schoology REST API with per-object identity caching."""

    def __init__(self, config):
        RestObject._sc = self  # XXX HACK! gives RestObject instances a backref
        self.conf = config['schoology']
        self.api = SchoologyApi(self.conf['key'], self.conf['secret'])
        # Identity map: (class, id) -> cached instance.
        self.objs = {}

    def get(self, cls, ident):
        """Return the cached cls instance for ident, creating it on demand."""
        ident = int(ident)
        key = (cls, ident)
        if key not in self.objs:
            self.objs[key] = cls(self, ident)
        return self.objs[key]

    @cached_property
    def me(self):
        """The authenticated user's own profile."""
        return User(self.api._get('/users/me'))

    @cached_property
    def languages(self):
        """Mapping of language code to language name."""
        entries = self.api._get('/users/languages')['language']
        return {entry['language_code']: entry['language_name']
                for entry in entries}

    @cached_property
    def schools(self):
        """All schools visible to the authenticated user."""
        return [School(record) for record in
                self.api._get('/schools')['school']]

    @cached_property
    def collections(self):
        """All collections, following pagination."""
        return [Collection(record) for record in
                self.api._get_depaginate('/collections', 'collection')]

    def messages(self, folder=None):
        """Message threads in the given folder (defaults to 'inbox')."""
        if folder is None:
            folder = 'inbox'
        return [MessageThread(record) for record in
                self.api._get_depaginate('/messages/' + folder, 'message')]
|
from django.conf.urls import url
from . import views
# URL routes for the finance API. Each "(\d+)" capture group is passed to
# the view as a positional argument (presumably an entity id - confirm
# against the view signatures).
# NOTE(review): the patterns are unanchored (no ^/$) and inconsistent about
# trailing slashes, so Django's url() will match them anywhere in the
# path - confirm this is intended before tightening them.
urlpatterns = [
    url(r"contributions/(\d+)/total", views.total_contributions, name="total_contributions"),
    url(r"expenditures/(\d+)/total", views.total_expenditures, name="total_expenditures"),
    url(r"expenditures/categories/(\d+)/total", views.spending_categories, name="spending_categories"),
    url(r"donors/(\d+)/top", views.top_donors, name="top_donors"),
    url(r"donors/categories/(\d+)", views.donor_categories, name="donor_categories"),
    url(r"contributions/donors/(\d+)/total/", views.contribution_count, name="contribution_count"),
    url(r"all_names", views.all_names, name="all_names"),
    url(r"contributions/zip_codes/(\d+)/top/", views.top_zip_codes, name="top_zip_codes"),
]
|
# -*- coding: utf-8 -*-
# Copyright 2011-2012 Antoine Bertin <diaoulael@gmail.com>
#
# This file is part of subliminal.
#
# subliminal is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# subliminal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with subliminal. If not, see <http://www.gnu.org/licenses/>.
__all__ = ['Task', 'ListTask', 'DownloadTask', 'StopTask']
class Task(object):
    """Base class for tasks to use in subliminal.

    Concrete tasks (ListTask, DownloadTask, StopTask) are plain data
    holders consumed by a worker.
    """
    pass
class ListTask(Task):
    """List task used by the worker to search for subtitles

    :param video: video to search subtitles for
    :type video: :class:`~subliminal.videos.Video`
    :param list languages: languages to search for
    :param string service: name of the service to use
    :param config: configuration for the service
    :type config: :class:`~subliminal.services.ServiceConfig`
    """

    def __init__(self, video, languages, service, config):
        super(ListTask, self).__init__()
        # Plain data holder: keep every argument for the worker to use.
        self.video = video
        self.languages = languages
        self.service = service
        self.config = config

    def __repr__(self):
        return 'ListTask(%r, %r, %s, %r)' % (
            self.video, self.languages, self.service, self.config)
class DownloadTask(Task):
    """Download task used by the worker to download subtitles

    :param video: video to download subtitles for
    :type video: :class:`~subliminal.videos.Video`
    :param subtitles: subtitles to download in order of preference
    :type subtitles: list of :class:`~subliminal.subtitles.Subtitle`
    """

    def __init__(self, video, subtitles):
        super(DownloadTask, self).__init__()
        # Subtitles are ordered by preference; the worker tries them in turn.
        self.subtitles = subtitles
        self.video = video

    def __repr__(self):
        return 'DownloadTask(%r, %r)' % (self.video, self.subtitles)
class StopTask(Task):
    """Stop task that will stop the worker.

    Carries no payload; its type alone is the signal.
    """
    pass
|
"""
Tagging related views.
"""
from django.http import Http404
from django.utils.translation import ugettext as _
from django.views.generic.list_detail import object_list
from django.contrib.contenttypes.models import ContentType
from django.shortcuts import get_object_or_404, render_to_response
from django.http import HttpResponse, Http404, HttpResponseBadRequest, HttpResponseRedirect
from django.template import RequestContext, loader, Context
from tagging.models import Tag, TaggedItem
from tagging.utils import parse_tag_input, get_tag, get_queryset_and_model
def add_tags(request, content_type=None, object_id=None, ajax=False):
    """Add tags from POST "tags" to the object named by content_type/object_id.

    Merges the submitted tags into the object's existing comma-separated
    "tags" string, saves, and either re-renders the tag list (AJAX) or
    redirects to POST "next" / the object's absolute URL.
    """
    if request.method != "POST":
        # FIX: the original fell through and returned None for non-POST
        # requests, which makes Django raise "view didn't return an
        # HttpResponse"; reject them explicitly instead.
        return HttpResponseBadRequest()
    content_type = get_object_or_404(ContentType, id=int(content_type))
    tagged_object = content_type.get_object_for_this_type(id=int(object_id))
    # The target model must expose a comma-separated "tags" attribute.
    if not hasattr(tagged_object, "tags"):
        return HttpResponseBadRequest()
    existing = tagged_object.tags.split(",")
    tag_set = set(tag.strip() for tag in existing if len(tag.strip()) > 0)
    new_tags = parse_tag_input(request.POST["tags"])
    tag_set.update(new_tags)
    # FIX: sort before joining - set iteration order is arbitrary, so the
    # original saved the tag string in a nondeterministic order.
    tagged_object.tags = ",".join(sorted(tag_set))
    tagged_object.save()
    tagged_object.clear_cache()
    if request.is_ajax():
        template = loader.get_template("tagging/_tag_list.html")
        return HttpResponse(template.render(RequestContext(request, {"tags": new_tags})))
    # Non-AJAX: redirect to the caller-supplied "next" or the object's page.
    # (renamed from "next", which shadowed the builtin)
    if "next" in request.POST:
        redirect_to = request.POST["next"]
    else:
        redirect_to = tagged_object.get_absolute_url()
    return HttpResponseRedirect(redirect_to)
def autocomplete(request):
    """Return up to "limit" tag names starting with "q", newline-separated."""
    q = request.GET.get("q", "")
    try:
        limit = int(request.GET.get("limit", 10))
    except (TypeError, ValueError):
        # FIX: int() raises ValueError for non-numeric strings such as
        # "?limit=abc"; the original caught only TypeError, so a malformed
        # query string produced a 500 instead of the default limit.
        limit = 10
    names = (tag.name for tag in Tag.objects.filter(name__istartswith=q)[:limit])
    # NOTE(review): "mimetype" was renamed to "content_type" in Django 1.7+;
    # kept here because the rest of this module targets the old API - confirm
    # the project's Django version before changing it.
    return HttpResponse("\n".join(names), mimetype="text/plain")
import pathlib
import torch
from google.protobuf import text_format
from second.protos import pipeline_pb2
from second.builder import target_assigner_builder, voxel_builder
from second.pytorch.builder import box_coder_builder
from second.pytorch.builder import second_builder_for_official_onnx_and_cuda
import onnx
import onnx_tensorrt.backend as backend
import numpy as np
import torchplus
def model_2_onnx(config_path,
                 model_dir,
                 ckpt_path=None):
    """Export the voxel-feature-extractor (PFE) and RPN sub-networks of a
    trained model to ONNX files in the current directory
    (``./pfe_test.onnx`` and ``./rpn_test.onnx``).

    Args:
        config_path: path to a TrainEvalPipelineConfig protobuf text file.
        model_dir: directory holding checkpoints; the latest one is
            restored when ``ckpt_path`` is None.
        ckpt_path: explicit checkpoint file to restore, or None.
    """
    model_dir = pathlib.Path(model_dir)
    config = pipeline_pb2.TrainEvalPipelineConfig()
    with open(config_path, "r") as cfg_file:
        text_format.Merge(cfg_file.read(), config)
    model_cfg = config.model.second

    voxel_generator = voxel_builder.build(model_cfg.voxel_generator)
    bv_range = voxel_generator.point_cloud_range[[0, 1, 3, 4]]
    box_coder = box_coder_builder.build(model_cfg.box_coder)
    target_assigner = target_assigner_builder.build(
        model_cfg.target_assigner, bv_range, box_coder)
    net = second_builder_for_official_onnx_and_cuda.build(
        model_cfg, voxel_generator, target_assigner)

    # The ONNX-specific builder alters the graph, so weights are restored
    # only after construction.
    if ckpt_path is None:
        torchplus.train.try_restore_latest_checkpoints(model_dir, [net])
    else:
        torchplus.train.restore(ckpt_path, net)

    # Fixed-size dummy inputs for tracing.  Layout is presumably
    # (batch, channel, max_pillars, max_points_per_pillar) -- confirm.
    # torch.randn calls are issued in the original order to keep RNG
    # state consumption identical.
    pillar_shape = (1, 1, 12000, 100)
    pfe_inputs = tuple(
        torch.randn(*shape, device='cuda')
        for shape in (
            pillar_shape,       # pillar_x
            pillar_shape,       # pillar_y
            pillar_shape,       # pillar_z
            pillar_shape,       # pillar_i (intensity)
            (1, 1, 12000, 1),   # num_points_per_pillar
            pillar_shape,       # x_coors_for_sub_shaped
            pillar_shape,       # y_coors_for_sub_shaped
            pillar_shape,       # pillar_feature_mask
        )
    )
    rpn_input = torch.randn(1, 64, 496, 432, device='cuda')

    net.cuda()
    net.eval()
    # Export the two sub-networks separately.
    torch.onnx.export(net.voxel_feature_extractor,
                      pfe_inputs,
                      "./pfe_test.onnx",
                      verbose=False)
    torch.onnx.export(net.rpn,
                      rpn_input,
                      "./rpn_test.onnx",
                      verbose=False)
def test_onnx_for_trt(onnx_path,
                      config_path,
                      model_dir,
                      ckpt_path=None):
    """Sanity-check an exported PFE ONNX model by running it through the
    onnx-tensorrt backend and printing its output next to the original
    PyTorch sub-network's output on identical random inputs.

    Args:
        onnx_path: path to the exported PFE ONNX file.
        config_path: TrainEvalPipelineConfig protobuf text file.
        model_dir: checkpoint directory (used when ckpt_path is None).
        ckpt_path: explicit checkpoint to restore, or None for latest.

    The comparison is printed for manual inspection; nothing is asserted.
    """
    # Random float32 inputs shaped like the PFE's expected tensors
    # (presumably batch, channel, max_pillars, max_points_per_pillar --
    # confirm against the exporter).
    dummy_dev_pillar_x_ = np.random.random(size=(1, 1, 12000, 100)).astype(np.float32)
    dummy_dev_pillar_y_ = np.random.random(size=(1, 1, 12000, 100)).astype(np.float32)
    dummy_dev_pillar_z_ = np.random.random(size=(1, 1, 12000, 100)).astype(np.float32)
    dummy_dev_pillar_i_ = np.random.random(size=(1, 1, 12000, 100)).astype(np.float32)
    dummy_dev_num_points_per_pillar_ = np.random.random(size=(1, 1, 12000, 1)).astype(np.float32)
    dummy_dev_x_coors_for_sub_shaped_ = np.random.random(size=(1, 1, 12000, 100)).astype(np.float32)
    dummy_dev_y_coors_for_sub_shaped_ = np.random.random(size=(1, 1, 12000, 100)).astype(np.float32)
    dummy_dev_pillar_feature_mask_ = np.random.random(size=(1, 1, 12000, 100)).astype(np.float32)
    # Build a TensorRT engine from the ONNX graph and run it on the
    # numpy inputs.
    model = onnx.load(onnx_path)
    engine = backend.prepare(model, device='CUDA:0', max_batch_size=1)
    print("model read success")
    print()
    output_data = engine.run((dummy_dev_pillar_x_,
                              dummy_dev_pillar_y_,
                              dummy_dev_pillar_z_,
                              dummy_dev_pillar_i_,
                              dummy_dev_num_points_per_pillar_,
                              dummy_dev_x_coors_for_sub_shaped_,
                              dummy_dev_y_coors_for_sub_shaped_,
                              dummy_dev_pillar_feature_mask_
                              ))
    # ##########compare with pytorch output #########################
    for i in range(len(output_data)):
        print(output_data[i].shape)
    # First 100 values of the TensorRT output, for eyeballing against
    # the PyTorch values printed at the end.
    print(output_data[0][0, 0, 0:100])
    # Rebuild the PyTorch network exactly as in model_2_onnx so the same
    # weights are loaded.
    model_dir = pathlib.Path(model_dir)
    config = pipeline_pb2.TrainEvalPipelineConfig()
    with open(config_path, "r") as f:
        proto_str = f.read()
        text_format.Merge(proto_str, config)
    model_cfg = config.model.second
    voxel_generator = voxel_builder.build(model_cfg.voxel_generator)
    bv_range = voxel_generator.point_cloud_range[[0, 1, 3, 4]]
    box_coder = box_coder_builder.build(model_cfg.box_coder)
    target_assigner_cfg = model_cfg.target_assigner
    target_assigner = target_assigner_builder.build(target_assigner_cfg,
                                                    bv_range, box_coder)
    net = second_builder_for_official_onnx_and_cuda.build(model_cfg, voxel_generator, target_assigner)
    net.cuda()
    net.eval()
    # since the model is changed, dont restore first
    if ckpt_path is None:
        torchplus.train.try_restore_latest_checkpoints(model_dir, [net])
    else:
        torchplus.train.restore(ckpt_path, net)
    # Move the very same numpy inputs onto the GPU so both backends see
    # identical data.
    dummy_dev_pillar_x_ = torch.as_tensor(dummy_dev_pillar_x_, device="cuda")
    dummy_dev_pillar_y_ = torch.as_tensor(dummy_dev_pillar_y_, device="cuda")
    dummy_dev_pillar_z_ = torch.as_tensor(dummy_dev_pillar_z_, device="cuda")
    dummy_dev_pillar_i_ = torch.as_tensor(dummy_dev_pillar_i_, device="cuda")
    dummy_dev_num_points_per_pillar_ = torch.as_tensor(dummy_dev_num_points_per_pillar_, device="cuda")
    dummy_dev_x_coors_for_sub_shaped_ = torch.as_tensor(dummy_dev_x_coors_for_sub_shaped_, device="cuda")
    dummy_dev_y_coors_for_sub_shaped_ = torch.as_tensor(dummy_dev_y_coors_for_sub_shaped_, device="cuda")
    dummy_dev_pillar_feature_mask_ = torch.as_tensor(dummy_dev_pillar_feature_mask_, device="cuda")
    output_pytorch = net.voxel_feature_extractor(dummy_dev_pillar_x_, dummy_dev_pillar_y_, dummy_dev_pillar_z_,
                                                 dummy_dev_pillar_i_, dummy_dev_num_points_per_pillar_,
                                                 dummy_dev_x_coors_for_sub_shaped_, dummy_dev_y_coors_for_sub_shaped_,
                                                 dummy_dev_pillar_feature_mask_)
    # Should match the TensorRT values printed above (within float
    # tolerance).
    print(output_pytorch[0, 0, 0:100])
# Script entry: export the model to ONNX, then validate the PFE export
# via the TensorRT backend.
# NOTE(review): these calls also run on import; consider guarding with
# ``if __name__ == '__main__':`` -- confirm nothing imports this module.
onnx_path = "./pfe_test.onnx"
model_2_onnx("../configs/pointpillars/car/xyres_16.proto", "../../kitti_models")
test_onnx_for_trt(onnx_path, "../configs/pointpillars/car/xyres_16.proto", "../../kitti_models")
# The string literals below are commented-out ONNX inspection snippets
# kept for reference; as bare expression statements they have no runtime
# effect.
"""
model = onnx.load("./pfe_test.onnx")
# Check that the IR is well formed
print(onnx.checker.check_model(model))
# Print a human readable representation of the graph
print(onnx.helper.printable_graph(model.graph))
model = onnx.load("./pfe_official.onnx")
# Check that the IR is well formed
print(onnx.checker.check_model(model))
# Print a human readable representation of the graph
print(onnx.helper.printable_graph(model.graph))
"""
"""
model = onnx.load("./pfe_test.onnx")
# Check that the IR is well formed
print(onnx.checker.check_model(model))
# Print a human readable representation of the graph
print(onnx.helper.printable_graph(model.graph))
"""
from django.http import HttpResponse
class MyException(Exception):
    """Marker exception raised by the ``oh_no`` view below."""
    pass
def ok(request):
    """Trivial view that always succeeds with a minimal HTML page."""
    body = "<html><body>OK</body></html>"
    return HttpResponse(body)
def oh_no(request):
    """
    View that raises an exception
    """
    # NOTE(review): presumably exists to exercise error-handling /
    # middleware paths in tests -- confirm with callers.
    raise MyException("oh no")
|
import pytest
import rasterio
from rasterio.errors import RasterioDeprecationWarning
from rasterio.profiles import default_gtiff_profile
def test_set_band_descriptions(tmpdir):
    """Band descriptions assigned on an open writable dataset round-trip."""
    path = str(tmpdir.join('test.tif'))
    with rasterio.open(
            path, 'w', count=3, height=256, width=256,
            **default_gtiff_profile) as dataset:
        # A fresh 3-band file starts with no descriptions.
        assert dataset.descriptions == (None, None, None)
        new_descriptions = [
            "this is a test band", "this is another test band", None]
        dataset.descriptions = new_descriptions
        assert dataset.descriptions == tuple(new_descriptions)
@pytest.mark.parametrize('value', [[], ['x'], ['x', 'y', 'z']])
def test_set_band_descriptions_error(tmpdir, value):
    """Assigning a description list whose length differs from the band
    count raises ValueError."""
    path = str(tmpdir.join('test.tif'))
    with rasterio.open(
            path, 'w', count=2, height=256, width=256,
            **default_gtiff_profile) as dataset:
        with pytest.raises(ValueError):
            dataset.descriptions = value
def test_set_band_descriptions_deprecated(tmpdir):
    """Warn about deprecation"""
    # set_description() is superseded by the descriptions property; the
    # calls must emit RasterioDeprecationWarning yet still take effect.
    tmptiff = str(tmpdir.join('test.tif'))
    with rasterio.open(
            tmptiff, 'w', count=2, height=256, width=256,
            **default_gtiff_profile) as dst:
        assert dst.descriptions == (None, None)
        with pytest.warns(RasterioDeprecationWarning):
            dst.set_description(1, "this is a test band")
            dst.set_description(2, "this is another test band")
        assert dst.descriptions == (
            "this is a test band", "this is another test band")
|
################################
#
# Default component settings
#
################################
# Pinlabel
# Default geometry and CSS-class tags for a pin label.
# NOTE(review): numeric values are presumably SVG user units -- confirm.
pinlabel = {
    "tag": "pinlabel",
    "body": {
        # Offset of the label body from the component origin.
        "x": 6,
        "y": 0,
        "width": 80,
        "height": 26,
        "corner_radius": 3,
        "tag": "pinlabel__body",
    },
    "leaderline": {
        # "hh" routing code -- presumably horizontal/horizontal; confirm
        # against the leaderline renderer.
        "direction": "hh",
        "tag": "pinlabel__leader",
    },
    "text": {
        "tag": "pinlabel__text",
    },
}
# Legend
# Defaults for the diagram legend and its entries.
legend = {
    # None means no height cap.
    "max_height": None,
    # Inset padding; tuple order presumably (top, right, bottom, left) --
    # confirm against the layout code.
    "inset": (10, 10, 10, 10),
    "tag": "legend",
    "entry": {
        "width": 159,
        "height": 28,
        # Colour swatch drawn next to each entry's label.
        "swatch": {
            "width": 20,
            "height": 20,
            "tag": "swatch",
        },
        "tag": "legendentry",
    },
}
# TextBlock
# Defaults for free-form text blocks; None width/height means unsized.
textblock = {
    "line_height": 22,
    "width": None,
    "height": None,
    "offset": (0, 0),
    "tag": "textblock",
}
# Annotation
# Defaults for callout annotations: text content, rounded body, a target
# marker over the annotated point, and the connecting leaderline.
annotation = {
    "tag": "annotation",
    "content": {
        "tag": "annotation__text",
        # Text offset inside the annotation body.
        "x": 28,
        "y": 17,
        "line_height": 16,
    },
    "body": {
        "x": 40,
        "y": 29,
        "width": 250,
        "height": 50,
        # Half the height: fully pill-shaped ends.
        "corner_radius": 25,
        "tag": "annotation__body",
    },
    "target": {
        # Centered on the annotated point (negative half-size offsets);
        # corner_radius of half the size makes it a circle.
        "x": -10,
        "y": -10,
        "width": 20,
        "height": 20,
        "corner_radius": 10,
        "tag": "annotation__target",
    },
    "leaderline": {
        # "vh" routing code -- presumably vertical-then-horizontal; confirm.
        "direction": "vh",
        "tag": "annotation__leaderline",
    },
}
# Panel
# Defaults for layout panels (inner/outer tagged regions).
panel = {
    "inset": (5, 5, 5, 5),
    "tag": "panel",
    "inner": {"tag": "panel__inner"},
    "outer": {"tag": "panel__outer"},
}
# Integrated circuit
# Defaults for a DIP (dual in-line package) outline: legs on two sides,
# so the inset pads only the top and bottom edges.
ic_dip = {
    "inset": (15, 0, 15, 0),
    "tag": "ic ic--dip",
    "body": {
        "x": 15,
        "y": 0,
        "corner_radius": 3,
        "tag": "ic__body",
    },
    "leg": {
        "tag": "ic__leg",
    },
    # Pin-1 / orientation indicator dot.
    "polarity_mark": {
        "radius": 5,
        "tag": "polarity",
    },
}
# Defaults for a QFP (quad flat package) outline: legs on all four
# sides, hence the uniform inset and an explicit pin pitch.
ic_qfp = {
    "inset": (15, 15, 15, 15),
    "pin_pitch": 30,
    "tag": "ic ic--qfp",
    "body": {
        "x": 15,
        "y": 15,
        "corner_radius": 3,
        "tag": "ic__body",
    },
    "leg": {
        "tag": "ic__leg",
    },
    # Pin-1 / orientation indicator dot.
    "polarity_mark": {
        "radius": 5,
        "tag": "polarity",
    },
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.