"""
Models for mail
"""
from django.contrib.auth.models import User
from django.db import models
from financialaid.models import FinancialAid
from micromasters.models import TimestampedModel
from search.models import PercolateQuery
class FinancialAidEmailAudit(TimestampedModel):
"""
Audit table for the Financial Aid
"""
acting_user = models.ForeignKey(User, null=False, on_delete=models.CASCADE)
financial_aid = models.ForeignKey(FinancialAid, null=True, on_delete=models.SET_NULL)
to_email = models.TextField(null=False)
from_email = models.TextField(null=False)
email_subject = models.TextField(null=False, blank=True)
email_body = models.TextField(null=False, blank=True)
class AutomaticEmail(TimestampedModel):
"""
Stores information for an automatically sent email
"""
query = models.ForeignKey(PercolateQuery, null=True, on_delete=models.SET_NULL)
enabled = models.BooleanField(default=False)
email_subject = models.TextField(null=False, blank=True)
email_body = models.TextField(null=False, blank=True)
sender_name = models.TextField(null=False, blank=True)
staff_user = models.ForeignKey(User, null=True, on_delete=models.CASCADE)
def __str__(self):
"""String representation of AutomaticEmail"""
return "AutomaticEmail sender={}, subject={}".format(self.sender_name, self.email_subject)
class SentAutomaticEmail(TimestampedModel):
"""
Keeps track of automatic emails which were sent to particular users
"""
PENDING = 'pending'
SENT = 'sent'
STATUSES = [PENDING, SENT]
user = models.ForeignKey(User, null=False, on_delete=models.CASCADE)
automatic_email = models.ForeignKey(AutomaticEmail, null=False, on_delete=models.CASCADE)
# This is used to aid the transaction in locking this row. SentAutomaticEmail will be created as PENDING
# and then changed to SENT once a successful email was sent.
status = models.CharField(
max_length=30,
choices=[(status, status) for status in STATUSES],
default=PENDING,
)
class Meta:
unique_together = ('user', 'automatic_email')
def __str__(self):
return "SentAutomaticEmail for user={user} and automatic_email={automatic_email}".format(
user=self.user,
automatic_email=self.automatic_email,
)
class PartnerSchool(models.Model):
"""
Model for partner school to send records to
"""
name = models.CharField(max_length=255)
email = models.TextField(null=False)
def __str__(self):
return self.name
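
# --- Illustrative sketch only (not part of the original models) ---
# The comment on SentAutomaticEmail.status describes a PENDING -> SENT transition
# guarded by a row lock. Below is a minimal sketch of that flow, assuming the row
# already exists; the helper name is hypothetical and only the standard Django ORM
# calls (transaction.atomic, select_for_update) are assumed.
def mark_automatic_email_sent(sent_automatic_email_id):
    """Lock a SentAutomaticEmail row and flip its status to SENT after a successful send."""
    from django.db import transaction
    with transaction.atomic():
        record = (
            SentAutomaticEmail.objects
            .select_for_update()
            .get(id=sent_automatic_email_id)
        )
        if record.status == SentAutomaticEmail.PENDING:
            # the actual email send would happen here
            record.status = SentAutomaticEmail.SENT
            record.save()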
|
import numpy as np
import os
from costants import \
EXTRACTION_DPI, \
TEXT_FOLDER, \
TABLE_FOLDER, \
MAX_NUM_BOXES, \
MIN_SCORE
from personal_errors import InputError, OutputError
import errno
import tensorflow as tf
from PIL import Image
from alyn import deskew
import logging
from logger import TimeHandler
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
logger.addHandler(TimeHandler().handler)
def reshape_image_into_numpy_array(pil_image):
"""
    The neural network needs an RGB 3-channel numpy image (because of the pre-trained network),
    so we need to convert the pillow image into a numpy uint8 height*width*3 array.
    We cannot simply fill the two additional channels with zeros because the NN uses every
    channel to make predictions, so zero-filled channels would cut the scores to roughly 1/3.
:param pil_image: a pillow image
:return: a reshaped numpy image ready for inference
"""
    logger.info('Converting pillow image to a numpy 3-dimensional array...')
    (im_width, im_height) = pil_image.size
    np_array = np.array(pil_image.getdata()).reshape((im_height, im_width, 1)).astype(np.uint8)
    logger.info('Pillow image converted to a height*width*1 numpy image')
np_array = np.concatenate((np_array, np_array, np_array), axis=2)
logger.info('Numpy 3-dimension array created')
return np_array
def do_inference_with_graph(pil_image, inference_graph_path):
"""
It takes a pillow image and looks for tables inside
:param pil_image: Pillow image
:param inference_graph_path:
:return: (boxes, scores), two lists with all the boxes and their likelihood scores
"""
logger.info('Reading inference graph...')
detection_graph = tf.Graph()
# checking if inference graph exists
if not os.path.isfile(inference_graph_path):
raise InputError('Inference graph at\n{}\nnot found'.format(inference_graph_path))
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(inference_graph_path, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
with detection_graph.as_default():
with tf.Session(graph=detection_graph) as sess:
# the array based representation of the image will be used later in order to prepare the
# result image with boxes and labels on it.
image_np = reshape_image_into_numpy_array(pil_image)
# Expand dimensions since the model expects images to have shape: [1, None, None, 3]
image_np_expanded = np.expand_dims(image_np, axis=0)
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
# Each box represents a part of the image where a particular object was detected.
boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
# Each score represent how level of confidence for each of the objects.
# Score is shown on the result image, together with the class label.
scores = detection_graph.get_tensor_by_name('detection_scores:0')
classes = detection_graph.get_tensor_by_name('detection_classes:0')
num_detections = detection_graph.get_tensor_by_name('num_detections:0')
# Actual detection.
logger.info('Running inference...')
(boxes, scores, classes, num_detections) = sess.run(
[boxes, scores, classes, num_detections],
feed_dict={image_tensor: image_np_expanded})
logger.info('Inference run, boxes and scores have been found')
return boxes[0], scores[0]
def check_if_intersected(coord_a, coord_b):
    """
    Check whether rectangle b intersects rectangle a
    :param coord_a: dict with {y_min, x_min, y_max, x_max}
    :param coord_b: same as coord_a
    :return: true if intersected, false otherwise
    """
    logger.info('Returning if the two boxes are intersected...')
    return \
        coord_a['x_max'] > coord_b['x_min'] and \
        coord_a['x_min'] < coord_b['x_max'] and \
        coord_a['y_max'] > coord_b['y_min'] and \
        coord_a['y_min'] < coord_b['y_max']
def check_if_vertically_overlapped(box_a, box_b):
    """
    Return whether box_b vertically overlaps box_a.
    :param box_a: dict with {y_min, x_min, y_max, x_max}
    :param box_b: same as box_a
    :return: true if overlapped, false otherwise
    """
return \
box_a['y_min'] < box_b['y_min'] < box_a['y_max'] or \
box_a['y_min'] < box_b['y_max'] < box_a['y_max'] or \
(box_a['y_min'] >= box_b['y_min'] and box_a['y_max'] <= box_b['y_max']) or \
(box_a['y_min'] <= box_b['y_min'] and box_a['y_max'] >= box_b['y_max'])
def merge_vertically_overlapping_boxes(boxes):
"""
    Returns a list of boxes that have been merged together if vertically overlapping.
    This is recursive, so no overlapping box is left behind even if only the last one is detected.
    This algorithm works in our case because the input boxes are already ordered by score: the
    first one is scored higher than the second and so on. This is a pre-condition.
:param boxes: list of boxes possibly overlapping
:return: list of merged boxes
"""
# first box is always inside
merged_boxes = [boxes[0]]
i = 0
overlapping = False
for box in boxes[1:]:
i += 1
# extraction of coordinates for better reading
coord_box = {
'y_min': box[0],
'x_min': box[1],
'y_max': box[2],
'x_max': box[3]
}
for m_box in merged_boxes:
# extraction of coordinates for better reading
coord_m_box = {
'y_min': m_box[0],
'x_min': m_box[1],
'y_max': m_box[2],
'x_max': m_box[3]
}
if check_if_vertically_overlapped(coord_m_box, coord_box):
overlapping = True
# merge of the two overlapping boxes
if m_box[0] > box[0]:
m_box[0] = box[0]
if m_box[2] < box[2]:
m_box[2] = box[2]
if not overlapping:
# if not overlapping we append the box. Exit condition for recursive call
merged_boxes.append(box)
if overlapping:
# recursive call. It converges because the exit condition consumes the generator.
return merge_vertically_overlapping_boxes(merged_boxes)
else:
return merged_boxes
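
# Illustrative only (not part of the original module): a tiny sanity check of
# merge_vertically_overlapping_boxes on hand-made boxes. Boxes are
# [y_min, x_min, y_max, x_max] and are assumed to be ordered by score.
def _example_merge_vertically_overlapping_boxes():
    boxes = [
        [0.2, 0.1, 0.5, 0.9],  # best-scoring box
        [0.4, 0.1, 0.7, 0.9],  # vertically overlaps the first one (0.4-0.7 vs 0.2-0.5)
    ]
    merged = merge_vertically_overlapping_boxes(boxes)
    # the two boxes collapse into one spanning the union of the vertical ranges
    assert merged == [[0.2, 0.1, 0.7, 0.9]]
    return merged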
def keep_best_not_overlapping_boxes(boxes, scores, max_num_boxes=5, min_score=0.8):
"""
    Return the highest-scoring boxes with no overlapping. Merging is NOT applied:
boxes are: box[0]=ymin, box[1]=xmin, box[2]=ymax, box[3]=xmax
:param boxes: list of boxes found in inference
:param scores: likelihood of the boxes
:param max_num_boxes: max num of boxes to be saved
:param min_score: min box score to check
:return: list of the best not overlapping boxes
"""
logger.info('Detecting best matching boxes...')
kept_scores = []
    kept_boxes = []  # always keep the first box, which is the best one.
num_boxes = 0
i = 0
if scores[0] > min_score:
kept_boxes.append(boxes[0])
kept_scores.append(scores[0])
num_boxes += 1
i += 1
for b in boxes[1:]:
if num_boxes < max_num_boxes and scores[i] > min_score:
intersected = False
coord_b = {
'y_min': b[0],
'x_min': b[1],
'y_max': b[2],
'x_max': b[3]
}
for kb in kept_boxes:
coord_kb = {
'y_min': kb[0],
'x_min': kb[1],
'y_max': kb[2],
'x_max': kb[3]
}
intersected = check_if_intersected(
coord_a=coord_b,
coord_b=coord_kb
)
if not intersected:
kept_boxes.append(b)
num_boxes += 1
kept_scores.append(scores[i])
i += 1
else:
break
# no merge
# kept_boxes = merge_vertically_overlapping_boxes(kept_boxes)
else:
kept_boxes = []
return kept_boxes, kept_scores
def keep_best_boxes_merged(boxes, scores, max_num_boxes=5, min_score=0.8):
"""
    Return the highest-scoring boxes, with vertically overlapping boxes merged:
boxes are: box[0]=ymin, box[1]=xmin, box[2]=ymax, box[3]=xmax
:param boxes: list of boxes found in inference
:param scores: likelihood of the boxes
:param max_num_boxes: max num of boxes to be saved
:param min_score: min box score to check
:return: list of the best not overlapping boxes
"""
logger.info('Detecting best matching boxes...')
kept_scores = []
    kept_boxes = []  # always keep the first box, which is the best one.
num_boxes = 0
i = 0
if scores[0] > min_score:
kept_boxes.append(boxes[0])
kept_scores.append(scores[0])
num_boxes += 1
i += 1
for b in boxes[1:]:
# add boxes to the ones to be merged
if num_boxes < max_num_boxes and scores[i] > min_score:
kept_boxes.append(b)
num_boxes += 1
kept_scores.append(scores[i])
i += 1
else:
break
kept_boxes = merge_vertically_overlapping_boxes(kept_boxes)
else:
kept_boxes = []
return kept_boxes, kept_scores
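
# Illustrative only (not part of the original module): hypothetical inputs showing
# how keep_best_boxes_merged drops low-score boxes and merges the remaining
# vertically overlapping ones. Coordinates are normalized [y_min, x_min, y_max, x_max].
def _example_keep_best_boxes_merged():
    boxes = [
        [0.10, 0.0, 0.30, 1.0],
        [0.25, 0.0, 0.45, 1.0],  # vertically overlaps the first box
        [0.60, 0.0, 0.80, 1.0],  # dropped: its score is below min_score
    ]
    scores = [0.95, 0.90, 0.50]
    kept_boxes, kept_scores = keep_best_boxes_merged(boxes, scores, max_num_boxes=5, min_score=0.8)
    # the two high-score boxes are merged into one tall box; the third is discarded
    assert kept_boxes == [[0.10, 0.0, 0.45, 1.0]]
    assert kept_scores == [0.95, 0.90]
    return kept_boxes, kept_scores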
def crop_wide(pil_image, boxes):
"""
    Crop tables from images. To simplify cropping (and to halve the risk of mistakes, since only two bounds are considered)
    we cut the image across its full width from the upper bound to the lower one. Each table becomes its own image,
    stored in a list, and every remaining text slice is pasted into a single text image.
    If no boxes are found, only the text image is returned and it is equal to pil_image.
:param pil_image: an image in which some table have been found.
:param boxes: bounding boxes for tables
:return: pillow list of cropped tables images, pillow image of text.
"""
cropped_tables = []
segments = [0] # adding position 0 to simplify anti-crop text later
height_of_crops = 0
logger.info('Checking if there are some boxes recorded...')
    if boxes:
(im_width, im_height) = pil_image.size
logger.info('Boxes have been found. Cropping tables...')
for box in boxes:
cropped_tables.append(pil_image.crop(tuple((0, int(box[0]), im_width, int(box[2])))))
segments.append(int(box[0]))
segments.append(int(box[2]))
height_of_crops += (int(box[2]) - int(box[0]))
logger.info('Tables cropped')
# sorts all segments to simplify anti-crop text later
segments.append(im_height) # adding last position to simplify anti-crop text later
segments.sort()
# create new image with new dimension
new_image = Image.new('L', (im_width, im_height - height_of_crops))
start_position = 0
logger.info('Creating image from cropped text slices...')
# cutting image in anti-boxes position
for i in range(len(segments)): # segments will always be even
if not i % 2 and i < len(segments) - 1: # takes only even positions
if i != 0:
start_position += segments[i - 1] - segments[i - 2]
new_image.paste(pil_image.crop(tuple((0, segments[i], im_width, segments[i + 1]))), (0, start_position))
cropped_text = new_image
logger.info('Created text image')
else:
logger.info('No boxes found')
cropped_text = pil_image
return cropped_tables, cropped_text
def extract_tables_and_text(pil_image, inference_graph_path):
"""
Extracts tables and text from image_path using inference_graph_path
:param pil_image:
:param inference_graph_path:
:return: (cropped_tables, cropped_text), list of table pillow images and a text image
"""
(im_width, im_height) = pil_image.size
boxes, scores = do_inference_with_graph(pil_image, inference_graph_path)
best_boxes, best_scores = keep_best_boxes_merged(
boxes=boxes,
scores=scores,
max_num_boxes=MAX_NUM_BOXES,
min_score=MIN_SCORE
)
logger.info("Best boxes are: ")
for box in best_boxes:
logger.info(box)
logger.info("With scores:")
for score in best_scores:
logger.info(score)
# create coordinates based on image dimension
for box in best_boxes:
box[0] = int(box[0] * im_height)
box[2] = int(box[2] * im_height)
box[1] = int(box[1] * im_width)
box[3] = int(box[3] * im_width)
(cropped_tables, cropped_text) = crop_wide(pil_image, best_boxes)
return cropped_tables, cropped_text
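
# Illustrative only: a minimal, hypothetical end-to-end call of this module.
# The page image and inference-graph paths below are placeholders.
def _example_extract_tables_and_text(page_image_path='page.jpeg',
                                     inference_graph_path='frozen_inference_graph.pb'):
    pil_image = Image.open(page_image_path).convert('L')
    cropped_tables, cropped_text = extract_tables_and_text(pil_image, inference_graph_path)
    logger.info('Found %d table crop(s) on the page', len(cropped_tables))
    return cropped_tables, cropped_text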
def create_temp_folders(file_name, temp_table_folder=TABLE_FOLDER, temp_text_folder=TEXT_FOLDER):
"""
Clear any existing table/file_name and text/file_name folder for creating new images
:param file_name:
:param temp_table_folder:
:param temp_text_folder:
:return: None
"""
logger.info('Clear and create temp file for images from pdf')
if not os.path.isdir(temp_table_folder):
# creates folder for table images per page
try:
os.makedirs(temp_table_folder)
logger.info('{} created successfully'.format(temp_table_folder))
except OSError as exc:
if exc.errno != errno.EEXIST:
raise OutputError('{} was not created correctly.'
.format(temp_table_folder))
else:
logger.info('{} already present'.format(temp_table_folder))
    # creates folder for text images per page
    if not os.path.isdir(temp_text_folder):
        try:
            os.makedirs(temp_text_folder)
            logger.info('{} created successfully'.format(temp_text_folder))
except OSError as exc:
if exc.errno != errno.EEXIST:
raise OutputError('{} was not created correctly.'
.format(temp_text_folder))
else:
logger.info('{} already present'.format(temp_text_folder))
if os.path.isdir(os.path.join(temp_table_folder, str(file_name))):
logger.info('Clearing table temp folder from existing files...')
# shutil.rmtree(os.path.join(temp_table_folder, str(file_name)), ignore_errors=True)
logger.info('Clear done')
if os.path.isdir(os.path.join(temp_text_folder, str(file_name))):
logger.info('Clearing text temp folder from existing files...')
# shutil.rmtree(os.path.join(temp_text_folder, str(file_name)), ignore_errors=True)
logger.info('Clear done')
try:
logger.info('Creating {}...'.format(temp_table_folder))
os.makedirs(os.path.join(temp_table_folder, str(file_name)))
logger.info(temp_table_folder + ' created')
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise OutputError('{} was not created.'.format(temp_table_folder))
else:
logger.info('{} already present'.format(temp_table_folder))
try:
logger.info('Creating {}...'.format(temp_text_folder))
os.makedirs(os.path.join(temp_text_folder, str(file_name)))
logger.info(temp_text_folder + ' created')
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise OutputError('{} was not created'.format(temp_text_folder))
else:
logger.info('{} already present'.format(temp_text_folder))
def write_crops(file_name, cropped_tables=None, cropped_text=None, temp_table_path=TABLE_FOLDER,
temp_text_path=TEXT_FOLDER, page_number=None):
"""
Writes table and text images under table and text folder
:param file_name:
:param cropped_tables: list of pillow images
:param cropped_text: list of pillow images
:param temp_table_path:
:param temp_text_path:
:return: None
"""
i = 0
logger.info('Writing cropped tables...')
table_paths = []
text_path = None
if cropped_tables is not None:
for ct in cropped_tables:
new_file_path = \
os.path.join(temp_table_path, str(file_name),
'table_pag_{pag_num}_{c}.jpeg'.format(pag_num=page_number, c=i))
ct = ct.convert('L')
logger.info('Deskewing table...')
sd = deskew.Deskew(
input_numpy=np.asarray(ct),
output_numpy=True
)
de_skewed_image_np = sd.run()
logger.info('Deskew done')
ct = Image.fromarray(de_skewed_image_np)
ct = ct.convert(mode='L')
            try:
                ct.save(new_file_path, dpi=(EXTRACTION_DPI, EXTRACTION_DPI))
                logger.info('Image {} written to disk'.format(new_file_path))
            except (IOError, ValueError) as e:
                raise OutputError('Cannot write image to disk: \n{}'.format(e))
i += 1
table_paths.append(new_file_path)
logger.info('Writing cropped tables done.')
else:
logger.info('No tables to write on disk')
if cropped_text is not None:
logger.info('Writing cropped text...')
# for cl in cropped_text:
new_file_path = os.path.join(temp_text_path, str(file_name), 'text_pag_{}.jpeg'.format(page_number))
# ct_l = cl.convert('L')
        try:
            cropped_text.save(new_file_path, dpi=(EXTRACTION_DPI, EXTRACTION_DPI))
            logger.info('Image {} written to disk'.format(new_file_path))
        except (IOError, ValueError) as e:
            raise OutputError('Cannot write image to disk: \n{}'.format(e))
# i += 1
logger.info('Writing cropped text done.')
text_path = new_file_path
return table_paths, text_path
# def find_table(file_name, pil_image, create_temp_files=False, temp_table_path=TABLE_FOLDER, temp_text_path=TEXT_FOLDER):
# """
# useful only for batch. The function extract_tables_and_text does everything
# :param file_name:
# :param pil_image:
# :param create_temp_files:
# :param temp_table_path:
# :param temp_text_path:
# :return:
# """
# cropped_tables, cropped_text = extract_tables_and_text(pil_image=pil_image, inference_graph_path=PATH_TO_CKPT)
# if create_temp_files:
# create_temp_folders(file_name=file_name)
# write_crops(
# file_name=file_name,
# cropped_tables=cropped_tables,
# cropped_text=cropped_text
# )
|
# -*- coding: utf-8 -*-
from pokemongo_bot.event_manager import EventHandler
from pokemongo_bot.base_dir import _base_dir
import json
import os
import time
import discord_simple
import thread
import re
from pokemongo_bot.datastore import Datastore
from pokemongo_bot import inventory
from chat_handler import ChatHandler
import pprint
DEBUG_ON = False
class FileIOException(Exception):
pass
class DiscordClass:
def __init__(self, bot, master, pokemons, config):
self.bot = bot
self.pokemons = pokemons
self._dbot = None
self.config = config
def sendMessage(self, to=None, text=None):
self._dbot.send_message(to, text)
def connect(self):
self._dbot = discord_simple.Bot(self.bot.config.discord_token,on_message=self.on_message)
def _get_player_stats(self):
json_inventory = inventory.jsonify_inventory()
return next((x["inventory_item_data"]["player_stats"]
for x in json_inventory
if x.get("inventory_item_data", {}).get("player_stats", {})),
None)
def send_player_stats_to_chat(self, chat_id):
stats = self.chat_handler.get_player_stats()
if stats:
self.sendMessage(to=chat_id, text="\n".join(stats))
else:
self.sendMessage(to=chat_id, text="Stats not loaded yet\n")
def on_message(self,message):
if message.content == "/help":
res = (
"Commands: ",
"/info - info about bot"
)
self.sendMessage(to=str(message.author), text="\n".join(res))
elif message.content == "/info":
self.send_player_stats_to_chat(message.author)
def run(self):
self._dbot.forever_loop()
class DiscordHandler(EventHandler):
def __init__(self, bot, config):
self.bot = bot
self.dbot = None
self.master = config.get('master', None)
        if self.master is None:
return
self.pokemons = config.get('alert_catch', {})
self.whoami = "DiscordHandler"
self.config = config
self.chat_handler = ChatHandler(self.bot, [])
def catch_notify(self, pokemon, cp, iv, params):
if params == " ":
return True
try:
oper = re.search(r'operator:([^ ]+)', params).group(1)
rule_cp = int(re.search(r'cp:([0-9]+)', params).group(1))
rule_iv = float(re.search(r'iv:([0-9.]+)', params).group(1))
rule_pkmn = re.search(r'pokemon:([^ ]+)', params).group(1)
return rule_pkmn == pokemon and (oper == "or" and (cp >= rule_cp or iv >= rule_iv) or cp >= rule_cp and iv >= rule_iv)
except:
return False
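    # Illustrative only: the rule string parsed by catch_notify above is expected
    # to look like (hypothetical example)
    #     "operator:or cp:1500 iv:0.9 pokemon:Dragonite"
    # which notifies when the caught pokemon is a Dragonite with CP >= 1500 or
    # IV >= 0.9; with any other operator value both thresholds must be met.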
def handle_event(self, event, sender, level, formatted_msg, data):
if self.dbot is None:
try:
self.bot.logger.info("Discord bot not running, Starting..")
self.dbot = DiscordClass(self.bot, self.master, self.pokemons, self.config)
self.dbot.connect()
thread.start_new_thread(self.dbot.run)
except Exception as inst:
self.dbot = None
self.bot.logger.error("Unable to start Discord bot; master: {}, exception: {}".format(self.master, pprint.pformat(inst)))
return
# prepare message to send
msg = None
msg = self.chat_handler.get_event(event, formatted_msg, data)
if msg:
self.dbot.sendMessage(to=self.master, text=msg)
|
import random
a = int(input("Enter the starting range of the number : "))
b = int(input("Enter the ending range of the number : "))
x = random.randint(a, b)
print("")
t = 0
while t == 0:
    y = int(input("Guess the number : "))
    if x < y:
        print("")
        print("Oops! wrong number")
        print("Here's a hint (The number is less than the number you have guessed)")
    if x > y:
        print("")
        print("Oops! wrong number")
        print("Here's a hint (The number is greater than the number you have guessed)")
    if x == y:
        print("")
        print("Yay! You have won")
        print("You guessed the correct number")
        t = 1
|
#!/usr/bin/env python
"""
.. py:currentmodule:: FileFormat.test_ExportedSpectrum
.. moduleauthor:: Hendrix Demers <hendrix.demers@mail.mcgill.ca>
Tests for module `ExportedSpectrum`.
"""
# Script information for the file.
__author__ = "Hendrix Demers (hendrix.demers@mail.mcgill.ca)"
__version__ = ""
__date__ = ""
__copyright__ = "Copyright (c) 2012 Hendrix Demers"
__license__ = ""
# Subversion information for the file.
__svnRevision__ = "$Revision$"
__svnDate__ = "$Date$"
__svnId__ = "$Id$"
# Standard library modules.
import unittest
import logging
import os.path
import shutil
# Third party modules.
# Local modules.
# Project modules
import pymcxray.FileFormat.ExportedSpectrum as ExportedSpectrum
# Globals and constants variables.
class TestExportedSpectrum(unittest.TestCase):
"""
TestCase class for the module `moduleName`.
"""
def setUp(self):
"""
Setup method.
"""
unittest.TestCase.setUp(self)
self.testDataPath = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../test_data", "exportedFiles"))
self.tempDataPath = os.path.join(self.testDataPath, "tmp")
if not os.path.isdir(self.tempDataPath):
os.mkdir(self.tempDataPath)
def tearDown(self):
"""
Teardown method.
"""
unittest.TestCase.tearDown(self)
        if os.path.isdir(self.tempDataPath):
shutil.rmtree(self.tempDataPath)
def testSkeleton(self):
"""
First test to check if the testcase is working with the testing framework.
"""
#self.fail("Test if the testcase is working.")
self.assert_(True)
def test_read(self):
"""
Tests for method `read`.
"""
exportedSpectrum = ExportedSpectrum.ExportedSpectrum()
self.assertEquals(None, exportedSpectrum.getSpectrumType())
self.assertEquals(0, len(exportedSpectrum._energies_keV))
self.assertEquals(0, len(exportedSpectrum._intensities))
filepath = os.path.join(self.testDataPath, "bulkC_E20keV_w64BW.txt")
exportedSpectrum.read(filepath)
self.assertEquals("Specimen Spectra", exportedSpectrum.getSpectrumType())
self.assertEquals(1024, len(exportedSpectrum._energies_keV))
self.assertEquals(1024, len(exportedSpectrum._intensities))
#self.fail("Test if the testcase is working.")
if __name__ == '__main__': #pragma: no cover
logging.getLogger().setLevel(logging.DEBUG)
from pymcxray.Testings import runTestModuleWithCoverage
runTestModuleWithCoverage(__file__)
|
import inspect
import pandas as pd
def get_info(function):
arguments = inspect.getfullargspec(function)[0]
class_name, function_name = function.__qualname__.split(".")[-2:]
if "self" in arguments:
raise ValueError("all transformations should be static functions")
return class_name, function_name, arguments
def clean_df(*dfs):
if len(dfs) > 1:
return list(map(clean_df, dfs))
df = dfs[0]
if not isinstance(df, pd.DataFrame):
df = df.df()
if type(df.index) != pd.RangeIndex:
df = df.reset_index()
return df
def ignore_self(caller, function):
if "self" in inspect.signature(function).parameters:
return function
else:
return getattr(type(caller), function.__name__)
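
# Illustrative only (not part of the original module): a small, self-contained
# demonstration of get_info and clean_df on hand-made inputs.
def _example_usage():
    class AddOne:
        @staticmethod
        def add_one(df, column):
            return df

    class_name, function_name, arguments = get_info(AddOne.add_one)
    assert (class_name, function_name) == ("AddOne", "add_one")
    assert arguments == ["df", "column"]

    # clean_df resets a non-range index so downstream code sees plain columns
    df = pd.DataFrame({"a": [1, 2, 3]}).set_index("a")
    cleaned = clean_df(df)
    assert isinstance(cleaned.index, pd.RangeIndex)
    return cleaned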
|
##############################################################################
# Copyright (c) 2017, Rice University.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Rice University (RICE) nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# This software is provided by RICE and contributors "as is" and any
# express or implied warranties, including, but not limited to, the
# implied warranties of merchantability and fitness for a particular
# purpose are disclaimed. In no event shall RICE or contributors be
# liable for any direct, indirect, incidental, special, exemplary, or
# consequential damages (including, but not limited to, procurement of
# substitute goods or services; loss of use, data, or profits; or
# business interruption) however caused and on any theory of liability,
# whether in contract, strict liability, or tort (including negligence
# or otherwise) arising in any way out of the use of this software, even
# if advised of the possibility of such damage.
##############################################################################
from spack import *
import platform
class HpctoolkitPrereqs(Package):
"""Meta package to install prerequisites for Rice HPCToolkit,
the spack version of hpctoolkit externals."""
# The url and version are fake until spack implements class
# MetaPackage.
homepage = "http://hpctoolkit.org/"
url = "https://github.com/hpctoolkit/libmonitor"
version('master', branch = 'master',
git = 'https://github.com/hpctoolkit/libmonitor')
pkg_list = ['binutils', 'boost', 'dyninst', 'elfutils',
'intel-tbb', 'libdwarf', 'libiberty', 'libmonitor',
'libunwind', 'xerces-c']
# Fixme: machine() is the build type, what we really want is the
# host (target) type from the spec.
if platform.machine() == 'x86_64':
pkg_list.append('intel-xed')
for pkg in pkg_list:
depends_on(pkg, type='run')
# Write the list of prereq packages and their install prefixes to
# prefix.etc. Spack requires that we must install something.
def install(self, spec, prefix):
etc = join_path(prefix, 'etc')
mkdirp(etc)
        with open(join_path(etc, 'prereqs.txt'), 'w') as f:
            for pkg in self.pkg_list:
                f.write("%s: %s\n" % (pkg, spec[pkg].prefix))
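
# Illustrative only (not part of the package): with this file placed in a Spack
# repo, the prerequisites could be installed and the generated list inspected
# roughly like this (package name assumed to follow Spack's class-name convention):
#
#   spack install hpctoolkit-prereqs
#   cat $(spack location -i hpctoolkit-prereqs)/etc/prereqs.txt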
|
from src.sentiment import sentiment
"""neutral rew"""
text = """
average facilities need overhaul feel dated, service ok. reason stay good rate need central location
"""
def test_neutral():
    result = sentiment(text, '../src/algos')
    print("Test 3 - ", result)
    return result[0]
|
# coding: utf-8
"""
"""
import secrets
import pytest
from sampledb import logic
from sampledb.logic.authentication import _validate_password_hash
from sampledb.logic.component_authentication import add_own_token_authentication, add_token_authentication, \
get_own_authentication, remove_own_component_authentication_method, remove_component_authentication_method, \
login_via_component_token
from sampledb.logic.components import add_component
from sampledb.logic.errors import InvalidTokenError, NoAuthenticationMethodError, AuthenticationMethodDoesNotExistError
from sampledb.models import ComponentAuthentication, ComponentAuthenticationType, OwnComponentAuthentication
@pytest.fixture
def component():
component = add_component(address=None, uuid='28b8d3ca-fb5f-59d9-8090-bfdbd6d07a71', name='Example Component', description='')
return component
@pytest.fixture
def component2():
component = add_component(address=None, uuid='cf7118a7-6976-5b1a-9a39-7adc72f591a4', name='Example Component 2', description='')
return component
def test_add_token_authentication(component):
assert len(ComponentAuthentication.query.all()) == 0
token = secrets.token_hex(32)
description = 'Token description'
add_token_authentication(component.id, token, description)
assert len(ComponentAuthentication.query.all()) == 1
auth = ComponentAuthentication.query.first()
assert auth.component_id == component.id
assert auth.login.get('login') == token[:8]
assert _validate_password_hash(token[8:], auth.login.get('bcrypt_hash'))
assert auth.login.get('description') == description
assert auth.type == ComponentAuthenticationType.TOKEN
def test_add_token_authentication_invalid_token(component):
assert len(ComponentAuthentication.query.all()) == 0
token = secrets.token_hex(32)[1:]
description = 'Token description'
with pytest.raises(InvalidTokenError):
add_token_authentication(component.id, token, description)
assert len(ComponentAuthentication.query.all()) == 0
def test_add_own_token_authentication(component):
assert len(OwnComponentAuthentication.query.all()) == 0
token = secrets.token_hex(32)
description = 'Token description'
add_own_token_authentication(component.id, token, description)
assert len(OwnComponentAuthentication.query.all()) == 1
auth = OwnComponentAuthentication.query.first()
assert auth.component_id == component.id
assert auth.login.get('token') == token
assert auth.login.get('description') == description
assert auth.type == ComponentAuthenticationType.TOKEN
def test_add_own_token_authentication_invalid_token(component):
assert len(OwnComponentAuthentication.query.all()) == 0
token = secrets.token_hex(32)[1:]
description = 'Token description'
with pytest.raises(InvalidTokenError):
add_own_token_authentication(component.id, token, description)
assert len(OwnComponentAuthentication.query.all()) == 0
def test_get_own_authentication(component):
token = secrets.token_hex(32)
description = 'Token description'
assert len(OwnComponentAuthentication.query.all()) == 0
add_own_token_authentication(component.id, token, description)
assert len(OwnComponentAuthentication.query.all()) == 1
auth = get_own_authentication(component.id)
assert auth.component_id == component.id
assert auth.login.get('token') == token
assert auth.login.get('description') == description
assert auth.type == ComponentAuthenticationType.TOKEN
def test_get_own_authentication_no_authentication(component):
assert len(OwnComponentAuthentication.query.all()) == 0
with pytest.raises(NoAuthenticationMethodError):
get_own_authentication(component.id)
def test_remove_own_component_authentication_method(component):
assert len(OwnComponentAuthentication.query.all()) == 0
token = secrets.token_hex(32)
description = 'Token description'
add_own_token_authentication(component.id, token, description)
assert len(OwnComponentAuthentication.query.all()) == 1
auth_id = get_own_authentication(component.id).id
remove_own_component_authentication_method(auth_id)
assert len(OwnComponentAuthentication.query.all()) == 0
token = secrets.token_hex(32)
description = 'Token description 1'
add_own_token_authentication(component.id, token, description)
auth_id = get_own_authentication(component.id).id
token = secrets.token_hex(32)
description = 'Token description 2'
add_own_token_authentication(component.id, token, description)
assert len(OwnComponentAuthentication.query.all()) == 2
remove_own_component_authentication_method(auth_id)
assert len(OwnComponentAuthentication.query.all()) == 1
assert len(OwnComponentAuthentication.query.filter_by(id=auth_id).all()) == 0
def test_remove_own_component_authentication_method_not_existing(component):
assert len(OwnComponentAuthentication.query.all()) == 0
token = secrets.token_hex(32)
description = 'Token description'
add_own_token_authentication(component.id, token, description)
assert len(OwnComponentAuthentication.query.all()) == 1
auth_id = get_own_authentication(component.id).id
with pytest.raises(AuthenticationMethodDoesNotExistError):
remove_own_component_authentication_method(auth_id + 1)
assert len(OwnComponentAuthentication.query.all()) == 1
def test_remove_component_authentication_method(component):
assert len(ComponentAuthentication.query.all()) == 0
token = secrets.token_hex(32)
description = 'Token description'
add_token_authentication(component.id, token, description)
assert len(ComponentAuthentication.query.all()) == 1
auth_id = ComponentAuthentication.query.filter_by(component_id=component.id).first().id
remove_component_authentication_method(auth_id)
assert len(ComponentAuthentication.query.all()) == 0
token = secrets.token_hex(32)
description = 'Token description 1'
add_token_authentication(component.id, token, description)
auth_id = ComponentAuthentication.query.filter_by(component_id=component.id).first().id
token = secrets.token_hex(32)
description = 'Token description 2'
add_token_authentication(component.id, token, description)
assert len(ComponentAuthentication.query.all()) == 2
remove_component_authentication_method(auth_id)
assert len(ComponentAuthentication.query.all()) == 1
assert len(ComponentAuthentication.query.filter_by(id=auth_id).all()) == 0
def test_remove_component_authentication_method_not_existing(component):
assert len(ComponentAuthentication.query.all()) == 0
token = secrets.token_hex(32)
description = 'Token description'
add_token_authentication(component.id, token, description)
assert len(ComponentAuthentication.query.all()) == 1
auth_id = ComponentAuthentication.query.filter_by(component_id=component.id).first().id
with pytest.raises(AuthenticationMethodDoesNotExistError):
remove_component_authentication_method(auth_id + 1)
assert len(ComponentAuthentication.query.all()) == 1
def test_login_via_component_token(component, component2):
assert len(ComponentAuthentication.query.all()) == 0
token1 = secrets.token_hex(32)
description1 = 'Token description 1'
add_token_authentication(component.id, token1, description1)
token2 = secrets.token_hex(32)
description2 = 'Token description 2'
add_token_authentication(component2.id, token2, description2)
token3 = secrets.token_hex(32)
description3 = 'Token description 3'
add_token_authentication(component.id, token3, description3)
assert len(ComponentAuthentication.query.all()) == 3
assert logic.components.Component.from_database(login_via_component_token(token3)) == component
def test_login_via_component_token_invalid_token(component, component2):
assert len(ComponentAuthentication.query.all()) == 0
token1 = secrets.token_hex(32)
description1 = 'Token description 1'
add_token_authentication(component.id, token1, description1)
token2 = secrets.token_hex(32)
description2 = 'Token description 2'
add_token_authentication(component2.id, token2, description2)
token3 = secrets.token_hex(32)
description3 = 'Token description 3'
add_token_authentication(component.id, token3, description3)
assert len(ComponentAuthentication.query.all()) == 3
token4 = secrets.token_hex(32)
assert login_via_component_token(token4) is None
|
#!/usr/bin/env python
# -*- coding: cp1252 -*-
################################################################################
# #
# Copyright 1997 - 2019 by IXIA Keysight #
# All Rights Reserved. #
# #
################################################################################
################################################################################
# #
# LEGAL NOTICE: #
# ============== #
# The following code and documentation (hereinafter "the script") is an #
# example script for demonstration purposes only. #
# The script is not a standard commercial product offered by Ixia and have #
# been developed and is being provided for use only as indicated herein. The #
# script [and all modifications enhancements and updates thereto (whether #
# made by Ixia and/or by the user and/or by a third party)] shall at all times #
# remain the property of Ixia. #
# #
# Ixia does not warrant (i) that the functions contained in the script will #
# meet the users requirements or (ii) that the script will be without #
# omissions or error-free. #
# THE SCRIPT IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND AND IXIA #
# DISCLAIMS ALL WARRANTIES EXPRESS IMPLIED STATUTORY OR OTHERWISE #
# INCLUDING BUT NOT LIMITED TO ANY WARRANTY OF MERCHANTABILITY AND FITNESS FOR #
# A PARTICULAR PURPOSE OR OF NON-INFRINGEMENT. #
# THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE SCRIPT IS WITH THE #
# USER. #
# IN NO EVENT SHALL IXIA BE LIABLE FOR ANY DAMAGES RESULTING FROM OR ARISING #
# OUT OF THE USE OF OR THE INABILITY TO USE THE SCRIPT OR ANY PART THEREOF #
# INCLUDING BUT NOT LIMITED TO ANY LOST PROFITS LOST BUSINESS LOST OR #
# DAMAGED DATA OR SOFTWARE OR ANY INDIRECT INCIDENTAL PUNITIVE OR #
# CONSEQUENTIAL DAMAGES EVEN IF IXIA HAS BEEN ADVISED OF THE POSSIBILITY OF #
# SUCH DAMAGES IN ADVANCE. #
# Ixia will not be required to provide any software maintenance or support #
# services of any kind (e.g. any error corrections) in connection with the #
# script or any part thereof. The user acknowledges that although Ixia may #
# from time to time and in its sole discretion provide maintenance or support #
# services for the script any such services are subject to the warranty and #
# damages limitations set forth herein and will not obligate Ixia to provide #
# any additional maintenance or support services. #
# #
################################################################################
################################################################################
#                                                                              #
# Description:                                                                 #
#    This script demonstrates how to use the NGPF OpenFlow Switch API.         #
#    It creates one topology of OpenFlow Switch, starts the emulation and      #
#    then retrieves and displays a few statistics.                             #
#    It also checks detailed learned info and learned info after sending an    #
#    on-demand message.                                                        #
################################################################################
import sys
import time
################################################################################
# Either feed the ixNetwork library path in the sys.path as below, or put the #
# IxNetwork.py file somewhere else where we python can autoload it #
# "IxNetwork.py" is available in <IxNetwork_installer_path>\API\Python #
################################################################################
sys.path.append(r'C:\Program Files (x86)\Ixia\IxNetwork\8.10-EA\API\Python')
import IxNetwork
print("loaded successfully")
#from lib import IxNetwork
#import time
class NgpfOpenFlowSwitch(object):
################################################################################
    # Connecting to IxTcl server and creating a new config                        #
################################################################################
def __init__(self, ix_tcl_server, ix_tcl_port, ix_version="8.10"):
ixNet = IxNetwork.IxNet()
print("connecting to IxNetwork client")
ixNet.connect(ix_tcl_server, '-port', ix_tcl_port, '-version', ix_version,
'-setAttribute', 'strict')
# cleaning up the old configfile, and creating an empty config
print("cleaning up the old configfile, and creating an empty config")
ixNet.execute('newConfig')
self.ixNet = ixNet
self.root = ixNet.getRoot()
def assignPorts(self, realPort1):
chassis1 = realPort1[0]
card1 = realPort1[1]
port1 = realPort1[2]
root = self.ixNet.getRoot()
vport1 = self.ixNet.add(root, 'vport')
self.ixNet.commit()
vport1 = self.ixNet.remapIds(vport1)[0]
chassisObj1 = self.ixNet.add(root + '/availableHardware', 'chassis')
self.ixNet.setAttribute(chassisObj1, '-hostname', chassis1)
self.ixNet.commit()
chassisObj1 = self.ixNet.remapIds(chassisObj1)[0]
cardPortRef1 = chassisObj1 + '/card:%s/port:%s' % (card1, port1)
self.ixNet.setMultiAttribute(vport1, '-connectedTo', cardPortRef1,
'-rxMode', 'captureAndMeasure', '-name', 'Ethernet - 001')
self.ixNet.commit()
################################################################################
# Start protocol and check statistics #
################################################################################
def start_protocol_check_stats(self):
print("Starting protocols and waiting for 45 seconds for protocols to come up")
self.ixNet.execute('startAllProtocols')
time.sleep(45)
print ("Fetching all Protocol Summary Stats\n")
viewPage = '::ixNet::OBJ-/statistics/view:"Protocols Summary"/page'
statcap = self.ixNet.getAttribute(viewPage, '-columnCaptions')
for statValList in self.ixNet.getAttribute(viewPage, '-rowValues'):
for statVal in statValList:
print("***************************************************")
index = 0
for satIndv in statVal:
print("%-30s:%s" % (statcap[index], satIndv))
index = index + 1
print("***************************************************")
print ("Verifying OpenFlow Switch Per Port stats\n")
viewPage = '::ixNet::OBJ-/statistics/view:"OF Switch Per Port"/page'
statcap = self.ixNet.getAttribute(viewPage, '-columnCaptions')
for statValList in self.ixNet.getAttribute(viewPage, '-rowValues'):
for statVal in statValList:
print("***************************************************")
index = 0
for satIndv in statVal:
print("%-30s:%s" % (statcap[index], satIndv))
index = index + 1
print("***************************************************")
    def on_the_fly(self, switch_disable_enable):
        # disable the OpenFlow switch on the fly
        ofSwitchActive = self.ixNet.getAttribute(switch_disable_enable, '-active')
        swActive = self.ixNet.add(ofSwitchActive, 'overlay')
        self.ixNet.setMultiAttribute(swActive, '-value', 'false')
        self.ixNet.commit()
globalObj = self.ixNet.getRoot() + '/globals'
topology = globalObj + '/topology'
print ("Applying changes on the fly")
try:
self.ixNet.execute('applyOnTheFly', topology)
except:
print("error in applying on the fly change")
time.sleep(10)
        # re-enable the OpenFlow switch on the fly
        ofSwitchActive = self.ixNet.getAttribute(switch_disable_enable, '-active')
        swActive = self.ixNet.add(ofSwitchActive, 'overlay')
        self.ixNet.setMultiAttribute(swActive, '-value', 'true')
        self.ixNet.commit()
globalObj = self.ixNet.getRoot() + '/globals'
topology = globalObj + '/topology'
print ("Applying changes on the fly")
try:
self.ixNet.execute('applyOnTheFly', topology)
except:
print("error in applying on the fly change")
time.sleep(10)
def on_the_fly_port_number_ethernetaddress(self, sw_port):
EthernetDestVal = self.ixNet.getAttribute(sw_port, '-etherAddr')
print (EthernetDestVal)
val = self.ixNet.getAttribute(EthernetDestVal, '-values')[0]
print (val)
self.ixNet.setMultiAttribute(EthernetDestVal, '-clearOverlays', 'false')
self.ixNet.commit()
EthernetDestValues = self.ixNet.add(EthernetDestVal, 'singleValue')
self.ixNet.setMultiAttribute(EthernetDestValues, '-value', '56:00:00:00:00:1')
self.ixNet.commit()
time.sleep(20)
PortVal = self.ixNet.getAttribute(sw_port, '-portNumber')
self.ixNet.setMultiAttribute(PortVal, '-clearOverlays', 'false')
self.ixNet.commit()
PortSetValues = self.ixNet.add(PortVal, 'singleValue')
self.ixNet.setMultiAttribute(PortSetValues, '-value', '5677888')
self.ixNet.commit()
globalObj = self.ixNet.getRoot() + '/globals'
topology = globalObj + '/topology'
print ("Applying changes on the fly")
try:
self.ixNet.execute('applyOnTheFly', topology)
except:
print("error in applying on the fly change")
time.sleep(10)
###############################################################################
# print learned info #
###############################################################################
def print_learned_info(self, openFlowSwitch):
self.ixNet.execute('getOFChannelLearnedInfo', openFlowSwitch, '1')
time.sleep(5)
print("Print OFSwitch Learned Info")
linfo = self.ixNet.getList(openFlowSwitch, 'learnedInfo')[0]
linfoList = self.ixNet.getList(linfo, 'table')
print("***************************************************")
for table in linfoList:
tableType = self.ixNet.getAttribute(table, '-type')
print(tableType)
print("=================================================")
columns = self.ixNet.getAttribute(table, '-columns')
print(columns)
values = self.ixNet.getAttribute(table, '-values')
            for value in values:
                for word in value:
                    print(word)
time.sleep(15)
self.ixNet.execute('getOFSwitchFlowStatLearnedInfo', openFlowSwitch, '1')
time.sleep(5)
print ("Print OFswitch Flow Learned info")
linfo = self.ixNet.getList(openFlowSwitch, 'learnedInfo')[0]
linfoList = self.ixNet.getList(linfo, 'table')
print("***************************************************")
for table in linfoList:
tableType = self.ixNet.getAttribute(table, '-type')
print(tableType)
print("=================================================")
columns = self.ixNet.getAttribute(table, '-columns')
print(columns)
values = self.ixNet.getAttribute(table, '-values')
            for value in values:
                for word in value:
                    print(word)
time.sleep(15)
print ('Stopping protocols')
self.ixNet.execute('stopAllProtocols')
################################################################################
# protocol configuration section #
################################################################################
def main(self):
self.assignPorts(ports[0])
root = self.ixNet.getRoot()
vportTx = self.ixNet.getList(root, 'vport')[0]
print("adding topologies")
self.ixNet.add(root, 'topology', '-vports', vportTx)
self.ixNet.commit()
topologies = self.ixNet.getList(self.ixNet.getRoot(), 'topology')
topo1 = topologies[0]
print ("Adding 2 device groups")
deviceGroup1 = self.ixNet.add(topo1, 'deviceGroup')
self.ixNet.commit()
t1devices = self.ixNet.getList(topo1, 'deviceGroup')
t1dev1 = t1devices[0]
print("Configuring the multipliers (number of sessions)")
self.ixNet.setAttribute(t1dev1, '-multiplier', '1')
self.ixNet.commit()
print("Adding ethernet/mac endpoints")
self.ixNet.add(t1dev1, 'ethernet')
self.ixNet.commit()
mac1 = self.ixNet.getList(t1dev1, 'ethernet')[0]
print ('ixNet.help(\'::ixNet::OBJ-/topology/deviceGroup/ethernet\')')
print("Add ipv4")
self.ixNet.add(mac1, 'ipv4')
self.ixNet.commit()
ip1 = self.ixNet.getList(mac1, 'ipv4')[0]
mvAdd1 = self.ixNet.getAttribute(ip1, '-address')
mvGw1 = self.ixNet.getAttribute(ip1, '-gatewayIp')
print("configuring ipv4 addresses")
self.ixNet.setAttribute(mvAdd1 + '/singleValue', '-value', '1.1.1.2')
self.ixNet.commit()
self.ixNet.setAttribute(mvGw1 + '/singleValue', '-value', '1.1.1.1')
self.ixNet.commit()
self.ixNet.setAttribute(self.ixNet.getAttribute(ip1, '-prefix') + '/singleValue', '-value', '24')
self.ixNet.commit()
self.ixNet.setMultiAttribute(self.ixNet.getAttribute(ip1, '-resolveGateway') + '/singleValue', '-value', 'true')
self.ixNet.commit()
time.sleep(5)
print (self.ixNet.help('::ixNet::OBJ-/topology/deviceGroup/ethernet/ipv4'))
print("Adding Openflow Switch over IP4 stacks")
self.ixNet.add(ip1, 'openFlowSwitch')
self.ixNet.commit()
openFlowSwitch1 = self.ixNet.getList(ip1, 'openFlowSwitch')[0]
print (openFlowSwitch1)
time.sleep(5)
openflowSwitchchannels = self.ixNet.add(openFlowSwitch1, 'OFSwitchChannel')
self.ixNet.commit()
time.sleep(5)
openflowchannellist = self.ixNet.getList(openFlowSwitch1, 'OFSwitchChannel')[0]
self.ixNet.setMultiAttribute(openflowSwitchchannels, '-auxConnectionsPerChannel', '1')
self.ixNet.commit()
time.sleep(5)
#openflowTablelist = self.ixNet.getList(ip1, 'switchTablesList')[0]
self.ixNet.setMultiAttribute(openFlowSwitch1, '-numberOfTableRanges', '3')
self.ixNet.commit()
time.sleep(5)
switchTableList = self.ixNet.getList(openFlowSwitch1, 'switchTablesList')[0]
print (switchTableList)
networkTopologyObj = self.ixNet.add(deviceGroup1, 'networkTopology')
self.ixNet.commit()
networkTopologyObjRing = self.ixNet.add(networkTopologyObj, 'netTopologyRing')
self.ixNet.commit()
self.start_protocol_check_stats()
swtopology = self.ixNet.getList(self.ixNet.getRoot(), 'topology')[0]
print (swtopology)
deviceGroupSW = self.ixNet.getList(swtopology, 'deviceGroup')[0]
ethernetSw = self.ixNet.getList(deviceGroupSW, 'ethernet')[0]
ipv4Sw = self.ixNet.getList(ethernetSw, 'ipv4')[0]
ofSw = self.ixNet.getList(ipv4Sw, 'openFlowSwitch')[0]
print ("Now disable/Enable of switch on the fly")
self.on_the_fly(ofSw)
print ("Changing Ethernet Address, Port number on the fly!!!!!")
swPortActive = self.ixNet.getList(ofSw, 'ofSwitchPorts')[0]
print (swPortActive)
self.on_the_fly_port_number_ethernetaddress(swPortActive)
print ("Fetching Switch Learned info !!!!!")
self.print_learned_info(ofSw)
print ('!!! Test Script Ends !!!')
#################################################################################
# Give chassis/client/ixNetwork server port/ chassis port HW port information #
# below #
#################################################################################
if __name__ == "__main__":
ixTclServer = '10.214.101.141'
ixTclPort = '8558'
ports = [('12.0.1.253', '5', '10',)]
version = '8.10'
switch = NgpfOpenFlowSwitch(ixTclServer, ixTclPort, version)
switch.main()
|
from flask import Flask, request, abort
from state_manager import StateManager
app = Flask(__name__)
@app.route('/calculate', methods=['POST'])
def calc():
try:
print('Server received ' + str(request.json))
resp = StateManager.process_request(request.json)
print('Server returned ' + str(resp))
return resp
except Exception as e:
print(e)
abort(500)
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5000)
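
# Illustrative only: a hypothetical client call against this endpoint. The JSON
# payload shape is whatever StateManager.process_request expects; the keys below
# are made up.
#
#   import requests
#   resp = requests.post('http://localhost:5000/calculate',
#                        json={'operation': 'add', 'operands': [1, 2]})
#   print(resp.json())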
|
from __future__ import division, absolute_import, print_function
"""
openmoltools wrapper for packmol ? https://github.com/choderalab/openmoltools
"""
def oesolvate(solute, density=1.0, padding_distance=10.0,
distance_between_atoms=2.5,
solvents='[H]O[H]', molar_fractions='1.0',
geometry='box', close_solvent=True,
salt='[Na+], [Cl-]', salt_concentration=0.0,
neutralize_solute=True, verbose=False, **kargs):
"""
This function solvates the passed solute in a cubic box or a sphere by using Packmol. Packmol
creates an initial point for molecular dynamics simulations by packing molecule in defined regions
of space. For additional info:
http://www.ime.unicamp.br/~martinez/packmol/home.shtml
The geometry volume is estimated by the using the padding parameter and the solute size.
The number of solvent molecules is calculated by using the specified density and volume.
Solvent molecules are specified as comma separated smiles strings. The molar fractions
of each solvent molecule are specified in a similar fashion. By default if the solute is
charged counter ions are added to neutralize it
Parameters:
-----------
solute: OEMol molecule
The solute to solvate
density: float
The solution density in g/ml
padding_distance: float
The largest dimension of the solute (along the x, y, or z axis) is determined (in A),
and a cubic box of size (largest dimension)+2*padding is used
distance_between_atoms: float
The minimum distance between atoms in A
solvents: python string
A comma separated smiles string of the solvent molecules
molar_fractions: python string
A comma separated molar fraction string of the solvent molecules
close_solvent: boolean
If True solvent molecules will be placed very close to the solute
salt: python string
A comma separated string of the dissociated salt in solution
salt_concentration: float
Salt concentration in millimolar
neutralize_solute: boolean
If True counter-ions will be added to the solution to neutralize the solute
Return:
-------
oe_mol: OEMol
The solvated system. If the selected geometry is a box a SD tag with
name 'box_vector' is attached the output molecule containing
the system box vectors
"""
|
saisie_chiffre = input("Enter number 1: ")
chiffre1 = int(saisie_chiffre)
saisie_chiffre = input("Enter number 2: ")
chiffre2 = int(saisie_chiffre)
def factoriel(chiffre):
resultat = 1
liste = range(chiffre,0,-1)
liste = list(liste)
#print(liste)
for elem in liste:
resultat = resultat * elem
    print("factorial of", chiffre, "=", resultat)
factoriel(chiffre1)
factoriel(chiffre2)
factoriel(20)
factoriel(58)
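
# Illustrative cross-check (not part of the original exercise): the standard
# library computes the same values, which can be used to verify the loop above.
import math
print("math.factorial(20) =", math.factorial(20))
print("math.factorial(58) =", math.factorial(58))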
|
# -*- coding: utf-8 -*-
import sys
sys.path.insert(0, '../preprocessing')
import csv
import json
import os
from tweet import sanitize
with open('sanders/corpus.csv', 'r') as corpus:
reader = csv.reader(corpus, delimiter=',', quotechar='"')
with open('sanders/tweets.txt', 'w') as tweets:
with open('sanders/classification.txt', 'w') as classification:
for row in reader:
if row[1] == 'irrelevant':
continue
filename = row[-1]
if os.path.isfile('sanders/rawdata/{0}.json'.format(filename)):
with open('sanders/rawdata/{0}.json'.format(filename), 'r') as tweet_file:
tweet = json.load(tweet_file)
print(tweet['text'].replace('\n', ' ').replace('\r', ''), file=tweets)
print(row[1], file=classification)
tweets = []
with open('sanders/tweets.txt', 'r') as file_tweets:
for line in file_tweets:
tweets.append(sanitize(line))
with open('sanders/tweets.txt', 'w') as file_tweets:
for i, tweet in enumerate(tweets):
file_tweets.write(tweet + "\n")
|
from numbers import Real
class ProbDict(dict):
"""A dictionary serving as a probability measure."""
def __init__(self, items = None):
"""Create a dictionary from iters.
If items can't be fed to a dictionary, it will be interpreted as a
collection of keys, and each value will default to value 1/n. Otherwise,
the values are normalized to sum to one. Raises ValueError if
some values are not numbers or are negative.
Arguments:
- `items`: argument with which to make dictionary
"""
if items is None: return dict.__init__(self)
try:
# can fail if items is not iterable or not full of size 2 items:
dict.__init__(self, items)
        except (TypeError, ValueError):
try:
# Let's assume items is a finite iterable full of keys
vals = [1/len(items)] * len(items)
except TypeError:
# Apparently items has no length -- let's take it as the only
# key and put all the probability on it.
                dict.__init__(self, {items: 1})
else:
# if items has a length, it can be iterated through with zip
dict.__init__(self, zip(items, vals))
else:
# we've successfully made dic from key, value pairs in items, now let's
# normalize the dictionary, and check the values
for v in self.values():
if not isinstance(v, Real):
raise TypeError("Values must be nonnegative real numbers so I " +
"can properly normalize them. " + str(v) + " is not.")
elif v < 0:
raise ValueError("Values must be nonnegative, unlike " + str(v))
tot = sum(self.values())
            for k, v in self.items():
                # bypass the overridden __setitem__, which would re-normalize on every assignment
                dict.__setitem__(self, k, v / tot)
def __setitem__(self, key, value):
# Overridden to make sure dict is normalized.
if not isinstance(value, Real) or value < 0 or value > 1:
            raise ValueError("Value must be a number between 0 and 1, unlike " +
                             str(value))
try:
r = (self[key] - value)/(1 - self[key])
# r is the fraction of the remaining probability mass that
# we're going to give up (take).
for k in filter(key.__ne__, self):
dict.__setitem__(self, k, self[k] * (1 + r))
value = value if len(self) != 1 else 1
if value:
dict.__setitem__(self, key, value)
else:
# This is the purging stage!
dict.__delitem__(self, key)
except ZeroDivisionError:
# self[key] = 1, so key has all the probability mass. We'll leave it
# as is, since there's no sensible way of reducing it.
pass
def __delitem__(self, key):
# Deleting frees up probability mass!
self[key] = 0
# Note that __setitem__ handles the deletion for us.
def __missing__(self, key):
# Accessing an inexistent key gives 0 rather than error, but
# does not create key, val pair (unlike defaultdict)
return 0
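
# Illustrative only (not part of the original module): a quick demonstration of the
# normalization behaviour described in the docstrings above.
def _example_prob_dict():
    die = ProbDict([1, 2, 3, 4])            # bare keys -> uniform probability 1/4 each
    assert abs(die[1] - 0.25) < 1e-9
    die[1] = 0.4                            # the remaining mass is rescaled so the total stays 1
    assert abs(die[2] - 0.2) < 1e-9
    assert die[5] == 0                      # missing keys read as probability 0
    weights = ProbDict({'a': 2, 'b': 6})    # explicit values are normalized to sum to 1
    assert abs(weights['b'] - 0.75) < 1e-9
    return die, weights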
|
from FreeTAKServer.model.ExCheck.Checklists.checklistTask import checklistTask
class checklistTasks:
def __init__(self):
self.checklistTask = []
self.__count = 0
def setchecklistTask(self, checklistTaskobj):
if isinstance(checklistTaskobj, checklistTask):
self.checklistTask.append(checklistTaskobj)
else:
raise TypeError('unsupported type')
def getchecklistTask(self):
obj = self.checklistTask[self.__count]
self.__count += 1
return obj
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-04-18 18:10
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('team', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='team',
name='paint_points',
field=models.DecimalField(decimal_places=1, max_digits=5, null=True),
),
]
|
import requests, re
from bs4 import BeautifulSoup
from Model import Project, UsersProject
from tool import log as _l
log = _l("PARSER", "parser.log")
def write_db(f):
def wrapper(*args, **kwargs):
result = f(*args, **kwargs)
mutation = UsersProject.custom_insert(result)
return mutation
return wrapper
@write_db
def get_projects():
url = "https://kwork.ru/projects"
headers = {'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-GB; rv:1.9.0.11) \
Gecko/2009060215 Firefox/3.0.11 (.NET CLR 3.5.30729)'}
projects = []
response = requests.get(url, headers=headers)
log.info( "Status code: %d. Length: %d " % ( response.status_code, len( response.text ) ) )
html = response.text
soup = BeautifulSoup( html, "html.parser" )
items = soup.select("div[class*=js-card]")
for item in items[:]:
title = item.select_one("div[class*=header-title]")
title = title.text if title else "Error title"
price = item.select_one("div.wants-card__right")
price = re.findall( r"\d{3}|\d{1,2}\s\d{3}", str(price) )
price = " - ".join(price)
description = item.select_one("div.breakwords.hidden")
description = description.text.replace("Скрыть","").strip() if description else "Description error"
if description == "Description error":
description = item.select_one("div.breakwords.first-letter ~ div")
description = description.text if description else "Description error2"
# import pdb;pdb.set_trace()
proposal_count = item.find(lambda tag:tag.name == "span" and "Предложений:" in tag.text)
proposal_count = re.findall(r"\d+", proposal_count.text)[0] if proposal_count else "Prop error"
author = item.select_one("a.v-align-t")
author = author.text if author else "Author error"
link = item.select_one("div.wants-card__header-title a")
link = link['href'] if link else "Link error"
timer = item.find( lambda tag:tag.name == "span" and "Осталось" in tag.text)
timer = timer.text if timer else "timer error"
params = (title, description, author, proposal_count,
price, timer, link)
project = Project( *params )
projects.append( project )
return projects
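# Note: because get_projects() is wrapped by @write_db, calling it stores the
# parsed projects via UsersProject.custom_insert() and returns that mutation,
# not the raw list of Project objects built inside the function.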
def main():
res = get_projects()
# import pdb;pdb.set_trace()
if __name__ == '__main__':
main()
|
import keras
import logging
import numpy as np
import tensorflow as tf
from collections import OrderedDict, deque
from keras.layers import Input, Dense, Lambda, Dropout, Dot, Permute, Reshape, Embedding, Concatenate, Multiply
from keras.models import Model, load_model
import os
import sys
sys.path.append('../utils/')
sys.path.append('../metrics/')
from file_operation import write_result_to_trec_format, load_pickle, retain_file
from evaluations import evaluate_trec
from model import BasicModel, NBatchLogger
from nprf_knrm_config import NPRFKNRMConfig
from nprf_knrm_pair_generator import NPRFKNRMPairGenerator
from relevance_info import Relevance
from result import Result
from rank_losses import rank_hinge_loss
class NPRFKNRM(BasicModel):
def __init__(self, config):
super(NPRFKNRM, self).__init__(config)
self.initializer_gate = keras.initializers.RandomUniform(minval=-0.01, maxval=0.01, seed=118)
def build(self):
# qd_input = Input((self.config.kernel_size,), name="qd_input")
dd_input = Input((self.config.nb_supervised_doc, self.config.kernel_size), name='dd_input')
# z = Dense(self.config.hidden_size, activation='tanh', name="qd_hidden")(qd_input)
# qd_out = Dense(self.config.out_size, name="qd_out")(z)
z = Dense(self.config.hidden_size, activation='tanh', name="dd_hidden")(dd_input)
dd_init_out = Dense(self.config.out_size, name='dd_init_out')(z)
dd_gate = Input((self.config.nb_supervised_doc, 1), name='baseline_doc_score')
dd_w = Dense(1, kernel_initializer=self.initializer_gate, use_bias=False, name='dd_gate')(dd_gate)
# dd_w = Lambda(lambda x: softmax(x, axis=1), output_shape=(self.config.nb_supervised_doc,), name='dd_softmax')(dd_w)
dd_w = Reshape((self.config.nb_supervised_doc,))(dd_w)
dd_init_out = Reshape((self.config.nb_supervised_doc,))(dd_init_out)
if self.config.method in [1, 3]: # no doc gating, with dense layer
z = dd_init_out
elif self.config.method == 2:
logging.info("Apply doc gating")
z = Multiply(name='dd_out')([dd_init_out, dd_w])
else:
raise ValueError("Method not initialized, please check config file")
if self.config.method in [1, 2]:
logging.info("Dense layer on top")
z = Dense(self.config.merge_hidden, activation='tanh', name='merge_hidden')(z)
out = Dense(self.config.merge_out, name='score')(z)
else:
logging.info("Apply doc gating, No dense layer on top, sum up scores")
out = Dot(axes=[1, 1], name='score')([z, dd_w])
model = Model(inputs=[dd_input, dd_gate], outputs=[out])
print(model.summary())
return model
def train_wrapper(self, fold, output_file,):
pair_generator = NPRFKNRMPairGenerator(**self.config.generator_params)
model = self.build()
# adagrad
model.compile(optimizer=self.config.optimizer, loss=rank_hinge_loss)
eval_met = self.train(model, pair_generator, fold, output_file, use_nprf=True)
return eval_met
def eval_by_qid_list_helper(self, qid_list, pair_generator):
relevance_dict = load_pickle(self.config.relevance_dict_path)
qid_list = sorted(qid_list)
qualified_qid_list = []
res_dict = OrderedDict()
for qid in qid_list:
relevance = relevance_dict.get(qid)
supervised_docid_list = relevance.get_supervised_docid_list()
if len(supervised_docid_list) < self.config.nb_supervised_doc:
        # cannot construct d2d features, so there is no need to update them
score_list = relevance.get_supervised_score_list()
res = Result(qid, supervised_docid_list, score_list, self.config.runid)
res_dict.update({qid: res})
logging.warn("query {0} not to be rerank".format(qid))
else:
qualified_qid_list.append(qid)
# generate re rank features
dd_d, score_gate, len_indicator = \
pair_generator.generate_list_batch(qualified_qid_list, self.config.rerank_topk)
return [dd_d, score_gate], len_indicator, res_dict, qualified_qid_list
def eval_by_qid_list(self, X, len_indicator, res_dict, qualified_qid_list, model,
relevance_dict, rerank_topk, nb_supervised_doc, doc_topk_term, qrels_file,
docnolist_file, runid, output_file, ):
# qd_d, dd_d, score_gate = X
# dd_q, dd_d = list(map(lambda x: x[:, :nb_supervised_doc, : doc_topk_term, :], [dd_q, dd_d]))
topk_score_all = model.predict_on_batch(X)
topk_score_all = topk_score_all.flatten()
for i, qid in enumerate(qualified_qid_list):
relevance = relevance_dict.get(qid)
supervised_docid_list = relevance.get_supervised_docid_list()
topk_score = topk_score_all[sum(len_indicator[:i]): sum(len_indicator[:i]) + len_indicator[i]]
if len(supervised_docid_list) <= rerank_topk:
score_list = topk_score
else:
behind_score = np.min(topk_score) - 0.001 - np.sort(np.random.random((len(supervised_docid_list) - rerank_topk,)))
score_list = np.concatenate((topk_score, behind_score))
res = Result(qid, supervised_docid_list, score_list, runid)
res.update_ranking()
res_dict.update({qid: res})
# print "generate score {0}".format(time.time()-t)
write_result_to_trec_format(res_dict, output_file, docnolist_file)
met = evaluate_trec(qrels_file, output_file)
return met
if __name__ == '__main__':
conf = NPRFKNRMConfig()
ddmknrm = NPRFKNRM(conf)
# ddm.build()
# ddm.build2()
argv = sys.argv
phase = argv[1]
if phase == '--fold':
fold = int(argv[2])
temp = argv[3]
else:
fold = 1
temp = 'temp'
ddmknrm.train_wrapper(fold, temp)
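  # Example invocation (illustrative; the script filename and output path are assumptions):
  #   python nprf_knrm.py --fold 1 /path/to/output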
|
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '02&secf=h$l-1guh_vo*wf2uj2uy8)#tm+v9y^w!gz%jd13fs-'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'shop.apps.ShopConfig',
'cart.apps.CartConfig',
'orders.apps.OrdersConfig',
'payment.apps.PaymentConfig',
'coupons.apps.CouponsConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'myshop.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'cart.context_processors.cart'
],
},
},
]
WSGI_APPLICATION = 'myshop.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en'
LANGUAGES = (
('en','English'),
('es','Spanish'),
)
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
CART_SESSION_ID = 'cart'
REDIS_HOST = 'localhost'
REDIS_PORT = 6379
REDIS_DB = 1
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR,'static/')
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR,'media/')
EMAIL_HOST = 'smtp.gmail.com'
# EMAIL_HOST_USER = os.environ.get('EMAIL_USER')
EMAIL_HOST_USER = "mahmudul15-5809@diu.edu.bd"
# EMAIL_HOST_PASSWORD = os.environ.get('EMAIL_PASS')
EMAIL_HOST_PASSWORD = "152-15-5809"
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# Braintree Settings
BRAINTREE_MERCHANT_ID = '8sv9k5gmhs3d75t5' #Merchant ID
BRAINTREE_PUBLIC_KEY = 'xs35v7hx4c6wgfjw' #Public Key
BRAINTREE_PRIVATE_KEY = '5eb47bb7489bcde474403a83075252a7'#Private key
from braintree import Configuration,Environment
Configuration.configure(
Environment.Sandbox,
BRAINTREE_MERCHANT_ID,
BRAINTREE_PUBLIC_KEY,
BRAINTREE_PRIVATE_KEY
)
|
from django.urls import path
from .views import create_post, view_post, edit_post, delete_post
app_name = 'post'
urlpatterns = [
path('create', create_post, name='create'),
path('<uuid:pk>', view_post, name='view_post'),
path('<uuid:pk>/edit', edit_post, name='edit_post'),
path('<uuid:pk>/delete', delete_post, name='delete_post')
]
|
from typing import List, Set, Tuple
import libcst as cst
from libcst import MetadataWrapper
from libcst.metadata import CodePosition, CodeRange, PositionProvider
def is_whitespace_node(node: cst.CSTNode) -> bool:
return isinstance(
node, (cst.BaseParenthesizableWhitespace, cst.EmptyLine, cst.TrailingWhitespace, cst.MaybeSentinel, cst.Newline)
)
def code_position_leq(a: CodePosition, b: CodePosition) -> bool:
"""
Are is the position `a` before (less or equal) `b`.
"""
if a.line < b.line:
return True
elif a.line == b.line and a.column <= b.column:
return True
return False
def code_position_within_range(pos: CodePosition, target_range: CodeRange) -> bool:
"""Is `pos` within `target_range`?"""
return code_position_leq(target_range.start, pos) and code_position_leq(pos, target_range.end)
def code_ranges_overlap(a: CodeRange, b: CodeRange) -> bool:
"""Does a overlap with b? (symmetric)"""
return (
code_position_within_range(a.start, b)
or code_position_within_range(a.end, b)
or code_position_within_range(b.start, a)
)
def subsumes_code_range(smaller: CodeRange, bigger: CodeRange) -> bool:
"""
Is the `smaller` node_range entirely within the `bigger` node_range?
"""
return code_position_within_range(smaller.start, bigger) and code_position_within_range(smaller.end, bigger)
def relative_range(base: CodeRange, target: CodeRange) -> CodeRange:
"""Return the range, relative to the base range."""
assert subsumes_code_range(target, base), "Target range not inside base range."
relative_start_line_no = target.start.line - base.start.line + 1
if relative_start_line_no == 1:
relative_start_col_no = target.start.column - base.start.column
else:
relative_start_col_no = target.start.column
relative_end_line_no = relative_start_line_no + target.end.line - target.start.line
if relative_end_line_no == 1:
relative_end_col_no = target.end.column - base.start.column
else:
relative_end_col_no = target.end.column
return CodeRange((relative_start_line_no, relative_start_col_no), (relative_end_line_no, relative_end_col_no))
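# Worked example (illustrative values): with base = CodeRange((10, 4), (20, 0))
# and target = CodeRange((10, 8), (12, 5)), relative_range(base, target) returns
# CodeRange((1, 4), (3, 5)): the target starts on relative line 1, so its start
# column is offset by the base's start column, while later lines keep their
# absolute columns.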
class PersistentMetadataWrapper(MetadataWrapper):
"""The original MetadataWrapper keeps the metadata per-visit.
This class makes this persistent across multiple visits.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._metadata_cache = {}
def resolve(self, provider):
if provider in self._metadata_cache:
return self._metadata_cache[provider]
else:
out = super().resolve(provider)
self._metadata_cache[provider] = out
return out
class PositionFilter(cst.CSTVisitor):
"""Collect all nodes that are within the given target node_range."""
METADATA_DEPENDENCIES = (PositionProvider,)
def __init__(self, target_range: CodeRange):
super().__init__()
self.__target_location = target_range
self.nodes_within_range: Set[cst.CSTNode] = set()
def on_visit(self, node: cst.CSTNode) -> bool:
pos: CodeRange = self.get_metadata(PositionProvider, node)
if subsumes_code_range(pos, self.__target_location):
self.nodes_within_range.add(node)
return code_ranges_overlap(pos, self.__target_location)
class AllFunctionFinder(cst.CSTVisitor):
METADATA_DEPENDENCIES = (PositionProvider,)
def __init__(self):
super().__init__()
self.all_function_nodes: List[Tuple[cst.CSTNode, CodeRange]] = []
def visit_FunctionDef(self, node: cst.FunctionDef) -> bool:
pos: CodeRange = self.get_metadata(PositionProvider, node)
self.all_function_nodes.append((node, pos))
return False
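# Illustrative usage sketch (the source string is a placeholder):
#   wrapper = PersistentMetadataWrapper(cst.parse_module("def f():\n    return 1\n"))
#   finder = AllFunctionFinder()
#   wrapper.visit(finder)
#   finder.all_function_nodes   # -> [(FunctionDef node, its CodeRange)]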
|
"""
APNS Notification platform.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/notify.apns/
"""
import logging
import os
import voluptuous as vol
from homeassistant.helpers.event import track_state_change
from homeassistant.config import load_yaml_config_file
from homeassistant.components.notify import (
ATTR_TARGET, ATTR_DATA, BaseNotificationService, DOMAIN)
from homeassistant.const import CONF_NAME, CONF_PLATFORM
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers import template as template_helper
REQUIREMENTS = ['apns2==0.1.1']
APNS_DEVICES = 'apns.yaml'
CONF_CERTFILE = 'cert_file'
CONF_TOPIC = 'topic'
CONF_SANDBOX = 'sandbox'
DEVICE_TRACKER_DOMAIN = 'device_tracker'
SERVICE_REGISTER = 'apns_register'
ATTR_PUSH_ID = 'push_id'
ATTR_NAME = 'name'
PLATFORM_SCHEMA = vol.Schema({
vol.Required(CONF_PLATFORM): 'apns',
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_CERTFILE): cv.isfile,
vol.Required(CONF_TOPIC): cv.string,
vol.Optional(CONF_SANDBOX, default=False): cv.boolean,
})
REGISTER_SERVICE_SCHEMA = vol.Schema({
vol.Required(ATTR_PUSH_ID): cv.string,
vol.Optional(ATTR_NAME, default=None): cv.string,
})
def get_service(hass, config, discovery_info=None):
"""Return push service."""
descriptions = load_yaml_config_file(
os.path.join(os.path.dirname(__file__), 'services.yaml'))
name = config.get(CONF_NAME)
cert_file = config.get(CONF_CERTFILE)
topic = config.get(CONF_TOPIC)
sandbox = config.get(CONF_SANDBOX)
service = ApnsNotificationService(hass, name, topic, sandbox, cert_file)
hass.services.register(
DOMAIN, 'apns_{}'.format(name), service.register,
descriptions.get(SERVICE_REGISTER), schema=REGISTER_SERVICE_SCHEMA)
return service
class ApnsDevice(object):
"""
The APNS Device class.
Stores information about a device that is registered for push
notifications.
"""
def __init__(self, push_id, name, tracking_device_id=None, disabled=False):
"""Initialize Apns Device."""
self.device_push_id = push_id
self.device_name = name
self.tracking_id = tracking_device_id
self.device_disabled = disabled
@property
def push_id(self):
"""Return the APNS id for the device."""
return self.device_push_id
@property
def name(self):
"""Return the friendly name for the device."""
return self.device_name
@property
def tracking_device_id(self):
"""
Return the device Id.
The id of a device that is tracked by the device
tracking component.
"""
return self.tracking_id
@property
def full_tracking_device_id(self):
"""
Return the fully qualified device id.
The full id of a device that is tracked by the device
tracking component.
"""
return '{}.{}'.format(DEVICE_TRACKER_DOMAIN, self.tracking_id)
@property
def disabled(self):
"""Return the ."""
return self.device_disabled
def disable(self):
"""Disable the device from recieving notifications."""
self.device_disabled = True
def __eq__(self, other):
"""Return the comparision."""
if isinstance(other, self.__class__):
return self.push_id == other.push_id and self.name == other.name
return NotImplemented
def __ne__(self, other):
"""Return the comparision."""
return not self.__eq__(other)
def _write_device(out, device):
"""Write a single device to file."""
attributes = []
if device.name is not None:
attributes.append(
'name: {}'.format(device.name))
if device.tracking_device_id is not None:
attributes.append(
'tracking_device_id: {}'.format(device.tracking_device_id))
if device.disabled:
attributes.append('disabled: True')
out.write(device.push_id)
out.write(": {")
if attributes:
separator = ", "
out.write(separator.join(attributes))
out.write("}\n")
class ApnsNotificationService(BaseNotificationService):
"""Implement the notification service for the APNS service."""
def __init__(self, hass, app_name, topic, sandbox, cert_file):
"""Initialize APNS application."""
self.hass = hass
self.app_name = app_name
self.sandbox = sandbox
self.certificate = cert_file
self.yaml_path = hass.config.path(app_name + '_' + APNS_DEVICES)
self.devices = {}
self.device_states = {}
self.topic = topic
if os.path.isfile(self.yaml_path):
self.devices = {
str(key): ApnsDevice(
str(key),
value.get('name'),
value.get('tracking_device_id'),
value.get('disabled', False)
)
for (key, value) in
load_yaml_config_file(self.yaml_path).items()
}
tracking_ids = [
device.full_tracking_device_id
for (key, device) in self.devices.items()
if device.tracking_device_id is not None
]
track_state_change(
hass, tracking_ids, self.device_state_changed_listener)
def device_state_changed_listener(self, entity_id, from_s, to_s):
"""
        Listen for state change.
Track device state change if a device has a tracking id specified.
"""
self.device_states[entity_id] = str(to_s.state)
def write_devices(self):
"""Write all known devices to file."""
with open(self.yaml_path, 'w+') as out:
for _, device in self.devices.items():
_write_device(out, device)
def register(self, call):
"""Register a device to receive push messages."""
push_id = call.data.get(ATTR_PUSH_ID)
device_name = call.data.get(ATTR_NAME)
current_device = self.devices.get(push_id)
current_tracking_id = None if current_device is None \
else current_device.tracking_device_id
device = ApnsDevice(push_id, device_name, current_tracking_id)
if current_device is None:
self.devices[push_id] = device
with open(self.yaml_path, 'a') as out:
_write_device(out, device)
return True
if device != current_device:
self.devices[push_id] = device
self.write_devices()
return True
def send_message(self, message=None, **kwargs):
"""Send push message to registered devices."""
from apns2.client import APNsClient
from apns2.payload import Payload
from apns2.errors import Unregistered
apns = APNsClient(
self.certificate,
use_sandbox=self.sandbox,
use_alternative_port=False)
device_state = kwargs.get(ATTR_TARGET)
message_data = kwargs.get(ATTR_DATA)
if message_data is None:
message_data = {}
if isinstance(message, str):
rendered_message = message
elif isinstance(message, template_helper.Template):
rendered_message = message.render()
else:
rendered_message = ''
payload = Payload(
alert=rendered_message,
badge=message_data.get('badge'),
sound=message_data.get('sound'),
category=message_data.get('category'),
custom=message_data.get('custom', {}),
content_available=message_data.get('content_available', False))
device_update = False
for push_id, device in self.devices.items():
if not device.disabled:
state = None
if device.tracking_device_id is not None:
state = self.device_states.get(
device.full_tracking_device_id)
if device_state is None or state == str(device_state):
try:
apns.send_notification(
push_id, payload, topic=self.topic)
except Unregistered:
logging.error("Device %s has unregistered", push_id)
device_update = True
device.disable()
if device_update:
self.write_devices()
return True
|
from functools import partial
from pipe import select, where
from pydash import chunk
from pydash import filter_ as filter
from pydash import flatten, get, omit
from .primitives import parallel, pipeline, scatter
__all__ = (
"parallel",
"scatter",
"pipeline",
"partial",
"select",
"where",
"flatten",
"chunk",
"omit",
"get",
"filter",
)
|
"""
Crop box
========
Crop atoms within a box or, alternatively, the inverse.
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from . import base
from . import _filtering
class CropBoxFilterSettings(base.BaseSettings):
"""
Settings for the crop box filter
"""
def __init__(self):
super(CropBoxFilterSettings, self).__init__()
self.registerSetting("invertSelection", default=False)
self.registerSetting("xEnabled", default=False)
self.registerSetting("yEnabled", default=False)
self.registerSetting("zEnabled", default=False)
self.registerSetting("xmin", default=0.0)
self.registerSetting("xmax", default=10.0)
self.registerSetting("ymin", default=0.0)
self.registerSetting("ymax", default=10.0)
self.registerSetting("zmin", default=0.0)
self.registerSetting("zmax", default=10.0)
class CropBoxFilter(base.BaseFilter):
"""
Crop box filter.
"""
def apply(self, filterInput, settings):
"""Apply the filter."""
# unpack inputs
defectFilterSelected = filterInput.defectFilterSelected
# settings
xmin = settings.getSetting("xmin")
xmax = settings.getSetting("xmax")
ymin = settings.getSetting("ymin")
ymax = settings.getSetting("ymax")
zmin = settings.getSetting("zmin")
zmax = settings.getSetting("zmax")
xEnabled = int(settings.getSetting("xEnabled"))
yEnabled = int(settings.getSetting("yEnabled"))
zEnabled = int(settings.getSetting("zEnabled"))
invertSelection = int(settings.getSetting("invertSelection"))
# are we cropping defects or atoms...
if defectFilterSelected:
self.logger.debug("Cropping defects")
# unpack inputs
inp = filterInput.inputState
ref = filterInput.refState
interstitials = filterInput.interstitials
vacancies = filterInput.vacancies
antisites = filterInput.antisites
onAntisites = filterInput.onAntisites
splitInterstitials = filterInput.splitInterstitials
# call C library
result = _filtering.cropDefectsFilter(interstitials, vacancies, antisites, onAntisites, splitInterstitials, inp.pos,
ref.pos, xmin, xmax, ymin, ymax, zmin, zmax, xEnabled, yEnabled, zEnabled,
invertSelection)
# unpack
NInt, NVac, NAnt, NSplit = result
vacancies.resize(NVac, refcheck=False)
interstitials.resize(NInt, refcheck=False)
antisites.resize(NAnt, refcheck=False)
onAntisites.resize(NAnt, refcheck=False)
splitInterstitials.resize(NSplit * 3, refcheck=False)
else:
self.logger.debug("Cropping atoms")
# unpack inputs
lattice = filterInput.inputState
visibleAtoms = filterInput.visibleAtoms
NScalars = filterInput.NScalars
fullScalars = filterInput.fullScalars
NVectors = filterInput.NVectors
fullVectors = filterInput.fullVectors
# call C library
NVisible = _filtering.cropFilter(visibleAtoms, lattice.pos, xmin, xmax, ymin, ymax, zmin, zmax, xEnabled,
yEnabled, zEnabled, invertSelection, NScalars, fullScalars, NVectors, fullVectors)
# resize visible atoms
visibleAtoms.resize(NVisible, refcheck=False)
# result
result = base.FilterResult()
return result
|
"""
Check that bitstream doesn't change between versions
"""
import cfg
from TestSuite import runTests, SummaryType, TestUtils as TU
import re
import operator as op
def main():
seqs = cfg.sequences
seq_names = cfg.class_sequence_names
base_v = 5
new_v = base_v + 1
outname = f"kvz_conf_check_v{new_v}"
bin_vers = [f"kvz_v{base_v}", f"kvz_v{new_v}"]
shared_param = ("--preset", "ultrafast")
depth = [tuple(), ("--pu-depth-inter", "1-2", "--pu-depth-intra", "2-3")]
gop = [tuple(), ("--gop", "0"), ("--gop", "8")]
def has_bipred(b):
return (lambda _, t: ("bi" in t) == b)
tpg = TU.TestParameterGroup()
tpg.add_const_param(input_names = seq_names,
layer_args = shared_param,
inputs = seqs,
validate = False,
version = new_v) \
.add_param_set(_bin_name = bin_vers, _depth = depth, _gop = gop)
tpg.set_param_group_transformer(TU.transformerFactory(
bin_name = lambda *, _bin_name, **_: cfg.bin_path + _bin_name + ".exe",
test_name = lambda *, _bin_name, _depth, _gop, **_: _bin_name + ("_depth" if _depth else "") + "_gop" + (_gop[1] if _gop else ""),
layer_args = lambda *, layer_args, _depth, _gop, **_: ((layer_args + _depth + _gop),)
))
tests = tpg.to_kvz_test_instance()
summary = [TU.make_BDBRMatrix_definition(TU.get_test_names(tests), write_bits = False, write_psnr = False),]
summary.append(
TU.make_AnchorList_multiAnchor_definition(TU.get_test_names(tests),
lambda test: [a for a in TU.get_test_names(tests) if a.split('_')[2:] == test.split('_')[2:] and a.split('_')[1] != test.split('_')[1]],
lambda test: f"v{new_v}" in test
)
)
runTests(tests, outname, *summary)
if __name__ == "__main__":
print("Execute test file " + __file__)
main()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from torch import Tensor
import numpy as np
from collections import OrderedDict
class RoundQuant(torch.autograd.Function):
@staticmethod
def forward(ctx, input, n_lvs):
return input.mul(n_lvs-1).round_().div_(n_lvs-1)
@staticmethod
def backward(ctx, grad_output):
return grad_output, None
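# RoundQuant maps an input already scaled to [0, 1] onto n_lvs uniformly spaced
# levels in the forward pass and acts as a straight-through estimator in the
# backward pass (the incoming gradient is passed through unchanged; n_lvs gets
# no gradient). For example, with n_lvs = 5:
#   RoundQuant.apply(torch.tensor([0.23]), 5)   # -> tensor([0.2500])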
def Quantizer(weight, bit):
a = torch.pow(2, torch.tensor([bit]))
def softmax_init(bits):
degree = 4
    theta = (bits ** degree)/(bits ** degree).sum()
return theta
"""
@inproceedings{
esser2020learned,
title={LEARNED STEP SIZE QUANTIZATION},
author={Steven K. Esser and Jeffrey L. McKinstry and Deepika Bablani and Rathinakumar Appuswamy and Dharmendra S. Modha},
booktitle={International Conference on Learning Representations},
year={2020},
url={https://openreview.net/forum?id=rkgO66VKDS}
}
"""
def grad_scale(x, scale):
yOut = x
yGrad = x * scale
return (yOut-yGrad).detach() + yGrad
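# grad_scale is the gradient-scaling trick from the LSQ paper cited above: the
# returned value equals x numerically (the detached difference cancels the scaled
# term), but only yGrad = x * scale participates in autograd, so the gradient
# w.r.t. x is multiplied by `scale`.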
class Q_ReLU(nn.Module):
def __init__(self, act_func=True, inplace=False):
super(Q_ReLU, self).__init__()
self.n_lvs = [1]
self.bits = [32]
self.act_func = act_func
self.inplace = inplace
self.a = Parameter(Tensor(1))
self.theta = Parameter(Tensor([1]))
self.tau = 1
def initialize(self, bits, offset, diff):
self.bits = Parameter(Tensor(bits), requires_grad=False)
self.n_lvs = 2 ** self.bits
self.a = Parameter(Tensor(len(self.bits)))
#self.theta = Parameter(torch.ones(len(self.bits))/len(self.bits))
self.theta = Parameter(F.softmax(self.bits ** 2, dim=0))
self.a.data.fill_(np.log(np.exp(offset + diff)-1))
def initialize_qonly(self, offset, diff):
self.a.data.fill_(np.log(np.exp(offset + diff)-1))
def forward(self, x):
if self.act_func:
x = F.relu(x, self.inplace)
if len(self.bits)==1 and self.bits[0]==32:
#print("Q_ReLU")
return x#, 32
else:
a = F.softplus(self.a)
            if self.training:
softmask = F.gumbel_softmax(self.theta, tau=self.tau, hard=False, dim=0)
else:
softmask = F.softmax(self.theta/self.tau, dim=0)
#x_bar = torch.zeros_like(x)
'''
for i, n_lv in enumerate(self.n_lvs):
x_temp = F.hardtanh(x / a[i], 0, 1)
x_bar = torch.add(x_bar, RoundQuant.apply(x_temp, n_lv) * c[i] * softmask[i])
'''
a_mean = (softmask * a).sum()
n_lv_mean = (softmask * self.n_lvs.to(softmask.device)).sum()
x = F.hardtanh(x / a_mean, 0, 1)
x_bar = RoundQuant.apply(x, n_lv_mean) * a_mean
#act_size = (softmask * self.bits).sum()
return x_bar#, act_size
class Q_ReLU6(Q_ReLU):
def __init__(self, act_func=True, inplace=False):
super(Q_ReLU6, self).__init__(act_func, inplace)
def initialize(self, bits, offset, diff):
self.bits = Parameter(Tensor(bits), requires_grad=False)
self.n_lvs = 2 ** self.bits
self.a = Parameter(Tensor(len(self.bits)))
self.theta = Parameter(torch.ones(len(self.n_lvs))/len(self.n_lvs))
self.theta = Parameter(F.softmax(self.bits ** 2, dim=0))
if offset + diff > 6:
self.a.data.fill_(np.log(np.exp(6)-1))
else:
self.a.data.fill_(np.log(np.exp(offset + diff)-1))
def initialize_qonly(self, offset, diff):
if offset + diff > 6:
self.a.data.fill_(np.log(np.exp(6)-1))
else:
self.a.data.fill_(np.log(np.exp(offset + diff)-1))
class Q_Sym(nn.Module):
def __init__(self):
super(Q_Sym, self).__init__()
self.n_lvs = [1]
self.bits = [32] #Parameter(Tensor([32]), requires_grad=False)
self.a = Parameter(Tensor(1))
self.theta = Parameter(Tensor([1]))
self.tau = 1
def initialize(self, bits, offset, diff):
self.bits = Parameter(Tensor(bits), requires_grad=False)
self.n_lvs = 2 ** self.bits
self.a = Parameter(Tensor(len(self.bits)))
#self.theta = Parameter(torch.ones(len(self.bits))/len(self.bits))
self.theta = Parameter(F.softmax(self.bits ** 2, dim=0))
self.a.data.fill_(np.log(np.exp(offset + diff)-1))
def initialize_qonly(self, offset, diff):
self.a.data.fill_(np.log(np.exp(offset + diff)-1))
def forward(self, x):
if len(self.bits)==1 and self.bits[0]==32:
#print("Q_Sym")
return x#, 32
else:
a = F.softplus(self.a)
            if self.training:
softmask = F.gumbel_softmax(self.theta, tau=self.tau, hard=False, dim=0)
else:
softmask = F.softmax(self.theta/self.tau, dim=0)
'''
x_bar = torch.zeros_like(x)
for i, n_lv in enumerate(self.n_lvs):
x_temp = F.hardtanh(x / a[i], -1, 1)
x_bar = torch.add(x_bar, RoundQuant.apply(x_temp, n_lv // 2) * c[i] * softmask[i])
'''
a_mean = (softmask * a).sum()
n_lv_mean = (softmask * self.n_lvs.to(softmask.device)).sum()
x = F.hardtanh(x / a_mean, -1, 1)
x_bar = RoundQuant.apply(x, torch.round(n_lv_mean / 2)) * a_mean
#act_size = (softmask * self.bits).sum()
return x_bar#, act_size
################## didn't modify Q_HSwish #################
class Q_HSwish(nn.Module):
def __init__(self, act_func=True):
super(Q_HSwish, self).__init__()
self.n_lvs = [1]
self.bits = [32]
self.act_func = act_func
self.a = Parameter(Tensor(1))
self.b = 3/8
self.c = Parameter(Tensor(1))
self.d = -3/8
def initialize(self, n_lvs, offset, diff):
self.n_lvs = n_lvs
self.a.data.fill_(np.log(np.exp(offset + diff)-1))
def forward(self, x):
if self.act_func:
x = x * (F.hardtanh(x + 3, 0, 6) / 6)
if len(self.bits)==1 and self.bits[0]==32:
return x
else:
a = F.softplus(self.a)
c = F.softplus(self.c)
x = x + self.b
x = F.hardtanh(x / a, 0, 1)
x = RoundQuant.apply(x, self.n_lvs) * c
x = x + self.d
return x
##########################################################
class Q_Conv2d(nn.Conv2d):
def __init__(self, *args, **kargs):
super(Q_Conv2d, self).__init__(*args, **kargs)
self.n_lvs = [1]
self.bits = [32]
self.a = Parameter(Tensor(1))
self.weight_old = None
self.theta = Parameter(Tensor([1]))
self.computation = 0
self.tau = 1
def initialize(self, bits):
self.bits = Parameter(Tensor(bits), requires_grad=False)
self.n_lvs = 2 ** self.bits
self.a = Parameter(Tensor(len(self.bits)))
#self.theta = Parameter(torch.ones(len(self.bits))/len(self.bits))
self.theta = Parameter(F.softmax(self.bits ** 2, dim=0))
max_val = self.weight.data.abs().max().item()
self.a.data.fill_(np.log(np.exp(max_val * 0.9)-1))
def initialize_qonly(self):
max_val = self.weight.data.abs().max().item()
self.a.data.fill_(np.log(np.exp(max_val * 0.9)-1))
def _weight_quant(self):
"""
a = F.softplus(self.a)
c = F.softplus(self.c)
if self.train:
softmask = F.gumbel_softmax(self.theta, tau=self.tau, hard=False, dim=0)
else:
softmask = F.softmax(self.theta/self.tau, dim=0)
'''
w_bar = torch.zeros_like(self.weight)
for i, n_lv in enumerate(self.n_lvs):
weight = F.hardtanh(self.weight / a[i], -1, 1)
w_bar = torch.add(w_bar, RoundQuant.apply(weight, n_lv // 2) * c[i] * softmask[i])
'''
a_mean = (softmask * a).sum()
c_mean = (softmask * c).sum()
n_lv_mean = (softmask * self.n_lvs.to(softmask.device)).sum()
w_bar = F.hardtanh(self.weight / a_mean, -1, 1)
w_bar = RoundQuant.apply(w_bar, torch.round(n_lv_mean / 2)) * c_mean
"""
softmask = F.softmax(self.theta/self.tau, dim=0)
n_lv_mean = (softmask * self.n_lvs.to(softmask.device)).sum()
max_w = torch.max(torch.abs(torch.tanh(self.weight)))
weight = torch.tanh(self.weight) / max_w
weight.add_(1.0)
weight.div_(2.0)
k = torch.round(n_lv_mean/2)-1
weight = torch.round(weight * k)
        weight.div_(k)
weight.mul_(2.0)
weight.sub_(1.0)
weight.mul_(max_w)
#bitwidth = (softmask * self.bits).sum()
return weight#, bitwidth
def forward(self, x):#, cost, act_size=None):
if len(self.bits)==1 and self.bits[0]==32:
#print("Q_Conv2d")
#cost += act_size * 32 * self.computation
return F.conv2d(x, self.weight, self.bias,
self.stride, self.padding, self.dilation, self.groups)#, cost
else:
weight = self._weight_quant() #, bitwidth
#cost += act_size * bitwidth * self.computation
return F.conv2d(x, weight, self.bias,
self.stride, self.padding, self.dilation, self.groups)#, cost
class Q_Linear(nn.Linear):
def __init__(self, *args, **kargs):
super(Q_Linear, self).__init__(*args, **kargs)
self.n_lvs = [0]
self.bits = [32]
self.a = Parameter(Tensor(1))
self.weight_old = None
self.theta = Parameter(Tensor([1]))
self.computation = 0
self.tau = 1
def initialize(self, bits):
self.bits = Parameter(Tensor(bits), requires_grad=False)
self.n_lvs = 2 ** self.bits
self.a = Parameter(Tensor(len(self.bits)))
#self.theta = Parameter(torch.ones(len(self.bits))/len(self.bits))
self.theta = Parameter(F.softmax(self.bits ** 2, dim=0))
max_val = self.weight.data.abs().max().item()
self.a.data.fill_(np.log(np.exp(max_val * 0.9)-1))
def initialize_qonly(self):
max_val = self.weight.data.abs().max().item()
self.a.data.fill_(np.log(np.exp(max_val * 0.9)-1))
def _weight_quant(self):
"""
a = F.softplus(self.a)
c = F.softplus(self.c)
if self.train:
softmask = F.gumbel_softmax(self.theta, tau=self.tau, hard=False, dim=0)
else:
softmask = F.softmax(self.theta/self.tau, dim=0)
'''
w_bar = torch.zeros_like(self.weight)
for i, n_lv in enumerate(self.n_lvs):
weight = F.hardtanh(self.weight / a[i], -1, 1)
w_bar = torch.add(w_bar, RoundQuant.apply(weight, n_lv // 2) * c[i] * softmask[i])
'''
a_mean = (softmask * a).sum()
c_mean = (softmask * c).sum()
n_lv_mean = (softmask * self.n_lvs.to(softmask.device)).sum()
w_bar = F.hardtanh(self.weight / a_mean, -1, 1)
w_bar = RoundQuant.apply(w_bar, torch.round(n_lv_mean / 2)) * c_mean
"""
softmask = F.softmax(self.theta/self.tau, dim=0)
n_lv_mean = (softmask * self.n_lvs.to(softmask.device)).sum()
max_w = torch.max(torch.abs(torch.tanh(self.weight)))
weight = torch.tanh(self.weight) / max_w
weight.add_(1.0)
weight.div_(2.0)
k = torch.round(n_lv_mean/2)-1
weight = torch.round(weight * k)
        weight.div_(k)
weight.mul_(2.0)
weight.sub_(1.0)
weight.mul_(max_w)
#bitwidth = (softmask * self.bits).sum()
return weight#, bitwidth
def forward(self, x):#, cost, act_size=None):
if len(self.bits)==1 and self.bits[0]==32:
#print("Q_Linear")
#cost += act_size * 32 * self.computation
return F.linear(x, self.weight, self.bias)#, cost
else:
weight = self._weight_quant() #, bitwidth
#cost += act_size * bitwidth * self.computation
return F.linear(x, weight, self.bias)#, cost
class Q_Conv2dPad(Q_Conv2d):
def __init__(self, mode, *args, **kargs):
super(Q_Conv2dPad, self).__init__(*args, **kargs)
self.mode = mode
def forward(self, inputs):
if self.mode == "HS":
inputs = F.pad(inputs, self.padding + self.padding, value=-3/8)
elif self.mode == "RE":
inputs = F.pad(inputs, self.padding + self.padding, value=0)
else:
raise LookupError("Unknown nonlinear")
        if len(self.bits) == 1 and self.bits[0] == 32:
return F.conv2d(inputs, self.weight, self.bias,
self.stride, 0, self.dilation, self.groups)
else:
weight = self._weight_quant()
return F.conv2d(inputs, weight, self.bias,
self.stride, 0, self.dilation, self.groups)
def initialize(model, loader, bits, act=False, weight=False, eps=0.05):
if weight:
print('==> set up weight bitwidth..')
elif act:
print('==> set up activation bitwidth..')
if isinstance(bits, int):
bits = [bits]
def initialize_hook(module, input, output):
if isinstance(module, (Q_ReLU, Q_Sym, Q_HSwish)) and act:
if not isinstance(input, list):
input = input[0]
input = input.detach().cpu().numpy()
            if isinstance(module, Q_Sym):
                input = np.abs(input)
            elif isinstance(module, Q_HSwish):
input = input + 3/8
input = input.reshape(-1)
input = input[input > 0]
input = np.sort(input)
if len(input) == 0:
small, large = 0, 1e-3
else:
small, large = input[int(len(input) * eps)], input[int(len(input) * (1-eps))]
module.initialize(bits, small, large - small)
if isinstance(module, (Q_Conv2d, Q_Linear)) and weight:
module.initialize(bits)
if isinstance(module, Q_Conv2d) and weight:
O, I, K1, K2 = module.weight.shape
N, C, H, W = input[0].shape
s = module.stride[0]
module.computation = O * I * K1 * K2 * H * W / s / s
if isinstance(module, Q_Linear) and weight:
O, I = module.weight.shape
N, I = input[0].shape
module.computation = O * I
hooks = []
for name, module in model.named_modules():
hook = module.register_forward_hook(initialize_hook)
hooks.append(hook)
model.train()
model.cuda()
for i, (input, target) in enumerate(loader):
with torch.no_grad():
if isinstance(model, nn.DataParallel):
output = model.module(input.cuda())
else:
output = model(input.cuda())
break
model.cuda()
for hook in hooks:
hook.remove()
def sample_search_result(model, hard=True, print=True):
if hard:
for name, module in model.named_modules():
if isinstance(module, (Q_Conv2d, Q_Linear, Q_ReLU, Q_Sym, Q_HSwish)):
idx = torch.argmax(module.theta)
for var in ['a', 'c']:
setattr(module, var, Parameter(getattr(module, var)[idx].view(1)))
for var in ['bits', 'n_lvs']:
setattr(module, var, Parameter(getattr(module, var)[idx].view(1), requires_grad=False))
module.theta=Parameter(torch.Tensor([1]), requires_grad=False)
else:
# TODO: stochastic sampling
raise NotImplementedError
def extract_bitwidth(model, weight_or_act=None, tau=1):
    assert weight_or_act is not None
if weight_or_act == "weight" or weight_or_act == 0:
i = 1
module_set = (Q_Conv2d, Q_Linear)
elif weight_or_act == "act" or weight_or_act == 1:
i = 2
module_set = (Q_ReLU, Q_Sym, Q_HSwish)
else:
print(f'[ValueError] weight_or_act: {weight_or_act}')
raise ValueError
list_select = []
list_prob = []
str_prob = ''
for _, m in enumerate(model.modules()):
if isinstance(m, module_set):
prob = F.softmax(m.theta / tau, dim=0)
list_select.append(int(m.bits[torch.argmax(prob)].item()))
list_prob.append(prob)
prob = [f'{i:.5f}' for i in prob.cpu().tolist()]
str_prob += f'layer {i} [{", ".join(prob)}]\n'
i += 1
str_select = f'{weight_or_act} bitwidth select: \n' + ", ".join(map(str, list_select))
return list_select, list_prob, str_select, str_prob
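# Illustrative usage sketch (`model` stands for any network built from the Q_*
# modules in this file):
#   w_sel, w_prob, w_str, w_prob_str = extract_bitwidth(model, "weight", tau=1)
#   a_sel, a_prob, a_str, a_prob_str = extract_bitwidth(model, "act", tau=1)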
def initialize_quantizer(model, loader, eps=0.05):
def initialize_hook(module, input, output):
if isinstance(module, (Q_ReLU, Q_Sym, Q_HSwish)):
if not isinstance(input, list):
input = input[0]
input = input.detach().cpu().numpy()
            if isinstance(module, Q_Sym):
                input = np.abs(input)
            elif isinstance(module, Q_HSwish):
input = input + 3/8
input = input.reshape(-1)
input = input[input > 0]
input = np.sort(input)
if len(input) == 0:
small, large = 0, 1e-3
else:
small, large = input[int(len(input) * eps)], input[int(len(input) * (1-eps))]
module.initialize_qonly(small, large - small)
if isinstance(module, (Q_Conv2d, Q_Linear)):
module.initialize_qonly()
if isinstance(module, Q_Conv2d):
O, I, K1, K2 = module.weight.shape
N, C, H, W = input[0].shape
s = module.stride[0]
module.computation = O * I * K1 * K2 * H * W / s / s
if isinstance(module, Q_Linear):
O, I = module.weight.shape
N, I = input[0].shape
module.computation = O * I
hooks = []
for name, module in model.named_modules():
hook = module.register_forward_hook(initialize_hook)
hooks.append(hook)
model.train()
for i, (input, target) in enumerate(loader):
with torch.no_grad():
if isinstance(model, nn.DataParallel):
output = model.module(input.cuda())
else:
output = model(input.cuda())
break
model.cuda()
for hook in hooks:
hook.remove()
class Q_Sequential(nn.Sequential):
def __init__(self, *args):
super(Q_Sequential, self).__init__()
if len(args) == 1 and isinstance(args[0], OrderedDict):
for key, module in args[0].items():
self.add_module(key, module)
else:
idx = 0
for module in args:
if isinstance(module, Q_Sym) or (isinstance(module, Q_HSwish) and idx == 0):
self.add_module('-' + str(idx), module)
else:
self.add_module(str(idx), module)
idx += 1
def transfer_bitwidth(model_src, model_dst):
n_lvs_dict={}
bit_dict={}
for name, module in model_src.named_modules():
if isinstance(module, (Q_Conv2d, Q_Linear, Q_ReLU, Q_Sym, Q_HSwish)):
n_lvs_dict[name] = module.n_lvs.data
bit_dict[name] = module.bits.data
for name, module in model_dst.named_modules():
if isinstance(module, (Q_Conv2d, Q_Linear, Q_ReLU, Q_Sym, Q_HSwish)):
module.n_lvs.data = n_lvs_dict[name]
module.bits.data = bit_dict[name]
print(name)
class QuantOps(object):
initialize = initialize
initialize_quantizer = initialize_quantizer
transfer_bitwidth = transfer_bitwidth
Conv2d = Q_Conv2d
ReLU = Q_ReLU
ReLU6 = Q_ReLU6
Sym = Q_Sym
HSwish = Q_HSwish
Conv2dPad = Q_Conv2dPad
Sequential = Q_Sequential
Linear = Q_Linear
|
'''
module handles config
'''
import os
import sys
import configparser
from xdg.BaseDirectory import (xdg_data_home, xdg_config_home, xdg_cache_home)
class SaurConfigError (Exception):
'''Exception for when an error occurs within the SaurConfig class.'''
def __init__ (self, message, errorcode=None):
super ().__init__ (message)
if errorcode:
self.errors = errorcode
class SaurConfig ():
def __init__ (self, conf_file):
if not os.path.exists (conf_file):
raise SaurConfigError (f'No config file at {conf_file}!')
self.file = conf_file
self.defconf = { 'verbose': False,
'cachedir': xdg_cache_home + '/saur',
'datadir': xdg_data_home + '/saur',
'dbroot': None,
'dbname': None,
'gpgkey': None,
'flags':
{ 'def': [],
'sync': ["-n", "-s", "-r", "--clean"],
'rebuild': ["-n", "--noview", "--continue", "--rebuild"] },
'packages': None,
'bpackages': None }
def config (self):
return self.defconf
def parse_config (self):
with open (self.file, 'r', encoding='ascii') as fp:
config = configparser.ConfigParser (allow_no_value=True)
config.read_file (fp)
if 'settings' in config:
print ('Reading in settings... ', end='')
self.defconf.update (config['settings'])
for k, v in self.defconf.items():
if v in ('false', 'False'):
self.defconf[k] = False
if v in ('true', 'True'):
self.defconf[k] = True
print ('done')
if 'packages' not in config or not config['packages']:
raise SaurConfigError ('The config file has no packages listed!')
else:
self.defconf['packages'] = dict(config.items('packages', raw=True))
self.defconf['bpackages'] = {}
if self.defconf['dbroot']:
self.defconf['flags']['def'].append(f'--root={self.defconf["dbroot"]}')
if self.defconf['dbname']:
self.defconf['flags']['def'].append('-d')
self.defconf['flags']['def'].append(self.defconf['dbname'])
if self.defconf['gpgkey']:
self.defconf['flags']['def'].append('--sign')
if self.defconf['verbose']:
print (self.defconf)
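# Illustrative usage sketch (the config path is a placeholder):
#   cfg = SaurConfig(xdg_config_home + '/saur/saur.conf')   # raises SaurConfigError if the file is missing
#   cfg.parse_config()                                      # raises SaurConfigError if [packages] is empty
#   settings = cfg.config()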
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import requests
from collections import OrderedDict
def slack_post(token, channel, blind=False):
    '''Post a Slack message, possibly with a picture. Prepares a function that
    will be called later by the main script.'''
slack_api_url = 'https://slack.com/api/{}'
params = {
'token': token,
'channel': channel,
}
if blind:
params.update({
'as_user': 'false',
'username': 'Mini-Sentry',
'icon_url': 'https://wiki.teamfortress.com/w/images/e/ea/Red_Mini_Sentry.png'
})
url = slack_api_url.format('chat.postMessage')
else:
params['channels'] = params.pop('channel')
url = slack_api_url.format('files.upload')
def make_request(*args):
        '''Make the request using the prepared params.'''
request_args = OrderedDict(
url=url,
params=params,
)
if blind:
request_args['params'].update({
'text': args[0]
})
else:
request_args['params'].update({
'title': args[0],
'initial_comment': args[1],
})
request_args['files'] = dict(file=args[2])
response = requests.post(**request_args)
return make_request
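# Illustrative usage (token, channel and file name are placeholders):
#   post_text = slack_post('xoxb-...', '#alerts', blind=True)
#   post_text('Mini-Sentry deployed')                                  # chat.postMessage
#   post_file = slack_post('xoxb-...', '#alerts')
#   post_file('graph.png', 'Latest numbers', open('graph.png', 'rb'))  # files.upload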
|
import pytest
from lorenz import generate_L96
import numpy as np
import numpy.random as rd
from tools import *
from pytest import approx
import os
# =========================================================
# Set up code
# =========================================================
N = 1000
t = np.linspace(0, 6 * np.pi, N)
noisy_cos = np.cos(t) + (2 * rd.random(N) - 1) / 10
smooth_cos = np.cos(t)
X_in = np.concatenate([[noisy_cos, smooth_cos]], axis=1)
params_broke = {
'n_reservoir': 600,
'sparsity': 0.1,
'rand_seed': 85,
'rho': 0.7,
'noise': 0.001,
'future': 20,
'window': 3,
'trainlen': 500
}
q = np.arange(0, 30.0, 0.01)
x = generate_L96(q)
params_work = {
'n_reservoir': 600,
'sparsity': 0.03,
'rand_seed': 85,
'rho': 1.5,
'noise': 0.01,
'future': 72,
'window': 72,
'trainlen': 1200
}
# =========================================================
# =========================================================
def test_MSE_float():
"""
MSE returns float
"""
assert(isinstance(MSE(smooth_cos, noisy_cos), np.float64))
return
def test_MSE_equal():
"""
MSE is zero for two equally sized arrays
"""
yhat = np.random.randint(100, size=(5))
y = yhat
obs_i = MSE(yhat, y)
exp_i = 0
assert obs_i == exp_i
return
def test_MSE_sldiff():
"""
MSE is a known float when all but one
entry are the same.
"""
y = np.array([[1, 2, 3]])
yhat = np.array([[1, 1, 3]])
obs = MSE(yhat, y)
exp = 0.5773502691896257
assert obs == approx(exp, 0.01)
return
def test_MSE_comdiff():
"""
The MSE should be one when every entry
is one unit off of its predicted value.
"""
y = np.array([[1, 2, 3]])
yhat = np.array([[0, 1, 2]])
obs = MSE(yhat, y)
exp = 1
assert obs == exp
return
def test_MSE_diffsize():
"""
Different sized arrays should not work for
MSE.
"""
yhat = np.array([[1, 2, 3]])
y = np.array([[1, 2]])
with pytest.raises(ValueError):
MSE(yhat, y)
return
def test_MAE_float():
"""
MAE returns float
"""
assert(isinstance(MAE(smooth_cos, noisy_cos), np.float64))
return
def test_MAE_equal():
"""
MAE is zero for two equally sized arrays.
"""
yhat = np.random.randint(100, size=(5))
y = yhat
obs_i = MAE(yhat, y)
exp_i = 0
assert obs_i == exp_i
return
def test_MAE_sldiff():
"""
MAE is 1/3 for two arrays that are the
same but for one entry which differs by one
unit.
"""
yhat = np.array([[1, 2, 3]])
y = np.array([[1, 1, 3]])
obs_i = MAE(yhat, y)
exp_i = 1/3
assert approx(obs_i, 0.01) == exp_i
return
def test_MAE_comdiff():
"""
MAE is a float for two completely different arrays
where the target vector is the same size as the
predicted vector but all of the entries are
three units apart from the corresponding one.
"""
yhat = np.array([[1, 2, 3]])
y = np.array([[4, 5, 6]])
obs_i = MAE(yhat, y)
exp_i = 3.0
assert obs_i == exp_i
return
def test_MAE_diffsize():
"""
Different sized arrays should not work for
MAE.
"""
yhat = np.array([[1, 2, 3]])
y = np.array([[1, 2]])
with pytest.raises(ValueError):
MAE(yhat, y)
return
def test_param_string():
"""
Verifies that param_string returns string.
"""
pstring = param_string(params_broke)
assert(isinstance(pstring, str))
return
def test_optimal_values_pmone():
"""
Optimal_values returns the correct set
when the output should clearly be plus
or minus one.
"""
x = np.array([-1, 0, 0])
y = np.array([1, 0, 0])
b = np.outer(3, 3)
min_set = (-1, 1)
opt_set = optimal_values(b, x, y)
assert(min_set == opt_set)
return
def test_optimal_values_equal_arrays():
"""
Optimal_values returns the correct
set for two arrays with the same
values.
"""
x = np.array([1, 0, 0])
y = np.array([1, 0, 0])
b = np.array([
[0.80, 0.28, 0.46],
[0.12, 0.49, 0.93],
[0.38, 0.50, 0.66]
])
min_set = (0, 1)
opt_set = optimal_values(b, x, y)
assert (min_set == opt_set)
return
def test_esn_prediction_diffsize():
"""
The ESN does not train because of
mismatched input shapes.
"""
with pytest.raises(IndexError):
pred = esn_prediction(X_in, params_work)
return
def test_esn_prediction_multiple():
"""
The window size is not a multiple of the
total future.
"""
with pytest.raises(AssertionError):
pred = esn_prediction(X_in, params_broke)
return
def test_esn_save():
"""
The esn_prediction function has the
ability to save predictions to the
data folder. This test generates a
sample data set based off of the
params_save values, with the domain
of q values and x function defined by
generate_L96 from the Lorenz module. It
makes a test file and then removes it.
As such, there should not be a test data
file after the test has been completed.
"""
esn_prediction(x, params_work, 'test_save')
assert os.path.exists('./data/test_save_prediction.npy')
if os.path.exists('./data/test_save_prediction.npy'):
os.remove('./data/test_save_prediction.npy')
else:
pass
return
def test_esn_scenario_output_size():
"""
The output size of esn_scenario
should contain an array of the same
size as the data parameter that
is input.
"""
zeros = np.zeros([3, 3])
params_work['future'] = 1
params_work['window'] = 1
output = esn_scenario(zeros, params_work)
outlen = len(output[0][0])
exp = 3
assert outlen == exp
return
def test_esn_scenario_output_type():
"""
The output of esn_scenario
should contain a numpy array.
"""
output = esn_scenario(x, params_work)
assert type(output[0][0]) is np.ndarray
return
|
def isPrime(num):
''' isPrime determines if num is prime
argument
-- num : int
return
-- boolean
'''
if num < 2:
return False
elif num in [2,3]:
return True
elif num % 2 == 0:
return False
else:
upper_limit = int(num ** 0.5) + 1
for divisor in range(3, upper_limit, 2):
if num % divisor == 0:
return False
return True
# end of isPrime
def primesUnderN(num):
    ''' primesUnderN() returns a list of all prime numbers under num
argument
-- num : integer
return
-- list
'''
result = [2]
for i in range(3,num,2):
if isPrime(i): # if isPrime(i) == True:
result.append(i)
return result
# end of primesUnderN
def sieve(num):
'''
algorithm Sieve of Eratosthenes is
input: an integer n > 1.
output: all prime numbers from 2 through n.
let A be an array of Boolean values, indexed by integers 2 to n,
initially all set to true.
for i = 2, 3, 4, ..., not exceeding √n do
if A[i] is true
for j = i^2, i^2+i, i^2+2i, i^2+3i, ..., not exceeding n do
A[j] := false
return all i such that A[i] is true.
'''
# Array of Boolean Values of num length
result = [False, False] # Index of 0 and 1 is going to be False
result += [True] * (num - 1)
upper_limit = int(num ** 0.5) + 1
for i in range(2, upper_limit):
if result[i]: # if result[i] == True:
for j in range(i*i, num+1, i):
result[j] = False
# end of double for loops
prime_locations = []
# return all i such that A[i] is true.
for i in range(len(result)):
if result[i]:
prime_locations.append(i)
return prime_locations
# end of sieve
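# Quick sanity check (illustrative): both approaches agree on small inputs, e.g.
#   primesUnderN(20) -> [2, 3, 5, 7, 11, 13, 17, 19]
#   sieve(20)        -> [2, 3, 5, 7, 11, 13, 17, 19]
# Note that sieve(n) includes n itself when n is prime, while primesUnderN(n)
# stops below n.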
# We have functions:
# - primesUnderN()
# - sieve()
from timeit import default_timer as timer
num = int(input('Set the input size of num: '))
print('----')
# Testing primesUnderN()
runtimes = []
for x in range(5):
start = timer()
result = primesUnderN(num)
end = timer()
current_runtime = end - start
runtimes.append(current_runtime)
print('Testing primesUnderN()')
print('Our run time results:', runtimes)
average_runtime = sum(runtimes) / len(runtimes)
print('Our average run time is: %.6f seconds.' % average_runtime)
print('----\n')
# Testing sieve()
runtimes = []
for x in range(5):
start = timer()
result = sieve(num)
end = timer()
current_runtime = end - start
runtimes.append(current_runtime)
print('Testing sieve()')
print('Our run time results:', runtimes)
average_runtime = sum(runtimes) / len(runtimes)
print('Our average run time is: %.6f seconds.' % average_runtime)
print('----\n')
|
from pyleap import Rectangle, window, Line, Polygon, Circle, Text, repeat, run
bg = Rectangle(0, 0, window.w, window.h, "white")
def draw(dt):
window.clear()
bg.draw()
window.show_axis()
Rectangle(10, 10, 300, 200, "#00ff0080").stroke()
Line(100, 100, 600, 100, 100, 'pink').draw()
Polygon(200, 50, 300, 50, 250, 150, "green").draw()
Circle(200, 200, 120, "#ff000080").draw()
Text('Hello, world', 350, 100).draw()
window.show_fps()
repeat(draw)
run()
|
from bluebottle.activities.messages import ActivityRejectedNotification, ActivityCancelledNotification, \
ActivitySucceededNotification, ActivityRestoredNotification, ActivityExpiredNotification
from bluebottle.test.utils import NotificationTestCase
from bluebottle.time_based.tests.factories import DateActivityFactory
class ActivityNotificationTestCase(NotificationTestCase):
def setUp(self):
self.obj = DateActivityFactory.create(
title="Save the world!"
)
def test_activity_rejected_notification(self):
self.message_class = ActivityRejectedNotification
self.create()
self.assertRecipients([self.obj.owner])
self.assertSubject('Your activity "Save the world!" has been rejected')
self.assertBodyContains('Unfortunately your activity "Save the world!" has been rejected.')
self.assertActionLink(self.obj.get_absolute_url())
self.assertActionTitle('Open your activity')
def test_activity_cancelled_notification(self):
self.message_class = ActivityCancelledNotification
self.create()
self.assertRecipients([self.obj.owner])
self.assertSubject('Your activity "Save the world!" has been cancelled')
self.assertBodyContains('Unfortunately your activity "Save the world!" has been cancelled.')
self.assertActionLink(self.obj.get_absolute_url())
self.assertActionTitle('Open your activity')
def test_activity_restored_notification(self):
self.message_class = ActivityRestoredNotification
self.create()
self.assertRecipients([self.obj.owner])
self.assertSubject('The activity "Save the world!" has been restored')
self.assertBodyContains('Your activity "Save the world!" has been restored.')
self.assertActionLink(self.obj.get_absolute_url())
self.assertActionTitle('Open your activity')
def test_activity_expired_notification(self):
self.message_class = ActivityExpiredNotification
self.create()
self.assertRecipients([self.obj.owner])
self.assertSubject('The registration deadline for your activity "Save the world!" has expired')
self.assertBodyContains(
'Unfortunately, nobody applied to your activity '
'"Save the world!" before the deadline to apply. '
'That’s why we have cancelled your activity.')
self.assertActionLink(self.obj.get_absolute_url())
self.assertActionTitle('Open your activity')
def test_activity_succeeded_notification(self):
self.message_class = ActivitySucceededNotification
self.create()
self.assertRecipients([self.obj.owner])
self.assertSubject('Your activity "Save the world!" has succeeded 🎉')
self.assertBodyContains(
'You did it! Your activity "Save the world!" has succeeded, '
'that calls for a celebration!')
self.assertActionLink(self.obj.get_absolute_url())
self.assertActionTitle('Open your activity')
|
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
def cf_reports_beta_cl(cli_ctx, *_):
from msgraph.cli.core.commands.client_factory import get_mgmt_service_client
from azext_reports_beta.vendored_sdks.reports import Reports
return get_mgmt_service_client(cli_ctx,
Reports,
subscription_bound=False,
base_url_bound=False)
def cf_audit_log_audit_log_root(cli_ctx, *_):
return cf_reports_beta_cl(cli_ctx).audit_logs_audit_log_root
def cf_audit_log(cli_ctx, *_):
return cf_reports_beta_cl(cli_ctx).audit_logs
def cf_report_report_root(cli_ctx, *_):
return cf_reports_beta_cl(cli_ctx).reports_report_root
def cf_report(cli_ctx, *_):
return cf_reports_beta_cl(cli_ctx).reports
|
from sqlalchemy import Column, Boolean, Text, Integer, ForeignKey
from sqlalchemy.orm import relationship
from db_conf import Base
class DiagInfo(Base):
__tablename__ = "Diagnostics"
id = Column('id', Integer, primary_key=True, autoincrement=True)
dg_findings = Column('findings', Text)
dg_custcomp = Column('complaint', Text)
dg_mil = Column('mil', Boolean)
# dtc_id = Column(Integer, ForeignKey('DTCs.id'))
# dtc = relationship('DTCInfo', backref="Diagnostics")
def __init__(self, findings='', custcomp='', mil=False, dtc=None) -> None:
        self.dg_findings = findings
        self.dg_custcomp = custcomp
        self.dg_mil = mil
self.dtc = dtc
|
from .generic_wrappers import * # NOQA
from .lambda_wrappers import action_lambda_v1, observation_lambda_v0, reward_lambda_v0 # NOQA
from .multiagent_wrappers import agent_indicator_v0, black_death_v2, \
pad_action_space_v0, pad_observations_v0 # NOQA
from supersuit.generic_wrappers import frame_skip_v0, color_reduction_v0, resize_v0, dtype_v0, \
flatten_v0, reshape_v0, normalize_obs_v0, clip_actions_v0, clip_reward_v0, \
delay_observations_v0, frame_stack_v1, max_observation_v0, \
sticky_actions_v0 # NOQA
from .vector.vector_constructors import gym_vec_env_v0, stable_baselines_vec_env_v0, \
stable_baselines3_vec_env_v0, concat_vec_envs_v1, pettingzoo_env_to_vec_env_v1 # NOQA
from .aec_vector import vectorize_aec_env_v0 # NOQA
class DeprecatedWrapper(ImportError):
pass
def __getattr__(wrapper_name):
"""
Gives error that looks like this when trying to import old version of wrapper:
File "./supersuit/__init__.py", line 38, in __getattr__
raise DeprecatedWrapper(f"{base}{version_num} is now deprecated, use {base}{act_version_num} instead")
supersuit.DeprecatedWrapper: concat_vec_envs_v0 is now deprecated, use concat_vec_envs_v1 instead
"""
start_v = wrapper_name.rfind("_v") + 2
version = wrapper_name[start_v:]
base = wrapper_name[:start_v]
try:
version_num = int(version)
is_valid_version = True
except ValueError:
is_valid_version = False
globs = globals()
if is_valid_version:
for act_version_num in range(1000):
if f"{base}{act_version_num}" in globs:
if version_num < act_version_num:
raise DeprecatedWrapper(f"{base}{version_num} is now deprecated, use {base}{act_version_num} instead")
raise ImportError(f"cannot import name '{wrapper_name}' from 'supersuit'")
__version__ = "3.3.2"
|
# Copyright (C) 2020 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
""" Module with methods for notifications on people mentions."""
import re
from collections import defaultdict
from collections import namedtuple
from logging import getLogger
from email.utils import parseaddr
from urlparse import urljoin
import flask
from sqlalchemy.orm import load_only
from ggrc import models
from ggrc import settings
from ggrc import utils
from ggrc.app import db
from ggrc.gcalendar import utils as calendar_utils
from ggrc.notifications.common import send_mentions_bg
from ggrc.notifications import data_handlers
from ggrc.utils import user_generator, get_url_root
logger = getLogger(__name__)
EMAIL_REGEXP = r"[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+"
EMAIL_LINK_REGEXP = r"mailto:" + EMAIL_REGEXP
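# Illustrative example (not executed): in a comment body such as
#   '<a href="mailto:jane.doe@example.com">Jane</a>, please review'
# EMAIL_LINK_REGEXP matches the substring "mailto:jane.doe@example.com",
# which _find_email_mentions() below turns into a mention of that user.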
DEFAULT_PERSON = u"Unknown"
CommentData = namedtuple("CommentData",
["comment_text", "author", "created_at"])
def handle_comment_mapped(obj, comments):
"""Send mentions in the comments in the bg task.
Args:
obj: object for which comments were created,
comments: A list of comment objects.
"""
comments_data = _fetch_comments_data(comments)
if obj.__class__.__name__ == "CycleTaskGroupObjectTask":
url = calendar_utils.get_cycle_tasks_url_by_slug(obj.slug)
else:
url = urljoin(get_url_root(), utils.view_url_for(obj))
models.background_task.create_task(
name="send_mentions_bg",
url=flask.url_for("send_mentions_bg"),
parameters={
"comments_data": comments_data,
"object_name": obj.title,
"href": url,
},
queued_callback=send_mentions_bg,
)
def _fetch_comments_data(comments):
"""Fetches comments and people data from db and creates list of
CommentsData tuples.
Args:
comments - list of Comment objects
Returns:
a list of CommentData named tuples.
"""
comments_loaded = db.session.query(models.Comment).options(
load_only(
models.Comment.description,
models.Comment.created_at,
models.Comment.modified_by_id,
)
).filter(
models.Comment.id.in_([comment.id for comment in comments])
).all()
people = db.session.query(models.Person).filter(
models.Person.id.in_([comment.modified_by_id
for comment in comments_loaded])
).all()
people_dict = {person.id: person.email for person in people}
comments_data = [
CommentData(
comment_text=comment.description,
author=people_dict.get(comment.modified_by_id, DEFAULT_PERSON),
created_at=comment.created_at
)
for comment in comments_loaded
]
return comments_data
def send_mentions(object_name, href, comments_data):
"""Send emails for people mentions.
Params:
object_name: object title,
href: link to the object,
comments_data: set of CommentData named tuples.
"""
from ggrc.notifications.common import send_email
email_mentions = _find_email_mentions(comments_data)
for email, related_comments_data in email_mentions.iteritems():
title, email_comments = _generate_mention_email(
object_name, related_comments_data
)
body = settings.EMAIL_MENTIONED_PERSON.render(person_mention={
"comments": email_comments,
"url": href,
})
send_email(email, title, body)
db.session.commit()
def _extract_email(email_match):
"""Extracts email address from regexp match.
Params:
email_match: Match of EMAIL_REGEXP regexp.
Returns:
Email address from the match.
"""
email_parsed = parseaddr(email_match.group())[1]
return email_parsed
def _find_email_mentions(comments_data):
"""Find mentions of user email in the comment data.
If a user email is not registered in the app, the user will be created
via external service and a Creator role would be granted to this user.
Params:
comments_data: list of CommentData named tuples.
Returns:
a default dict with keys equals to mentioned user email and values
equals to a set of related CommentData.
"""
link_pattern = re.compile(EMAIL_LINK_REGEXP)
email_mentions = defaultdict(list)
for comment in comments_data:
comment_email_mentions = dict()
for match in link_pattern.finditer(comment.comment_text):
email = _extract_email(match)
person = user_generator.find_user(email)
if not person:
continue
comment_email_mentions[email] = comment
for email, matched_comment in comment_email_mentions.iteritems():
email_mentions[email].append(matched_comment)
return email_mentions
def _generate_mention_email(object_name, comments_data):
"""Generate title and body of the email.
Params:
object_name: name of the object in which person was mentioned,
comments_data: a set of CommentData named tuples.
Returns:
title: email title
body: email body
"""
  # The only way to pass a list of different comments here is via import.
  # All comments created via import are authored by the same user, so it is
  # safe to use any author in the email title.
author = next(iter(comments_data)).author
title = u"{author} mentioned you on a comment within {object_name}".format(
author=author,
object_name=object_name,
)
body_template = (
u"{author} mentioned you on a comment within {object_name} "
u"at {created_at}:\n"
u"{comment_text}\n"
)
body = []
for comment in sorted(comments_data):
body.append(body_template.format(
author=comment.author,
object_name=flask.escape(object_name),
created_at=data_handlers.as_user_time(comment.created_at),
comment_text=comment.comment_text,
))
return title, body
|
#KNN_classify()
import numpy as np
from math import sqrt
from collections import Counter
def KNN_classify(k,X_train,y_train,x):
assert 1<=k<=X_train.shape[0],'k must be valid'
assert X_train.shape[0]==y_train.shape[0],\
'the size of X_train must equal to the size of y_train'
assert X_train.shape[1]==x.shape[0],\
'the feature number of x must be equal to X_train'
    # Euclidean distance from the query point x to every training sample
    distances=[sqrt(np.sum((x_train-x)**2)) for x_train in X_train]
    # indices of the training samples sorted by distance (closest first)
    nearest=np.argsort(distances)
    # labels of the k nearest neighbours
    topK_y=[y_train[i] for i in nearest[:k]]
    # majority vote among the k labels
    votes=Counter(topK_y)
    return votes.most_common(1)[0][0]
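# A minimal usage sketch with made-up data (the arrays below are illustrative,
# not taken from any real dataset); with k=3 the query point is closest to the
# two class-0 samples, so the majority vote returns 0.
if __name__ == '__main__':
    X_train = np.array([[1.0, 2.0],
                        [1.5, 1.8],
                        [5.0, 8.0],
                        [6.0, 9.0]])
    y_train = np.array([0, 0, 1, 1])
    x = np.array([1.2, 1.9])  # query point
    print(KNN_classify(3, X_train, y_train, x))  # expected output: 0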
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Bert for pretraining."""
import numpy as np
import mindspore.nn as nn
from mindspore.common.initializer import initializer, TruncatedNormal
from mindspore.ops import operations as P
from mindspore.ops import functional as F
from mindspore.ops import composite as C
from mindspore.common.tensor import Tensor
from mindspore.common.parameter import Parameter
from mindspore.common import dtype as mstype
from mindspore.nn.wrap.grad_reducer import DistributedGradReducer
from mindspore.context import ParallelMode
from mindspore.communication.management import get_group_size
from mindspore import context
from mindspore.ops import _selected_ops
from .bert_model import BertModel
from .utils import ClipByGlobalNorm
GRADIENT_CLIP_TYPE = 1
GRADIENT_CLIP_VALUE = 1.0
clip_grad = C.MultitypeFuncGraph("clip_grad")
# pylint: disable=consider-using-in
@clip_grad.register("Number", "Number", "Tensor")
def _clip_grad(clip_type, clip_value, grad):
"""
Clip gradients.
Inputs:
clip_type (int): The way to clip, 0 for 'value', 1 for 'norm'.
clip_value (float): Specifies how much to clip.
grad (tuple[Tensor]): Gradients.
Outputs:
tuple[Tensor], clipped gradients.
"""
if clip_type != 0 and clip_type != 1:
return grad
dt = F.dtype(grad)
if clip_type == 0:
new_grad = C.clip_by_value(grad, F.cast(F.tuple_to_array((-clip_value,)), dt),
F.cast(F.tuple_to_array((clip_value,)), dt))
else:
new_grad = nn.ClipByNorm()(grad, F.cast(F.tuple_to_array((clip_value,)), dt))
return new_grad
class GetMaskedLMOutput(nn.Cell):
"""
Get masked lm output.
Args:
config (BertConfig): The config of BertModel.
Returns:
Tensor, masked lm output.
"""
def __init__(self, config):
super(GetMaskedLMOutput, self).__init__()
self.width = config.hidden_size
self.reshape = P.Reshape()
self.gather = P.GatherV2()
weight_init = TruncatedNormal(config.initializer_range)
self.dense = nn.Dense(self.width,
config.hidden_size,
weight_init=weight_init,
activation=config.hidden_act).to_float(config.compute_type)
self.layernorm = nn.LayerNorm((config.hidden_size,)).to_float(config.compute_type)
self.output_bias = Parameter(
initializer(
'zero',
config.vocab_size),
name='output_bias')
self.matmul = P.MatMul(transpose_b=True)
self.log_softmax = nn.LogSoftmax(axis=-1)
self.shape_flat_offsets = (-1, 1)
self.rng = Tensor(np.array(range(0, config.batch_size)).astype(np.int32))
self.last_idx = (-1,)
self.shape_flat_sequence_tensor = (config.batch_size * config.seq_length, self.width)
self.seq_length_tensor = Tensor(np.array((config.seq_length,)).astype(np.int32))
self.cast = P.Cast()
self.compute_type = config.compute_type
self.dtype = config.dtype
def construct(self,
input_tensor,
output_weights,
positions):
"""Get output log_probs"""
flat_offsets = self.reshape(
self.rng * self.seq_length_tensor, self.shape_flat_offsets)
flat_position = self.reshape(positions + flat_offsets, self.last_idx)
flat_sequence_tensor = self.reshape(input_tensor, self.shape_flat_sequence_tensor)
input_tensor = self.gather(flat_sequence_tensor, flat_position, 0)
input_tensor = self.cast(input_tensor, self.compute_type)
output_weights = self.cast(output_weights, self.compute_type)
input_tensor = self.dense(input_tensor)
input_tensor = self.layernorm(input_tensor)
logits = self.matmul(input_tensor, output_weights)
logits = self.cast(logits, self.dtype)
logits = logits + self.output_bias
log_probs = self.log_softmax(logits)
return log_probs
class GetNextSentenceOutput(nn.Cell):
"""
Get next sentence output.
Args:
config (BertConfig): The config of Bert.
Returns:
Tensor, next sentence output.
"""
def __init__(self, config):
super(GetNextSentenceOutput, self).__init__()
self.log_softmax = _selected_ops.LogSoftmax()
weight_init = TruncatedNormal(config.initializer_range)
self.dense = nn.Dense(config.hidden_size, 2,
weight_init=weight_init, has_bias=True).to_float(config.compute_type)
self.dtype = config.dtype
self.cast = P.Cast()
def construct(self, input_tensor):
logits = self.dense(input_tensor)
logits = self.cast(logits, self.dtype)
log_prob = self.log_softmax(logits)
return log_prob
class BertPreTraining(nn.Cell):
"""
Bert pretraining network.
Args:
config (BertConfig): The config of BertModel.
is_training (bool): Specifies whether to use the training mode.
use_one_hot_embeddings (bool): Specifies whether to use one-hot for embeddings.
Returns:
Tensor, prediction_scores, seq_relationship_score.
"""
def __init__(self, config, is_training, use_one_hot_embeddings):
super(BertPreTraining, self).__init__()
self.bert = BertModel(config, is_training, use_one_hot_embeddings)
self.cls1 = GetMaskedLMOutput(config)
self.cls2 = GetNextSentenceOutput(config)
def construct(self, input_ids, input_mask, token_type_id,
masked_lm_positions):
sequence_output, pooled_output, embedding_table = \
self.bert(input_ids, token_type_id, input_mask)
prediction_scores = self.cls1(sequence_output,
embedding_table,
masked_lm_positions)
seq_relationship_score = self.cls2(pooled_output)
return prediction_scores, seq_relationship_score
class BertPretrainingLoss(nn.Cell):
"""
Provide bert pre-training loss.
Args:
config (BertConfig): The config of BertModel.
Returns:
Tensor, total loss.
"""
def __init__(self, config):
super(BertPretrainingLoss, self).__init__()
self.vocab_size = config.vocab_size
self.onehot = P.OneHot()
self.on_value = Tensor(1.0, mstype.float32)
self.off_value = Tensor(0.0, mstype.float32)
self.reduce_sum = P.ReduceSum()
self.reduce_mean = P.ReduceMean()
self.reshape = P.Reshape()
self.last_idx = (-1,)
self.neg = P.Neg()
self.cast = P.Cast()
def construct(self, prediction_scores, seq_relationship_score, masked_lm_ids,
masked_lm_weights, next_sentence_labels):
"""Defines the computation performed."""
label_ids = self.reshape(masked_lm_ids, self.last_idx)
label_weights = self.cast(self.reshape(masked_lm_weights, self.last_idx), mstype.float32)
one_hot_labels = self.onehot(label_ids, self.vocab_size, self.on_value, self.off_value)
per_example_loss = self.neg(self.reduce_sum(prediction_scores * one_hot_labels, self.last_idx))
numerator = self.reduce_sum(label_weights * per_example_loss, ())
denominator = self.reduce_sum(label_weights, ()) + self.cast(F.tuple_to_array((1e-5,)), mstype.float32)
masked_lm_loss = numerator / denominator
# next_sentence_loss
labels = self.reshape(next_sentence_labels, self.last_idx)
one_hot_labels = self.onehot(labels, 2, self.on_value, self.off_value)
per_example_loss = self.neg(self.reduce_sum(
one_hot_labels * seq_relationship_score, self.last_idx))
next_sentence_loss = self.reduce_mean(per_example_loss, self.last_idx)
# total_loss
total_loss = masked_lm_loss + next_sentence_loss
return total_loss
class BertNetworkWithLoss(nn.Cell):
"""
Provide bert pre-training loss through network.
Args:
config (BertConfig): The config of BertModel.
is_training (bool): Specifies whether to use the training mode.
use_one_hot_embeddings (bool): Specifies whether to use one-hot for embeddings. Default: False.
Returns:
Tensor, the loss of the network.
"""
def __init__(self, config, is_training, use_one_hot_embeddings=False):
super(BertNetworkWithLoss, self).__init__()
self.bert = BertPreTraining(config, is_training, use_one_hot_embeddings)
self.loss = BertPretrainingLoss(config)
self.cast = P.Cast()
def construct(self,
input_ids,
input_mask,
token_type_id,
next_sentence_labels,
masked_lm_positions,
masked_lm_ids,
masked_lm_weights):
"""Get pre-training loss"""
prediction_scores, seq_relationship_score = \
self.bert(input_ids, input_mask, token_type_id, masked_lm_positions)
total_loss = self.loss(prediction_scores, seq_relationship_score,
masked_lm_ids, masked_lm_weights, next_sentence_labels)
return self.cast(total_loss, mstype.float32)
class BertTrainOneStepCell(nn.Cell):
"""
Encapsulation class of bert network training.
    Append an optimizer to the training network. After that, the construct
    function can be called to create the backward graph.
Args:
network (Cell): The training network. Note that loss function should have been added.
optimizer (Optimizer): Optimizer for updating the weights.
sens (Number): The adjust parameter. Default: 1.0.
"""
def __init__(self, network, optimizer, sens=1.0):
super(BertTrainOneStepCell, self).__init__(auto_prefix=False)
self.network = network
self.network.set_grad()
self.weights = optimizer.parameters
self.optimizer = optimizer
self.grad = C.GradOperation(get_by_list=True, sens_param=True)
self.sens = sens
self.reducer_flag = False
self.parallel_mode = context.get_auto_parallel_context("parallel_mode")
if self.parallel_mode in [ParallelMode.DATA_PARALLEL, ParallelMode.HYBRID_PARALLEL]:
self.reducer_flag = True
self.grad_reducer = None
if self.reducer_flag:
mean = context.get_auto_parallel_context("gradients_mean")
degree = get_group_size()
self.grad_reducer = DistributedGradReducer(optimizer.parameters, mean, degree)
self.cast = P.Cast()
self.hyper_map = C.HyperMap()
def set_sens(self, value):
self.sens = value
def construct(self,
input_ids,
input_mask,
token_type_id,
next_sentence_labels,
masked_lm_positions,
masked_lm_ids,
masked_lm_weights):
"""Defines the computation performed."""
weights = self.weights
loss = self.network(input_ids,
input_mask,
token_type_id,
next_sentence_labels,
masked_lm_positions,
masked_lm_ids,
masked_lm_weights)
grads = self.grad(self.network, weights)(input_ids,
input_mask,
token_type_id,
next_sentence_labels,
masked_lm_positions,
masked_lm_ids,
masked_lm_weights,
self.cast(F.tuple_to_array((self.sens,)),
mstype.float32))
grads = self.hyper_map(F.partial(clip_grad, GRADIENT_CLIP_TYPE, GRADIENT_CLIP_VALUE), grads)
if self.reducer_flag:
# apply grad reducer on grads
grads = self.grad_reducer(grads)
succ = self.optimizer(grads)
return F.depend(loss, succ)
grad_scale = C.MultitypeFuncGraph("grad_scale")
reciprocal = P.Reciprocal()
@grad_scale.register("Tensor", "Tensor")
def tensor_grad_scale(scale, grad):
return grad * reciprocal(scale)
class BertTrainOneStepWithLossScaleCell(nn.Cell):
"""
Encapsulation class of bert network training.
    Append an optimizer to the training network. After that, the construct
    function can be called to create the backward graph.
Args:
network (Cell): The training network. Note that loss function should have been added.
optimizer (Optimizer): Optimizer for updating the weights.
scale_update_cell (Cell): Cell to do the loss scale. Default: None.
"""
def __init__(self, network, optimizer, scale_update_cell=None, enable_global_norm=False):
super(BertTrainOneStepWithLossScaleCell, self).__init__(auto_prefix=False)
self.network = network
self.network.set_grad()
self.weights = optimizer.parameters
self.optimizer = optimizer
self.enable_global_norm = enable_global_norm
self.grad = C.GradOperation(get_by_list=True,
sens_param=True)
self.reducer_flag = False
self.allreduce = P.AllReduce()
self.parallel_mode = context.get_auto_parallel_context("parallel_mode")
if self.parallel_mode in [ParallelMode.DATA_PARALLEL, ParallelMode.HYBRID_PARALLEL]:
self.reducer_flag = True
self.grad_reducer = F.identity
self.degree = 1
if self.reducer_flag:
self.degree = get_group_size()
self.grad_reducer = DistributedGradReducer(optimizer.parameters, False, self.degree)
self.is_distributed = (self.parallel_mode != ParallelMode.STAND_ALONE)
self.cast = P.Cast()
self.alloc_status = P.NPUAllocFloatStatus()
self.get_status = P.NPUGetFloatStatus()
self.clear_before_grad = P.NPUClearFloatStatus()
self.reduce_sum = P.ReduceSum(keep_dims=False)
self.depend_parameter_use = P.ControlDepend(depend_mode=1)
self.base = Tensor(1, mstype.float32)
self.less_equal = P.LessEqual()
self.hyper_map = C.HyperMap()
self.loss_scale = None
self.loss_scaling_manager = scale_update_cell
if scale_update_cell:
self.loss_scale = Parameter(Tensor(scale_update_cell.get_loss_scale(), dtype=mstype.float32),
name="loss_scale")
@C.add_flags(has_effect=True)
def construct(self,
input_ids,
input_mask,
token_type_id,
next_sentence_labels,
masked_lm_positions,
masked_lm_ids,
masked_lm_weights,
sens=None):
"""Defines the computation performed."""
weights = self.weights
loss = self.network(input_ids,
input_mask,
token_type_id,
next_sentence_labels,
masked_lm_positions,
masked_lm_ids,
masked_lm_weights)
if sens is None:
scaling_sens = self.loss_scale
else:
scaling_sens = sens
# alloc status and clear should be right before gradoperation
init = self.alloc_status()
self.clear_before_grad(init)
grads = self.grad(self.network, weights)(input_ids,
input_mask,
token_type_id,
next_sentence_labels,
masked_lm_positions,
masked_lm_ids,
masked_lm_weights,
self.cast(scaling_sens,
mstype.float32))
# apply grad reducer on grads
grads = self.grad_reducer(grads)
grads = self.hyper_map(F.partial(grad_scale, scaling_sens * self.degree), grads)
if self.enable_global_norm:
grads = ClipByGlobalNorm()(grads)
else:
grads = self.hyper_map(F.partial(clip_grad, GRADIENT_CLIP_TYPE, GRADIENT_CLIP_VALUE), grads)
self.get_status(init)
flag_sum = self.reduce_sum(init, (0,))
if self.is_distributed:
# sum overflow flag over devices
flag_reduce = self.allreduce(flag_sum)
cond = self.less_equal(self.base, flag_reduce)
else:
cond = self.less_equal(self.base, flag_sum)
overflow = cond
if sens is None:
overflow = self.loss_scaling_manager(self.loss_scale, cond)
if overflow:
succ = False
else:
succ = self.optimizer(grads)
ret = (loss, cond, scaling_sens)
return F.depend(ret, succ)
cast = P.Cast()
update_accu_grads = C.MultitypeFuncGraph("update_accu_grads")
@update_accu_grads.register("Tensor", "Tensor")
def _update_accu_grads(accu_grad, grad):
succ = True
return F.depend(succ, F.assign_add(accu_grad, cast(grad, mstype.float32)))
zeroslike = P.ZerosLike()
reset_accu_grads = C.MultitypeFuncGraph("reset_accu_grads")
@reset_accu_grads.register("Tensor")
def _reset_accu_grads(accu_grad):
succ = True
return F.depend(succ, F.assign(accu_grad, zeroslike(accu_grad)))
class BertTrainAccumulateStepsWithLossScaleCell(nn.Cell):
"""
Encapsulation class of bert network training.
    Append an optimizer to the training network. After that, the construct
    function can be called to create the backward graph. To mimic a higher batch size,
    gradients are accumulated N times before the weight update.
Args:
network (Cell): The training network. Note that loss function should have been added.
optimizer (Optimizer): Optimizer for updating the weights.
scale_update_cell (Cell): Cell to do the loss scale. Default: None.
accumulation_steps (int): Number of accumulation steps before gradient update. The global batch size =
batch_size * accumulation_steps. Default: 1.
"""
def __init__(self, network, optimizer, scale_update_cell=None, accumulation_steps=1, enable_global_norm=False):
super(BertTrainAccumulateStepsWithLossScaleCell, self).__init__(auto_prefix=False)
self.network = network
self.network.set_grad()
self.weights = optimizer.parameters
self.optimizer = optimizer
self.accumulation_steps = accumulation_steps
self.enable_global_norm = enable_global_norm
self.one = Tensor(np.array([1]).astype(np.int32))
self.zero = Tensor(np.array([0]).astype(np.int32))
self.local_step = Parameter(initializer(0, [1], mstype.int32), name="local_step")
self.accu_grads = self.weights.clone(prefix="accu_grads", init='zeros')
self.accu_overflow = Parameter(initializer(0, [1], mstype.int32), name="accu_overflow")
self.loss = Parameter(initializer(0, [1], mstype.float32), name="accu_loss")
self.grad = C.GradOperation(get_by_list=True, sens_param=True)
self.reducer_flag = False
self.parallel_mode = context.get_auto_parallel_context("parallel_mode")
if self.parallel_mode in [ParallelMode.DATA_PARALLEL, ParallelMode.HYBRID_PARALLEL]:
self.reducer_flag = True
self.grad_reducer = F.identity
self.degree = 1
if self.reducer_flag:
self.degree = get_group_size()
self.grad_reducer = DistributedGradReducer(optimizer.parameters, False, self.degree)
self.is_distributed = (self.parallel_mode != ParallelMode.STAND_ALONE)
self.overflow_reducer = F.identity
if self.is_distributed:
self.overflow_reducer = P.AllReduce()
self.cast = P.Cast()
self.alloc_status = P.NPUAllocFloatStatus()
self.get_status = P.NPUGetFloatStatus()
self.clear_before_grad = P.NPUClearFloatStatus()
self.reduce_sum = P.ReduceSum(keep_dims=False)
self.base = Tensor(1, mstype.float32)
self.less_equal = P.LessEqual()
self.logical_or = P.LogicalOr()
self.not_equal = P.NotEqual()
self.select = P.Select()
self.reshape = P.Reshape()
self.hyper_map = C.HyperMap()
self.loss_scale = None
self.loss_scaling_manager = scale_update_cell
if scale_update_cell:
self.loss_scale = Parameter(Tensor(scale_update_cell.get_loss_scale(), dtype=mstype.float32),
name="loss_scale")
@C.add_flags(has_effect=True)
def construct(self,
input_ids,
input_mask,
token_type_id,
next_sentence_labels,
masked_lm_positions,
masked_lm_ids,
masked_lm_weights,
sens=None):
"""Defines the computation performed."""
weights = self.weights
loss = self.network(input_ids,
input_mask,
token_type_id,
next_sentence_labels,
masked_lm_positions,
masked_lm_ids,
masked_lm_weights)
if sens is None:
scaling_sens = self.loss_scale
else:
scaling_sens = sens
# update accumulation parameters
is_accu_step = self.not_equal(self.local_step, self.accumulation_steps)
self.local_step = self.select(is_accu_step, self.local_step + self.one, self.one)
self.loss = self.select(is_accu_step, self.loss + loss, loss)
mean_loss = self.loss / self.local_step
is_accu_step = self.not_equal(self.local_step, self.accumulation_steps)
# alloc status and clear should be right before gradoperation
init = self.alloc_status()
self.clear_before_grad(init)
grads = self.grad(self.network, weights)(input_ids,
input_mask,
token_type_id,
next_sentence_labels,
masked_lm_positions,
masked_lm_ids,
masked_lm_weights,
self.cast(scaling_sens,
mstype.float32))
accu_succ = self.hyper_map(update_accu_grads, self.accu_grads, grads)
mean_loss = F.depend(mean_loss, accu_succ)
self.get_status(init)
flag_sum = self.reduce_sum(init, (0,))
overflow = self.less_equal(self.base, flag_sum)
overflow = self.logical_or(self.not_equal(self.accu_overflow, self.zero), overflow)
accu_overflow = self.select(overflow, self.one, self.zero)
self.accu_overflow = self.select(is_accu_step, accu_overflow, self.zero)
if is_accu_step:
succ = False
else:
# apply grad reducer on grads
grads = self.grad_reducer(self.accu_grads)
scaling = scaling_sens * self.degree * self.accumulation_steps
grads = self.hyper_map(F.partial(grad_scale, scaling), grads)
if self.enable_global_norm:
                grads = ClipByGlobalNorm()(grads)
else:
grads = self.hyper_map(F.partial(clip_grad, GRADIENT_CLIP_TYPE, GRADIENT_CLIP_VALUE), grads)
accu_overflow = self.overflow_reducer(accu_overflow)
F.control_depend(grads, accu_overflow)
overflow = self.less_equal(self.base, accu_overflow)
accu_succ = self.hyper_map(reset_accu_grads, self.accu_grads)
overflow = F.depend(overflow, accu_succ)
overflow = self.reshape(overflow, (()))
if sens is None:
overflow = self.loss_scaling_manager(self.loss_scale, overflow)
if overflow:
succ = False
else:
succ = self.optimizer(grads)
ret = (mean_loss, overflow, scaling_sens)
return F.depend(ret, succ)
|
# Parse config files
configfile: "config.json"
# Parse the required files and parameters
GENOME = config["resources"]["reference"]
REF_INDEX = config["resources"]["index_ref"]
BAMDIR = config["resources"]["bam"]
# Parse the tools
GRAPHTYPER = config["tools"]["Graphtyper"]
SAMTOOLS = config["tools"]["Samtools"]
VT = config["tools"]["VT"]
VCFFILTER = config["tools"]["vcffilter"]
BEAGLE = config["tools"]["Beagle"]
BGZIP = config["tools"]["BGZIP"]
TABIX = config["tools"]["tabix"]
# Parse the chunk size and padding
slc = config["run"]["slice"]
padding = config["run"]["padding"]
CALLOPTS = config["run"]["opts"]
chr_list = list(range(1, 30))
samples, = glob_wildcards(BAMDIR + "/{sample}_recalibrated.bam")
rule all:
input:
expand("vcf_raw_beagle/vcf_raw_beagle_{chromosome}.vcf.gz", chromosome=chr_list),
expand("vcf_filtered_beagle/vcf_filtered_beagle_{chromosome}.vcf.gz", chromosome=chr_list)
### Get the padded region for given job id
def getstartpos(wildcards):
js = int(wildcards.job_id)
if js == 1:
start = 1
else:
start = ((js-1)*slc) - padding
return start
def getstoppos(wildcards):
js = int(wildcards.job_id)
with open(REF_INDEX,"r") as bg:
chr_length=int(bg.readlines()[int(wildcards.chromosome)-1].strip().split('\t')[1])
    if js*slc >= chr_length:
stop = chr_length
else:
stop = (js*slc)+padding
return stop
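# Worked example of the padded regions (assuming example config values
# slice=1,000,000 and padding=10,000; the real values come from config.json):
#   job_id 1 covers 1 .. 1,010,000          (start fixed at 1, stop = 1*slc + padding)
#   job_id 2 covers 990,000 .. 2,010,000    (start = (2-1)*slc - padding, stop = 2*slc + padding)
# Neighbouring jobs therefore overlap by about 2*padding bases; the overlap is
# trimmed again by the repadding_variants rule further down.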
# This rule splits the BAM for the given region
rule split_bam:
input:
"bam/{sample}_recalibrated.bam"
output:
"bam_small/{chromosome}/{job_id}/{sample}_{chromosome}_{job_id}.bam"
params:
chr = "{chromosome}",
start = getstartpos,
stop = getstoppos,
samtools = SAMTOOLS
shell:
"""
        {params.samtools} view -bh -o {output} {input} chr{params.chr}:{params.start}-{params.stop}
"""
rule get_bam_list:
input:
expand("bam_small/{{chromosome}}/{{job_id}}/{sample}_{{chromosome}}_{{job_id}}.bam", sample=samples)
output:
"bam_small/{chromosome}/{job_id}/{chromosome}_{job_id}.list"
params:
chr = "{chromosome}",
jobid = "{job_id}"
shell:
"""
ls {input} > {output}
"""
rule discovery_linear:
input:
rules.get_bam_list.output
output:
vcf = "iteration/{chromosome}/{job_id}/D1/{chromosome}_{job_id}_D1.vcf.gz",
vcf_index = "iteration/{chromosome}/{job_id}/D1/{chromosome}_{job_id}_D1.vcf.gz.tbi"
params:
graphtyper = GRAPHTYPER,
vt = VT,
tabix = TABIX,
bgzip = BGZIP,
dir_jobid = "iteration/{chromosome}/{job_id}/D1",
chr = "{chromosome}",
jobid = "{job_id}",
genome = GENOME,
start = getstartpos,
stop = getstoppos,
opt = CALLOPTS
shell:
"""
{params.graphtyper} construct {params.dir_jobid}/graph {params.genome} chr{params.chr}:{params.start}-{params.stop}
{params.graphtyper} index {params.dir_jobid}/graph
{params.graphtyper} call {params.opt} {params.dir_jobid}/graph "." \
--output {params.dir_jobid} \
--sams {input}
rm -f {params.dir_jobid}/graph && rm -rf {params.dir_jobid}/graph_gti
{params.vt} sort -o {params.dir_jobid}/new_region_sorted.vcf.gz {params.dir_jobid}/*_variants.vcf.gz
{params.vt} uniq -o {params.dir_jobid}/new.vcf {params.dir_jobid}/new_region_sorted.vcf.gz
cat {params.dir_jobid}/new.vcf | {params.bgzip} -c > {output[0]}
{params.tabix} {output[0]}
"""
rule discovery_graph:
input:
rules.get_bam_list.output,
rules.discovery_linear.output.vcf
output:
vcf = "iteration/{chromosome}/{job_id}/D2/{chromosome}_{job_id}_D2.vcf.gz",
vcf_index = "iteration/{chromosome}/{job_id}/D2/{chromosome}_{job_id}_D2.vcf.gz.tbi"
params:
graphtyper = GRAPHTYPER,
vt = VT,
tabix = TABIX,
bgzip= BGZIP,
dir_jobid = "iteration/{chromosome}/{job_id}/D2",
chr = "{chromosome}",
genome = GENOME,
start = getstartpos,
stop = getstoppos,
opt = CALLOPTS
shell:
"""
{params.graphtyper} construct {params.dir_jobid}/graph {params.genome} --vcf={input[1]} chr{params.chr}:{params.start}-{params.stop}
{params.graphtyper} index {params.dir_jobid}/graph
{params.graphtyper} call {params.opt} {params.dir_jobid}/graph "." \
--output {params.dir_jobid} \
--sams {input[0]}
ls {params.dir_jobid}/*.hap > {params.dir_jobid}/haps2
{params.graphtyper} haplotypes {params.dir_jobid}/graph \
--haplotypes {params.dir_jobid}/graph \
--output {params.dir_jobid}/haps.vcf.gz
rm --force {params.dir_jobid}/graph && rm -r --force {params.dir_jobid}/graph_gti
        {params.vt} cat {params.dir_jobid}/haps.vcf.gz {params.dir_jobid}/*_variants.vcf.gz |
        {params.vt} sort -o {params.dir_jobid}/new_region_sorted2.vcf.gz -
        {params.vt} uniq -o {params.dir_jobid}/new2.vcf {params.dir_jobid}/new_region_sorted2.vcf.gz
        cat {params.dir_jobid}/new2.vcf | {params.bgzip} -c > {output.vcf}
        {params.tabix} {output.vcf}
"""
rule cleaning_graph:
input:
rules.get_bam_list.output,
rules.discovery_graph.output.vcf
output:
vcf = "iteration/{chromosome}/{job_id}/G1/{chromosome}_{job_id}_haps.vcf.gz",
vcf_index = "iteration/{chromosome}/{job_id}/G1/{chromosome}_{job_id}_haps.vcf.gz.tbi"
params:
graphtyper = GRAPHTYPER,
vt = VT,
tabix = TABIX,
dir_jobid = "iteration/{chromosome}/{job_id}/G1",
chr = "{chromosome}",
jobid = "{job_id}",
genome = GENOME,
start = getstartpos,
stop = getstoppos,
opt = CALLOPTS
shell:
"""
{params.graphtyper} construct {params.dir_jobid}/graph {params.genome} --vcf={input[1]} chr{params.chr}:{params.start}-{params.stop}
{params.graphtyper} index {params.dir_jobid}/graph
{params.graphtyper} call {params.opt} {params.dir_jobid}/graph "." \
--no_new_variants \
--output {params.dir_jobid} \
            --sams {input[0]}
ls {params.dir_jobid}/*.hap > {params.dir_jobid}/haps3
{params.graphtyper} haplotypes {params.dir_jobid}/graph \
--haplotypes {params.dir_jobid}/haps3 \
--output {output.vcf} \
--skip_breaking_down_extracted_haplotypes
        {params.tabix} {output.vcf}
rm -f {params.dir_jobid}/graph && rm -rf {params.dir_jobid}/graph_gti/
"""
rule genotyping_variants:
input:
rules.get_bam_list.output,
rules.cleaning_graph.output.vcf
output:
vcf = "iteration/{chromosome}/{job_id}/G2/{chromosome}_{job_id}_haps.vcf.gz",
vcf_index = "iteration/{chromosome}/{job_id}/G2/{chromosome}_{job_id}_haps.vcf.gz.tbi"
params:
graphtyper = GRAPHTYPER,
vt = VT,
tabix = TABIX,
bgzip = BGZIP,
dir_jobid = "iteration/{chromosome}/{job_id}/G2",
chr = "{chromosome}",
jobid = "{job_id}",
genome = GENOME,
start = getstartpos,
stop = getstoppos,
opt = CALLOPTS
shell:
"""
{params.graphtyper} construct {params.dir_jobid}/graph {params.genome} --vcf={input[1]} chr{params.chr}:{params.start}-{params.stop}
        {params.graphtyper} index {params.dir_jobid}/graph
{params.graphtyper} call {params.opt} {params.dir_jobid}/graph "." \
--no_new_variants \
            --output {params.dir_jobid} \
--sams {input[0]}
{params.graphtyper} vcf_break_down \
{params.dir_jobid}/graph {params.dir_jobid}/*_calls.vcf.gz |
            {params.bgzip} -c > {output.vcf} && {params.tabix} {output.vcf}
"""
# Get the unpadded (trimmed) region for a given job id
def get_nopad_start(wildcards):
js = int(wildcards.job_id)
if js == 1:
start_nopad = 1
else:
start_nopad = ((js - 1) * slc) - 1
return start_nopad
def get_nopad_stop(wildcards):
js = int(wildcards.job_id)
with open(REF_INDEX,"r") as bg:
chr_length=int(bg.readlines()[int(wildcards.chromosome)-1].strip().split('\t')[1])
    if js*slc >= chr_length:
stop_nopad = chr_length
else:
stop_nopad = (js * slc) - 2
return stop_nopad
rule repadding_variants:
input:
rules.genotyping_variants.output.vcf
output:
vcf = "iteration/{chromosome}/{job_id}/repad/{chromosome}_{job_id}_repad.vcf.gz",
vcf_index = "iteration/{chromosome}/{job_id}/repad/{chromosome}_{job_id}_repad.vcf.gz.tbi"
params:
chr = "{chromosome}",
start_nopad = get_nopad_start,
stop_nopad = get_nopad_stop,
tabix = TABIX,
bgzip = BGZIP
shell:
"""
{params.tabix} {input} -h chr{params.chr}:{params.start_nopad}-{params.stop_nopad} |
{params.bgzip} -c > {output.vcf}
{params.tabix} {output.vcf}
"""
def get_all_input_given_chromosome(wildcards):
list_file = []
start = 1
count = 0
with open(REF_INDEX,"r") as bg:
chr_length=int(bg.readlines()[int(wildcards.chromosome)-1].strip().split('\t')[1])
while start < chr_length:
        stop = start + slc
job_id = count + 1
count += 1
if stop >= chr_length:
stop = chr_length
list_file.append("iteration/"+wildcards.chromosome+"/"+str(job_id)+"/repad/"+wildcards.chromosome+"_"+str(job_id)+"_repad.vcf.gz")
start = stop
return(list_file)
rule combine_chromosome_level_variants:
input:
get_all_input_given_chromosome
output:
vcf = "vcf/vcf_united_{chromosome}.vcf.gz",
vcf_index = "vcf/vcf_united_{chromosome}.vcf.gz.tbi"
params:
vt = VT,
tabix = TABIX,
bgzip = BGZIP,
genome = GENOME
shell:
"""
{params.vt} cat {input} |
{params.vt} sort - |
{params.vt} uniq - |
{params.vt} normalize - |
{params.bgzip} > {output.vcf}
{params.tabix} {output.vcf}
"""
rule filtering_variants:
input:
rules.combine_chromosome_level_variants.output
output:
vcf = "vcf_filtered/vcf_filtered_{chromosome}.vcf.gz",
vcf_index = "vcf_filtered/vcf_filtered_{chromosome}.vcf.gz.tbi"
params:
vcffilter = VCFFILTER,
bgzip = BGZIP,
tabix = TABIX
shell:
"""
{params.vcffilter} \
-f "ABHet < 0.0 | ABHet > 0.33" -f "ABHom < 0.0 | ABHom > 0.97" -f "MaxAASR > 0.4" -f "MQ > 30" \
-F "graphtyper_filter" \
-t "pass" {input} |
{params.bgzip} > {output.vcf}
{params.tabix} {output.vcf}
"""
rule raw_beagle_imputation:
input:
rules.combine_chromosome_level_variants.output.vcf
output:
vcf = "vcf_raw_beagle/vcf_raw_beagle_{chromosome}.vcf.gz",
vcf_index = "vcf_raw_beagle/vcf_raw_beagle_{chromosome}.vcf.gz.tbi"
params:
chr = "{chromosome}",
beagle = BEAGLE
shell:
"""
java -jar {params.beagle} \
gl={input} \
out=vcf_raw_beagle/vcf_raw_beagle_{params.chr}
"""
rule remove_filtered:
input:
rules.filtering_variants.output
output:
vcf = "vcf_filtered_removed/vcf_filtered_removed_{chromosome}.vcf.gz",
vcf_index = "vcf_filtered_removed/vcf_filtered_removed_{chromosome}.vcf.gz.tbi"
params:
vt = VT,
bgzip = BGZIP,
tabix = TABIX
shell:
"""
{params.vt} -f "filter.pass" {input} |
{params.bgzip} > {output.vcf}
{params.tabix} {output.vcf}
"""
rule filtered_beagle_imputation:
input:
rules.remove_filtered.output.vcf
output:
vcf = "vcf_filtered_beagle/vcf_filtered_beagle_{chromosome}.vcf.gz",
vcf_index = "vcf_filtered_beagle/vcf_filtered_beagle_{chromosome}.vcf.gz.tbi"
params:
chr = "{chromosome}",
beagle = BEAGLE
shell:
"""
java -jar {params.beagle} \
gl={input} \
out=vcf_filtered_beagle/vcf_filtered_beagle_{params.chr}
"""
|
import task
import deit
import deit_models
import torch
import fairseq
import os
from fairseq import utils
from fairseq_cli import generate
from PIL import Image
import torchvision.transforms as transforms
from data_aug import build_data_aug
def init(model_path, beam=5):
model, cfg, task = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[model_path],
arg_overrides={
"beam": beam,
"task": "text_recognition",
"fp16": False
})
device = "cuda" if torch.cuda.is_available() else "cpu"
model[0].to(device)
img_transform = build_data_aug(size=(384, 384), mode='valid', preprocess_datasets=None)
generator = task.build_generator(
model, cfg.generation, extra_gen_cls_kwargs={'lm_model': None, 'lm_weight': None}
)
bpe = task.build_bpe(cfg.bpe)
return model, cfg, task, generator, bpe, img_transform, device
def preprocess(img_path, img_transform, device):
im = Image.open(img_path).convert('RGB').resize((384, 384))
im = img_transform(im).unsqueeze(0).to(device).float()
sample = {
'net_input': {"imgs": im},
}
return sample
def get_text(cfg, generator, model, sample, bpe, prefix_tokens=None, bos_token=None):
decoder_output = generator.generate(model, sample, prefix_tokens=prefix_tokens,
constraints=None, bos_token=bos_token)
decoder_output = decoder_output[0][0] #top1
hypo_tokens, hypo_str, alignment = utils.post_process_prediction(
hypo_tokens=decoder_output["tokens"].int().cpu(),
src_str="",
alignment=decoder_output["alignment"],
align_dict=None,
tgt_dict=model[0].decoder.dictionary,
remove_bpe=cfg.common_eval.post_process,
extra_symbols_to_ignore=generate.get_symbols_to_strip_from_output(generator),
)
detok_hypo_str = bpe.decode(hypo_str)
return detok_hypo_str, decoder_output['score']
if __name__ == '__main__':
model_path = 'path/to/model'
jpg_path = "path/to/pic"
beam = 5
model, cfg, task, generator, bpe, img_transform, device = init(model_path, beam)
sample = preprocess(jpg_path, img_transform, device)
text = get_text(cfg, generator, model, sample, bpe)
print(text)
print('done')
|
import numpy as np
import argparse
import os
import glob
from paths import GPU_DATASETS
def np_flat_map(src, func):
return np.concatenate([func(s) for s in src], 0)
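# Illustrative example (not executed): with the identity function,
#   np_flat_map([np.ones((2, 3)), np.zeros((1, 3))], lambda a: a)
# returns a single (3, 3) array; below it is used with np.load to concatenate
# the per-split .npy files along axis 0.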
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--run_name", type=str, required=True)
parser.add_argument("--dataset", type=str, required=True)
parser.add_argument("--total_splits", type=int, required=True)
args = parser.parse_args()
X_mask = os.path.join(GPU_DATASETS, args.dataset,
"X_gan_100_%s_split_?_out_of_%i_setA.npy" % (args.run_name, args.total_splits))
Y_mask = os.path.join(GPU_DATASETS, args.dataset,
"Y_gan_100_%s_split_?_out_of_%i_setA.npy" % (args.run_name, args.total_splits))
x_list = sorted(glob.glob(X_mask))
y_list = sorted(glob.glob(Y_mask))
x = np_flat_map(x_list, np.load)
x = x.astype(np.uint8)
y = np_flat_map(y_list, np.load)
np.save(os.path.join(GPU_DATASETS, args.dataset, "X_gan_100_%s_%i_splits" % (args.run_name, args.total_splits)), x)
np.save(os.path.join(GPU_DATASETS, args.dataset, "Y_gan_100_%s_%i_splits" % (args.run_name, args.total_splits,)), y)
|
#!/usr/bin/python
import psycopg2
import sys
import os
DB = {
'DATABASE_SERVICE_NAME': os.getenv('DATABASE_SERVICE_NAME', None),
'DATABASE_USER': os.getenv('DATABASE_USER', None),
'DATABASE_PASSWORD': os.getenv('DATABASE_PASSWORD', None),
'DATABASE_NAME': os.getenv('DATABASE_NAME', None)
}
try:
conn_string = "host='{database_service_name}' dbname='{database_name}' user='{database_user}' password='{database_password}'".format(
database_service_name=DB['DATABASE_SERVICE_NAME'],
database_name=DB['DATABASE_NAME'],
database_user=DB['DATABASE_USER'],
database_password=DB['DATABASE_PASSWORD']
)
    conn = psycopg2.connect(conn_string)
    conn.close()
    print('OK - Database connection checking passed')
except Exception as error:
    print('CRITICAL - Database connection checking failed: {}'.format(error))
    sys.exit(2)
|
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import Dict
from ....utils import lazy_import, Timer
from ...backend import BaseActorBackend, register_backend
from ..context import MarsActorContext
from .driver import RayActorDriver
from .pool import RayMainPool
from .utils import process_address_to_placement, get_placement_group
ray = lazy_import("ray")
__all__ = ["RayActorBackend"]
logger = logging.getLogger(__name__)
@register_backend
class RayActorBackend(BaseActorBackend):
@staticmethod
def name():
return "ray"
@staticmethod
def get_context_cls():
return MarsActorContext
@staticmethod
def get_driver_cls():
return RayActorDriver
@classmethod
async def _create_ray_pools(cls, address: str, n_process: int = None, **kwargs):
# pop `n_io_process` from kwargs as ray doesn't need this
kwargs.pop("n_io_process", 0)
pg_name, bundle_index, _ = process_address_to_placement(address)
from .pool import RayMainActorPool
pool_addresses = RayMainActorPool.get_external_addresses(address, n_process)
assert pool_addresses[0] == address
pg = get_placement_group(pg_name) if pg_name else None
num_cpus = kwargs.get("main_pool_cpus", 0)
sub_pools = {
sub_pool_address: RayMainActorPool.create_sub_pool(
address, sub_pool_address
)
for sub_pool_address in pool_addresses[1:]
}
actor_handle = (
ray.remote(RayMainPool)
.options(
num_cpus=num_cpus,
name=address,
max_concurrency=10000000, # By default, 1000 tasks can be running concurrently.
max_restarts=-1, # Auto restarts by ray
placement_group=pg,
placement_group_bundle_index=bundle_index,
placement_group_capture_child_tasks=False,
)
.remote(address, n_process, sub_pools, **kwargs)
)
pool_handle = RayPoolHandle(actor_handle, sub_pools)
return pool_handle
@classmethod
async def create_actor_pool(cls, address: str, n_process: int = None, **kwargs):
with Timer() as timer:
pool_handle = await cls._create_ray_pools(address, n_process, **kwargs)
logger.info(
"Submit create actor pool %s took %s seconds.",
pool_handle.main_pool,
timer.duration,
)
with Timer() as timer:
await pool_handle.main_pool.start.remote()
logger.info(
"Start actor pool %s took %s seconds.",
pool_handle.main_pool,
timer.duration,
)
return pool_handle
class RayPoolHandle:
def __init__(
self,
main_pool: "ray.actor.ActorHandle",
sub_pools: Dict[str, "ray.actor.ActorHandle"],
):
self.main_pool = main_pool
# Hold sub_pool actor handles to avoid gc.
self.sub_pools = sub_pools
def __getattr__(self, item):
if item in ("main_pool", "sub_pools"): # pragma: no cover
return object.__getattribute__(self, item)
return getattr(self.main_pool, item)
|
from recipe_scrapers.whatsgabycooking import WhatsGabyCooking
from tests import ScraperTest
class TestWhatsGabyCookingScraper(ScraperTest):
scraper_class = WhatsGabyCooking
def test_host(self):
self.assertEqual("whatsgabycooking.com", self.harvester_class.host())
def test_canonical_url(self):
self.assertEqual(
"https://whatsgabycooking.com/vegetarian-quinoa-bake/",
self.harvester_class.canonical_url(),
)
def test_title(self):
self.assertEqual(self.harvester_class.title(), "Vegetarian Quinoa Bake")
def test_total_time(self):
self.assertEqual(45, self.harvester_class.total_time())
def test_yields(self):
self.assertEqual("6 servings", self.harvester_class.yields())
def test_image(self):
self.assertEqual(
"https://cdn.whatsgabycooking.com/wp-content/uploads/2017/10/WGC-Quinoa-Bake-copy-2.jpg",
self.harvester_class.image(),
)
def test_ingredients(self):
self.assertCountEqual(
[
"1 ½ cups uncooked multi-colored quinoa",
"2 cups shredded colby jack cheese divided",
"1 cup shredded mozzarella cheese divided",
"1 cup canned black beans rinsed and drained",
"1 cup frozen charred corn trader joes",
"1 4.5-ounce can chopped green chiles",
"1 ½ cup Gaby's chipotle or tomatillo salsa",
"Kosher salt and pepper to taste",
"Finely chopped scallions and cilantro as garnish",
],
self.harvester_class.ingredients(),
)
def test_instructions(self):
return self.assertEqual(
"Preheat the oven to 375 degrees F.\nCook the quinoa according to the package directions. Once cooked, remove from heat and transfer the cooked quinoa into a large bowl.\nFold in 1 1/2 cups of the shredded colby jack cheese, ½ cup of the shredded mozzarella, black beans, corn, green chiles and salsa. Season the entire mixture with salt and pepper and stir to combine.\nLightly spray a medium sized skillet with non-stick spray, and transfer the mixture into the skillet. Top with the remaining shredded cheeses and bake for about 20-25 minutes until the top layer of cheese is bubbly and melted.\nRemove the baking dish from the oven and garnish with green onions and cilantro and serve.",
self.harvester_class.instructions(),
)
|
import sys
import os
import pandas as pd
import numpy as np
import nltk
import re
from sklearn import linear_model
from nltk.tokenize import word_tokenize
import time
import random
nltk.download('punkt')
# data_path = "C:\\Users\\darwi\\OneDrive - " \
# "The University of Texas at Dallas\\Acads\\Machine Learning\\Assignments\\MachineLearning\\Data"
cwd = os.getcwd()
def read(file_path):
with open(file_path, encoding='cp437') as file:
text = file.read()
return text
def bag_words(text_data, bag):
clean_text = nltk.sent_tokenize(text_data)
for i in range(len(clean_text)):
clean_text[i] = re.sub(r'\d', ' ', clean_text[i]) # Matches digits and replaces with blank space
clean_text[i] = re.sub(r'\W', ' ', clean_text[i]) # Matches non-word and replaces with blank space
clean_text[i] = re.sub(r'\s+', ' ', clean_text[i]) # Matches white-space and replaces with blank space
clean_text[i] = clean_text[i].lower() # Converts text to lower-case
for sentence in clean_text:
words = nltk.word_tokenize(sentence)
for word in words:
if word not in bag.keys():
bag[word] = 1
return bag
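# Illustrative example (not executed):
#   bag_words("Hello world 123!", {})  ->  {'hello': 1, 'world': 1}
# digits and punctuation are replaced by spaces, the text is lower-cased, and
# every distinct word is stored with the value 1 (presence/Bernoulli features).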
def sigmoid(x):
return 1/(1+np.exp(-1*x))
def sigmoid2(x):
return np.exp(-1 * x) / (1 + np.exp(-1 * x))
# data_path = "C:\\Users\\darwi\\OneDrive - " \
# "The University of Texas at Dallas\\Acads\\Machine Learning\\Assignments\\MachineLearning\\Data\\enron1"
data_path=sys.argv[1]
test_path_ham = data_path + os.path.sep + "test" + os.path.sep + "ham" + os.path.sep
test_path_spam = data_path + os.path.sep + "test" + os.path.sep + "spam" + os.path.sep
train_path_ham = data_path + os.path.sep + "train" + os.path.sep + "ham" + os.path.sep
train_path_spam = data_path + os.path.sep + "train" + os.path.sep + "spam" + os.path.sep
bag={}
for file in os.listdir(train_path_ham):
bag= bag_words(read(train_path_ham + file),bag)
# bag_spam = {}
for file in os.listdir(train_path_spam):
bag = bag_words(read(train_path_spam + file), bag)
count_features = bag.__len__()
hamFiles_count = os.listdir(train_path_ham).__len__()
spamFiles_count = os.listdir(train_path_spam).__len__()
data_X = np.zeros((hamFiles_count+spamFiles_count,count_features+1))
data_X[0:hamFiles_count,-1]=1
data_X[hamFiles_count:,-1]=0
data_y = np.ones((hamFiles_count+spamFiles_count,1))
data_y[hamFiles_count:,0]=0
z= os.listdir(test_path_ham)
baggedIndex={}
index=0
index_file=0
for file in os.listdir(train_path_ham):
words = bag_words(read(train_path_ham + file),{})
for word in words:
if word not in baggedIndex:
baggedIndex[word]=index
data_X[index_file][index]=words[word]
index +=1
else:
data_X[index_file][baggedIndex[word]]=words[word]
index_file +=1
for file in os.listdir(train_path_spam):
words = bag_words(read(train_path_spam + file),{})
for word in words:
if word not in baggedIndex:
baggedIndex[word]=index
data_X[index_file][index]=words[word]
index +=1
else:
data_X[index_file][baggedIndex[word]]=words[word]
index_file +=1
# ----------------------------- Splitting Data : 70-30 Ratio------------------------- #
np.random.shuffle(data_X)
splitValue= int((hamFiles_count+spamFiles_count)*0.7)
train_X,valid_X = data_X[:splitValue,:-1], data_X[splitValue:,:-1]
train_y,valid_y = data_X[:splitValue,-1], data_X[splitValue:,-1]
# default penalty is L2 regularisation; tol is the stopping tolerance (epsilon): training stops early once the loss improvement falls below it, before max_iter is reached
SGDClf = linear_model.SGDClassifier(max_iter = 1000, tol=1e-3)
SGDClf.fit(train_X, train_y)
pred=SGDClf.predict(valid_X)
count=0
for x in range(len(valid_y)):
if valid_y[x]==pred[x]:
count+=1
print("--------------------------validation Results--------------------------")
print("Accuracy : ",count/len(valid_y))
# ------------------------ Read Test Data set-----------------------------#
testHam_files_count=os.listdir(test_path_ham).__len__()
testSpam_files_count=os.listdir(test_path_spam).__len__()
test_ham=np.zeros((testHam_files_count,count_features))
test_spam=np.zeros((testSpam_files_count,count_features))
# ----------------------------------Predict test ham--------------------------------------------#
index_file=0
for file in os.listdir(test_path_ham):
words = bag_words(read(test_path_ham + file), {})
for word in words:
if word in baggedIndex:
test_ham[index_file][baggedIndex[word]] = words[word]
index_file += 1
pred_ham = SGDClf.predict(test_ham)
count1=0
for x in range(len(test_ham)):
if pred_ham[x]==1:
count1+=1
print("\n--------------------------Test Dataset--------------------------")
print("--------------------------ham is ham --------------------------")
print("Accuracy : ",count1/testHam_files_count)
# ----------------------------------Predict test spam--------------------------------------------#
index_file=0
for file in os.listdir(test_path_spam):
words = bag_words(read(test_path_spam + file), {})
for word in words:
if word in baggedIndex:
test_spam[index_file][baggedIndex[word]] = words[word]
index_file += 1
pred_spam = SGDClf.predict(test_spam)
count2=0
for x in range(len(test_spam)):
if pred_spam[x]==0:
count2+=1
print("--------------------------spam is spam --------------------------")
print("Accuracy : ",count2/testSpam_files_count)
tp = count1
tn = count2
fp = testHam_files_count - count1
fn = testSpam_files_count - count2
acc=(tp+tn)/(tp+tn+fp+fn)
precision=(tp)/(tp+fp)
recall = tp/(tp+fn)
f1_score = 2*(recall * precision) / (recall + precision)
print("\n Total Accuracy on test files : ",acc)
print(" precision : ",precision)
print(" Recall : ",recall)
print(" F1_score : ",f1_score)
file_name="resultsSGDBernoulli_"+data_path.split(os.path.sep)[-1]+".txt"
with open(file_name,'w') as file:
text = "Trained with shuffled 70-30 Data split into training & validation Data\n\n"
text = text + "--------------Validation Results------------------" + "\n\n"
text = text + "validation Accuracy : " + repr(count/len(valid_y)) + "\n\n\n"
text = text + "--------------Results Test Data------------------"+"\n"
text = text + "\n Accuracy on test files : "+ str(acc) + "\n"
text = text + " precision : " + str(precision) + "\n"
text = text + " Recall : " + str(recall) + "\n"
text = text + " F1_score : " + str(f1_score) + "\n"
file.write(text)
|
"""
-------Simple Logistic Regression to Classify the Iris Flowers-----------
Read the database and assign the variables to a dataframe by using pandas.
Plot the variables to visualize the relation between each other.
According to the plots, on the first sight, setosa has very different
values, so it will be easier to classify but virginica and versicolor
variables are entangled so they will be harder to classify.
X is the 2d list for independent variables and independent variables are
sepal length, sepal width, petal length and petal width for this dataset.
It is necessary to standardize the dataset because there might be metric
differences between the independent variables.
Y values are our classes, in this case the flower types, which has a logit
equation with a sigmoid function. The value that Y gets is a probability so
it is between 0 and 1.
p(X) = e^(B0 + B1*x1 + B2*x2 + B3*x3 + B4*x4) / (1 + e^(B0 + B1*x1 + B2*x2
+ B3*x3 + B4*x4))
where Bi values are the MLE's of each independent variable. According to
MLE's, 3 different probabilities obtained for 3 different classes. Data
will be labeled with the highest probability level.
The dataset contains 150 values that is why I couldn't split it into
test and train data. I used the whole dataset to train the model and used
the whole dataset to test the model.
With accuracy.py compare the original dataset with the predicted dataset.
Find the accuracy level and print the defective predictions.
"""
from sklearn.linear_model import LogisticRegression
import pandas as pd
import preprocess
import accuracy
import plots
def main():
iris = pd.read_csv("iris.csv")
plots.plot(iris)
flower_mapping = {'Setosa':0, 'Versicolor':1, 'Virginica':2}
iris["variety"] = iris["variety"].map(flower_mapping)
X = iris[['sepal.length', 'sepal.width', 'petal.length',
'petal.width']].values
y = iris[['variety']].values.ravel()
X = preprocess.standardize(X)
model = LogisticRegression()
model.fit(X, y)
predicted = model.predict(X)
print(accuracy.accuracy(predicted, y))
main()
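# Worked illustration of the sigmoid from the docstring (values rounded):
#   z = B0 + B1*x1 + B2*x2 + B3*x3 + B4*x4
#   z = 0  ->  p = 1 / (1 + e^0)   = 0.5
#   z = 2  ->  p = 1 / (1 + e^-2)  ~ 0.88
# The fitted model yields one such probability per class and predict() returns
# the class with the largest one.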
|
# -*- coding: utf-8 -*-
"""
@Author : Invoker Bot
@Email : invoker-bot@outlook.com
@Site :
@Data : 2021/3/25
@Version : 1.0
"""
import random
import collections
from typing import *
from ..basic import *
from .gtp import *
__all__ = ["GTPRandomBot"]
class GTPRandomBot(GTPClient):
name = "random_bot"
__version__ = "1.0"
def __init__(self):
super().__init__()
self.board = GoBoard()
def _do_play(self, color: GoPlayer, pos: GoPoint) -> bool:
try:
self.board.play(pos, color)
return True
except GoIllegalActionError:
return False
def valid_points(self, color: GoPlayer) -> List[GoPoint]:
return [pos for pos in self.board if
self.board.is_valid_point(pos, color) and not self.board.is_point_a_true_eye(pos, color)]
def _do_genmove(self, color: GoPlayer) -> Union[GoPoint, str]:
counts = collections.Counter(self.board.grid.flat)
komi = self.komi if color == GoPlayer.white else -self.komi
if counts[GoPlayer.none.value] + counts[color.value] + komi <= counts[color.other.value]:
return "resign"
points = self.valid_points(color)
if len(points) == 0:
return "pass"
else:
point = random.choice(points)
self.board.play(point, color)
return point
def _do_boardsize(self, size: int) -> bool:
self.board = GoBoard(size)
return True
    def _do_clear_board(self) -> None:
self.board = GoBoard(self.board.grid.shape[0])
|
# -*- coding: utf-8 -*-
import numpy as np
import pprint
from envs.GridWorld import GridworldEnv
import matplotlib.pyplot as pl
pp = pprint.PrettyPrinter(indent=2)
env = GridworldEnv(shape=[4,4])
def policy_eval(policy, env, discount_factor=1.0, theta=0.00001):
"""
Evaluate a policy given an environment and a full description of the environment's dynamics.
Args:
policy: [S, A] shaped matrix representing the policy.
env: OpenAI env. env.P represents the transition probabilities of the environment.
env.P[s][a] is a list of transition tuples (prob, next_state, reward, done).
env.nS is a number of states in the environment.
env.nA is a number of actions in the environment.
theta: We stop evaluation once our value function change is less than theta for all states.
discount_factor: Gamma discount factor.
Returns:
Vector of length env.nS representing the value function.
"""
# Start with a random (all 0) value function
V = np.zeros(env.nS)
while True:
delta = 0
# For each state, perform a "full backup"
for s in range(env.nS):
v = 0
# Look at the possible next actions
for a, action_prob in enumerate(policy[s]):
# For each action, look at the possible next states...
for prob, next_state, reward, done in env.P[s][a]:
# Calculate the expected value
v += action_prob * prob * (reward + discount_factor * V[next_state])
# How much our value function changed (across any states)
delta = max(delta, np.abs(v - V[s]))
V[s] = v
# Stop evaluating once our value function change is below a threshold
if delta < theta:
break
return np.array(V)
def policy_improvement(env, policy_eval_fn=policy_eval, discount_factor=1.0):
"""
Policy Improvement Algorithm. Iteratively evaluates and improves a policy
until an optimal policy is found.
Args:
env: The OpenAI envrionment.
policy_eval_fn: Policy Evaluation function that takes 3 arguments:
policy, env, discount_factor.
discount_factor: gamma discount factor.
Returns:
A tuple (policy, V).
policy is the optimal policy, a matrix of shape [S, A] where each state s
contains a valid probability distribution over actions.
V is the value function for the optimal policy.
"""
# Start with a random policy
policy = np.ones([env.nS, env.nA]) / env.nA
k=0
historical=[]
errors=[]
while True:
policyStable=True
error=0
V=policy_eval_fn(policy,env)
historical+=[np.copy(V)]
for s in range(env.nS):
b=np.argmax(policy[s])
policy[s]=computeTerm(env,discount_factor,V,s)
#print(policy[s])
actionSelected=np.argmax(policy[s])
if (b!=actionSelected):
policyStable=False
error+=1
policy[s] = np.eye(env.nA)[actionSelected]
k+=1
print('iteration ',k, 'error {}'.format(error))
errors+=[error]
if policyStable==True:
return (policy, V,historical,errors)
def computeTerm(env,discount_factor,V,s):
output=np.zeros([env.nA])
for a in range(env.nA):
temp=0
for sp in range(len(env.P[s][a])):
transition=env.P[s][a][sp]
temp+=transition[0]*(transition[2]+discount_factor*V[transition[1]])
output[a]=temp
return(output)
policy, v, historical,errors = policy_improvement(env)
print("Policy Probability Distribution:")
print(policy)
print("")
print("Reshaped Grid Policy (0=up, 1=right, 2=down, 3=left):")
print(np.reshape(np.argmax(policy, axis=1), env.shape))
print("")
print("Value Function:")
print(v)
print("")
print("Reshaped Grid Value Function:")
print(v.reshape(env.shape))
print("")
# Test the value function
expected_v = np.array([ 0, -1, -2, -3, -1, -2, -3, -2, -2, -3, -2, -1, -3, -2, -1, 0])
np.testing.assert_array_almost_equal(v, expected_v, decimal=2)
fig=pl.figure(figsize=(12,5))
for i in range(len(historical)):
    ax = fig.add_subplot(1, 4, i + 1)  # integer spec; string specs like '14%d' are rejected by newer matplotlib
    ax.imshow(np.reshape(historical[i], env.shape))
    pl.title('iteration {}'.format(i))
ax = fig.add_subplot(1, 4, 4)
ax.imshow(np.reshape(expected_v,env.shape))
pl.title('expected value')
pl.savefig('PolicyIteration.png')
pl.legend()
pl.show()
fig=pl.figure()
pl.plot(errors)
pl.xlabel('iteration')
pl.ylabel('number of changes')
pl.title('number of changes done on the action selection')
pl.legend()
fig.savefig('errorPolicyIteration.png')
pl.show()
|
#!/usr/bin/python
from SciServer import Authentication, LoginPortal, Config, CasJobs, SkyServer, SkyQuery, SciDrive, Files, Jobs
import unittest2 as unittest
import os;
import pandas;
import sys;
import json;
from io import StringIO
from io import BytesIO
import skimage
# Define login Name and password before running the tests:
Authentication_loginName = '***';
Authentication_loginPassword = '***'
Authentication_login_sharedWithName = '***'
Authentication_login_sharedWithPassword = '***'
#skyserver
SkyServer_TestQuery = "select top 1 specobjid, ra, dec from specobj order by specobjid"
SkyServer_DataRelease = "DR13"
SkyServer_QueryResultCSV = "specobjid,ra,dec\n299489677444933632,146.71421,-1.0413043\n"
SkyServer_RadialSearchResultCSV = 'objid,run,rerun,camcol,field,obj,type,ra,dec,u,g,r,i,z,Err_u,Err_g,Err_r,Err_i,Err_z\n1237671939804561654,6162,301,3,133,246,3,258.250804,64.051445,23.339820,22.319400,21.411050,21.119710,20.842770,0.664019,0.116986,0.076410,0.080523,0.238198\n'
SkyServer_RectangularSearchResultCSV = 'objid,run,rerun,camcol,field,obj,type,ra,dec,u,g,r,i,z,Err_u,Err_g,Err_r,Err_i,Err_z\n1237671939804628290,6162,301,3,134,1346,6,258.304721,64.006203,25.000800,24.500570,22.485400,21.103450,20.149990,0.995208,0.565456,0.166184,0.071836,0.124986\n'
SkyServer_ObjectSearchResultObjID = 1237671939804561654
SkyQuery_TestTableName = "TestTable_SciScript_R"
SkyQuery_TestTableCSV = u"Column1,Column2\n4.5,5.5\n"
SkyQuery_TestTableCSVdownloaded = "#ID,Column1,Column2\n1,4.5,5.5\n"
SkyQuery_Query = "select 4.5 as Column1, 5.5 as Column2"
CasJobs_TestTableName1 = "MyNewtable1"
CasJobs_TestTableName2 = "MyNewtable2"
CasJobs_TestDatabase = "MyDB"
CasJobs_TestQuery = "select 4 as Column1, 5 as Column2 "
CasJobs_TestTableCSV = u"Column1,Column2\n4,5\n"
CasJobs_TestFitsFile = "SciScriptTestFile.fits"
CasJobs_TestCSVFile = "SciScriptTestFile.csv"
SciDrive_Directory = "/SciScriptPython"
SciDrive_FileName = "TestFile.csv"
SciDrive_FileContent = "Column1,Column2\n4.5,5.5\n"
Files_FileServiceName = "FileServiceJHU"
Files_RootVolumeName1 = "Storage"
Files_UserVolumeName1 = "UserVolume555"
Files_RootVolumeName2 = "Storage"
Files_UserVolumeName2 = "UserVolume999"
Files_NewDirectoryName1 = "myNewDirectory555"
Files_NewDirectoryName2 = "myNewDirectory999"
Files_LocalFileName = "MyNewFile.txt"
Files_LocalFileContent = "#ID,Column1,Column2\n1,4.5,5.5"
Jobs_DockerComputeDomainName = 'Small Jobs Domain'
Jobs_FileServiceName = "FileServiceJHU"
Jobs_RootVolumeName = "Storage"
Jobs_UserVolumeName = "JobsTestVolume"
Jobs_DirectoryName = "JobsTestDirectory"
Jobs_NotebookName = 'TestNotebook.ipynb'
Jobs_NoteBookOutPutFile = 'HelloWorld.txt'
Jobs_ShellCommand = 'pwd'
Jobs_DockerImageName = 'Python + R'
Jobs_UserVolumes = [{'name':Jobs_UserVolumeName, 'rootVolumeName':Jobs_RootVolumeName, 'owner':Authentication_loginName, 'needsWriteAccess':True},
{'name':'scratch', 'rootVolumeName':'Temporary', 'owner':Authentication_loginName, 'needsWriteAccess':True}]
Jobs_DataVolumes = [{'name':'SDSS DAS'}]
Jobs_Parameters = 'param1=1\nparam2=2\nparam3=3'
Jobs_Alias = 'MyNewJob'
Jobs_SqlQuery = 'select 1;'
Jobs_SqlQueryResult = 'column1\n1\n'
Jobs_RDBComputeDomainName = 'Manga (long)'
Jobs_DatabaseContextName = "manga"
Jobs_RemoteNotebookPath='/home/idies/workspace/' + Jobs_UserVolumeName + '/' + Jobs_DirectoryName + '/' + Jobs_NotebookName
Jobs_QueryResultsFile = 'myQueryResults'
class TestAuthentication(unittest.TestCase):
def setUp(self):
pass
# *******************************************************************************************************
# Authentication section
def test_Authentication_allMethods(self):
newToken1 = "myToken1"
newToken2 = "myToken2"
token1 = Authentication.login(Authentication_loginName, Authentication_loginPassword);
token2 = Authentication.getToken()
token3 = Authentication.getKeystoneToken()
token4 = Authentication.token.value
user = Authentication.getKeystoneUserWithToken(token1)
iden = Authentication.identArgIdentifier()
self.assertEqual(iden, "--ident=")
self.assertNotEqual(token1, "")
self.assertIsNot(token1, None)
self.assertEqual(token1, token2)
self.assertEqual(token1, token3)
self.assertEqual(token1, token4)
self.assertEqual(user.userName, Authentication_loginName)
self.assertIsNot(user.id, None)
self.assertNotEqual(user.id, "")
Authentication.setToken(newToken1)
self.assertEqual(newToken1, Authentication.getToken())
Authentication.setKeystoneToken(newToken2)
self.assertEqual(newToken2, Authentication.getKeystoneToken())
class TestLoginPortal(unittest.TestCase):
def setUp(self):
pass
# *******************************************************************************************************
# Authentication section
def test_LoginPortal_allMethods(self):
newToken1 = "myToken1"
newToken2 = "myToken2"
token1 = LoginPortal.login(Authentication_loginName, Authentication_loginPassword);
token2 = LoginPortal.getToken()
token3 = LoginPortal.getKeystoneToken()
user = LoginPortal.getKeystoneUserWithToken(token1)
iden = LoginPortal.identArgIdentifier()
self.assertEqual(iden, "--ident=")
self.assertNotEqual(token1, "")
self.assertIsNot(token1, None)
self.assertEqual(token1, token2)
self.assertEqual(token1, token3)
self.assertIsNot(user.userName, None)
self.assertNotEqual(user.userName, "")
self.assertIsNot(user.id, None)
self.assertNotEqual(user.id, "")
LoginPortal.setKeystoneToken(newToken1)
self.assertEqual(newToken1, LoginPortal.getKeystoneToken())
class TestCasJobs(unittest.TestCase):
token1 = Authentication.login(Authentication_loginName, Authentication_loginPassword);
def setUp(self):
pass
# *******************************************************************************************************
# CasJobs section:
def test_CasJobs_getSchemaName(self):
casJobsId = CasJobs.getSchemaName()
self.assertNotEqual(casJobsId,"")
def test_CasJobs_getTables(self):
tables = CasJobs.getTables(context="MyDB")
def test_CasJobs_executeQuery(self):
df = CasJobs.executeQuery(sql=CasJobs_TestQuery, context=CasJobs_TestDatabase, format="pandas")
self.assertEqual(CasJobs_TestTableCSV, df.to_csv(index=False))
def test_CasJobs_submitJob(self):
jobId = CasJobs.submitJob(sql=CasJobs_TestQuery + " into MyDB." + CasJobs_TestTableName1, context=CasJobs_TestDatabase)
jobDescription = CasJobs.waitForJob(jobId=jobId, verbose=True)
df = CasJobs.executeQuery(sql="DROP TABLE " + CasJobs_TestTableName1, context="MyDB", format="csv")
self.assertNotEqual(jobId, "")
def test_CasJobs_getJobStatus(self):
jobId = CasJobs.submitJob(sql=CasJobs_TestQuery, context=CasJobs_TestDatabase)
jobDescription = CasJobs.getJobStatus(jobId)
self.assertEqual(jobDescription["JobID"], jobId)
def test_CasJobs_cancelJob(self):
jobId = CasJobs.submitJob(sql=CasJobs_TestQuery, context=CasJobs_TestDatabase)
isCanceled = CasJobs.cancelJob(jobId=jobId)
self.assertEqual(isCanceled, True)
def test_CasJobs_waitForJob(self):
jobId = CasJobs.submitJob(sql=CasJobs_TestQuery, context=CasJobs_TestDatabase)
jobDescription = CasJobs.waitForJob(jobId=jobId, verbose=True)
self.assertGreaterEqual(jobDescription["Status"], 3)
def test_CasJobs_writeFitsFileFromQuery(self):
#CasJobs.getFitsFileFromQuery
try:
result = CasJobs.writeFitsFileFromQuery(fileName=CasJobs_TestFitsFile, queryString=CasJobs_TestQuery, context="MyDB")
self.assertEqual(result, True)
self.assertEqual(os.path.isfile(CasJobs_TestFitsFile), True)
finally:
try:
os.remove(CasJobs_TestFitsFile)
except:
pass;
def test_CasJobs_getPandasDataFrameFromQuery(self):
#CasJobs.getPandasDataFrameFromQuery
df = CasJobs.getPandasDataFrameFromQuery(queryString=CasJobs_TestQuery, context=CasJobs_TestDatabase)
self.assertEqual(df.to_csv(index=False), CasJobs_TestTableCSV)
def test_CasJobs_getNumpyArrayFromQuery(self):
#CasJobs.getNumpyArrayFromQuery
array = CasJobs.getNumpyArrayFromQuery(queryString=CasJobs_TestQuery, context=CasJobs_TestDatabase)
newArray = pandas.read_csv(StringIO(CasJobs_TestTableCSV), index_col=None).as_matrix()
self.assertEqual(array.all(), newArray.all())
def test_CasJobs_uploadPandasDataFrameToTable_uploadCSVDataToTable(self):
try:
df = pandas.read_csv(StringIO(CasJobs_TestTableCSV), index_col=None)
result = CasJobs.uploadPandasDataFrameToTable(dataFrame=df, tableName=CasJobs_TestTableName2, context="MyDB")
table = CasJobs.executeQuery(sql="select * from " + CasJobs_TestTableName2, context="MyDB", format="pandas")
result2 = CasJobs.executeQuery(sql="DROP TABLE " + CasJobs_TestTableName2, context="MyDB", format="csv")
self.assertEqual(result, True)
self.assertItemsEqual(table, df)
result = CasJobs.uploadCSVDataToTable(csvData=CasJobs_TestTableCSV, tableName=CasJobs_TestTableName2, context="MyDB")
df2 = CasJobs.executeQuery(sql="select * from " + CasJobs_TestTableName2, context="MyDB", format="pandas")
result2 = CasJobs.executeQuery(sql="DROP TABLE " + CasJobs_TestTableName2, context="MyDB", format="csv")
self.assertEqual(result, True)
self.assertItemsEqual(df, df2)
finally:
try:
csv = CasJobs.executeQuery(sql="DROP TABLE " + CasJobs_TestTableName2, context="MyDB",format="csv")
except:
pass;
class TestSkyServer(unittest.TestCase):
token1 = Authentication.login(Authentication_loginName, Authentication_loginPassword);
def setUp(self):
pass
# *******************************************************************************************************
# SkyServer section:
def test_SkyServer_sqlSearch(self):
#sql search
df = SkyServer.sqlSearch(sql=SkyServer_TestQuery, dataRelease=SkyServer_DataRelease)
self.assertEqual(SkyServer_QueryResultCSV, df.to_csv(index=False))
def test_SkyServer_getJpegImgCutout(self):
#image cutout
img = SkyServer.getJpegImgCutout(ra=197.614455642896, dec=18.438168853724, width=512, height=512, scale=0.4, dataRelease=SkyServer_DataRelease,opt="OG",query="SELECT TOP 100 p.objID, p.ra, p.dec, p.r FROM fGetObjFromRectEq(197.6,18.4,197.7,18.5) n, PhotoPrimary p WHERE n.objID=p.objID")
im = skimage.io.imread("./TestGalaxy.jpeg")
self.assertEqual(img.tobytes(), im.tobytes())
def test_SkyServer_radialSearch(self):
# radial search
df = SkyServer.radialSearch(ra=258.25, dec=64.05, radius=0.1, dataRelease=SkyServer_DataRelease)
self.maxDiff = None;
self.assertEqual(SkyServer_RadialSearchResultCSV, df.to_csv(index=False, float_format="%.6f"))
def test_SkyServer_rectangularSearch(self):
#rectangular search
df = SkyServer.rectangularSearch(min_ra=258.3, max_ra=258.31, min_dec=64,max_dec=64.01, dataRelease=SkyServer_DataRelease)
self.maxDiff = None;
self.assertEqual(SkyServer_RectangularSearchResultCSV, df.to_csv(index=False, float_format="%.6f"))
def test_SkyServer_objectSearch(self):
#object search
object = SkyServer.objectSearch(ra=258.25, dec=64.05, dataRelease=SkyServer_DataRelease)
self.maxDiff = None;
self.assertEqual(SkyServer_ObjectSearchResultObjID, object[0]["Rows"][0]["id"])
class TestSciDrive(unittest.TestCase):
token1 = Authentication.login(Authentication_loginName, Authentication_loginPassword);
def setUp(self):
pass
# *******************************************************************************************************
# SciDrive section:
def test_SciDrive_createContainer_directoryList_delete(self):
try:
responseDelete = SciDrive.delete(SciDrive_Directory)
except:
pass;
try:
responseCreate = SciDrive.createContainer(SciDrive_Directory)
self.assertEqual(responseCreate, True)
dirList = SciDrive.directoryList(SciDrive_Directory)
self.assertTrue(dirList["path"].__contains__(SciDrive_Directory));
finally:
responseDelete = SciDrive.delete(SciDrive_Directory)
self.assertEqual(responseDelete, True)
def test_SciDrive_publicUrl(self):
try:
responseDelete = SciDrive.delete(SciDrive_Directory)
except:
pass;
responseCreate = SciDrive.createContainer(SciDrive_Directory)
url = SciDrive.publicUrl(SciDrive_Directory)
responseDelete = SciDrive.delete(SciDrive_Directory)
isUrl = url.startswith("http")
self.assertEqual(responseCreate, True)
self.assertEqual(isUrl, True)
self.assertEqual(responseDelete, True)
def test_SciDrive_upload_download_delete(self):
try:
if (sys.version_info > (3, 0)): #python3
file = open(SciDrive_FileName, "w")
else: #python2
file = open(SciDrive_FileName, "wb")
file.write(SciDrive_FileContent)
file.close()
responseUpload = SciDrive.upload(path=SciDrive_Directory + "/" + SciDrive_FileName, localFilePath=SciDrive_FileName)
stringio = SciDrive.download(path=SciDrive_Directory + "/" + SciDrive_FileName, format="StringIO")
fileContent = stringio.read()
responseDelete = SciDrive.delete(SciDrive_Directory)
self.assertEqual(responseUpload["path"], SciDrive_Directory + "/" + SciDrive_FileName)
self.assertEqual(fileContent, SciDrive_FileContent)
self.assertEqual(responseDelete, True)
responseUpload = SciDrive.upload(path=SciDrive_Directory + "/" + SciDrive_FileName, data=SciDrive_FileContent)
fileContent = SciDrive.download(path=SciDrive_Directory + "/" + SciDrive_FileName, format="text")
responseDelete = SciDrive.delete(SciDrive_Directory)
self.assertEqual(responseUpload["path"], SciDrive_Directory + "/" + SciDrive_FileName)
self.assertEqual(fileContent, SciDrive_FileContent)
self.assertEqual(responseDelete, True)
finally:
try:
os.remove(SciDrive_FileName)
except:
pass;
class TestSkyQuery(unittest.TestCase):
token1 = Authentication.login(Authentication_loginName, Authentication_loginPassword);
def setUp(self):
pass
# *******************************************************************************************************
# SkyQuery section:
#-- submitting jobs:
def test_SkyQuery_listQueues(self):
queueList = SkyQuery.listQueues()
def test_SkyQuery_getQueueInfo(self):
queueInfo = SkyQuery.getQueueInfo('quick')
queueInfo = SkyQuery.getQueueInfo('long')
def test_SkyQuery_submitJob(self):
jobId = SkyQuery.submitJob(query=SkyQuery_Query, queue="quick")
self.assertNotEqual(jobId, "")
def test_SkyQuery_getJobStatus(self):
jobId = SkyQuery.submitJob(query=SkyQuery_Query, queue="quick")
jobDescription = SkyQuery.getJobStatus(jobId=jobId)
def test_SkyQuery_waitForJob(self):
jobId = SkyQuery.submitJob(query=SkyQuery_Query, queue="quick")
jobDescription = SkyQuery.waitForJob(jobId=jobId, verbose=True)
self.assertEqual(jobDescription["status"], "completed")
def test_SkyQuery_cancelJob(self):
isCanceled = SkyQuery.cancelJob(SkyQuery.submitJob(query=SkyQuery_Query, queue="long"))
self.assertEqual(isCanceled, True)
#-- uploading and downloading csv tables:
def test_SkyQuery_uploadTable_getTable_getTableInfo_listTableColumns_dropTable(self):
try:
result = SkyQuery.dropTable(tableName=SkyQuery_TestTableName, datasetName="MyDB");
except:
pass;
result = SkyQuery.uploadTable(uploadData=SkyQuery_TestTableCSV, tableName=SkyQuery_TestTableName, datasetName="MyDB", format="csv")
self.assertEqual(result, True)
table = SkyQuery.getTable(tableName=SkyQuery_TestTableName, datasetName="MyDB", top=10)
self.assertEqual(SkyQuery_TestTableCSVdownloaded, table.to_csv(index=False));
info = SkyQuery.getTableInfo(tableName="webuser." + SkyQuery_TestTableName, datasetName="MyDB")
columns = SkyQuery.listTableColumns(tableName="webuser." + SkyQuery_TestTableName, datasetName="MyDB")
result = SkyQuery.dropTable(tableName=SkyQuery_TestTableName, datasetName="MyDB");
self.assertEqual(result, True)
#-- getting database info
def test_SkyQuery_listJobs(self):
quickJobsList = SkyQuery.listJobs('quick')
longJobsList = SkyQuery.listJobs('long')
def test_SkyQuery_listAllDatasets(self):
datasets = SkyQuery.listAllDatasets()
def test_SkyQuery_getDatasetInfo(self):
info = SkyQuery.getDatasetInfo("MyDB")
def test_SkyQuery_listDatasetTables(self):
tables = SkyQuery.listDatasetTables("MyDB")
class TestFileService(unittest.TestCase):
token1 = Authentication.login(Authentication_loginName, Authentication_loginPassword);
def setUp(self):
pass
# *******************************************************************************************************
# Files section
def test_Files_getFileServices(self):
fileServices = Files.getFileServices();
self.assertTrue(fileServices.__len__() > 0)
def test_Files_getFileServicesNames(self):
fileServiceNames = Files.getFileServicesNames();
self.assertTrue(fileServiceNames.__len__() > 0)
def test_Files_getFileServicesNames(self):
fileServiceNames = Files.getFileServicesNames();
found = False;
for fileService in fileServiceNames:
if fileService.get('name') == Files_FileServiceName:
found=True;
self.assertTrue(found)
def test_Files_getFileServiceFromName(self):
fileService = Files.getFileServiceFromName(Files_FileServiceName);
self.assertTrue(fileService.get('name') == Files_FileServiceName);
def test_Files_getRootVolumesInfo(self):
fileService = Files.getFileServiceFromName(Files_FileServiceName);
rootVolumes = Files.getRootVolumesInfo(fileService)
self.assertTrue(rootVolumes.__len__() > 0)
found = False
for rootVolume in rootVolumes:
if rootVolume.get('rootVolumeName') == Files_RootVolumeName1:
found = True
self.assertTrue(found)
found = False
for rootVolume in rootVolumes:
if rootVolume.get('rootVolumeName') == Files_RootVolumeName2:
found = True
self.assertTrue(found)
def test_Files_getUserVolumesInfo(self):
fileService = Files.getFileServiceFromName(Files_FileServiceName);
userVolumesInfo = Files.getUserVolumesInfo(fileService)
self.assertTrue(userVolumesInfo.__len__() > 0)
def test_Files_createUserVolume_deleteUserVolume(self):
fileService = Files.getFileServiceFromName(Files_FileServiceName);
Files.createUserVolume(fileService,"/".join([Files_RootVolumeName1, Authentication_loginName, Files_UserVolumeName1]),quiet=False)
Files.deleteUserVolume(fileService,"/".join([Files_RootVolumeName1, Authentication_loginName, Files_UserVolumeName1]),quiet=False)
def test_Files_createDir_upload_dirList_download_download_shareUserVolume(self):
try:
fileService = Files.getFileServiceFromName(Files_FileServiceName);
os.remove(Files_LocalFileName);
Files.deleteUserVolume(fileService, Files_RootVolumeName1, Files_UserVolumeName1, quiet=True)
Files.deleteUserVolume(fileService, Files_RootVolumeName1, Files_UserVolumeName2, quiet=True)
except:
pass;
try:
fileService = Files.getFileServiceFromName(Files_FileServiceName);
Files.createUserVolume(fileService,"/".join([Files_RootVolumeName1, Authentication_loginName, Files_UserVolumeName1]),quiet=False)
Files.createUserVolume(fileService,"/".join([Files_RootVolumeName1, Authentication_loginName, Files_UserVolumeName2]),quiet=False)
Files.createDir(fileService, "/".join([Files_RootVolumeName1, Authentication_loginName, Files_UserVolumeName1, Files_NewDirectoryName1]));
Files.createDir(fileService, "/".join([Files_RootVolumeName2, Authentication_loginName, Files_UserVolumeName2, Files_NewDirectoryName2]));
dirList = Files.dirList(fileService, "/".join([Files_RootVolumeName1, Authentication_loginName, Files_UserVolumeName1, Files_NewDirectoryName1]),level=2)
self.assertTrue(dirList.get('root').get('name') == Files_NewDirectoryName1)
Files.upload(fileService, "/".join([Files_RootVolumeName1, Authentication_loginName, Files_UserVolumeName1, Files_NewDirectoryName1, Files_LocalFileName]),data=Files_LocalFileContent);
dirList = Files.dirList(fileService, "/".join([Files_RootVolumeName1, Authentication_loginName, Files_UserVolumeName1, Files_NewDirectoryName1]) ,level=2)
self.assertTrue(dirList.get('root').get('files')[0].get('name') == Files_LocalFileName)
Files.download(fileService, "/".join([Files_RootVolumeName1, Authentication_loginName, Files_UserVolumeName1,Files_NewDirectoryName1, Files_LocalFileName]),localFilePath=Files_LocalFileName);
with open(Files_LocalFileName, 'r') as myfile:
downloadedFileContent = myfile.read()
assert(downloadedFileContent == Files_LocalFileContent)
Files.delete(fileService, "/".join([Files_RootVolumeName1, Authentication_loginName, Files_UserVolumeName1, Files_NewDirectoryName1, Files_LocalFileName]))
dirList = Files.dirList(fileService, "/".join([Files_RootVolumeName1, Authentication_loginName, Files_UserVolumeName1, Files_NewDirectoryName1]),level=2)
self.assertIsNone(dirList.get('root').get('files'))
Files.upload(fileService, "/".join([Files_RootVolumeName1, Authentication_loginName, Files_UserVolumeName1, Files_NewDirectoryName1, Files_LocalFileName]), localFilePath=Files_LocalFileName,quiet=False);
Files.move(fileService, "/".join([Files_RootVolumeName1, Authentication_loginName, Files_UserVolumeName1, Files_NewDirectoryName1, Files_LocalFileName]), fileService, "/".join([Files_RootVolumeName2, Authentication_loginName, Files_UserVolumeName2, Files_NewDirectoryName2, Files_LocalFileName]));
Files.shareUserVolume(fileService, "/".join([Files_RootVolumeName2, Authentication_loginName, Files_UserVolumeName2]), sharedWith=Authentication_login_sharedWithName, type="USER",allowedActions=["read"])
token1 = Authentication.login(Authentication_login_sharedWithName, Authentication_login_sharedWithPassword);
string = Files.download(fileService, "/".join([Files_RootVolumeName2, Authentication_loginName, Files_UserVolumeName2, Files_NewDirectoryName2, Files_LocalFileName]), format="txt");
self.assertTrue(string, Files_LocalFileContent)
token1 = Authentication.login(Authentication_loginName, Authentication_loginPassword);
finally:
try:
os.remove(Files_LocalFileName);
Files.deleteUserVolume(fileService, "/".join([Files_RootVolumeName1, Authentication_loginName, Files_UserVolumeName1]),quiet=True)
Files.deleteUserVolume(fileService, "/".join([Files_RootVolumeName1, Authentication_loginName, Files_UserVolumeName2]),quiet=True)
except:
pass;
class TestJobs(unittest.TestCase):
token1 = Authentication.login(Authentication_loginName, Authentication_loginPassword);
def setUp(self):
pass
# *******************************************************************************************************
# Jobs section
# Docker Jobs ################################################################################################
def test_Jobs_getDockerComputeDomains(self):
dockerComputeDomains = Jobs.getDockerComputeDomains()
self.assertTrue(dockerComputeDomains.__len__() > 0)
found = False
for dockerComputeDomain in dockerComputeDomains:
if dockerComputeDomain.get('name') == Jobs_DockerComputeDomainName:
found = True
self.assertTrue(found)
def test_Jobs_getDockerComputeDomainsNames(self):
dockerComputeDomainsNames = Jobs.getDockerComputeDomainsNames()
self.assertTrue(Jobs_DockerComputeDomainName in dockerComputeDomainsNames)
def test_Jobs_getDockerComputeDomainFromName(self):
dockerComputeDomain = Jobs.getDockerComputeDomainFromName(Jobs_DockerComputeDomainName)
self.assertTrue(dockerComputeDomain.get('name') in Jobs_DockerComputeDomainName)
def test_Jobs_submitNotebookJob_cancel_waitForJob_getJobStatus_getJobDescription_submitShellCommandJob(self):
fileService = Files.getFileServiceFromName(Jobs_FileServiceName);
try:
Files.deleteUserVolume(fileService, Jobs_RootVolumeName, Jobs_UserVolumeName)
except:
pass
Files.createUserVolume(fileService, Jobs_RootVolumeName, Jobs_UserVolumeName)
Files.upload(fileService, Jobs_RootVolumeName, Jobs_UserVolumeName,
Jobs_DirectoryName + "/" + Jobs_NotebookName,
localFilePath=Jobs_NotebookName);
dockerComputeDomain = Jobs.getDockerComputeDomainFromName(Jobs_DockerComputeDomainName)
jobId_1 = Jobs.submitNotebookJob(
'/home/idies/workspace/' + Jobs_UserVolumeName + '/' + Jobs_DirectoryName + '/' + Jobs_NotebookName,
dockerComputeDomain,
Jobs_DockerImageName,
Jobs_UserVolumes, Jobs_DataVolumes,
Jobs_Parameters, Jobs_Alias)
Jobs.cancelJob(jobId_1)
jobStatus = Jobs.getJobStatus(jobId_1)
self.assertTrue(jobStatus.get('status') == 128)
jobId_2 = Jobs.submitNotebookJob(
Jobs_RemoteNotebookPath,
dockerComputeDomain,
Jobs_DockerImageName,
Jobs_UserVolumes, Jobs_DataVolumes,
Jobs_Parameters, Jobs_Alias)
jobStatus = Jobs.waitForJob(jobId_2)
self.assertTrue(jobStatus == Jobs.getJobStatus(jobId_2))
self.assertTrue(jobStatus.get('status') == 32)
job = Jobs.getJobDescription(jobId_2)
self.assertTrue(job.get('username') == Authentication_loginName)
self.assertTrue(job.get('dockerImageName') == Jobs_DockerImageName)
self.assertTrue(job.get('scriptURI') == Jobs_RemoteNotebookPath)
self.assertTrue(job.get('submitterDID') == Jobs_Alias)
jobDirectory = job.get('resultsFolderURI')
relativePath = jobDirectory.split('scratch/')[1] + Jobs_NoteBookOutPutFile;
string = Files.download(fileService, 'scratch', '',relativePath,format="txt", userVolumeOwner=Authentication_loginName);
string.rstrip("\n")
self.assertTrue(string, job.get('resultsFolderURI') )
jobs = Jobs.getJobsList(top=2)
found = False
for job in jobs:
if jobId_1 == job.get("id"):
found = True;
self.assertTrue(found)
found = False
for job in jobs:
if jobId_2 == job.get("id"):
found = True;
self.assertTrue(found)
jobId = Jobs.submitShellCommandJob(Jobs_ShellCommand,
dockerComputeDomain,
Jobs_DockerImageName,
Jobs_UserVolumes, Jobs_DataVolumes,
Jobs_Alias)
jobStatus = Jobs.waitForJob(jobId)
self.assertTrue(jobStatus == Jobs.getJobStatus(jobId))
self.assertTrue(jobStatus.get('status') == 32)
job = Jobs.getJobDescription(jobId)
self.assertTrue(job.get('username') == Authentication_loginName)
self.assertTrue(job.get('dockerImageName') == Jobs_DockerImageName)
self.assertTrue(job.get('command') == Jobs_ShellCommand)
self.assertTrue(job.get('submitterDID') == Jobs_Alias)
jobDirectory = job.get('resultsFolderURI')
relativePath = jobDirectory.split('scratch/')[1] + "command.txt";
string = Files.download(fileService, 'scratch', '',relativePath,format="txt", userVolumeOwner=Authentication_loginName);
string.rstrip("\n")
self.assertTrue(string, job.get('resultsFolderURI') )
Files.deleteUserVolume(fileService, Jobs_RootVolumeName, Jobs_UserVolumeName)
# RDB Jobs ################################################################################################
def test_Jobs_getRDBComputeDomains(self):
rdbComputeDomains = Jobs.getRDBComputeDomains()
self.assertTrue(rdbComputeDomains.__len__() > 0)
found = False
for rdbComputeDomain in rdbComputeDomains:
if rdbComputeDomain.get('name') == Jobs_RDBComputeDomainName:
found = True
self.assertTrue(found)
def test_Jobs_getRDBComputeDomainsNames(self):
rdbComputeDomainsNames = Jobs.getRDBComputeDomainsNames()
self.assertTrue(Jobs_RDBComputeDomainName in rdbComputeDomainsNames)
def test_Jobs_getRDBComputeDomainFromName(self):
rdbComputeDomain = Jobs.getRDBComputeDomainFromName(Jobs_RDBComputeDomainName)
self.assertTrue(rdbComputeDomain.get('name') in Jobs_RDBComputeDomainName)
def test_Jobs_submitRDBJob(self):
rdbComputeDomain = Jobs.getRDBComputeDomainFromName(Jobs_RDBComputeDomainName)
jobId = Jobs.submitRDBQueryJob(Jobs_SqlQuery, rdbComputeDomain, Jobs_DatabaseContextName, Jobs_QueryResultsFile,Jobs_Alias)
jobStatus = Jobs.waitForJob(jobId)
self.assertTrue(jobStatus == Jobs.getJobStatus(jobId))
self.assertTrue(jobStatus.get('status') == 32)
job = Jobs.getJobDescription(jobId)
self.assertTrue(job.get('username') == Authentication_loginName)
self.assertTrue(job.get('rdbDomainName') == Jobs_RDBComputeDomainName)
self.assertTrue(job.get('databaseContextName') == Jobs_DatabaseContextName)
self.assertTrue(job.get('inputSql') == Jobs_SqlQuery)
self.assertTrue(job.get('submitterDID') == Jobs_Alias)
fileService = Files.getFileServiceFromName(Jobs_FileServiceName);
jobDirectory = job.get('resultsFolderURI')
relativePath = jobDirectory.split('scratch/')[1] + Jobs_QueryResultsFile + '.csv';
string = Files.download(fileService, 'scratch', '',relativePath,format="txt", userVolumeOwner=Authentication_loginName);
string.rstrip("\n")
self.assertTrue(string, Jobs_SqlQueryResult)
def test_Jobs_getJobDirectory(self):
#TBD
pass;
if __name__ == '__main__':
#unittest.main()
unittest.TestLoader.sortTestMethodsUsing = lambda x, y: cmp(x,y);
testLoader = unittest.TestLoader()
testLoader.sortTestMethodsUsing = lambda x, y: 0;
suite = testLoader.loadTestsFromTestCase(TestAuthentication); unittest.TextTestRunner(verbosity=2).run(suite)
suite = testLoader.loadTestsFromTestCase(TestLoginPortal); unittest.TextTestRunner(verbosity=2).run(suite)
suite = testLoader.loadTestsFromTestCase(TestCasJobs); unittest.TextTestRunner(verbosity=2).run(suite)
suite = testLoader.loadTestsFromTestCase(TestSkyServer); unittest.TextTestRunner(verbosity=2).run(suite)
suite = testLoader.loadTestsFromTestCase(TestSciDrive); unittest.TextTestRunner(verbosity=2).run(suite)
suite = testLoader.loadTestsFromTestCase(TestSkyQuery); unittest.TextTestRunner(verbosity=2).run(suite)
suite = testLoader.loadTestsFromTestCase(TestFileService); unittest.TextTestRunner(verbosity=2).run(suite)
suite = testLoader.loadTestsFromTestCase(TestJobs); unittest.TextTestRunner(verbosity=2).run(suite)
|
from hypernet.apps import box
from hypernet.apps import fitRates
__all__ = [
"box",
"fitRates"
]
|
r,c,zr,zc=map(int,input().split());a=[];res=""
for i in range(r):
a.append(input())
for i in range(r*zr):
for j in range(c*zc):
res+=a[i//zr][j//zc]
res+='\n'
print(res,end="")
|
import json
import logging
import pickle
import re
import chardet
import faker
from urllib3.exceptions import InvalidHeader
_COOKIE_PARSER = re.compile(r"([\w\-\d]+)\((.*)\)")
_CLEAN_HEADER_REGEX_BYTE = re.compile(b'^\\S[^\\r\\n]*$|^$')
_CLEAN_HEADER_REGEX_STR = re.compile(r'^\S[^\r\n]*$|^$')
_HOST_EXTRACT = re.compile(r'https?://(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}[:\d]{1,6}|[\w\d.\-]?[\w\d.\-]+)/?')
_DECODE_PARSER = re.compile(r"([\w\d\-]+)=([\w\d\-@.+]*)")
class JSON:
__slots__ = [
"__d",
"__current"
]
def __init__(self, t):
self.__d = t
self.__current = None
"""
{
"name": "admin",
"list": ["1","2","3"],
"dict": {
"age":18,
"phone":"123456789",
"li":[1,2,3,4]
},
}
        JSON(xxx).item("name").value -> admin
        JSON(xxx).array("list").index(0).value -> "1"
        JSON(xxx).items("dict").item("age").value -> 18
        JSON(xxx).items("dict").array("li").index(0).value -> 1
"""
def item(self, data):
if self.__current:
self.__current = self.__current[data]
else:
self.__current = self.__d[data]
return self
def restart(self):
self.__current = None
return self
def array(self, key):
if self.__current:
self.__current = self.__current[key]
else:
self.__current = self.__d[key]
return self
def index(self, index):
if isinstance(self.__current, list):
self.__current = self.__current[index]
return self
raise TypeError("except list but given {} ".format(self.__current.__class__))
@property
def value(self):
data = self.__current
if self.__current:
self.restart()
return data
return self.__d
def items(self, key):
if self.__current:
self.__current = self.__current[key]
else:
self.__current = self.__d[key]
return self
@staticmethod
def Except(case, except_value, fn, real_value):
return fn(case, except_value, real_value)
def __str__(self):
return "{}".format(self.__d)
class RandomUserAgentMixin(object):
__slots__ = ()
def random(self):
f = faker.Faker()
yield f.user_agent()
def enable_random_ua(self):
setattr(self, "ua", True)
return self
def close_random_ua(self):
if hasattr(self, "ua"):
delattr(self, "ua")
return self
class RequestMixin(object):
__slots__ = ()
def ready(self, method=None, url=None, headers=None, files=None, data=None,
params=None, auth=None, cookies=None, hooks=None, json=None):
if hasattr(self, "random") and hasattr(self, "ua") and self.ua:
if headers:
headers.update({"User-Agent": next(self.random())})
else:
headers = {"User-Agent": next(self.random())}
self.prepare_method(method)
self.prepare_url(url, params)
self.prepare_headers(headers)
self.prepare_cookies(cookies)
self.prepare_body(data, files, json)
self.prepare_auth(auth, url)
self.prepare_hooks(hooks)
return self
@staticmethod
def first_word_upper(key):
first_char = key[:1]
return first_char.upper() + key[1:]
def __parser(self, item: str) -> tuple:
try:
key, value = _COOKIE_PARSER.findall(item)[0]
except IndexError:
raise SyntaxError("Header语法错误")
if "_" in key:
words = key.split("_")
all_words = [self.first_word_upper(w) for w in words]
key = "-".join(all_words)
return key, value
def parser(self, line: str):
"""
Line的格式
字段名(字段值)
如果有多个字段以|分割
例如
Cookie(PHPSESSID=ototlqt0uuhr2ejhrnlfqv6fsq)|Accept(text/html,application/xhtml+xml,*/*;q=0.8)
转换以后:
{
'Cookie': 'PHPSESSID=ototlqt0uuhr2ejhrnlfqv6fsq',
'UserAgent': 'xxx', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'
}
:param line:
"""
if "|" in line:
for item in line.split("|"):
header = self.__parser(item)
check_header_validity(header)
k, v = header
self.headers[k] = v
        else:
            # dict.update() cannot take a single (key, value) tuple directly
            key, value = self.__parser(line)
            self.headers[key] = value
class ResponseMixin:
__slots__ = ()
def get_type(self):
"""
        Return the content type (MIME type) of the response.
:return:
"""
types = self.headers["Content-Type"]
if types == "":
raise TypeError("Content-Type没有数据")
return types.split(";")[0]
@property
def charset(self):
"""
        Return the detected character encoding of the response body.
:return:
"""
return chardet.detect(self.content)["encoding"]
def regex(self, pattern: str, index=0, trim=False):
"""
        Extract content from the response body with a regular expression.
        :param trim: strip newlines from the body before matching
        :param pattern: regular expression pattern
        :param index: index of the match to return
:return:
"""
data = self.text
if trim:
data = data.replace("\n", "")
try:
return re.findall(pattern, data)[index]
except IndexError:
logging.error("{},内容没有提取到".format(pattern))
def extract(self, fn):
"""
        Run an extraction function against the response text.
        :param fn: callable that takes the response text and returns the extracted value
:return:
"""
return fn(self.text)
@property
def jsonify(self):
"""
        Convert the response body into a JSON wrapper object.
        The returned object supports content extraction; see the JSON class for details.
:return:
"""
if self.content:
if self.get_type() == "application/json":
                return JSON(json.loads(self.content.decode(self.charset or "utf-8")))
            try:
                return JSON(json.loads(self.content.decode(self.charset or "utf-8")))
            except json.JSONDecodeError:
                raise ValueError("the response body is not valid JSON")
return JSON({})
def get_host(self):
"""
        Return the host name extracted from the response URL.
:return:
"""
return _HOST_EXTRACT.findall(self.url)[0]
def dump(self):
"""
        Pickle this Response object to a file named "<host>.kpl".
:return:
"""
with open(self.get_host() + ".kpl", "wb") as f:
pickle.dump(self, f)
def dump_text_context(self):
"""
        Save the text body of the response to a file.
        The file is named after the host.
:return:
"""
with open(self.get_host(), "w") as f:
f.write(self.text)
def dump_binary_context(self):
"""
        Save the binary body of the response to a file.
        The file is named after the host.
:return:
"""
with open(self.get_host(), "wb") as f:
f.write(self.content)
def check_header_validity(header):
name, value = header
if isinstance(value, bytes):
pat = _CLEAN_HEADER_REGEX_BYTE
else:
pat = _CLEAN_HEADER_REGEX_STR
try:
if not pat.match(value):
raise InvalidHeader("Invalid return character or leading space in header: %s" % name)
except TypeError:
raise InvalidHeader("Value for header {%s: %s} must be of type str or "
"bytes, not %s" % (name, value, type(value)))
def dict2text(data: dict):
# dict -> admin=123&xx=222
return "&".join(["{0}={1}".format(k, v) for k, v in data.items()])
def text2dict(data):
# admin=123&xx=222 -> dict
return {p[0]: p[1] for p in _DECODE_PARSER.findall(data)}
def WithContext(cls):
    """Class decorator that adds __enter__/__exit__ so the class can be used as
    a context manager: on exit the exception info is logged and close() is
    called if the class defines it."""
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
logging.error("Excetions:\ntype:{0}\n\tvalue:{1}\n\ttrack back:{2}\n".format(exc_type, exc_val, exc_tb))
if hasattr(self, "close"):
self.close()
cls.__enter__ = __enter__
cls.__exit__ = __exit__
return cls
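# A brief usage sketch with a hypothetical class (not part of this module): a
# class decorated with WithContext can be used in a with-statement; on exit the
# exception info is logged and close() is called if the class defines it.
@WithContext
class _ExampleSession:
    def close(self):
        logging.info("example session closed")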
class NoEnableCacheRequest(Exception):
pass
|
from space_wrappers.classify import *
from gym import Space
from gym.spaces import *
import numpy as np
import pytest
class UnknownSpace(Space):
pass
# is_discrete
def test_is_discrete():
assert is_discrete(Discrete(10))
assert is_discrete(MultiDiscrete([4, 5]))
assert is_discrete(MultiBinary(5))
assert is_discrete(Tuple((Discrete(5), Discrete(4))))
assert not is_discrete(Box(np.zeros(2), np.ones(2), dtype=np.float32))
with pytest.raises(TypeError):
is_discrete(5)
with pytest.raises(NotImplementedError):
is_discrete(UnknownSpace())
def test_is_compound():
assert not is_compound(Discrete(10))
assert is_compound(MultiDiscrete([4, 5]))
assert is_compound(MultiBinary(5))
assert is_compound(Tuple((Discrete(5), Discrete(4))))
assert is_compound(Box(np.zeros(2), np.ones(2), dtype=np.float32))
assert not is_compound(Box(np.zeros(1), np.ones(1), dtype=np.float32))
with pytest.raises(TypeError):
is_compound(5)
with pytest.raises(NotImplementedError):
is_compound(UnknownSpace())
def test_is_flat():
assert is_flat(Discrete(10))
assert not is_flat(MultiDiscrete([4, 5]))
assert not is_flat(MultiBinary(5))
assert not is_flat(Tuple((Discrete(5), Discrete(4))))
assert is_flat(Box(np.zeros(2), np.ones(2), dtype=np.float32))
assert not is_flat(Box(np.zeros((2, 3)), np.ones((2, 3)), dtype=np.float32))
with pytest.raises(TypeError):
is_flat(5)
with pytest.raises(NotImplementedError):
is_flat(UnknownSpace())
def test_num_discrete_actions():
with pytest.raises(TypeError):
num_discrete_actions(Box(np.zeros(2), np.ones(2), dtype=np.float32))
assert num_discrete_actions(Discrete(10)) == (10,)
assert num_discrete_actions(MultiDiscrete([5, 6])) == (5, 6)
assert num_discrete_actions(MultiBinary(3)) == (2, 2, 2)
with pytest.raises(NotImplementedError):
num_discrete_actions(UnknownSpace())
|
from itertools import groupby
import psycopg2
import smtplib
import helper_functions
from helper_functions import cleanDirPath
import subprocess
import argparse
import os
import sys
import zipfile
import xml.etree.ElementTree as ET
import xml
import sqlparse
from tableau_xml import TableauDatasource as TDS
from tableau_xml import TableauWorkbook as TWB
import csv
import vertica_python
import settings
parser = argparse.ArgumentParser(description='Daniel Seisun Archive old reports script')
parser.add_argument('--postgres_user', action="store", dest="postgres_user",required=True, help="Postgres Username")
parser.add_argument('--postgres_pass', action="store", dest="postgres_pass", required=True, help="Postgres Password")
parser.add_argument('--archive_path', action="store", dest="archive_path", required=True, help="Where to store the workbook files")
parser.add_argument('--vert_user', action='store', dest='vert_user', required=False, help='Username to database to store workbook connection info')
parser.add_argument('--vert_pass', action='store', dest='vert_pass', required=False, help='Password to database to store workbook connection info')
args = parser.parse_args()
########################################
#Pulling down twb, twbx, tds, and tdsx files
########################################
conn = psycopg2.connect(host=settings.TABLEAU_HOST, port=8060,database="workgroup", user=args.postgres_user, password=args.postgres_pass)
print 'Pulling down twb and twbx files from server...'
twbx_workbooks_query = open('./wb_and_ds_contentID.sql', 'r').read()
curs = conn.cursor()
curs.execute(twbx_workbooks_query)
workbook_query_records = helper_functions.list_dict([col.name for col in curs.description], curs.fetchall())
def tabExport(record):
if record['sourcetype'] == 'Datasource':
fileExt = 'tdsxm'
fileSearch = 'tds'
elif record['sourcetype'] == 'Workbook':
fileExt = 'twbxm'
fileSearch = 'twb'
lobj = psycopg2.extensions.lobject(conn, record['content'], 'r')
file_path = '%s/%s.%s' % (args.archive_path, record['repository_url'], fileExt)
lobj.export(file_path)
if zipfile.is_zipfile(file_path):
file_zip = zipfile.ZipFile(file_path)
for files in file_zip.namelist():
if files.find('.%s' % fileSearch) > 0:
file_zip.extract(files, path=args.archive_path)
os.remove(file_path)
else:
os.rename(file_path, file_path[:-2])
for record in workbook_query_records:
tabExport(record)
########################################
#Loading files into vertica
########################################
print 'Parsing files and inserting to vertica...'
wb_table_name = "test.DSEISUN_tableau_wb_datasources"
tds_table_name = "test.DSEISUN_tableau_server_datasources"
vert_connection = vertica_python.connect({
'host': '127.0.0.1',
'port': 5436,
'user': args.vert_user,
'password': args.vert_pass,
'database': 'analytics_raw'
})
twb_files = (x for x in os.listdir(args.archive_path) if x[-3:] == 'twb')
tds_files = (x for x in os.listdir(args.archive_path) if x[-3:] == 'tds')
all_files = {'workbook': twb_files, 'tableau_datasource': tds_files}
vert_curs = vert_connection.cursor()
for twb_file in twb_files:
print twb_file
twb = TWB(open(cleanDirPath(args.archive_path) + twb_file, 'r'))
for wb_con in twb.datasources:
if twb.find_datasource_type(wb_con) <> 'param':
conn_info = twb.get_wb_datasource(wb_con)
row = (twb.wb_name, conn_info['ds_name'], conn_info['class'], conn_info['server'], conn_info['dbname'], conn_info['relation_datasource'].encode('ascii', errors='backslashreplace'))
vert_curs.execute("insert into %s VALUES (:0|, :1|, :2|, :3|, :4|, :5|)" % wb_table_name, {'0|':row[0], '1|':row[1], '2|':row[2], '3|':row[3], '4|':row[4], '5|':row[5]})
for tds_file in tds_files:
print tds_file
tds = TDS(open(cleanDirPath(args.archive_path) + tds_file, 'r'))
conn_info = tds.datasource_info
row = (conn_info['ds_name'], conn_info['class'], conn_info['server'], conn_info['dbname'], conn_info['relation_datasource'].encode('ascii', errors='backslashreplace'))
vert_curs.execute("insert into %s VALUES (:0|, :1|, :2|, :3|, :4|)" % tds_table_name, {'0|':row[0], '1|':row[1], '2|':row[2], '3|':row[3], '4|':row[4]})
print "Committing Files"
vert_curs.execute('commit;')
print "Complete!"
|
from app import db
print(f"running create_db")
db.create_all()
|
if node.os != 'ubuntu' and node.os != 'raspbian':
raise Exception('{} {} is not supported by this bundle'.format(node.os, node.os_version))
files = {}
pkg_apt = {
'sudo': {},
}
files['/etc/sudoers'] = {
'source': 'sudoers',
'content_type': 'text',
'mode': '0440',
'owner': 'root',
'group': 'root',
'context': {},
}
files['/etc/sudoers.d/users'] = {
'source': 'users',
'content_type': 'mako',
'mode': '0440',
'owner': 'root',
'group': 'root',
'context': {
'users': dict((k, v) for k, v in node.metadata['users'].items() if v.get('enabled', False) and v.get('sudo', False)),
},
}
|
# -------------------------------------------------------------------------------------
# Libraries
import logging
import re
import xarray as xr
import pandas as pd
from src.hyde.algorithm.settings.satellite.gsmap.lib_gsmap_args import logger_name
# Logging
log_stream = logging.getLogger(logger_name)
# Debug
# import matplotlib.pylab as plt
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to read data for gsmap
def read_data_gsmap(file_name, var_name=None,
tag_coord_time='time', tag_coord_geo_x='lon', tag_coord_geo_y='lat',
tag_dim_time='time', tag_dim_geo_x='lon', tag_dim_geo_y='lat'):
# Starting info
log_stream.info(' --> Open file ' + file_name + ' ... ')
if var_name is None:
log_stream.error(' ===> Variable name is undefined!')
raise IOError(' ===> Variable name is a mandatory argument!')
else:
if not isinstance(var_name, list):
var_name = [var_name]
# Open datasets
dst = xr.open_dataset(file_name)
# Get variables ALL and DATA
var_list_all = list(dst.variables)
var_list_data = list(dst.data_vars)
# Get time, geo x and geo y
log_stream.info(' ---> Get time, geo_x and geo_y data ... ')
if tag_coord_time in var_list_all:
da_time = dst[tag_coord_time]
else:
log_stream.error(' ===> Time dimension name is not in the variables list of grib file')
raise IOError(' ===> Check the time dimension!')
if tag_coord_geo_x in var_list_all:
da_geo_x_tmp = dst[tag_coord_geo_x]
if tag_dim_time in da_geo_x_tmp.dims:
da_geo_x = da_geo_x_tmp.squeeze(tag_dim_time)
else:
da_geo_x = da_geo_x_tmp
else:
log_stream.error(' ===> GeoX dimension name is not in the variables list of grib file')
raise IOError(' ===> Check the GeoX dimension!')
if tag_coord_geo_y in var_list_all:
da_geo_y_tmp = dst[tag_coord_geo_y]
if tag_dim_time in da_geo_y_tmp.dims:
da_geo_y = da_geo_y_tmp.squeeze(tag_dim_time)
else:
da_geo_y = da_geo_y_tmp
else:
log_stream.error(' ===> GeoY dimension name is not in the variables list of grib file')
raise IOError(' ===> Check the GeoY dimension!')
log_stream.info(' ---> Get time, geo_x and geo_y data ... DONE')
var_list_select = []
for var_step in var_name:
if var_step in var_list_data:
var_list_select.append(var_step)
else:
log_stream.warning(' ===> Variable name ' + var_step + ' is not available in the datasets')
time_period = []
for time_step in da_time.values:
time_period.append(time_step)
datetime_idx = pd.to_datetime(time_period, format='%Y-%m-%dT%H:%M:%S')
datetime_idx = datetime_idx.round('H')
log_stream.info(' ---> Get time, geo_x and geo_y data ... DONE')
# Get data
da_var = []
for var_list_step in var_list_select:
log_stream.info(' ---> Get ' + var_list_step + ' data ... ')
da_step = dst[var_list_step]
da_step.coords[tag_coord_time] = datetime_idx
da_var.append(da_step)
log_stream.info(' ---> Get ' + var_list_step + ' data ... DONE')
# Ending info
log_stream.info(' --> Open file ' + file_name + ' ... DONE')
# Start Debug
# mat = da_values[0].values
# plt.figure()
# plt.imshow(mat[0,:,:])
# plt.colorbar()
# plt.show()
# End Debug
return da_var, da_time, da_geo_x, da_geo_y
# -------------------------------------------------------------------------------------
|
import random
import os
import discord
from discord.ext import commands
from validators.user import is_registered, is_unregistered
from utils.commands import confirm
class Management(commands.Cog):
@is_unregistered()
@commands.command()
async def register(self, ctx):
await ctx.bot.user_repository.create(ctx.author.id)
await ctx.send(f'<@{ctx.author.id}> succesfully registered!')
@is_registered()
@commands.command()
async def unregister(self, ctx):
confirm_emoji = '💀'
confirm_msg = (
'Are you sure? You will lose all your progress. '
f'React with a \{confirm_emoji} to confirm.'
)
confirmation = await confirm(ctx, confirm_msg, confirm_emoji)
if confirmation:
await ctx.bot.user_repository.delete(ctx.message.author.id)
await ctx.send(f'<@{ctx.author.id}> succesfully unregistered.')
|
def fun():
for x in range(6):
for y in [-1, 4, 2, 1, 7]:
if (x > y):
yield x*2+y
gencomp = (x*2+y for x in range(6) for y in [-1, 4, 2, 1, 7] if x > y)
___assertEqual(list(fun()), list(gencomp))
def filgen(f, it):
if (f == None):
for x in it:
if (x):
yield x
else:
for x in it:
if (f(x)):
yield x
f = lambda c: 'a' <= c <= 'z'
it = 'Hello World'
___assertEqual(list(filter(f, it)), list(filgen(f, it)))
___assertEqual([1, 'hello', [3], 9], list(filgen(None, [1, 'hello', [], [3], '', None, 9, 0])))
___assertEqual([1, 9, 2], list(filgen(lambda x: x > 0, [1, -3, 9, 0, 2])))
|
from nba_api.stats.endpoints._base import Endpoint
from nba_api.stats.library.http import NBAStatsHTTP
from nba_api.stats.library.parameters import GameDate, LeagueID
class VideoStatus(Endpoint):
endpoint = 'videostatus'
expected_data = {'VideoStatus': ['GAME_ID', 'GAME_DATE', 'VISITOR_TEAM_ID', 'VISITOR_TEAM_CITY', 'VISITOR_TEAM_NAME', 'VISITOR_TEAM_ABBREVIATION', 'HOME_TEAM_ID', 'HOME_TEAM_CITY', 'HOME_TEAM_NAME', 'HOME_TEAM_ABBREVIATION', 'GAME_STATUS', 'GAME_STATUS_TEXT', 'IS_AVAILABLE', 'PT_XYZ_AVAILABLE']}
def __init__(self,
game_date=GameDate.default,
league_id=LeagueID.default):
self.nba_response = NBAStatsHTTP().send_api_request(
endpoint=self.endpoint,
parameters={
'GameDate': game_date,
'LeagueID': league_id
},
)
data_sets = self.nba_response.get_data_sets()
self.data_sets = [Endpoint.DataSet(data=data_set) for data_set_name, data_set in data_sets.items()]
self.video_status = Endpoint.DataSet(data=data_sets['VideoStatus'])
|
"""
Manage symbols can be used in BUILD files.
"""
__build_rules = {}
def register_variable(name, value):
"""Register a variable that accessiable in BUILD file """
__build_rules[name] = value
def register_function(f):
"""Register a function as a build function that callable in BUILD file """
register_variable(f.__name__, f)
def get_all():
"""Get the globals dict"""
return __build_rules
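# A brief usage sketch with hypothetical rule names (not part of this module):
# one plausible way to consume these symbols is to execute a BUILD file with a
# copy of get_all() as its global namespace, so every registered function and
# variable is directly usable inside it.
def _example_load_build_file(build_file_path):
    def cc_library(name, srcs=None, deps=None):
        """Hypothetical build rule made callable inside BUILD files."""
        return {'name': name, 'srcs': srcs or [], 'deps': deps or []}
    register_function(cc_library)
    register_variable('DEBUG', True)
    symbols = dict(get_all())  # copy so the BUILD file cannot mutate the registry
    with open(build_file_path) as build_file:
        exec(compile(build_file.read(), build_file_path, 'exec'), symbols)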
|
from setuptools import setup, find_packages
with open("requirements.txt") as f:
requirements = f.read().splitlines()
setup(
name='dsmodule',
packages=find_packages(),
install_requires=requirements,
include_package_data=True,
description='Template Python package for Data Science projects.',
version='0.1',
url='https://github.com/gcastella/py_ds_template',
author='Gerard Castellà Canals',
author_email='gcastella.91@gmail.com',
keywords=['python', 'template', 'repository', 'data', 'data-science']
)
|
from indicators.rsi import *
def test_rsi():
assert(True is True)
|
# --- Day 5: How About a Nice Game of Chess? ---
import hashlib
def solve1(door_id):
code = []
i = 0
while len(code) < 8:
s = (door_id + str(i)).encode('utf-8')
hash = hashlib.md5(s).hexdigest()
i += 1
if hash[:5] == '00000':
code.append(hash[5])
return ''.join(code)
print(solve1(door_id='ojvtpuvg'))
def solve2(door_id):
code = ['-'] * 8
i = 0
while '-' in code:
s = (door_id + str(i)).encode('utf-8')
hash = hashlib.md5(s).hexdigest()
i += 1
if (hash[:5] == '00000'):
index = int(hash[5], base=16)
if index <= 7 and (code[index] == '-'):
code[index] = hash[6]
return ''.join(code)
print(solve2(door_id='ojvtpuvg'))
|
import astrometry.net.appsecrets.auth as authsecrets
# The namespace for social authentication is horrible.
# This uses the Python Social Auth module,
# http://python-social-auth-docs.readthedocs.io/en/latest/configuration/settings.html
# which was derived from the django-social-auth module.
SOCIAL_INSTALLED_APPS = (
'social_django',
)
SOCIAL_AUTH_BACKENDS = (
'social_core.backends.google.GoogleOAuth2',
'social_core.backends.twitter.TwitterOAuth',
'social_core.backends.yahoo.YahooOAuth2',
'social_core.backends.github.GithubOAuth2',
'social_core.backends.flickr.FlickrOAuth',
'social_core.backends.evernote.EvernoteOAuth',
'social_core.backends.amazon.AmazonOAuth2',
#'django.contrib.auth.backends.ModelBackend',
)
SOCIAL_TEMPLATE_CONTEXT_PROCESSORS = (
'social_django.context_processors.backends',
'social_django.context_processors.login_redirect',
)
# SOCIAL_MIGRATION = {
# 'default': 'social.apps.django_app.default.south_migrations'
# }
# SOCIAL_AUTH_STRATEGY = 'social.strategies.django_strategy.DjangoStrategy'
# SOCIAL_AUTH_STORAGE = 'social.apps.django_app.default.models.DjangoStorage'
SOCIAL_AUTH_REDIRECT_IS_HTTPS = True
SOCIAL_AUTH_GOOGLE_OAUTH2_KEY = authsecrets.google.key
SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = authsecrets.google.secret
SOCIAL_AUTH_GOOGLE_OAUTH2_SCOPE = ['openid', 'email']
# dstn can't figure out how to get user's email addr from flickr. Argh!
SOCIAL_AUTH_FLICKR_KEY = authsecrets.flickr.key
SOCIAL_AUTH_FLICKR_SECRET = authsecrets.flickr.secret
SOCIAL_AUTH_FLICKR_SCOPE = ['openid', 'email']
### I was getting a Flickr SSL verification error...
SOCIAL_AUTH_FLICKR_VERIFY_SSL = False
github_secrets = authsecrets.githubs
# SOCIAL_AUTH_GITHUB_KEY = authsecrets.githubs[sitename].key
# SOCIAL_AUTH_GITHUB_SECRET = authsecrets.githubs[sitename].secret
# #SOCIAL_AUTH_GITHUB_SCOPE = ['openid', 'email']
SOCIAL_AUTH_GITHUB_SCOPE = ['user:email']
# dstn can't figure out how to get user's email addr from twitter. Argh!
# https://twittercommunity.com/t/how-to-get-email-from-twitter-user-using-oauthtokens/558/74
SOCIAL_AUTH_TWITTER_KEY = authsecrets.twitter.key
SOCIAL_AUTH_TWITTER_SECRET = authsecrets.twitter.secret
SOCIAL_AUTH_TWITTER_SCOPE = ['email']
#SOCIAL_AUTH_TWITTER_SCOPE = ['user:email']
# Key not working.... keep getting 401 auth req'd, with message oauth_problem=consumer_key_rejected
# SOCIAL_AUTH_YAHOO_OAUTH_KEY = authsecrets.yahoo.key
# SOCIAL_AUTH_YAHOO_OAUTH_SECRET = authsecrets.yahoo.secret
# SOCIAL_AUTH_YAHOO_OAUTH_VERIFY_SSL = False
SOCIAL_AUTH_YAHOO_OAUTH2_KEY = authsecrets.yahoo.key
SOCIAL_AUTH_YAHOO_OAUTH2_SECRET = authsecrets.yahoo.secret
SOCIAL_AUTH_RAISE_EXCEPTIONS = True
#SOCIAL_AUTH_USERNAME_IS_FULL_EMAIL = True
SOCIAL_AUTH_EVERNOTE_KEY = authsecrets.evernote.key
SOCIAL_AUTH_EVERNOTE_SECRET = authsecrets.evernote.secret
SOCIAL_AUTH_AMAZON_KEY = authsecrets.amazon.key
SOCIAL_AUTH_AMAZON_SECRET = authsecrets.amazon.secret
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
from nose.tools import eq_
from pyexcel_cli.view import view
from click.testing import CliRunner
from textwrap import dedent
def test_stdin_option():
runner = CliRunner()
result = runner.invoke(view,
["--source-file-type", "csv",
"--csv-output-lineterminator", "\n",
"--output-file-type", "csv", '-'],
input='1,2,3')
eq_(result.output, '1,2,3\n')
eq_(result.exit_code, 0)
def test_stdout_option():
runner = CliRunner()
test_fixture = os.path.join("tests", "fixtures", "transcode_simple.csv")
result = runner.invoke(view, ["--output-file-type", "csv",
"--csv-output-lineterminator", "\n",
test_fixture])
eq_(result.exit_code, 0)
eq_(result.output, '1,2,3\n')
def test_csv_encoding_option():
runner = CliRunner()
test_fixture = os.path.join("tests", "fixtures", "csv-encoding-utf16.csv")
result = runner.invoke(view, ["--output-file-type", "csv",
"--csv-source-encoding", "utf-16",
"--csv-output-lineterminator", "\n",
test_fixture])
eq_(result.exit_code, 0)
if sys.version_info[0] == 2:
output = result.output.encode('utf-8')
else:
output = result.output
eq_(output, 'Äkkilähdöt,Matkakirjoituksia,Matkatoimistot\n')
def test_url_option():
runner = CliRunner()
test_fixture = "https://github.com/pyexcel/pyexcel-cli/raw/master/tests/fixtures/multiple-sheets.xls" # noqa
result = runner.invoke(view, [test_fixture])
expected = dedent("""
Sheet 1:
+---+---+---+
| 1 | 2 | 3 |
+---+---+---+
| 4 | 5 | 6 |
+---+---+---+
| 7 | 8 | 9 |
+---+---+---+
Sheet 2:
+---+---+---+
| X | Y | Z |
+---+---+---+
| 1 | 2 | 3 |
+---+---+---+
| 4 | 5 | 6 |
+---+---+---+
Sheet 3:
+---+---+---+
| O | P | Q |
+---+---+---+
| 3 | 2 | 1 |
+---+---+---+
| 4 | 3 | 2 |
+---+---+---+
""").strip('\n')
eq_(result.exit_code, 0)
eq_(result.output, expected)
|
import flask, os, sys,time
from flask import request,render_template,send_file,Flask
import subprocess
from shutil import copyfile
import textrank
import FSM
app = Flask(__name__, static_folder='pdf')
def html_and_txt(file_name):
path = os.getcwd().replace('\\','/') + '/pdf'
cmd_docker = 'docker run -idt --rm -v {}:/pdf bwits/pdf2htmlex pdf2htmlEX --zoom 1.3 {}.pdf'.format(path,file_name)
subprocess.call(cmd_docker)
subprocess.call(cmd_docker)
subprocess.call('python pdf2txt.py -o ./pdf/{}.txt ./pdf/{}.pdf '.format(file_name,file_name))
interface_path = os.path.dirname(__file__)
sys.path.insert(0, interface_path)  # add this file's parent directory to the module search path
@app.route('/', methods=['get'])
def index():
    return '<form action="/upload" method="post" enctype="multipart/form-data"><input type="file" id="img" name="img"><button type="submit">Upload</button></form>'
@app.route('/upload', methods=['post'])
def upload():
    fname = request.files['img']  # get the uploaded file
if fname:
# t = time.strftime('%Y%m%d%H%M%S')
new_fname = r'pdf/' + fname.filename
        fname.save(new_fname)  # save the file to the target path
        # generate the html and txt versions
        html_and_txt(fname.filename[:-4])
        # extract keywords
        words_list = textrank.words(fname.filename[:-4])
        # generate the highlighted html
FSM.light(words_list,fname.filename[:-4])
return send_file('./pdf/{}.html'.format(fname.filename[:-4]))
else:
return '{"msg": "请上传文件!"}'
# print('---------- mapping between routes and view functions ----------')
# print(server.url_map)  # print the mapping between routes and view functions
if __name__ == '__main__':
app.run(host='0.0.0.0', port=9500, debug=True)
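# Example request (illustrative, not part of the original app): with the server
# running, a PDF can be uploaded from a shell with something like
#     curl -F "img=@sample.pdf" http://localhost:9500/upload -o sample.html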
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import datetime
import re
from bs4 import BeautifulSoup
import scrape_common as sc
def strip_value(value):
if value:
return re.sub(r'[^0-9]', '', value)
return None
base_url = 'https://www.vs.ch'
url = f'{base_url}/web/coronavirus/statistiques'
content = sc.download(url, silent=True)
soup = BeautifulSoup(content, 'html.parser')
pdf_url = soup.find('a', string=re.compile(r'2020.*Sit Epid.*')).get('href')
pdf_url = f'{base_url}{pdf_url}'
content = sc.pdfdownload(pdf_url, silent=True, layout=True, page=1)
dd = sc.DayData(canton='VS', url=pdf_url)
dd.datetime = sc.find(r'(\d{2}/\d{2}/20\d{2})', content)
dd.datetime = re.sub(r'/', '.', dd.datetime)
dd.cases = strip_value(sc.find(r'.*Cumul cas positifs.*\s+(\d+.\d+)\s+', content))
dd.deaths = strip_value(sc.find(r'.*Cumul d.c.s.*\s+(\d+.\d+)\s+', content))
dd.hospitalized = strip_value(sc.find(r'.*Hospitalisations en cours de cas COVID-19.*\s+(\d+)\s+', content))
dd.icu = strip_value(sc.find(r'.*SI en cours.*\s+(\d+)\s+', content))
dd.vent = strip_value(sc.find(r'.*Intubation en cours.*\s+(\d+)\s+', content))
is_first = True
if dd:
is_first = False
print(dd)
xls_url = 'https://raw.githubusercontent.com/statistikZH/covid19_drop/master/Chiffres%20%20COVID-19%20Valais.xlsx'
main_url = 'https://www.vs.ch/de/web/coronavirus'
xls = sc.xlsdownload(xls_url, silent=True)
rows = sc.parse_xls(xls, header_row=1)
for i, row in enumerate(rows):
if not isinstance(row['Date'], datetime.datetime):
continue
if not sc.represents_int(row['Cumul cas positifs']):
continue
if row['Nb nouveaux cas positifs'] is None and row["Nb nouvelles admissions à l'hôpital"] is None:
continue
dd = sc.DayData(canton='VS', url=main_url)
dd.datetime = row['Date'].date().isoformat()
dd.cases = row['Cumul cas positifs']
dd.hospitalized = row['Total hospitalisations COVID-19']
dd.new_hosp = row['Nb nouvelles admissions à l\'hôpital']
dd.icu = row['Patients COVID-19 aux SI total (y.c. intubés)']
dd.vent = row['Patients COVID-19 intubés']
dd.deaths = row['Cumul décès COVID-19']
# Since 2020-10-19 VS does no longer publish data about isolation/quarantined
#dd.isolated = row['Nombre de cas en cours d\'isolement']
#dd.quarantined = row['Nombre de contacts en cours de quarantaine']
#dd.quarantine_riskareatravel = row['Nombre de voyageurs en cours de quarantaine']
if row['Nb de nouvelles sorties'] is not None:
dd.recovered = sum(r['Nb de nouvelles sorties'] for r in rows[:i+1])
if not is_first:
print('-' * 10)
is_first = False
print(dd)
|
from src import EventManager, ModuleManager, utils
CAP = utils.irc.Capability("echo-message", depends_on=["labeled-response"])
@utils.export("cap", CAP)
class Module(ModuleManager.BaseModule):
@utils.hook("raw.send.privmsg", priority=EventManager.PRIORITY_LOW)
@utils.hook("raw.send.notice", priority=EventManager.PRIORITY_LOW)
def send_message(self, event):
if event["server"].has_capability(CAP):
event.eat()
@utils.hook("preprocess.send.privmsg")
@utils.hook("preprocess.send.notice")
@utils.hook("preprocess.send.tagmsg")
def preprocess_send(self, event):
if event["server"].has_capability(CAP):
event["events"].on("labeled-response").hook(self.on_echo)
def on_echo(self, event):
event["responses"][0].id = event["line"].id
|
import sys
MAP_WIDTH = 5
MAP_HEIGHT = 5
ITERATIONS = 3
def neighbours(x, y):
for dx, dy in [(-1,1),(0,1),(1,1),(-1,0),(1,0),(-1,-1),(0,-1),(1,-1)]:
nx = x + dx
ny = y + dy
if nx >= 0 and nx < MAP_WIDTH and ny >= 0 and ny < MAP_HEIGHT:
yield (nx, ny)
else:
continue
def print_grid(grid):
for line in grid:
print(''.join(line))
def import_grid(location):
grid = []
with open(location) as maze_file:
for row in maze_file:
grid.append(list(row.strip()))
return grid
def step(grid):
    # Copy each row for the next iteration so updates don't change the
    # neighbour counts of cells processed later in the same step
    next_grid = [row[:] for row in grid]
# Iterate over the grid cell, by cell
for i, row in enumerate(grid):
for j, cell in enumerate(row):
n_cells = list(map(lambda coord: grid[coord[0]][coord[1]], neighbours(i, j)))
alive_neighbours = sum(map(lambda x: 1 if x == '*' else 0, n_cells))
print('cell at ({},{}) has {} neighbours and is value {}'.format(i, j, alive_neighbours, cell))
if cell == '.' and alive_neighbours == 3:
# Cell is reborn
next_grid[i][j] = '*'
elif cell == '*':
if alive_neighbours < 2:
# Cell dies
next_grid[i][j] = '.'
elif alive_neighbours == 2 or alive_neighbours == 3:
# Cell survives
next_grid[i][j] = '*'
elif alive_neighbours > 3:
# Cell dies
next_grid[i][j] = '.'
return next_grid
def main():
print(list(neighbours(0,2)))
grid = []
grid = import_grid(sys.argv[1])
for iteration in range(ITERATIONS):
print('ITERATION {}'.format(iteration))
print('-'*50)
print_grid(grid)
grid = step(grid)
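# Illustrative self-check (not part of the original script; it relies on step()
# copying each row above): a vertical "blinker" on a 5x5 grid should flip into
# a horizontal bar after a single step.
def _blinker_demo():
    blinker = [list(row) for row in ('.....', '..*..', '..*..', '..*..', '.....')]
    after = step(blinker)
    assert [''.join(row) for row in after] == ['.....', '.....', '.***.', '.....', '.....']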
if __name__ == '__main__':
main()
|
def hanoi(n : int, s : str, d : str, i : str):
if n == 0:
return
hanoi(n - 1, s, i, d)
print("Move disc", n, "from", s, "to", d)
hanoi(n - 1, i, d, s)
hanoi(5, "left", "right", "centre")
|
# Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
from django.test import TestCase, override_settings
from uw_sws.util import fdao_sws_override
from uw_pws.util import fdao_pws_override
from sis_provisioner.builders import Builder
from sis_provisioner.csv.data import Collector
from sis_provisioner.exceptions import CoursePolicyException
from restclients_core.exceptions import DataFailureException
@fdao_sws_override
@fdao_pws_override
class BuilderTest(TestCase):
def test_builder(self):
builder = Builder()
self.assertEquals(type(builder.data), Collector)
self.assertEquals(builder.queue_id, None)
self.assertEquals(len(builder.invalid_users), 0)
self.assertEquals(builder.build(), None)
self.assertRaises(NotImplementedError, builder._process, True)
def test_get_section_resource_by_id(self):
builder = Builder()
# OK
section = builder.get_section_resource_by_id(
'2013-winter-DROP_T-100-B')
self.assertEqual(section.section_label(), '2013,winter,DROP_T,100/B')
# 404 Not Found
self.assertRaises(DataFailureException,
builder.get_section_resource_by_id,
'2013-winter-FAKE-999-A')
# Invalid ID
self.assertRaises(CoursePolicyException,
builder.get_section_resource_by_id,
'2013-winter-AAA-BBB')
def test_add_registrations_by_section(self):
builder = Builder()
section = builder.get_section_resource_by_id(
'2013-winter-DROP_T-100-B')
self.assertEqual(builder.add_registrations_by_section(section), None)
@override_settings(ALLOWED_LOGIN_DOMAINS=['gmail.com'])
def test_add_group_enrollment_data(self):
builder = Builder()
builder.add_group_enrollment_data(
'javerage', '2013-winter-AAA-BB-groups', 'student', 'active')
self.assertEqual(
str(builder.data.enrollments[0]), (
',,9136CCB8F66711D5BE060004AC494FFE,student,'
',2013-winter-AAA-BB-groups,active,\n'))
builder = Builder()
builder.add_group_enrollment_data(
'jav.erage@gmail.com', '2013-winter-AAA-BB-groups', 'ta', 'active')
self.assertEqual(
str(builder.data.enrollments[0]), (
',,javerage@gmail.com,ta,'
',2013-winter-AAA-BB-groups,active,\n'))
|
'''
This module provides an "in memory" implementation of the NetworkNode interface.
As the module's name implies, it's intended for use in testing code. This
implementation provides deterministic messaging that always deliveres messages
in the same order across repeated runs of the same test.
'''
from twisted.internet import defer, task, reactor
from zpax.network import channel
TRACE = False
nodes = dict() # uid => NetworkNode object
def setup():
nodes.clear()
try:
defer.gatherResults( [defer.succeed(None),], consumeErrors=True )
use_consume = True
except TypeError:
use_consume = False
def gatherResults( l ):
if use_consume:
return defer.gatherResults(l, consumeErrors=True)
else:
return defer.gatherResults(l)
def trace_messages( fn ):
'''
Function decorator that may be applied to a test_ method to display all
messages exchanged during the test
'''
@defer.inlineCallbacks
def wrapit(self, *args, **kwargs):
global TRACE
TRACE = True
print ''
print 'Trace:'
yield fn(self, *args, **kwargs)
TRACE = False
print ''
return wrapit
def show_stacktrace( fn ):
'''
Function decorator that catches exceptions and prints a traceback
'''
@defer.inlineCallbacks
def wrapit(self, *args, **kwargs):
try:
yield fn(self, *args, **kwargs)
except:
import traceback
traceback.print_exc()
raise
return wrapit
def broadcast_message( src_uid, channel_name, message_type, *parts ):
if len(parts) == 1 and isinstance(parts[0], (list, tuple)):
parts = parts[0]
for n in nodes.values():
if n.link_up:
n.recv_message( src_uid, channel_name, message_type, parts )
def unicast_message( src_uid, dst_uid, channel_name, message_type, *parts ):
if len(parts) == 1 and isinstance(parts[0], (list, tuple)):
parts = parts[0]
if dst_uid in nodes:
nodes[dst_uid].recv_message( src_uid, channel_name, message_type, parts )
class Channel( channel.Channel ):
def get_link_up(self):
return self.net_node.link_up
def set_link_up(self, v):
self.net_node.link_up = v
link_up = property(get_link_up, set_link_up)
def create_subchannel(self, sub_channel_name):
return Channel( self.channel_name + '.' + sub_channel_name, self.net_node )
class NetworkNode (object):
def __init__(self, node_uid):
self.node_uid = node_uid
self.zpax_nodes = None # Dictionary of node_uid -> (rtr_addr, pub_addr)
self.message_handlers = dict()
self.link_up = False
def add_message_handler(self, channel_name, handler):
if not channel_name in self.message_handlers:
self.message_handlers[ channel_name ] = list()
self.message_handlers[channel_name].append( handler )
def connect(self, zpax_nodes):
self.zpax_nodes = zpax_nodes
self.link_up = True
nodes[ self.node_uid ] = self
def shutdown(self):
self.link_up = False
if self.node_uid in nodes:
del nodes[ self.node_uid ]
def _dispatch_message(self, from_uid, channel_name, message_type, parts):
handlers = self.message_handlers.get(channel_name, None)
if handlers:
for h in handlers:
f = getattr(h, 'receive_' + message_type, None)
if f:
f(from_uid, *parts)
break
def recv_message(self, src_uid, channel_name, message_type, parts):
if self.link_up:
if TRACE:
print src_uid, '=>', self.node_uid, '[rcv]', '[{0}]'.format(channel_name), message_type.ljust(15), parts
self._dispatch_message( src_uid, channel_name, message_type, parts )
else:
if TRACE:
print src_uid, '=>', self.node_uid, '[drp]', '[{0}]'.format(channel_name), message_type.ljust(15), parts
def broadcast_message(self, channel_name, message_type, *parts):
if not self.link_up:
return
if len(parts) == 1 and isinstance(parts[0], (list, tuple)):
parts = parts[0]
if isinstance(parts, tuple):
parts = list(parts)
broadcast_message(self.node_uid, channel_name, message_type, parts)
def unicast_message(self, to_uid, channel_name, message_type, *parts):
if not self.link_up:
return
if len(parts) == 1 and isinstance(parts[0], (list, tuple)):
parts = parts[0]
if isinstance(parts, tuple):
parts = list(parts)
unicast_message(self.node_uid, to_uid, channel_name, message_type, parts)
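# Illustrative usage sketch (not part of the original module): how a test might
# wire two in-memory nodes together. Handler method names follow the
# 'receive_<message_type>' convention expected by _dispatch_message().
def _example_ping():
    received = []

    class PingHandler(object):
        def receive_ping(self, from_uid, payload):
            received.append((from_uid, payload))

    setup()
    a = NetworkNode('node-a')
    b = NetworkNode('node-b')
    a.connect(dict())
    b.connect(dict())
    b.add_message_handler('test_channel', PingHandler())
    a.broadcast_message('test_channel', 'ping', 'hello')
    assert received == [('node-a', 'hello')]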
|
# Copyright (c) 2020-2021 Matematyka dla Ciekawych Świata (http://ciekawi.icm.edu.pl/)
# Copyright (c) 2020-2021 Robert Ryszard Paciorek <rrp@opcode.eu.org>
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
try: clipData
except NameError: clipData = []
clipData += [
{ 'title': [ "#04.1", "Operacje", "na plikach", "(tekstowych)" ] },
{ 'comment': 'grep' },
{
'console': [
[0.0, ""],
["grep", eduMovie.runCommandString(r"grep '^s[sf]' /etc/passwd", hiddenargs="--color=always")],
],
'text' : [
'Poznane wcześniej polecenie find pozwala wyszukiwać pliki <m> w oparciu o ich zewnętrzne atrybuty, takie jak nazwa, rozmiar, itp. <m>'
'Nie pozwala ono jednak na przeszukiwanie zawartości plików, <m> czy też wyszukiwanie ich w oparciu o zawartość. <mark name="grep" />'
'Funkcjonalność taką oferuje polecenie grep, <m> umożliwia ono przeszukiwanie plików w oparciu o wyrażenia regularne. <m>'
'Wyrażenia regularne już poznaliśmy w ramach zajęć <m> poświęconych programowaniu w Pythonie. <m>'
'I dobrą wiadomością jest to że grep używa bardzo podobnych <m> a z odpowiednim przełącznikiem nawet takich samych wyrażeń regularnych. <m>'
'Tak samo jak w Pythonie kropka oznacza dowolny znak, <m> gwiazdka dowolną także zerową ilość powtórzeń, itd. <m>'
"Jako że przynajmniej niektóre ze znaków używanych w wyrażeniach <m> regularnych są znakami specjalnymi dla powłoki, <m> wyrażenia przekazywane do grep'a warto ujmować w apostrofy. <m>"
"Na ekranie widzimy wywołanie grep'a, które wypisuje z pliku </etc/passwd>[e te ce pass wu de] <m> linie pasujące do podanego wyrażenia regularnego. <m>"
'Wyrażenie to wymaga aby na początku dopasowywanego napisu <m> (czyli w przypadku grepa na początku linii) <m> znajdowała się litera <s>[es] a po niej kolejne <s>[es] lub <f>[ef]. <m>'
]
},
{
'console': [
[0.0, eduMovie.runCommandString(r"grep -v '^[a-s]' /etc/passwd", hiddenargs="--color=always")],
["minusi - 1.3", eduMovie.runCommandString(r"grep 'mail' /etc/passwd", hiddenargs="--color=always")],
["minusi", eduMovie.runCommandString(r"grep -i 'mail' /etc/passwd", hiddenargs="--color=always")],
["greprekursywny", eduMovie.runCommandString(r"grep -r 'messagebus' /etc/ 2>/dev/null", hiddenargs="--color=always")],
["greprekursywny2", eduMovie.runCommandString(r"grep -rl 'messagebus' /etc/ 2>/dev/null", hiddenargs="--color=always")],
],
'text' : [
"Z ważniejszych opcji grep'a należy wspomnieć o <m>"
'<-v>[minus V], które powoduje wypisywanie linii niepasujących do podanego wyrażenia regularnego (zamiast linii pasujących) oraz <mark name="minusi" />'
'<-i>[minus I], które spowoduje ignorowanie wielkości liter. <m>'
"Możemy również wymusić przeszukiwanie przez grep'a <m> plików binarnych, tak jakby były plikami tekstowymi, "
"czyli jeżeli grep'owi wydaje się że plik jest plikiem binarnym <m> to możemy nakazać mu potraktować ten plik jak plik tekstowy. <m>"
'Służy do tego opcja <-a>[minus A]. <mark name="greprekursywny" />'
'Opcja <-r>[minus R] pozwala na rekursywne przeszukiwanie podanych katalogów, <mark name="greprekursywny2" />'
'opcja <-l>[minus L małe] pozwala na wypisanie <m> jedynie ścieżek do plików w których było dopasowanie, <m>'
'a opcja <-L>[minus L duże] tych w których dopasowania nie było. <m>'
'Warto zauważyć iż małe l wraz z v nie równoważy dużego L, <m> gdyż opcja <-v>[minus V] działa per linia, a opcje <-l, -L>[minus L] per plik. <m>'
'W pokazanym przykładzie zignorowane <m> (poprzez przekierowanie do </dev/null>[dev null]) <m> zostały błędy związane z brakiem dostępu do pewnych plików w <etc>[e te ce]. <m>'
'Zwykły użytkownik nie może czytać niektórych <m> podkatalogów <etc>[e te ce] i jest to normalne. <m>'
]
},
{
'console': [
["minusP", eduMovie.runCommandString(r"grep 'ro.*t' /etc/passwd", hiddenargs="--color=always")],
["minusP2", eduMovie.runCommandString(r"grep -P 'ro.*?t' /etc/passwd", hiddenargs="--color=always")],
["minusE", eduMovie.runCommandString(r"grep -E '(r..t).*\1' /etc/passwd", hiddenargs="--color=always")],
["minusE + 1", eduMovie.runCommandString(r"grep '\(r..t\).*\1' /etc/passwd", hiddenargs="--color=always")],
["minusE + 4", eduMovie.runCommandString(r"grep -E '\(ad.*\)' /etc/passwd", hiddenargs="--color=always")],
["minusE + 5", eduMovie.runCommandString(r"grep '(ad.*)' /etc/passwd", hiddenargs="--color=always")],
],
'text' : [
"Grep potrafi korzystać z trzech dialektów wyrażeń regularnych <m> - podstawowych, rozszerzonych i perl'owskich. <m>"
"Domyślnie grep korzysta z wyrażeń podstawowych, <m> wyrażenia rozszerzone możemy włączyć opcją <-E>[minus E duże], a perl'owskie <-P>[minus P duże]. <m>"
'''Najbardziej podobne do tego czego uczyliśmy się na zajęciach <m> związanych z pythonem są wyrażenia perl'owskie <mark name="minusP" /> - zadziałają w nich podwyrażenia grupowane nawiasami okrągłymi, <m>'''
'krotności powtórzeń wyrażane w nawiasach klamrowych, <mark name="minusP2" /> a także nie zachłanne dopasowania z użyciem gwiazdka pytajnik. <m>'
"Wyrażenia rozszerzone są niejako pomiędzy perl'owskimi a podstawowymi <m> - nie obsługują dopasowań nie zachłannych, <m> natomiast reszta będzie działała w taki sposób <m> w jaki używaliśmy wyrażeń regularnych w Pythonie. <m>"
'Z kolei wyrażenia podstawowe pozwalają w zasadzie na to samo <m> co rozszerzone, różnią się jednak zapisem. <mark name="minusE" />'
'Niektóre ze znaków sterujących używanych w wyrażeniach rozszerzonych <m> (na przykład nawiasy okrągłe, klamrowe, pytajnik) <m>'
'aby pozostały znakami sterującymi w wyrażeniach podstawowych muszą <m> zostać poprzedzone odwrotnym ukośnikiem. <m>'
'Inaczej będą traktowane jak zwykłe znaki. <m> Z kolei jeżeli chcemy je traktować jak zwykłe znaki w wyrażeniach <m> rozszerzonych to muszą w nich być poprzedzane odwrotnym ukośnikiem. <m>'
'Wynika to oczywiście z tak zwanej kompatybilności wstecznej. <m>'
'Grep pierwotnie traktował pytajnik jako pytajnik, <m> zatem później kiedy wymyślono dla niego znaczenie specjalne, <m> nie mógł zacząć traktować go jako znaku specjalnego. <m>'
'Gdyż wszystkie programy używające grepa, w których pytajnik <m> był traktowany jako zwykły znak, mogły przestać działać. <m>'
'W związku z tym zostały wprowadzone wyrażenia rozszerzone <m> w których pytajnik stał się znakiem specjalnym, <m>'
'a w podstawowych jako znak sterujący postanowiono traktować eskejpowany pytajnik, <m> czyli ciąg odwrotny ukośnik pytajnik. <m>'
'Jak zwykle szczegóły można przeczytać w dokumentacji systemowej man. <m>'
]
},
{
'console': [
[0.0, eduMovie.runCommandString(r"echo abcdef | grep --color cd | grep a")],
[1.0, eduMovie.runCommandString(r"echo abcdef | grep --color=always cd | grep a")],
[2.0, eduMovie.runCommandString(r"echo abcdef | grep --color=always cd | grep de")],
],
'text' : [
'To czy grep domyślnie koloruje swoje wyjście, czy nie, <m> zależy od ustawień na danym systemie. <m> Kolorowanie można włączyć lub wyłączyć opcją <--color>[minus minus kolor]. <m>'
'Standardowo kolorowanie jest wyłączane jeżeli wyjście grepa <m> jest przekierowywane do innych poleceń. <m> Dodając argument always opcji <color>[kolor] możemy wymusić <m> aby działało także w takiej sytuacji. <m>'
'Jednak należy pamiętać że kolorowanie odbywa się poprzez <m> dodanie do napisu specjalnych sekwencji sterujących. <m>'
'W związku z tym przekierowanie kolorowego wyjścia <m> na przykład do kolejnego grepa, tak jak pokazano na ekranie, <m> może spowodować niewłaściwe funkcjonowanie. <m>'
'W widocznym przykładzie ciąg <de>[D E] nie został znaleziony, <m> gdyż pomiędzy <d>[de] a <e>[E] jest sekwencja kończąca kolorowanie. <m>'
]
},
]
|
import tensorflow as tf
from tensorflow.python.ops import variables
from tensorflow.python.ops import array_ops
from tensorflow.python.framework import ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tf_extended import math as tfe_math
import util
def _create_local(name, shape, collections=None, validate_shape=True,
dtype=tf.float32):
"""Creates a new local variable.
Args:
name: The name of the new or existing variable.
shape: Shape of the new or existing variable.
collections: A list of collection names to which the Variable will be added.
validate_shape: Whether to validate the shape of the variable.
dtype: Data type of the variables.
Returns:
The created variable.
"""
# Make sure local variables are added to tf.GraphKeys.LOCAL_VARIABLES
collections = list(collections or [])
collections += [ops.GraphKeys.LOCAL_VARIABLES]
return variables.Variable(
initial_value=array_ops.zeros(shape, dtype=dtype),
name=name,
trainable=False,
collections=collections,
validate_shape=validate_shape)
def streaming_tp_fp_arrays(num_gbboxes, tp, fp,
metrics_collections=None,
updates_collections=None,
name=None):
"""Streaming computation of True and False Positive arrays.
"""
with variable_scope.variable_scope(name, 'streaming_tp_fp',
[num_gbboxes, tp, fp]):
num_gbboxes = tf.cast(num_gbboxes, tf.int32)
tp = tf.cast(tp, tf.bool)
fp = tf.cast(fp, tf.bool)
# Reshape TP and FP tensors and clean away 0 class values.
tp = tf.reshape(tp, [-1])
fp = tf.reshape(fp, [-1])
        # Local variables accumulating information over batches.
v_num_objects = _create_local('v_num_gbboxes', shape=[], dtype=tf.int32)
v_tp = _create_local('v_tp', shape=[0, ], dtype=tf.bool)
v_fp = _create_local('v_fp', shape=[0, ], dtype=tf.bool)
# Update operations.
num_objects_op = state_ops.assign_add(v_num_objects,
tf.reduce_sum(num_gbboxes))
tp_op = state_ops.assign(v_tp, tf.concat([v_tp, tp], axis=0),
validate_shape=False)
fp_op = state_ops.assign(v_fp, tf.concat([v_fp, fp], axis=0),
validate_shape=False)
# Value and update ops.
val = (v_num_objects, v_tp, v_fp)
with ops.control_dependencies([num_objects_op, tp_op, fp_op]):
update_op = (num_objects_op, tp_op, fp_op)
return val, update_op
def precision_recall(num_gbboxes, tp, fp, scope=None):
"""Compute precision and recall from true positives and false
positives booleans arrays
"""
# Sort by score.
with tf.name_scope(scope, 'precision_recall'):
        # Compute recall and precision.
tp = tf.reduce_sum(tf.cast(tp, tf.float32), axis=0)
fp = tf.reduce_sum(tf.cast(fp, tf.float32), axis=0)
recall = tfe_math.safe_divide(tp, tf.cast(num_gbboxes, tf.float32), 'recall')
precision = tfe_math.safe_divide(tp, tp + fp, 'precision')
return tf.tuple([precision, recall])
def fmean(pre, rec):
"""Compute f-mean with precision and recall
"""
def zero():
return tf.zeros([])
def not_zero():
return 2 * pre * rec / (pre + rec)
return tf.cond(pre + rec > 0, not_zero, zero)
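# Worked example of the arithmetic above (illustrative plain-Python check, not
# part of the original module): 3 true positives, 1 false positive and 4
# ground-truth boxes give precision 0.75, recall 0.75 and f-mean 0.75.
def _precision_recall_fmean_example():
    tp, fp, num_gbboxes = 3.0, 1.0, 4.0
    precision = tp / (tp + fp)                               # 0.75
    recall = tp / num_gbboxes                                # 0.75
    f_mean = 2 * precision * recall / (precision + recall)   # 0.75
    return precision, recall, f_mean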
|
import json
import os
from django.contrib.auth import get_user_model
from django.utils import timezone
from iotile_cloud.utils.gid import IOTileProjectSlug, IOTileStreamSlug, IOTileVariableSlug
from apps.configattribute.models import ConfigAttribute, ConfigAttributeName
from apps.configattribute.serializers import ConfigAttributeSerializer
from apps.devicetemplate.models import DeviceTemplate
from apps.devicetemplate.serializers import DeviceTemplateSerializer
from apps.org.models import Org
from apps.org.serializers import OrgSerializer
from apps.physicaldevice.claim_utils import device_claim
from apps.physicaldevice.models import Device
from apps.physicaldevice.serializers import DeviceSerializer
from apps.project.models import Project
from apps.project.serializers import ProjectSerializer
from apps.projecttemplate.models import ProjectTemplate
from apps.projecttemplate.serializers import ProjectTemplateSerializer
from apps.property.models import GenericProperty
from apps.property.serializers import GenericPropertyWriteOnlySerializer
from apps.sensorgraph.models import SensorGraph, VariableTemplate
from apps.sensorgraph.serializers import SensorGraphSerializer, VariableTemplateSerializer
from apps.stream.models import StreamId, StreamVariable
from apps.streamdata.models import StreamData
from apps.streamevent.models import StreamEventData
from apps.utils.objects.utils import get_object_by_slug
from .serializers import *
user_model = get_user_model()
class BaseDeviceMock(object):
def __init__(self, fixture_filename):
path = os.path.join(os.path.dirname(__file__), 'data', fixture_filename)
with open(path) as infile:
data = json.load(infile)
if data:
# Orgs must be created first
if 'org' in data:
self.process_org(data['org'])
if 'var_type' in data:
self.process_var_type(data['var_type'])
if 'device_template' in data:
self.process_device_template(data['device_template'])
if 'project_template' in data:
self.process_project_template(data['project_template'])
if 'sensor_graph' in data:
self.process_sensor_graph(data['sensor_graph'])
if 'project' in data:
self.process_project(data['project'])
self.post_process()
def tearDown(self):
Org.objects.all().delete()
StreamId.objects.all().delete()
StreamVariable.objects.all().delete()
Device.objects.all().delete()
Project.objects.all().delete()
StreamData.objects.all().delete()
StreamEventData.objects.all().delete()
SensorGraph.objects.all().delete()
VarType.objects.all().delete()
VariableTemplate.objects.all().delete()
ProjectTemplate.objects.all().delete()
DeviceTemplate.objects.all().delete()
ConfigAttributeName.objects.all().delete()
ConfigAttribute.objects.all().delete()
GenericProperty.objects.all().delete()
def post_process(self):
pass
def process_org(self, orgs):
for data in orgs:
user = user_model.objects.get(slug=data['created_by'])
serializer = OrgSerializer(data=data)
assert serializer.is_valid()
org = serializer.save(created_by=user)
if 'vendor' in data and data['vendor']:
org.is_vendor = True
org.save()
if 'config' in data and data['config']:
                # Need to process Config Attributes
for item in data['config']:
ConfigAttributeName.objects.get_or_create(
name=item['name'], defaults={
'created_by': user
}
)
item['target'] = org.obj_target_slug
config_serializer = ConfigAttributeSerializer(data=item)
assert config_serializer.is_valid()
config_serializer.save(updated_by=user, target=org)
def process_device_template(self, templates):
for data in templates:
user = user_model.objects.get(slug=data['created_by'])
serializer = DeviceTemplateSerializer(data=data)
assert serializer.is_valid()
serializer.save(created_by=user)
def process_project_template(self, templates):
for data in templates:
user = user_model.objects.get(slug=data['created_by'])
serializer = ProjectTemplateSerializer(data=data)
assert serializer.is_valid()
serializer.save(created_by=user)
def process_var_type(self, var_types):
for data in var_types:
user = user_model.objects.get(slug=data['created_by'])
serializer = VarTypeSerializer(data=data)
assert serializer.is_valid()
serializer.save(created_by=user)
def process_sensor_graph(self, sensor_graphs):
for data in sensor_graphs:
user = user_model.objects.get(slug=data['created_by'])
serializer = SensorGraphSerializer(data=data)
assert serializer.is_valid()
sg = serializer.save(created_by=user)
if 'variable_templates' in data:
for vt_data in data['variable_templates']:
if 'sg' not in vt_data:
vt_data['sg'] = sg.slug
vt_serializer = VariableTemplateSerializer(data=vt_data)
if not vt_serializer.is_valid():
print(vt_serializer.errors)
else:
vt = vt_serializer.save(created_by=user)
def process_project(self, projects):
for data in projects:
user = user_model.objects.get(slug=data['created_by'])
serializer = ProjectSerializer(data=data)
assert serializer.is_valid()
project = serializer.save(created_by=user)
if 'device' in data:
self.process_device(data['device'], project)
def _cleanup_stream_data(self, item):
"""
To allow us to just copy and paste from the API payload,
need to remove item fields not needed
:param item: a data or event object
:return: Nothing (change by reference)
"""
for field in ['id', 'stream', 'project', 'device', ]:
if field in item:
del item[field]
def process_device(self, devices, project=None):
for data in devices:
user = user_model.objects.get(slug=data['created_by'])
serializer = DeviceSerializer(data=data)
            if not serializer.is_valid():
                print(serializer.errors)
            assert serializer.is_valid()
device = serializer.save(created_by=user)
assert device.template
if project:
device_claim(device=device, project=project, claimed_by=device.created_by)
if 'event' in data:
for item in data['event']:
project_slug = IOTileProjectSlug(project.slug)
varid = item.pop('variable')
variable_slug = IOTileVariableSlug(varid, project=project_slug)
stream_slug = IOTileStreamSlug()
stream_slug.from_parts(project=project_slug, device=device.slug, variable=variable_slug)
self._cleanup_stream_data(item)
event = StreamEventData(stream_slug=str(stream_slug), **item)
event.deduce_slugs_from_stream_id()
event.save()
if 'data' in data:
for item in data['data']:
project_slug = IOTileProjectSlug(project.slug)
varid = item.pop('variable')
variable_slug = IOTileVariableSlug(varid, project=project_slug)
stream_slug = IOTileStreamSlug()
stream_slug.from_parts(project=project_slug, device=device.slug, variable=variable_slug)
self._cleanup_stream_data(item)
point = StreamData(stream_slug=str(stream_slug), **item)
point.deduce_slugs_from_stream_id()
point.save()
if 'properties' in data:
for item in data['properties']:
item['target'] = device.slug
serializer = GenericPropertyWriteOnlySerializer(data=item)
if serializer.is_valid():
p = serializer.save(created_by=user)
else:
print(serializer.errors)
|
# outlook
outlook_client_id = "your-client-id-here"
outlook_client_secret = "your-client-secret-here"
outlook_scopes = ["basic", "calendar"]
outlook_token_path = "./credentials/"
outlook_token_filename = "outlook_token.txt"
previous_days = 40 # retrieve this many past days of events
future_days = 365 # retrieve this many future days of events
# google
google_token_path = "./credentials/google_token.pickle"
# calendars
calendars = [
["You Outlook Calendar Name Here", "your-calendar-id-here@group.calendar.google.com"]
]
# misc
events_ts_json_path = "./events_ts_{0}.json"
pause = 0.1
force = False # force full run even if no changes
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: temporal/api/enums/v1/reset.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import enum_type_wrapper
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
b'\n!temporal/api/enums/v1/reset.proto\x12\x15temporal.api.enums.v1*r\n\x10ResetReapplyType\x12"\n\x1eRESET_REAPPLY_TYPE_UNSPECIFIED\x10\x00\x12\x1d\n\x19RESET_REAPPLY_TYPE_SIGNAL\x10\x01\x12\x1b\n\x17RESET_REAPPLY_TYPE_NONE\x10\x02\x42~\n\x18io.temporal.api.enums.v1B\nResetProtoP\x01Z!go.temporal.io/api/enums/v1;enums\xaa\x02\x15Temporal.Api.Enums.V1\xea\x02\x18Temporal::Api::Enums::V1b\x06proto3'
)
_RESETREAPPLYTYPE = DESCRIPTOR.enum_types_by_name["ResetReapplyType"]
ResetReapplyType = enum_type_wrapper.EnumTypeWrapper(_RESETREAPPLYTYPE)
RESET_REAPPLY_TYPE_UNSPECIFIED = 0
RESET_REAPPLY_TYPE_SIGNAL = 1
RESET_REAPPLY_TYPE_NONE = 2
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
DESCRIPTOR._serialized_options = b"\n\030io.temporal.api.enums.v1B\nResetProtoP\001Z!go.temporal.io/api/enums/v1;enums\252\002\025Temporal.Api.Enums.V1\352\002\030Temporal::Api::Enums::V1"
_RESETREAPPLYTYPE._serialized_start = 60
_RESETREAPPLYTYPE._serialized_end = 174
# @@protoc_insertion_point(module_scope)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Author: <Zurdi>
import sys
import os
import psutil
import argparse
from pyvirtualdisplay import Display
from nbz_core import NBZCore
from parser.nbz_parser import NBZParser
from data.natives import NATIVES
from lib.lib_log_nbz import Logging
logger = Logging()
BASE_DIR = os.path.dirname(os.path.realpath(__file__))
def close_all():
"""Close all connections"""
logs_dir = os.path.join(BASE_DIR, "logs")
if not os.path.exists(logs_dir):
os.makedirs(logs_dir)
logs = ['server.log', 'bmp.log', 'geckodriver.log', 'ghostdriver.log']
for log in logs:
if os.path.isfile(os.path.join(os.getcwd(), log)):
os.rename(os.path.join(os.getcwd(), log), os.path.join(logs_dir, log))
root_process = psutil.Process(os.getppid())
root_children = root_process.children(recursive=True)[1:]
for child in reversed(root_children):
os.kill(child.pid, 9)
class NBZInterface:
"""Interface between all modules of the nbz.
    This class provides all the attributes needed by the core module, using the parser module
    to parse the nbz-script beforehand. Once the whole script has been executed, this class closes all connections.
Attributes:
core_attributes: dictionary of attributes needed for the core module
Methods:
compile_script
"""
def __init__(self, script, script_parameters, proxy_enabled, debug):
"""Init NBZInterface class with some attributes"""
self.core_attributes = {
'instruction_set': [],
'variables': {},
'NATIVES': NATIVES,
'USER_FUNC': {},
'script': script,
'script_name': os.path.basename(script)[0:-4],
'script_parameters': script_parameters,
'browser': [],
'proxy_enabled': proxy_enabled,
'set_net_report': False,
'net_reports_path': '',
'complete_csv_path': '',
'complete_csv': None,
'debug': debug,
}
try:
logger.log_header()
self.compile_script()
nbz_core = NBZCore(self.core_attributes)
nbz_core.execute_instructions()
nbz_core.export_har_log()
logger.log_footer()
except Exception as e:
logger.log('ERROR', str(e))
logger.log_error()
finally:
close_all()
def compile_script(self):
"""Compile script to be executed.
Returns:
            A list structure with the whole nbz-script converted
            A dict mapping the script's variables to their values
"""
try:
z_code, z_code_vars = NBZParser(self.core_attributes['script'])
self.core_attributes['instruction_set'] = z_code
self.core_attributes['variables'] = z_code_vars
if self.core_attributes['debug']:
logger.log('NOTE',
'Instructions: {instructions}'.format(instructions=self.core_attributes['instruction_set']))
logger.log('NOTE', 'Variables: {variables}'.format(variables=self.core_attributes['variables']))
except Exception as e:
logger.log('ERROR',
'Script not compiled ({script}): {exception}'.format(script=self.core_attributes['script'],
exception=e))
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-script", help="script file", required=False)
parser.add_argument("-script_parameters", help="script parameters", required=False, nargs='+')
parser.add_argument("-display", help="enable display emulation", required=False)
parser.add_argument("-resolution", help="set the screen emulator resolution", required=False)
parser.add_argument("-proxy", help="enable proxy", required=False)
parser.add_argument("-debug", help="debug mode", required=False)
args = parser.parse_args()
script = args.script
script_parameters = args.script_parameters
display = args.display
resolution = args.resolution
if display == 'true':
if resolution != 'default':
resolution = resolution.split('x')
try:
display = Display(visible=0, size=(resolution[0], resolution[1]))
except IndexError:
logger.log('ERROR', 'Error in resolution parameter. Must be like 1920x1080.')
sys.exit(4)
else:
            display = Display(visible=0, size=(1920, 1080))
display.start()
proxy_enabled = True if args.proxy == 'true' else False
debug = True if args.debug == 'true' else False
NBZInterface(script, script_parameters, proxy_enabled, debug)
if __name__ == "__main__":
sys.exit(main())
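# Example invocation (illustrative; the script filename is assumed, the flags
# come from the argparse options defined in main() above):
#     python nbz_interface.py -script my_script.nbz -display true \
#         -resolution 1920x1080 -proxy false -debug true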
|
from ...Location import Location
from ...Specification.SpecEvaluation import ProblemObjectivesEvaluations
from ..NoSolutionError import NoSolutionError
class ObjectivesMaximizerMixin:
@property
def objectives_before(self):
if self._objectives_before is None:
sequence = self.sequence
self.sequence = self.sequence_before
self._objectives_before = self.objectives_evaluations()
self.sequence = sequence
return self._objectives_before
def objectives_evaluations(self):
"""Return a list of the evaluation of each objective of the problem"""
return ProblemObjectivesEvaluations.from_problem(self)
def objective_scores_sum(self):
return self.objectives_evaluations().scores_sum()
def objectives_text_summary(self):
return self.objectives_evaluations().to_text()
def optimize_by_exhaustive_search(self):
"""
"""
if not self.all_constraints_pass():
summary = self.constraints_text_summary(failed_only=True)
raise NoSolutionError(
summary
+ "Optimization can only be done when all constraints are "
"verified.",
self,
)
if all(
[obj.best_possible_score is not None for obj in self.objectives]
):
best_possible_score = sum(
[obj.best_possible_score for obj in self.objectives]
)
else:
best_possible_score = None
current_best_score = self.objective_scores_sum()
current_best_sequence = self.sequence
all_variants = self.mutation_space.all_variants(self.sequence)
space_size = int(self.mutation_space.space_size)
self.logger(mutation__total=space_size)
for variant in self.logger.iter_bar(mutation=all_variants):
self.sequence = variant
if self.all_constraints_pass():
score = self.objective_scores_sum()
if score > current_best_score:
current_best_score = score
current_best_sequence = self.sequence
if (best_possible_score is not None) and (
current_best_score >= best_possible_score
):
self.logger(mutation__index=space_size)
break
self.sequence = current_best_sequence
def optimize_by_random_mutations(self):
"""
"""
if not self.all_constraints_pass():
summary = self.constraints_text_summary()
raise ValueError(
summary + "Optimization can only be done when all"
" constraints are verified"
)
score = self.objective_scores_sum()
if all(
[obj.best_possible_score is not None for obj in self.objectives]
):
best_possible_score = sum(
[
obj.best_possible_score * obj.boost
for obj in self.objectives
]
)
else:
best_possible_score = None
iters = self.max_random_iters
stagnating_iterations = 0
for iteration in self.logger.iter_bar(mutation=range(iters)):
if (best_possible_score is not None) and (
score >= best_possible_score
):
self.logger(mutation__index=iters)
break
if (self.optimization_stagnation_tolerance is not None) and (
stagnating_iterations > self.optimization_stagnation_tolerance
):
break
previous_sequence = self.sequence
self.sequence = self.mutation_space.apply_random_mutations(
n_mutations=self.mutations_per_iteration,
sequence=self.sequence,
)
if self.all_constraints_pass():
new_score = self.objective_scores_sum()
if new_score > score:
score = new_score
stagnating_iterations = 0
else:
self.sequence = previous_sequence
else:
self.sequence = previous_sequence
stagnating_iterations += 1
def optimize_objective(self, objective):
"""Optimize the total objective score, focusing on a single objective.
This method will attempt to increase the global objective score by
focusing on a single objective. First the locations of under-optimal
subsequences for this objective are identified, then these locations
are optimized one after the other, left to right.
For each location, a local problem is created and the optimization uses
either a custom optimization algorithm, an exhaustive search, or a
random search, to optimize the local problem
"""
# EVALUATE OBJECTIVE. RETURN IF THERE IS NOTHING TO BE DONE.
evaluation = objective.evaluate(self)
locations = evaluation.locations
if (objective.best_possible_score is not None) and (
evaluation.score == objective.best_possible_score
):
return
# FOR EACH LOCATION, CREATE AND OPTIMIZE A LOCAL PROBLEM.
for location in self.logger.iter_bar(
location=locations, bar_message=lambda l: str(l)
):
# Localize the mutation space by freezing any nucleotide outside of
# it
mutation_space = self.mutation_space.localized(location)
if mutation_space.space_size == 0:
continue
# Update the location so it matches the span of the mutation_space
# the resulting location will be equal or smaller to the original
# location.
location = Location(*mutation_space.choices_span)
localized_constraints = [
cst.localized(location, problem=self)
for cst in self.constraints
]
localized_constraints = [
cst for cst in localized_constraints if cst is not None
]
localized_objectives = [
obj.localized(location, problem=self)
for obj in self.objectives
]
localized_objectives = [
obj for obj in localized_objectives if obj is not None
]
local_problem = self.__class__(
sequence=self.sequence,
constraints=localized_constraints,
mutation_space=mutation_space,
objectives=localized_objectives,
)
self.logger.store(
problem=self, local_problem=local_problem, location=location
)
local_problem.randomization_threshold = (
self.randomization_threshold
)
local_problem.max_random_iters = self.max_random_iters
local_problem.optimization_stagnation_tolerance = (
self.optimization_stagnation_tolerance
)
local_problem.mutations_per_iteration = (
self.mutations_per_iteration
)
# OPTIMIZE THE LOCAL PROBLEM
if hasattr(objective, "optimization_heuristic"):
# Some specifications implement their own optimization method.
objective.optimization_heuristic(local_problem)
else:
# Run an exhaustive or random search depending on the size
# of the mutation space.
space_size = local_problem.mutation_space.space_size
exhaustive_search = space_size < self.randomization_threshold
if exhaustive_search:
local_problem.optimize_by_exhaustive_search()
else:
local_problem.optimize_by_random_mutations()
# UPDATE THE PROBLEM's SEQUENCE
self.sequence = local_problem.sequence
def optimize(self):
"""Maximize the total score by optimizing each objective in turn."""
objectives = [
obj for obj in self.objectives if not obj.optimize_passively
]
if len(objectives) == 0:
return
for objective in self.logger.iter_bar(
objective=objectives, bar_message=lambda o: str(o)
):
self.optimize_objective(objective=objective)
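# Illustrative usage sketch (an assumption about the public dnachisel API, not
# taken from this module): this mixin is combined into DnaOptimizationProblem,
# so the per-objective optimize() loop above is normally reached through
# something like:
#
#     from dnachisel import DnaOptimizationProblem, AvoidPattern, EnforceGCContent
#
#     problem = DnaOptimizationProblem(
#         sequence=some_sequence,
#         constraints=[AvoidPattern("BsaI_site")],
#         objectives=[EnforceGCContent(mini=0.4, maxi=0.6, window=50)],
#     )
#     problem.resolve_constraints()  # constraints must pass before optimize()
#     problem.optimize()             # runs the loop defined above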
|
#!/usr/bin/env python
# encoding: utf-8
'''
zscores_for_mutants_with_vaf_median.py
Created by Joan Smith
on 2017-9-03.
Calculate zscores for mutants where patients are only counted as mutated if their vaf is >= median vaf for a gene
Copyright (c) 2018. All rights reserved.
'''
import pandas as pd
import numpy as np
import getopt
import sys
import os
sys.path.append('../common/')
import utilities as util
import analysis
import variant_allele_freq
MUTATION_PERCENT = .04
VARIANT_ALLELE_FREQ_CUTOFF = 'MEDIAN'
def prep_data(mutation, clinical_data, key):
df = pd.read_csv(mutation, sep='\t', low_memory=False, dtype=str)
cancer_type = util.get_cancer_type(mutation)
# remove column headers from combined mutation sheet
df = df[~df[u'Hugo_Symbol'].str.contains('Hugo_Symbol')]
df[u'Tumor_Sample_Barcode'] = df[u'Tumor_Sample_Barcode'].str.strip()
number_barcodes_in_mutation_data = df[u'Tumor_Sample_Barcode'].unique().size
print 'Number of total sequenced barcodes: ', number_barcodes_in_mutation_data
df = util.maybe_clear_non_01s(df, u'Tumor_Sample_Barcode', cancer_type)
df['VAF'] = variant_allele_freq.calculate_vaf(df, key.loc[cancer_type])
# Reduce mutation data to patients that also have clinical data
df = util.add_identifier_column(df, u'Tumor_Sample_Barcode')
df = df.join(clinical_data, on='identifier', how='inner')
df.set_index([u'Hugo_Symbol', 'identifier'], inplace=True)
# symmetrically filter clinical data down to patients that were also sequenced
unique_patients = df.index.get_level_values('identifier').unique()
unique_patients_df = pd.DataFrame(unique_patients, index=unique_patients)
clinical_data_with_sequenced_patients = clinical_data.join(unique_patients_df, how='inner')
num_patients = clinical_data_with_sequenced_patients.shape[0]
print 'Number of patients with sequence and clinical data: ', num_patients
return df, clinical_data_with_sequenced_patients, num_patients
def calculate_cox(mutation, clinical_data, key, outdir):
df, clinical_data_with_sequenced_patients, num_patients = prep_data(mutation, clinical_data, key)
#prep output file
cancer_type = os.path.basename(mutation).split('_')[0].split('.')[0]
print cancer_type
outfile = os.path.join(outdir, (cancer_type + '_mutation-fraction-' + str(MUTATION_PERCENT) +
'_vaf_cutoff-' + str(VARIANT_ALLELE_FREQ_CUTOFF) +'.zscores.out.csv'))
formatstring = '\'{0}, {1}, {2}, {3}, {4}\n'
with open(outfile, 'w') as out:
out.write('gene,zscore,pvalue,num mutations,num patients\n')
#for every gene, collect the clinical data with the mutation data.
# only for non-silent mutations
patients_with_gene = df.groupby(level=u'Hugo_Symbol')
for gene, gene_df in patients_with_gene:
# Remove silent mutations
non_silent = gene_df.where(gene_df[u'Variant_Classification'] != 'Silent')
non_silent = non_silent.dropna(subset=[u'Variant_Classification'])
mutated_patient_list = non_silent.index.get_level_values('identifier').unique()
num_mutations = len(mutated_patient_list)
if num_mutations >= MUTATION_PERCENT * num_patients:
# Get "effectively mutated" patients: those who's VAF >= median
median_vaf = non_silent['VAF'].median()
greater_than_median = non_silent[non_silent['VAF'] >= median_vaf]
effectively_mutated_patients = greater_than_median.index.get_level_values('identifier').unique()
num_effective_mutations = len(effectively_mutated_patients)
# take the patients with mutations and without, and build an analysis dataframe with time and censor.
analysis_data = pd.DataFrame(
{'mutated': np.ones(num_effective_mutations)},
index=effectively_mutated_patients)
analysis_data = analysis_data.join(clinical_data_with_sequenced_patients, how='right')
analysis_data['mutated'].fillna(0, inplace=True)
#Do analysis!
print 'Doing analysis for ', gene, num_mutations
time = analysis_data['time']
censor = analysis_data['censor']
split = analysis_data['mutated']
name = cancer_type+ '_' + gene
analysis.do_km(name, time, censor, split, outdir)
cox_dict = analysis.do_cox(time, censor, split)
if cox_dict['n'] != len(analysis_data['time']):
print 'ERROR'
out.write(formatstring.format(gene, cox_dict['z'], cox_dict['p'], num_mutations,cox_dict['n']))
analysis_data.to_csv(os.path.join(outdir, name + '_data.csv'),
columns=['time', 'censor', 'mutated'])
def usage():
  print 'Provide a mutation file with -m and a clinical file with -c'
  print 'Output directory with -o'
  sys.exit(1)
def get_options(argv):
try:
opts, args = getopt.getopt(argv[1:], 'hm:c:o:g:k:',
['help', 'mutation=', 'clinical=', 'outdir=', 'key='])
except getopt.error, msg:
usage()
mutation = None
clinical = None
outdir = '.'
key = None
for option, value in opts:
if option in ('-o', '--outdir'):
outdir = value
if option in ('-h', '--help'):
usage()
if option in ('-m', '--mutation'):
mutation = value
if option in ('-c', '--clinical'):
clinical = value
if option in ('-k', '--key'):
key = value
return mutation, clinical, outdir, key
def main(argv=None):
if argv is None:
argv = sys.argv
mutation, clinical, outdir, key_file = get_options(argv)
key = pd.read_csv(key_file, index_col=0, na_values=['-'])
key = key.dropna(how='all')
cancer_type = util.get_cancer_type(mutation)
if cancer_type in key.index:
clinical_data = util.get_clinical_data(clinical)
if not os.path.isdir(outdir):
os.makedirs(outdir)
calculate_cox(mutation, clinical_data, key, outdir)
if __name__ == "__main__":
main()
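# Example invocation (illustrative; file names are placeholders, the options
# match get_options() above):
#     python zscores_for_mutants_with_vaf_median.py -m BRCA_mutations.txt \
#         -c BRCA_clinical.csv -k vaf_key.csv -o output/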
|
"""Users model
"""
from django.contrib.auth.models import User
from django.db import models
class Profile(models.Model):
"""Profile model
Args:
models (subclass): a python class that subclasses django.db.models.Model
Returns:
string: account of the user profile
"""
user = models.OneToOneField(User, on_delete=models.CASCADE)
image = models.ImageField(default="default.jpg", upload_to="profile_pics")
def __str__(self):
return f"Compte de {self.user.username}"
|
"""
Classes from the 'CoreMotion' framework.
"""
try:
from rubicon.objc import ObjCClass
except ValueError:
def ObjCClass(name):
return None
def _Class(name):
try:
return ObjCClass(name)
except NameError:
return None
CMWorkoutManager = _Class("CMWorkoutManager")
CMWorkoutManagerInternal = _Class("CMWorkoutManagerInternal")
CMKappa = _Class("CMKappa")
CMKappaInternal = _Class("CMKappaInternal")
CLNotifierClientAdapter = _Class("CLNotifierClientAdapter")
CMOnBodyStatusManager = _Class("CMOnBodyStatusManager")
CMFitnessMachineData = _Class("CMFitnessMachineData")
CLPropertyValue = _Class("CLPropertyValue")
CMSwimData = _Class("CMSwimData")
CMMetMinute = _Class("CMMetMinute")
CMStrideCalibrationEntry = _Class("CMStrideCalibrationEntry")
CMStrideCalibrationEntryInternal = _Class("CMStrideCalibrationEntryInternal")
CMWorkoutMetsData = _Class("CMWorkoutMetsData")
CMMotionAlarm = _Class("CMMotionAlarm")
CMSwimTracker = _Class("CMSwimTracker")
CMSwimTrackerInternal = _Class("CMSwimTrackerInternal")
CMElevation = _Class("CMElevation")
CMAltimeter = _Class("CMAltimeter")
CMAltimeterInternal = _Class("CMAltimeterInternal")
CMFitnessShared = _Class("CMFitnessShared")
CMDeviceMotionLite = _Class("CMDeviceMotionLite")
CMStepCounter = _Class("CMStepCounter")
CMStepCounterProxy = _Class("CMStepCounterProxy")
CMActivityAlarm = _Class("CMActivityAlarm")
CMVehicleConnection = _Class("CMVehicleConnection")
CMVehicleConnectionData = _Class("CMVehicleConnectionData")
CMActivityAlarmProxy = _Class("CMActivityAlarmProxy")
CMPoseInternal = _Class("CMPoseInternal")
CMMotionUtils = _Class("CMMotionUtils")
CLHealthAssessmentRecordTypeForDuration = _Class(
"CLHealthAssessmentRecordTypeForDuration"
)
CMTremorResult = _Class("CMTremorResult")
CMDyskineticSymptomResult = _Class("CMDyskineticSymptomResult")
CMPedometerBin = _Class("CMPedometerBin")
CMHeadphoneMotionManager = _Class("CMHeadphoneMotionManager")
CMHeadphoneMotionManagerInternal = _Class("CMHeadphoneMotionManagerInternal")
CMSkiData = _Class("CMSkiData")
CMWorkoutMets = _Class("CMWorkoutMets")
CMWorkoutMetsInternal = _Class("CMWorkoutMetsInternal")
CMCatherineFeeder = _Class("CMCatherineFeeder")
CMCatherineFeederInternal = _Class("CMCatherineFeederInternal")
CMSkiTracker = _Class("CMSkiTracker")
CMSkiTrackerInternal = _Class("CMSkiTrackerInternal")
CMFall = _Class("CMFall")
CMSleepData = _Class("CMSleepData")
CMPocketStateManager = _Class("CMPocketStateManager")
CMPocketStateQueueBlockPair = _Class("CMPocketStateQueueBlockPair")
CMDeviceOrientationManager = _Class("CMDeviceOrientationManager")
CMDeviceOrientationManagerInternal = _Class("CMDeviceOrientationManagerInternal")
CMPedometerEvent = _Class("CMPedometerEvent")
CMPedometerData = _Class("CMPedometerData")
CMPickupManager = _Class("CMPickupManager")
CMAnomalyMessenger = _Class("CMAnomalyMessenger")
CLSensorRecorderSensorSampleRate = _Class("CLSensorRecorderSensorSampleRate")
CLSensorRecorderSensorAvailable = _Class("CLSensorRecorderSensorAvailable")
CLSensorRecorderWriteSensorDataToFileForDateRange = _Class(
"CLSensorRecorderWriteSensorDataToFileForDateRange"
)
CLSensorRecorderRecordSensorTypeFor = _Class("CLSensorRecorderRecordSensorTypeFor")
CLSensorRecorderSensorDataRequestById = _Class("CLSensorRecorderSensorDataRequestById")
CLSensorRecorderSensorMetaRequestByDateRange = _Class(
"CLSensorRecorderSensorMetaRequestByDateRange"
)
CLSensorRecorderSensorMetaRequestById = _Class("CLSensorRecorderSensorMetaRequestById")
CLSensorRecorderSensorMeta = _Class("CLSensorRecorderSensorMeta")
CLLocationInternalClient_CoreMotion = _Class("CLLocationInternalClient_CoreMotion")
CMFitnessMachine = _Class("CMFitnessMachine")
CMFitnessMachineInternal = _Class("CMFitnessMachineInternal")
CMStrideCalibrationData = _Class("CMStrideCalibrationData")
CMOdometerData = _Class("CMOdometerData")
CMAnomalyEvent = _Class("CMAnomalyEvent")
CMPedometer = _Class("CMPedometer")
CMPedometerProxy = _Class("CMPedometerProxy")
CMActivityManager = _Class("CMActivityManager")
CMActivityManagerInternal = _Class("CMActivityManagerInternal")
CMGestureManager = _Class("CMGestureManager")
CMGestureManagerInternal = _Class("CMGestureManagerInternal")
CMElevationData = _Class("CMElevationData")
CLDeviceMotionProperties = _Class("CLDeviceMotionProperties")
CMHealthTracker = _Class("CMHealthTracker")
CMHealthTrackerInternal = _Class("CMHealthTrackerInternal")
CMAudioAccessoryManager = _Class("CMAudioAccessoryManager")
CMAudioAccessoryManagerInternal = _Class("CMAudioAccessoryManagerInternal")
CMAnomalyManager = _Class("CMAnomalyManager")
CMMagnetometerDataInternal = _Class("CMMagnetometerDataInternal")
CMOdometerSuitabilityManager = _Class("CMOdometerSuitabilityManager")
CMOdometerSuitabilityManagerProxy = _Class("CMOdometerSuitabilityManagerProxy")
CMNatalimeter = _Class("CMNatalimeter")
CMNatalimeterInternal = _Class("CMNatalimeterInternal")
CMMotionAlarmManager = _Class("CMMotionAlarmManager")
CMMotionAlarmManagerInternal = _Class("CMMotionAlarmManagerInternal")
CMErrorUtils = _Class("CMErrorUtils")
CMExerciseMinute = _Class("CMExerciseMinute")
CMExerciseMinuteInternal = _Class("CMExerciseMinuteInternal")
CMSedentaryTimerData = _Class("CMSedentaryTimerData")
CMCallHandednessManager = _Class("CMCallHandednessManager")
CMVO2MaxInputs = _Class("CMVO2MaxInputs")
CMSleepTracker = _Class("CMSleepTracker")
CMSpringTrackerInternal = _Class("CMSpringTrackerInternal")
CMMediaSession = _Class("CMMediaSession")
CMMotionManager = _Class("CMMotionManager")
CMMotionManagerInternal = _Class("CMMotionManagerInternal")
CMGyroDataInternal = _Class("CMGyroDataInternal")
CMRotationRateDataInternal = _Class("CMRotationRateDataInternal")
CMExerciseMinuteData = _Class("CMExerciseMinuteData")
CMOdometer = _Class("CMOdometer")
CMOdometerProxy = _Class("CMOdometerProxy")
CMOdometerSuitability = _Class("CMOdometerSuitability")
CMWakeGestureManager = _Class("CMWakeGestureManager")
CMLogItemInternal = _Class("CMLogItemInternal")
CMSensorRecorder = _Class("CMSensorRecorder")
CMSensorRecorderInternal = _Class("CMSensorRecorderInternal")
CMSensorDataList = _Class("CMSensorDataList")
CMAmbientPressureDataInternal = _Class("CMAmbientPressureDataInternal")
RMConnectionClient = _Class("RMConnectionClient")
RMConnectionClientCachedMessage = _Class("RMConnectionClientCachedMessage")
CMAttitude = _Class("CMAttitude")
CMAttitudeInternal = _Class("CMAttitudeInternal")
CMDeviceMotionInternal = _Class("CMDeviceMotionInternal")
CMWorkout = _Class("CMWorkout")
CMGenericWorkout = _Class("CMGenericWorkout")
CMFitnessMachineWorkout = _Class("CMFitnessMachineWorkout")
CMSwimWorkout = _Class("CMSwimWorkout")
CMFallStats = _Class("CMFallStats")
CMFallStatsItemsIterator = _Class("CMFallStatsItemsIterator")
RMConnectionEndpoint = _Class("RMConnectionEndpoint")
CMVehicleState = _Class("CMVehicleState")
CMVehicleStateData = _Class("CMVehicleStateData")
CMSignificantElevationSample = _Class("CMSignificantElevationSample")
CMAltitudeDataInternal = _Class("CMAltitudeDataInternal")
CMStrideCalibrationHistory = _Class("CMStrideCalibrationHistory")
CMAccelerometerDataInternal = _Class("CMAccelerometerDataInternal")
CMLogItem = _Class("CMLogItem")
CMOnBodyStatus = _Class("CMOnBodyStatus")
CMPose = _Class("CMPose")
CMHeartRateData = _Class("CMHeartRateData")
CMKappaData = _Class("CMKappaData")
CMDeviceOrientation = _Class("CMDeviceOrientation")
CMMotionActivity = _Class("CMMotionActivity")
CMActivity = _Class("CMActivity")
CMMagnetometerData = _Class("CMMagnetometerData")
CMCalorieUserInfo = _Class("CMCalorieUserInfo")
CMMotionTimeRange = _Class("CMMotionTimeRange")
CMCatherineData = _Class("CMCatherineData")
CMRotationRateData = _Class("CMRotationRateData")
CMRecordedRotationRateData = _Class("CMRecordedRotationRateData")
CMGyroData = _Class("CMGyroData")
CMRecordedGyroData = _Class("CMRecordedGyroData")
CMAmbientPressureData = _Class("CMAmbientPressureData")
CMRecordedPressureData = _Class("CMRecordedPressureData")
CMDeviceMotion = _Class("CMDeviceMotion")
CMAltitudeData = _Class("CMAltitudeData")
CMAccelerometerData = _Class("CMAccelerometerData")
CMRecordedAccelerometerData = _Class("CMRecordedAccelerometerData")
CMNatalieData = _Class("CMNatalieData")
CMMotionActivityManager = _Class("CMMotionActivityManager")
CMSedentaryTimer = _Class("CMSedentaryTimer")
CMSedentaryTimer_Internal = _Class("CMSedentaryTimer_Internal")
CLNotifierServiceAdapter = _Class("CLNotifierServiceAdapter")
CLGyroCalibrationDatabaseAdapter = _Class("CLGyroCalibrationDatabaseAdapter")
CLGeomagneticModelProviderAdapter = _Class("CLGeomagneticModelProviderAdapter")
CLCompassDatabaseAdapter = _Class("CLCompassDatabaseAdapter")
CMStrideCalibrationEntryArray = _Class("CMStrideCalibrationEntryArray")
CMGyroDataArray = _Class("CMGyroDataArray")
CMAmbientPressureDataArray = _Class("CMAmbientPressureDataArray")
CMAccelerometerDataArray = _Class("CMAccelerometerDataArray")
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2018-05-29 09:33
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('data', '0042_import_officerbadgenumber_data'),
]
operations = [
migrations.RemoveField(model_name='allegation', name='add1'),
migrations.AddField(
model_name='allegation',
name='add1',
field=models.CharField(max_length=16, null=True),
),
migrations.RemoveField(model_name='allegation', name='location'),
migrations.AddField(
model_name='allegation',
name='location',
field=models.CharField(blank=True, max_length=64),
),
]
|
#!/usr/opt/bs-python-2.7/bin/python
import os
import sys
import decimal
sys.path.append(os.path.realpath(__file__ + '/../../../lib'))
import udf
class Round(udf.TestCase):
def checkType(self, query, type):
self.query('''create or replace table tmp as ''' + query)
rows = self.query('describe tmp')
self.assertEqual(rows[0][1], type)
def setUp(self):
self.query('DROP SCHEMA FN2 CASCADE', ignore_errors=True)
self.query('CREATE SCHEMA FN2')
self.query('create or replace table t(val double, digits int)')
self.query('insert into t(digits) values 1,2,3,10,20,30,34,35,36')
self.query('update t set val = 4/3')
def test_round_special_double_type(self):
self.checkType('select round(4/3) a from dual', 'DECIMAL(36,0)')
self.checkType('select round(4/3, 1) a from dual', 'DECIMAL(36,1)')
self.checkType('select round(4/3, 2) a from dual', 'DECIMAL(36,2)')
self.checkType('select round(4/3, 3) a from dual', 'DECIMAL(36,3)')
self.checkType('select round(4/3, 10) a from dual', 'DECIMAL(36,10)')
self.checkType('select round(4/3, 20) a from dual', 'DECIMAL(36,20)')
self.checkType('select round(4/3, 30) a from dual', 'DECIMAL(36,30)')
self.checkType('select round(4/3, 34) a from dual', 'DECIMAL(36,34)')
self.checkType('select round(4/3, -1) a from dual', 'DOUBLE')
def test_round_special_double_type_on_table(self):
self.checkType('select round(val, 1) a from fn2.t where digits = 1', 'DECIMAL(36,1)')
self.checkType('select round(val, 2) a from fn2.t where digits = 2', 'DECIMAL(36,2)')
self.checkType('select round(val, 3) a from fn2.t where digits = 3', 'DECIMAL(36,3)')
self.checkType('select round(val, 10) a from fn2.t where digits = 10', 'DECIMAL(36,10)')
self.checkType('select round(val, 20) a from fn2.t where digits = 20', 'DECIMAL(36,20)')
self.checkType('select round(val, 30) a from fn2.t where digits = 30', 'DECIMAL(36,30)')
self.checkType('select round(val, 34) a from fn2.t where digits = 34', 'DECIMAL(36,34)')
self.checkType('select round(val, -1) a from fn2.t where digits = -1', 'DOUBLE')
self.checkType('select round(val, digits) a from fn2.t where digits = 1', 'DOUBLE')
def test_errors(self):
with self.assertRaisesRegexp(Exception, 'numeric value out of range'):
self.checkType('select round(4/3, 35) a from dual', 'DOUBLE')
with self.assertRaisesRegexp(Exception, 'Too many digits in ROUND for castRoundInputToDecimal'):
self.checkType('select round(4/3, 36) a from dual', 'DOUBLE')
with self.assertRaisesRegexp(Exception, 'numeric value out of range'):
self.checkType('select round(val, 35) a from fn2.t where digits = 35', 'DOUBLE')
with self.assertRaisesRegexp(Exception, 'Too many digits in ROUND for castRoundInputToDecimal'):
self.checkType('select round(val, 36) a from fn2.t where digits = 36', 'DOUBLE')
def test_round_double_special_results(self):
rows = self.query('select round(4/3) a from dual')
self.assertEqual(rows[0][0], 1)
rows = self.query('select round(4/3,1) a from dual')
self.assertEqual(rows[0][0], decimal.Decimal('1.3'))
rows = self.query('select round(4/3,10) a from dual')
self.assertEqual(rows[0][0], decimal.Decimal('1.3333333333'))
rows = self.query('select round(4/3,20) a from dual')
self.assertEqual(rows[0][0], decimal.Decimal('1.33333333333333324595'))
if __name__ == '__main__':
udf.main()
|
import csv
from tqdm import tqdm
import logging
from sklearn.cluster import k_means
from collections import Counter
# Symbols for flosses legend.
SYMBOLS = ['*', '-', '+', 'T', '>', '<', 'V', 'O', 'X', 'U', 'B', 'A', 'X', '||', '^']
def get_text_color(color):
"""Gets color that would be visible on given background."""
return (255, 255, 255) if sum(color) / 3 < 130 else (0, 0, 0)
def get_symbol(color):
"""
Gets symbol for a given color.
    Collisions are acceptable; we only need the probability of two visually similar colors sharing a symbol to be low.
"""
return SYMBOLS[(color[0] * 17 + color[1] * 11 + color[2]) % len(SYMBOLS)]
def get_distance(color1, color2):
"""Distance between two colors."""
return sum([((color1[i] - color2[i]) ** 2) for i in range(3)])
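# Hedged usage sketch (added for illustration; the values below are worked by hand):
#   >>> get_text_color((200, 30, 30))            # mean brightness ~87 < 130 -> white text
#   (255, 255, 255)
#   >>> get_symbol((200, 30, 30))                 # deterministic pick from SYMBOLS
#   'B'
#   >>> get_distance((200, 30, 30), (0, 0, 0))    # squared Euclidean distance
#   41800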
def reduce_to(image, num_colors):
"""Reduces number of image colors to at most `num_colors`."""
logger = logging.getLogger(__name__)
new_image = image.copy()
data = image.reshape((-1, 3))
    logger.info(f'Image has {data.shape[0]} pixels; reducing its palette to at most {num_colors} colors.')
clusters, indexes, _ = k_means(data, num_colors)
for x in range(new_image.shape[0]):
for y in range(new_image.shape[1]):
new_image[x, y, :] = clusters[indexes[x * new_image.shape[1] + y], :]
return new_image
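# Hedged usage sketch (assumes a small random RGB image; illustrative only):
#   >>> import numpy as np
#   >>> img = np.random.randint(0, 256, size=(32, 32, 3), dtype=np.uint8)
#   >>> reduced = reduce_to(img, 8)
#   >>> len({tuple(px) for px in reduced.reshape(-1, 3)}) <= 8
#   True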
class Colors:
"""Covers most of work with flosses."""
def __init__(self):
self._logger = logging.getLogger(__name__)
self._data = {}
# Colors data taken from https://github.com/adrianj/CrossStitchCreator/
with open('data/colors.csv', 'r') as f:
for r in csv.DictReader(f):
self._data[r['Floss#']] = (r['Description'], (int(r['Red']), int(r['Green']), int(r['Blue'])))
self._map = {}
def closest(self, ref):
ref = (int(ref[0]), int(ref[1]), int(ref[2]))
try:
return self._map[ref]
except KeyError:
colors = sorted(self._data, key=lambda x: get_distance(ref, self._data[x][1]))
self._map[ref] = colors[0]
return colors[0]
def get(self, f_id):
return self._data[f_id]
def convert_image(self, image):
self._map = {}
counter = Counter()
new_image = image.copy()
for x in tqdm(range(image.shape[0]), desc='Converting to flosses'):
for y in range(image.shape[1]):
new_image[x, y, :] = self.get(self.closest(image[x, y, :]))[1]
counter[tuple(new_image[x, y, :])] += 1
flosses = {f_id: counter[self._data[f_id][1]] for f_id in self._map.values()}
self._logger.info(f'Image has {len(flosses)} unique flosses.')
return new_image, flosses
|
"Stress test diskcache.core.Cache."
from __future__ import print_function
import collections as co
from diskcache import Cache, UnknownFileWarning, EmptyDirWarning, Timeout
import multiprocessing as mp
import os
import random
import shutil
import sys
import threading
import time
import warnings
try:
import Queue
except ImportError:
import queue as Queue
if sys.hexversion < 0x03000000:
range = xrange
import cPickle as pickle
else:
import pickle
from .utils import display
OPERATIONS = int(1e4)
GET_AVERAGE = 100
KEY_COUNT = 10
DEL_CHANCE = 0.1
WARMUP = 10
EXPIRE = None
def make_keys():
def make_int():
return random.randrange(int(1e9))
def make_long():
value = random.randrange(int(1e9))
return value << 64
def make_unicode():
word_size = random.randint(1, 26)
word = u''.join(random.sample(u'abcdefghijklmnopqrstuvwxyz', word_size))
size = random.randint(1, int(200 / 13))
return word * size
def make_bytes():
word_size = random.randint(1, 26)
word = u''.join(random.sample(u'abcdefghijklmnopqrstuvwxyz', word_size)).encode('utf-8')
size = random.randint(1, int(200 / 13))
return word * size
def make_float():
return random.random()
def make_object():
return (make_float(),) * random.randint(1, 20)
funcs = [make_int, make_long, make_unicode, make_bytes, make_float, make_object]
while True:
func = random.choice(funcs)
yield func()
def make_vals():
def make_int():
return random.randrange(int(1e9))
def make_long():
value = random.randrange(int(1e9))
return value << 64
def make_unicode():
word_size = random.randint(1, 26)
word = u''.join(random.sample(u'abcdefghijklmnopqrstuvwxyz', word_size))
size = random.randint(1, int(2 ** 16 / 13))
return word * size
def make_bytes():
word_size = random.randint(1, 26)
word = u''.join(random.sample(u'abcdefghijklmnopqrstuvwxyz', word_size)).encode('utf-8')
size = random.randint(1, int(2 ** 16 / 13))
return word * size
def make_float():
return random.random()
def make_object():
return [make_float()] * random.randint(1, int(2e3))
funcs = [make_int, make_long, make_unicode, make_bytes, make_float, make_object]
while True:
func = random.choice(funcs)
yield func()
def key_ops():
keys = make_keys()
vals = make_vals()
key = next(keys)
while True:
value = next(vals)
yield 'set', key, value
for _ in range(int(random.expovariate(1.0 / GET_AVERAGE))):
yield 'get', key, value
if random.random() < DEL_CHANCE:
yield 'delete', key, None
def all_ops():
keys = [key_ops() for _ in range(KEY_COUNT)]
for _ in range(OPERATIONS):
ops = random.choice(keys)
yield next(ops)
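# Note (added for clarity): each key_ops() stream works on a single fixed key, repeatedly
# setting it to a fresh random value, following with an exponentially distributed number of
# gets (mean GET_AVERAGE), and deleting it with probability DEL_CHANCE; all_ops() interleaves
# KEY_COUNT such single-key streams, drawing OPERATIONS operations uniformly at random.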
def worker(queue, eviction_policy, processes, threads):
timings = co.defaultdict(list)
cache = Cache('tmp', eviction_policy=eviction_policy)
for index, (action, key, value) in enumerate(iter(queue.get, None)):
start = time.time()
try:
if action == 'set':
cache.set(key, value, expire=EXPIRE)
elif action == 'get':
result = cache.get(key)
else:
assert action == 'delete'
cache.delete(key)
except Timeout:
miss = True
else:
miss = False
stop = time.time()
if action == 'get' and processes == 1 and threads == 1 and EXPIRE is None:
assert result == value
if index > WARMUP:
delta = stop - start
timings[action].append(delta)
if miss:
timings[action + '-miss'].append(delta)
queue.put(timings)
cache.close()
def dispatch(num, eviction_policy, processes, threads):
with open('input-%s.pkl' % num, 'rb') as reader:
process_queue = pickle.load(reader)
thread_queues = [Queue.Queue() for _ in range(threads)]
subthreads = [
threading.Thread(
target=worker, args=(thread_queue, eviction_policy, processes, threads)
) for thread_queue in thread_queues
]
for index, triplet in enumerate(process_queue):
thread_queue = thread_queues[index % threads]
thread_queue.put(triplet)
for thread_queue in thread_queues:
thread_queue.put(None)
start = time.time()
for thread in subthreads:
thread.start()
for thread in subthreads:
thread.join()
stop = time.time()
timings = co.defaultdict(list)
for thread_queue in thread_queues:
data = thread_queue.get()
for key in data:
timings[key].extend(data[key])
with open('output-%s.pkl' % num, 'wb') as writer:
pickle.dump(timings, writer, protocol=2)
def percentile(sequence, percent):
if not sequence:
return None
values = sorted(sequence)
if percent == 0:
return values[0]
pos = int(len(values) * percent) - 1
return values[pos]
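# Hedged example (illustrative): percentile(range(1, 101), 0.95) returns 95, because the
# sorted values are indexed at int(len(values) * percent) - 1.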
def stress_test(create=True, delete=True,
eviction_policy=u'least-recently-stored',
processes=1, threads=1):
shutil.rmtree('tmp', ignore_errors=True)
if processes == 1:
# Use threads.
func = threading.Thread
else:
func = mp.Process
subprocs = [
func(target=dispatch, args=(num, eviction_policy, processes, threads))
for num in range(processes)
]
if create:
operations = list(all_ops())
process_queue = [[] for _ in range(processes)]
for index, ops in enumerate(operations):
process_queue[index % processes].append(ops)
for num in range(processes):
with open('input-%s.pkl' % num, 'wb') as writer:
pickle.dump(process_queue[num], writer, protocol=2)
for process in subprocs:
process.start()
for process in subprocs:
process.join()
with Cache('tmp') as cache:
warnings.simplefilter('error')
warnings.simplefilter('ignore', category=UnknownFileWarning)
warnings.simplefilter('ignore', category=EmptyDirWarning)
cache.check()
timings = co.defaultdict(list)
for num in range(processes):
with open('output-%s.pkl' % num, 'rb') as reader:
data = pickle.load(reader)
for key in data:
timings[key] += data[key]
if delete:
for num in range(processes):
os.remove('input-%s.pkl' % num)
os.remove('output-%s.pkl' % num)
display(eviction_policy, timings)
shutil.rmtree('tmp', ignore_errors=True)
def stress_test_lru():
"Stress test least-recently-used eviction policy."
stress_test(eviction_policy=u'least-recently-used')
def stress_test_lfu():
"Stress test least-frequently-used eviction policy."
stress_test(eviction_policy=u'least-frequently-used')
def stress_test_none():
"Stress test 'none' eviction policy."
stress_test(eviction_policy=u'none')
def stress_test_mp():
"Stress test multiple threads and processes."
stress_test(processes=4, threads=4)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
'-n', '--operations', type=float, default=OPERATIONS,
help='Number of operations to perform',
)
parser.add_argument(
'-g', '--get-average', type=float, default=GET_AVERAGE,
help='Expected value of exponential variate used for GET count',
)
parser.add_argument(
'-k', '--key-count', type=float, default=KEY_COUNT,
help='Number of unique keys'
)
parser.add_argument(
'-d', '--del-chance', type=float, default=DEL_CHANCE,
help='Likelihood of a key deletion',
)
parser.add_argument(
'-w', '--warmup', type=float, default=WARMUP,
help='Number of warmup operations before timings',
)
parser.add_argument(
'-e', '--expire', type=float, default=EXPIRE,
help='Number of seconds before key expires',
)
parser.add_argument(
'-t', '--threads', type=int, default=1,
help='Number of threads to start in each process',
)
parser.add_argument(
'-p', '--processes', type=int, default=1,
help='Number of processes to start',
)
parser.add_argument(
'-s', '--seed', type=int, default=0,
help='Random seed',
)
parser.add_argument(
'--no-create', action='store_false', dest='create',
help='Do not create operations data',
)
parser.add_argument(
'--no-delete', action='store_false', dest='delete',
help='Do not delete operations data',
)
parser.add_argument(
        '-v', '--eviction-policy', type=str,  # 'unicode' is undefined on Python 3
default=u'least-recently-stored',
)
args = parser.parse_args()
OPERATIONS = int(args.operations)
GET_AVERAGE = int(args.get_average)
KEY_COUNT = int(args.key_count)
DEL_CHANCE = args.del_chance
WARMUP = int(args.warmup)
EXPIRE = args.expire
random.seed(args.seed)
start = time.time()
stress_test(
create=args.create,
delete=args.delete,
eviction_policy=args.eviction_policy,
processes=args.processes,
threads=args.threads,
)
end = time.time()
print('Total wall clock time: %.3f seconds' % (end - start))
|
from Views.UnitsOptionsView import UnitsOptionsView
class UnitsOptionsController:
def __init__(self):
self.unitsOptionsObservers = []
self.units = {
"P": "bar",
"T": "K",
"V": "m3/mol",
"rho": "kg/m3",
"energy_per_mol": "J/mol",
"energy_per_mol_temp": "J/molK",
}
self.unitsOptionsView = UnitsOptionsView(self)
def createUnitsOptionsView(self):
self.unitsOptionsView.show()
def okPressedUnitsOptions(self):
self.units = {
"P": self.unitsOptionsView.comboBox_pressure.currentText(),
"T": self.unitsOptionsView.comboBox_temperature.currentText(),
"V": self.unitsOptionsView.comboBox_volume.currentText(),
"rho": self.unitsOptionsView.comboBox_density.currentText(),
"energy_per_mol": self.unitsOptionsView.comboBox_energ_per_mol.currentText(),
"energy_per_mol_temp": self.unitsOptionsView.comboBox_energ_per_mol_temp.currentText(),
}
self.notifyUnitsOptionsObserver()
self.unitsOptionsView.close()
def cancePressedUnitsOptions(self):
self.unitsOptionsView.close()
def registerUnitsOptionsObserver(self, o):
self.unitsOptionsObservers.append(o)
def notifyUnitsOptionsObserver(self):
for o in self.unitsOptionsObservers:
o.updateUnitsOptions()
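# Hedged usage sketch (hypothetical observer, not part of the original codebase): any object
# exposing an updateUnitsOptions() method can subscribe to unit changes, e.g.
#
#   class ResultsPanel:
#       def __init__(self, controller):
#           self.controller = controller
#           controller.registerUnitsOptionsObserver(self)
#       def updateUnitsOptions(self):
#           units = self.controller.units
#           print("Pressure in", units["P"], "- temperature in", units["T"])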
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 1 21:02:22 2019
@author: yoelr
"""
from .palette import Palette
from math import floor, ceil
from .color import Color
from .utils import view
class ColorWheel:
"""
Create a ColorWheel object that cycles through Color objects.
Parameters
----------
colors: list[Color]
Colors to cycle through.
Examples
--------
:doc:`GettingStarted`
"""
__slots__ = ('colors', 'index')
def __init__(self, colors):
self.colors = list(colors)
self.restart()
def view(self, *args, **kwargs):
return view(self.colors, *args, **kwargs)
def interpolate(self, x):
colors = self.colors
y = x * (len(colors) - 1)
lb = int(floor(y))
ub = int(ceil(y))
s = y - lb
return Color(
fg = (1 - s) * colors[lb].RGB + s * colors[ub].RGB
)
def restart(self):
self.index = 0
def __getitem__(self, index):
return self.colors[index % len(self.colors)]
def next(self):
colors = self.colors
index = self.index
color = colors[index]
self.index = (index + 1) % len(colors)
return color
def __repr__(self):
return f"{type(self).__name__}({self.colors})"
_ipython_display_ = Palette._ipython_display_
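# Hedged usage sketch (assumes `colors` is a list of Color objects from this package;
# constructor details are not shown here, so treat the calls as illustrative):
#   >>> wheel = ColorWheel(colors)
#   >>> first = wheel.next()           # cycles through `colors`, wrapping at the end
#   >>> blend = wheel.interpolate(0.5) # linear blend of the two bracketing RGB values
#   >>> wheel[len(colors)] is wheel[0] # indexing wraps around via modulo
#   True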
|
# 🤗 Datasets
from datasets import (
concatenate_datasets,
DatasetDict,
)
from thesis.datasets.s2orc.preprocessing import get_dataset
from thesis.datasets.s2orc.read_dataset import s2orc_multichunk_read
# Dataset configuration files
from thesis.config.datasets import S2orcConfig, KeyPHConfig
from thesis.config.execution import RunConfig, LogConfig
from thesis.config.base import fingerprints
from thesis.utils.cache import no_caching, _caching
def s2ortc_loader(
dataset_config: S2orcConfig,
run_config: RunConfig,
log_config: LogConfig,
*args,
    **kwargs
) -> DatasetDict:
"""
    Args: \\
    - dataset_config: `S2orcConfig`, configuration for the s2orc dataset. \\
    - run_config: `RunConfig`, configuration for running experiments. \\
    - log_config: `LogConfig`, configuration for logging and verbosity. \\
    - *args: `args list`, extra positional params, not used. \\
    - **kwargs: `kwargs dict`, extra keyword params, not used. \\
    \\
    Return: \\
    - all_datasets: `DatasetDict`, dictionary with `train`, `test`, `valid` keys and `Dataset` values. \
"""
# print(dataset_config)
toread_meta_s2orc, toread_pdfs_s2orc = dataset_config.memory_save_pipelines(
log_config.verbose)
# print(toread_meta_s2orc, toread_pdfs_s2orc)
# for everychunk we get an element composed by 4 elements:
multichunks_lists = s2orc_multichunk_read(
dataset_config, log_config, toread_meta_s2orc, toread_pdfs_s2orc
)
# get dictionary input from config
dictionary_input = dataset_config.get_dictionary_input()
dictionary_columns = sum(dictionary_input.values(), [])
# **(dataset_config.get_fingerprint()), **(run_config.get_fingerprint()), **(log_config.get_fingerprint())
@_caching(
dictionary_columns,
**fingerprints(dataset_config, run_config, log_config),
function_name='s2ortc_loader'
)
def custom_to_dataset_list(
multichunks_lists, dataset_config, run_config, log_config, dictionary_columns
):
return [
get_dataset(
single_chunk,
dataset_config,
run_config,
log_config,
data_field=dictionary_columns,
)
for single_chunk in multichunks_lists
]
# for every chunk we fuse and create a dataset
datasets = custom_to_dataset_list(
multichunks_lists, dataset_config, run_config, log_config, dictionary_columns
)
# print(datasets)
# concatenation of all dataset to form one single dataset
all_datasets: DatasetDict = DatasetDict(
{
"train": concatenate_datasets([dataset["train"] for dataset in datasets]),
"test": concatenate_datasets([dataset["test"] for dataset in datasets]),
"valid": concatenate_datasets([dataset["valid"] for dataset in datasets]),
}
)
return all_datasets
|
import os, cv2
# Create resize_imgs function
def resize_imgs():
"""
    Quickly resizes images to 600 px width with proportionally scaled height. Must be run inside the root folder that contains the existing dataset and resized folders, and the resized folder must already contain the class subfolders.
Command example:
python resize_images.py
"""
# Set initial variables
completed = [
'alfalfa', 'allium', 'borage', 'burdock', 'calendula', 'cattail', 'chickweed', 'chicory',
'chive_blossom', 'coltsfoot', 'common_mallow', 'common_milkweed', 'common_vetch', 'common_yarrow',
'coneflower', 'cow_parsley', 'cowslip', 'crimson_clover', 'crithmum_maritimum', 'daisy',
'dandelion', 'fennel', 'fireweed', 'gardenia', 'garlic_mustard', 'geranium', 'ground_ivy',
'harebell', 'henbit', 'knapweed', 'meadowsweet', 'mullein', 'pickerelweed', 'ramsons', 'red_clover'
]
root = os.getcwd() + "\\dataset\\"
new_location = os.getcwd() + "\\resized\\"
# Get each file within dataset directory
for path, _, files in os.walk(root):
# Loop through each file and get the names
for name in files:
# Get folder name
sub = path.split('\\')[-1]
if sub not in completed:
# Set path locations
new_loc_path = os.path.join(new_location, sub, name)
img_path = os.path.join(path, name)
# Open image
img = cv2.imread(img_path, cv2.IMREAD_UNCHANGED)
print(img_path)
# Calculate sizing
width = 600
                width_percent = width / float(img.shape[1])
                height = int(img.shape[0] * width_percent)
# Resize and save image
img_resize = cv2.resize(img, (width, height), interpolation=cv2.INTER_AREA)
cv2.imwrite(new_loc_path, img_resize)
# Run main function
if __name__ == "__main__":
resize_imgs()
|
from .pages.locators import BasePageLocators
from .pages.main_page import MainPage
def test_guest_cant_see_success_message_after_adding_product_to_basket(browser):
link = "http://selenium1py.pythonanywhere.com/"
page = MainPage(browser, link)
page.open()
page.add_to_basket(*BasePageLocators.BASKET_BUTTON)
    assert page.is_not_element_present(*BasePageLocators.SUCCESS_TEXT), "Success message is present, but should not be"
def test_guest_cant_see_success_message(browser):
link = "http://selenium1py.pythonanywhere.com/en-gb/catalogue/neuromancer_13/"
page = MainPage(browser, link)
page.open()
    assert page.is_not_element_present(*BasePageLocators.SUCCESS_TEXT), "Success message is present, but should not be"
def test_message_disappeared_after_adding_product_to_basket(browser):
link = "http://selenium1py.pythonanywhere.com/en-gb/catalogue/neuromancer_13/"
page = MainPage(browser, link)
page.open()
page.add_to_basket(*BasePageLocators.BASKET_BUTTON)
    assert page.is_disappeared(*BasePageLocators.SUCCESS_TEXT), "Success message did not disappear"
|
import pandas as pd
import numpy as np
from scipy.optimize import minimize
import holoviews as hv
class EfficientFrontier:
def __init__(self, daily_returns, op=1, stocks=[]):
self.daily_returns = daily_returns
if op == 1:
self.get_ret_vol_sr = self.get_ret_vol_sr1
else:
self.get_ret_vol_sr = self.get_ret_vol_sr2
self.stocks = stocks
def get_ret_vol_sr1(self, weights):
weights = np.array(weights)
ret = np.sum((self.daily_returns.mean()*weights)*252)
vol = np.sqrt(np.dot(weights.T,np.dot(self.daily_returns.cov()*252, weights)))
sr = ret/vol
return np.array([ret,vol,sr])
def get_ret_vol_sr2(self, weights):
weights = np.array(weights)
ret = np.sum((self.daily_returns.mean()*weights)*252)
# NEW SHARPE
# np.sum((daily_returns.mean()*weights)*252)
vol = np.sqrt(np.dot(weights.T,np.dot(self.daily_returns.cov()*252, weights)))
esg_df = self.read_esg_data()
esg_df = esg_df[esg_df["ticker"].isin(self.stocks)]
avg = np.mean(esg_df["score_value"])
std = np.std(esg_df["score_value"])
esg_df["z-score"] = [(i-avg)/std for i in esg_df["score_value"]]
risk_factor = 0.05
sr = ret/vol + 2*(risk_factor)* np.sum(esg_df["z-score"])
# NEW SHARPE
# risk aversion factor ra
# sr = sqrt(sr*sr + 2 * ra * (esgi - esgU)/std(esgs))
return np.array([ret,vol,sr])
def neg_sharpe(self, weights):
return self.get_ret_vol_sr(weights)[2] * -1
def check_sum(self, weights):
return np.sum(weights) - 1
def minimize_volatility(self, weights):
return self.get_ret_vol_sr(weights)[1]
def read_esg_data(self):
companies_df = pd.read_csv("./data/companies_br.csv").set_index("company_id")
esg_df = pd.read_csv("./data/esg_scores_history_br.csv").set_index("company_id")
#chosen_stocks = ["EMBR3", "ABEV3", "ITUB4", "VALE3", "PETR4"]
esg_cohort_df = esg_df[(esg_df["aspect"] == "S&P Global ESG Score")]
l = []
for i in esg_cohort_df.index:
if i in companies_df.index:
l.append(companies_df.loc[i]["ticker"])
else:
l.append(None)
esg_cohort_df["ticker"] = l
return esg_cohort_df
def efficient_frontier(self, scatter, weights):
cons = ({'type':'eq','fun': self.check_sum})
bounds = []
for i in range(len(weights)):
bounds.append((0, 1))
bounds = tuple(bounds)
opt_results = minimize(self.neg_sharpe,weights, method='SLSQP',bounds=bounds,constraints=cons)
frontier_y = np.linspace(0.1,0.8,100)
frontier_volatility = []
for possible_return in frontier_y:
# function for return
cons = ({'type':'eq','fun': self.check_sum},
{'type':'eq','fun': lambda w: self.get_ret_vol_sr(w)[0] - possible_return})
result = minimize(self.minimize_volatility,weights, method='SLSQP',bounds=bounds,constraints=cons)
frontier_volatility.append(result['fun'])
return scatter*hv.Curve((frontier_volatility, frontier_y)).opts(color='green', line_dash='dashed')
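# Hedged usage sketch (synthetic daily returns, op=1 so no ESG CSV files are needed;
# illustrative only):
#   >>> import numpy as np, pandas as pd, holoviews as hv
#   >>> rets = pd.DataFrame(np.random.normal(0.0005, 0.01, size=(252, 4)),
#   ...                     columns=['A', 'B', 'C', 'D'])
#   >>> ef = EfficientFrontier(rets, op=1)
#   >>> w0 = [0.25, 0.25, 0.25, 0.25]
#   >>> scatter = hv.Scatter([])                    # placeholder overlay target
#   >>> plot = ef.efficient_frontier(scatter, w0)   # Scatter * dashed frontier Curve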
|
from __future__ import absolute_import
from pysoa.client.client import Client
__all__ = (
'Client',
)
|
import boto3, os, json
import smtplib
import email.message
from boto3.dynamodb.conditions import Key, Attr
url_expiration = 172800 # 2days
def sendEmail(to, subject, body):
gmail_sender = os.environ['email_account']
gmail_passwd = os.environ['email_password']
    # never print or log the account password
msg = email.message.Message()
msg['Subject'] = subject
msg['From'] = gmail_sender
msg['To'] = to
msg.add_header('Content-Type','text/html')
msg.set_payload(body)
# Gmail Sign In
server = smtplib.SMTP('smtp.gmail.com', 587)
server.ehlo()
server.starttls()
server.login(gmail_sender, gmail_passwd)
print(msg)
try:
server.sendmail(gmail_sender, [to], msg.as_string())
print ('email sent to ' + to)
    except Exception as exc:
        print('error sending mail to ' + to + ': ' + str(exc))
return server.quit()
def lambda_handler(event, context):
for record in event['Records']:
payload=record["body"]
print(payload)
payload = json.loads(payload)
if payload["token"] == "12345":
to = payload["to"]
subject = payload["subject"]
body = payload["body"]
sendEmail(to, subject, body)
return "done"
|
from . import _morphology
__all__ = ['iterate_structure', 'generate_binary_structure', # noqa: F822
'binary_erosion', 'binary_dilation', 'binary_opening',
'binary_closing', 'binary_hit_or_miss', 'binary_propagation',
'binary_fill_holes', 'grey_erosion', 'grey_dilation',
'grey_opening', 'grey_closing', 'morphological_gradient',
'morphological_laplace', 'white_tophat', 'black_tophat',
'distance_transform_bf', 'distance_transform_cdt',
'distance_transform_edt']
def __dir__():
return __all__
def __getattr__(name):
if name not in __all__:
raise AttributeError(
f"scipy.ndimage.morphology has no attribute {name}.")
return getattr(_morphology, name)
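# Hedged illustration (behaviour follows from __getattr__/__dir__ above): importing a public
# name such as binary_erosion from this module forwards to scipy.ndimage._morphology, while
# any name outside __all__ raises AttributeError, e.g.
#   >>> from scipy.ndimage.morphology import binary_erosion   # forwarded to _morphology
#   >>> import scipy.ndimage.morphology as m; m.not_a_filter  # raises AttributeError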
|
from setuptools import setup
import taxicab as tc
with open("README.md", "r") as f:
long_description = f.read()
with open("requirements.txt", "r") as f:
    INSTALL_REQUIRES = [line.strip() for line in f if line.strip()]
setup(
name='Taxicab',
version=tc.__version__,
author=tc.__author__,
author_email='nathanrooy@gmail.com',
url='https://github.com/nathanrooy/taxicab',
description='Accurate routing for Open Street Maps and OSMnx',
long_description=long_description,
long_description_content_type="text/markdown",
packages=['taxicab'],
python_requires='>=3.5',
install_requires=INSTALL_REQUIRES,
classifiers=[
'Programming Language :: Python :: 3',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent'
]
)
|
#!/usr/bin/env python
import re
import sys
keys_for_uri = {}
uris_for_key = {}
for line in sys.stdin:
s,p,o = line.split(None, 2)
uri = s[1:-1]
key = o.split('"')[1]
keys_for_uri.setdefault(uri, [])
keys_for_uri[uri].append(key)
uris_for_key.setdefault(key, [])
uris_for_key[key].append(uri)
def traverse_uris(uri):
"""return all the URIs that directly or indirectly share keys with the given URI"""
seen = set()
uris_to_check = [uri]
while len(uris_to_check) > 0:
uri = uris_to_check.pop()
if uri not in seen:
seen.add(uri)
for key in keys_for_uri[uri]:
for uri2 in uris_for_key[key]:
if uri2 not in seen:
uris_to_check.append(uri2)
return seen
def uri_sort_key(uri):
"""return a sort key for the given URI, based on whether it represents the primary work in the record"""
if uri.startswith('http://urn.fi/URN:NBN:fi:bib:me:'):
priority = int(uri[-2:]) # last two digits are 00 for the primary work, 01+ for other works mentioned
else:
priority = -1 # higher priority for e.g. authorized agents
return (priority, uri)
def select_uri(uris):
"""return the most appropriate URI from the given set of URIs"""
return sorted(uris, key=uri_sort_key)[0]
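# Hedged illustration (made-up URIs): uri_sort_key gives non-bib URIs priority -1 so they sort
# first, and among bib URIs the primary work (suffix 00) sorts ahead of secondary works (01+).
# For example, select_uri on {'http://urn.fi/URN:NBN:fi:bib:me:W00009000200',
# 'http://urn.fi/URN:NBN:fi:bib:me:W00009000201'} would return the URI ending in 00.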
uri_replacement = {} # cache for storing already computed replacements
for uri in keys_for_uri.keys():
if uri not in uri_replacement:
uris = traverse_uris(uri)
if len(uris) > 1:
replacement = select_uri(uris)
for uri2 in uris: # store in cache for all URIs in the merged set
uri_replacement[uri2] = replacement
if uri in uri_replacement and uri_replacement[uri] != uri:
print "<%s> <http://www.w3.org/2002/07/owl#sameAs> <%s> ." % (uri, uri_replacement[uri])
|
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# pylint: disable=too-many-lines
from knack.help_files import helps
helps['cloudcommunications_v1_0'] = '''
type: group
short-summary: Manage Cloud Communications
'''
helps['cloudcommunications user'] = """
type: group
short-summary: Manage user with cloudcommunications_v1_0
"""
helps['cloudcommunications user create-online-meeting'] = """
type: command
short-summary: "Create new navigation property to onlineMeetings for users."
parameters:
- name: --audio-conferencing
short-summary: "audioConferencing"
long-summary: |
Usage: --audio-conferencing conference-id=XX dialin-url=XX toll-free-number=XX toll-number=XX
dialin-url: A URL to the externally-accessible web page that contains dial-in information.
toll-free-number: The toll-free number that connects to the Audio Conference Provider.
toll-number: The toll number that connects to the Audio Conference Provider.
- name: --chat-info
short-summary: "chatInfo"
long-summary: |
Usage: --chat-info message-id=XX reply-chain-message-id=XX thread-id=XX
message-id: The unique identifier of a message in a Microsoft Teams channel.
reply-chain-message-id: The ID of the reply message.
thread-id: The unique identifier for a thread in Microsoft Teams.
- name: --join-information
short-summary: "itemBody"
long-summary: |
Usage: --join-information content=XX content-type=XX
content: The content of the item.
"""
helps['cloudcommunications user delete-online-meeting'] = """
type: command
short-summary: "Delete navigation property onlineMeetings for users."
"""
helps['cloudcommunications user list-online-meeting'] = """
type: command
short-summary: "Get onlineMeetings from users."
"""
helps['cloudcommunications user show-online-meeting'] = """
type: command
short-summary: "Get onlineMeetings from users."
"""
helps['cloudcommunications user update-online-meeting'] = """
type: command
short-summary: "Update the navigation property onlineMeetings in users."
parameters:
- name: --audio-conferencing
short-summary: "audioConferencing"
long-summary: |
Usage: --audio-conferencing conference-id=XX dialin-url=XX toll-free-number=XX toll-number=XX
dialin-url: A URL to the externally-accessible web page that contains dial-in information.
toll-free-number: The toll-free number that connects to the Audio Conference Provider.
toll-number: The toll number that connects to the Audio Conference Provider.
- name: --chat-info
short-summary: "chatInfo"
long-summary: |
Usage: --chat-info message-id=XX reply-chain-message-id=XX thread-id=XX
message-id: The unique identifier of a message in a Microsoft Teams channel.
reply-chain-message-id: The ID of the reply message.
thread-id: The unique identifier for a thread in Microsoft Teams.
- name: --join-information
short-summary: "itemBody"
long-summary: |
Usage: --join-information content=XX content-type=XX
content: The content of the item.
"""
|