content
stringlengths
5
1.05M
# Import necessary modules
import gzip
import os
import time
import concurrent.futures
import argparse

# Keep track of when the script began
start_time = time.time()
char = '\n' + ('*' * 70) + '\n'

# Argparse Information
parser = argparse.ArgumentParser(description='Phases a trio when gVCF files are \
available for each individual. This program requires 14 GB of haplotype \
reference files and these files will automatically be downloaded when the \
program is first executed.')

parser.add_argument('child_file', help='Sample (patient) File. Must be gzipped')
parser.add_argument('paternal_file', help='Paternal File. Must be gzipped')
parser.add_argument('maternal_file', help='Maternal File. Must be gzipped')
parser.add_argument('output_file', help='Name and path of output file')
parser.add_argument('--number_of_tasks', help='Max number of cores that can be \
used in a given run. If 22 cores are available for use, all chromosomes will \
be phased by SHAPEIT4 at the same time, significantly speeding up the overall \
runtime.', default="2")

args = parser.parse_args()

# Create variables of each argument from argparse
child_file = args.child_file
paternal_file = args.paternal_file
maternal_file = args.maternal_file
output_file = args.output_file
number_tasks = int(args.number_of_tasks)


# Functions
def relate_sample_name_to_file(file, title):
    """Map a family-member title ("child", "paternal", or "maternal") to the
    sample ID found in the #CHROM header line of the given gzipped gVCF.

    The result is stored in the module-level ``sample_ids`` dict (defined
    later in this script, before this function is first called).
    """
    with gzip.open(file, 'rt') as gVCF:
        for line in gVCF:
            if line.startswith('##'):
                continue
            elif line.startswith("#CHROM"):
                line_list = line.rstrip("\n").split("\t")
                # Single-sample gVCF: the sample column is always last.
                sample_id = line_list[-1]
                sample_ids[title] = sample_id
                break


def bgzip_file(file):
    """Recompress a gzip file with bgzip (block-gzip, required by GATK/tabix)
    and delete the plain-gzip original.

    NOTE(review): ``file`` is interpolated into a shell command, so paths
    must not contain untrusted shell metacharacters.
    """
    os.system(f"zcat {file} | bgzip -f > {file}.gz")
    os.system(f"rm {file}")


def filter_child(file):
    """Filter child file, remove non-variant (END) sites, and record each
    autosomal variant position in the module-level ``position_dict``.
    """
    temp_output = "/tmp/child_parsed.vcf"
    with gzip.open(file, 'rt') as gVCF, gzip.open(temp_output, 'wb') as parsed:
        for line in gVCF:
            if line.startswith('##'):
                parsed.write(line.encode())
            elif line.startswith("#CHROM"):
                line_list = line.rstrip("\n").split("\t")
                chrom_index = line_list.index("#CHROM")
                pos_index = line_list.index("POS")
                parsed.write(line.encode())
            elif "END" not in line:
                # gVCF reference blocks carry END=; variant rows do not.
                line_list = line.rstrip("\n").split("\t")
                chrom = line_list[chrom_index]
                pos = line_list[pos_index]
                # Only keep autosomes chr1-chr22; first sighting of a
                # chromosome creates its position set.
                if chrom not in position_dict and chrom[3:].isnumeric() and int(chrom[3:]) in range(1, 23):
                    position_dict[chrom] = {pos}
                    parsed.write(line.encode())
                elif chrom in position_dict:
                    position_dict[chrom].add(pos)
                    parsed.write(line.encode())
    bgzip_file(temp_output)


def filter_parents(file):
    """Filter each parent file for sites that occur as variants in the child
    of that family.
    """
    if file == paternal_file:
        temp_output = "/tmp/paternal_parsed.vcf"
    elif file == maternal_file:
        temp_output = "/tmp/maternal_parsed.vcf"
    with gzip.open(file, 'rt') as gVCF, gzip.open(temp_output, 'wb') as parsed:
        for line in gVCF:
            if line.startswith("#"):
                parsed.write(line.encode())
            else:
                line_list = line.split("\t")
                chrom = line_list[0]
                pos = line_list[1]
                if chrom in position_dict and pos in position_dict[chrom]:
                    parsed.write(line.encode())
                elif chrom in position_dict and pos not in position_dict[chrom]:
                    if "END" in line:
                        # Reference block: keep it if any child variant falls
                        # inside its [POS, END] interval.
                        # BUGFIX: str.lstrip("END=") strips a *character set*,
                        # not a prefix; parse the END= key explicitly instead.
                        end = int(line_list[7].split("END=")[1].split(";")[0])
                        for i in range(int(pos), end + 1):
                            if str(i) in position_dict[chrom]:
                                parsed.write(line.encode())
                                # BUGFIX: write the block at most once, even
                                # when several child positions fall inside it.
                                break
    bgzip_file(temp_output)
    print(f"Positions in {file} that correspond to variant-only positions of child have been output to temporary file.")


def os_system_task(task):
    """Run a shell command; used to download files or phase at various times
    throughout the process.
    """
    os.system(task)


# Create a dictionary where the key is the family members title, and the value is the sample's ID in the VCF.
sample_ids = {} #The dictionary that relate_sample_name_to_file() will use relate_sample_name_to_file(child_file, "child") relate_sample_name_to_file(paternal_file, "paternal") relate_sample_name_to_file(maternal_file, "maternal") print(sample_ids) # Create a dictionary that has all the variant positions of the child, for each # chromosome and output a file that has variant-only positions for the child position_dict = {} #The dictionary that filter_child() will use filter_child(child_file) print("Variant-only positions of the child have been written to a temporary file.") # Output a temporary file for each parent that has positions that occur as variant-only positions in the child with concurrent.futures.ProcessPoolExecutor(max_workers=number_tasks) as executor: executor.map(filter_parents, [paternal_file, maternal_file]) # Use GATK to combine all trios into one temporary vcf and then genotype the combined trio vcf files = ["/tmp/child_parsed.vcf.gz", "/tmp/paternal_parsed.vcf.gz", "/tmp/maternal_parsed.vcf.gz"] temp_combined_name = "/tmp/combined.vcf.gz" try: file_string = "" for file in files: file_string += f"-V {file} " os.system(f"gatk IndexFeatureFile -F {file}") # Extract fasta reference file os.system("unzip /fasta_references.zip -d /fasta_references") os.system("gzip -d /fasta_references/*.gz") os.system(f"gatk CombineGVCFs -R /fasta_references/Homo_sapiens_assembly38.fasta {file_string} -O {temp_combined_name}") print("Trio has been combined and written to a temporary file.") os.system(f"gatk IndexFeatureFile -F {temp_combined_name}") os.system(f"gatk --java-options '-Xmx4g' GenotypeGVCFs -R /fasta_references/Homo_sapiens_assembly38.fasta -V {temp_combined_name} -O {output_file}") print("Trio has been joint-genotyped.") except: print("Trio not combined, there was an error detected by GATK")
import numpy as np MLIR_TYPE_TO_NP_TYPE = { "i8": np.int8, "i16": np.int16, "i32": np.int32, "i64": np.int64, # 'f16': np.float16, # 16-bit floats don't seem to be supported in ctypes "f32": np.float32, "f64": np.float64, }
# Example: List orders using the Mollie API. # import os from mollie.api.client import Client from mollie.api.error import Error def main(): try: # # Initialize the Mollie API library with your API key. # # See: https://www.mollie.com/dashboard/settings/profiles # api_key = os.environ.get("MOLLIE_API_KEY", "test_test") mollie_client = Client() mollie_client.set_api_key(api_key) # # List the most recent orders # # See: https://docs.mollie.com/reference/v2/orders-api/list-orders # body = "" orders = mollie_client.orders.list() if not len(orders): body += "<p>You have no orders. You can create one from the examples.</p>" return body body += f"<p>Showing the last {len(orders)} orders for your API key.</p>" body += """ <table> <thead> <tr> <th>ID</th> <th>Billed to</th> <th>Shipped to</th> <th>Total amount</th> </tr> </thead> <tbody> """ for order in orders: body += "<tr>" body += f"<td>{order.id}</td>" body += f'<td>{order.billing_address["givenName"]} {order.billing_address["familyName"]}</td>' body += f'<td>{order.shipping_address["givenName"]} {order.shipping_address["familyName"]}</td>' body += f'<td>{order.amount["currency"]} {order.amount["value"]}</td>' body += f'<td><a href="{order.checkout_url}" target="_blank">Pay order</a></td>' body += f'<td><a href="/14-cancel-order?order_id={order.id}">Cancel order</a></td>' body += f'<td><a href="/18-ship-order-completely?order_id={order.id}">Ship order</a></td>' body += "</tr>" body += "</tbody></table>" return body except Error as err: return f"API call failed: {err}" if __name__ == "__main__": print(main())
#!/usr/bin/env python
# coding: utf-8
# Generate poem continuations with GPT-2 (1558M), seeded by existing poems,
# and dump the inputs and outputs as JSON.

import json
import os

import numpy as np
import tensorflow as tf

import model, sample, encoder
import shanepy
import shanepy as spy
from shanepy import *

# !ln -s ../models models  # hack to make models "appear" in two places

# Sampling configuration.
model_name = '1558M'
seed = None
nsamples = 10
batch_size = 10
length = 40
temperature = 0.8  # 0 is deterministic
top_k = 40  # 0 means no restrictions

assert nsamples % batch_size == 0

enc = encoder.get_encoder(model_name, "models")
hparams = model.default_hparams()
with open(os.path.join('models', model_name, 'hparams.json')) as f:
    hparams.override_from_dict(json.load(f))

if length is None:
    length = hparams.n_ctx // 2
elif length > hparams.n_ctx:
    raise ValueError("Can't get samples longer than window size: %s" % hparams.n_ctx)

sess = tf.InteractiveSession()
# replace with this in script:
# with tf.Session(graph=tf.Graph()) as sess:
context = tf.placeholder(tf.int32, [batch_size, None])
np.random.seed(seed)
tf.set_random_seed(seed)
output = sample.sample_sequence(
    hparams=hparams, length=length,
    context=context,
    batch_size=batch_size,
    temperature=temperature, top_k=top_k
)

saver = tf.train.Saver()
ckpt = tf.train.latest_checkpoint(os.path.join('models', model_name))
saver.restore(sess, ckpt)

import unicodedata
import os, re, random

# Normalise typography (accents, dashes, smart quotes) to plain ASCII.
mapping = {
    '\xa0': ' ',
    'Æ': 'AE',
    'æ': 'ae',
    'è': 'e',
    'é': 'e',
    'ë': 'e',
    'ö': 'o',
    '–': '-',
    '—': '-',
    '‘': "'",
    '’': "'",
    '“': '"',
    '”': '"'
}


def remove_special(text):
    """Replace every mapped special character with its ASCII equivalent."""
    return ''.join([mapping[e] if e in mapping else e for e in text])


def strip_word(word):
    """Lowercase a word and strip leading/trailing non-word characters."""
    # BUGFIX: raw string for the regex (avoids invalid-escape warnings on
    # modern Python).
    word = re.sub(r'^\W*|\W*$', '', word).lower()
    return word


basenames = []
all_poems = {}
total_lines = 0
words = set()

# Each poetry file: line 0 = url, 1 = title, 2 = author, rest = poem body.
for fn in map(lambda x: "src/poetry/" + x, os.listdir("src/poetry")):
    # BUGFIX: the original opened fn twice (once via `with`, once bare),
    # leaking a file handle; read through the managed handle instead.
    with open(fn) as f:
        original = f.read()
    text = remove_special(original).split('\n')
    poem = text[3:]
    basename = os.path.basename(fn)
    basename = os.path.splitext(basename)[0]
    basenames.append(basename)
    all_poems[basename] = {
        'url': text[0],
        'title': text[1],
        'author': text[2],
        'poem': poem
    }
    total_lines += len(poem)
    poem = '\n'.join(poem)
    words.update([strip_word(e) for e in poem.split()])

try:
    words.remove('')
except Exception:
    pass
words = list(words)
print(total_lines)


def titlecase_word(word):
    """Uppercase only the first letter (unlike str.title, keeps "carpenter's")."""
    print(word)
    return word[0].upper() + word[1:]


titlecase_word("carpenter's"), "carpenter's".title()


def random_chunk(array, length):
    """Return a random contiguous slice of `length` items from `array`."""
    start = random.randint(0, max(0, len(array) - length - 1))
    return array[start:start + length]


def random_item(array):
    """Return one uniformly random element of `array`."""
    return array[random.randint(0, len(array) - 1)]


random_chunk(all_poems[basenames[0]]['poem'], 2), titlecase_word(random_item(words))

seeds = '''Solve a Rubik's cube
A Rubik's cube
The algorithm of a Rubik's cube
A Rubik's cube has a faces and many colours'''.split("\n")
len(seeds)

from utils.progress import progress


def clean(text):
    """Truncate generated text at the GPT-2 end-of-text marker."""
    return text.split('<|endoftext|>')[0]


def generate(inspiration, seed):
    """Sample `nsamples` continuations of `seed`, conditioned on `inspiration`."""
    inspiration = remove_special(inspiration).strip()
    seed = titlecase_word(seed).strip()
    raw_text = inspiration + '\n' + seed
    context_tokens = enc.encode(raw_text)
    n_context = len(context_tokens)
    results = []
    for _ in range(nsamples // batch_size):
        out = sess.run(output, feed_dict={
            context: [context_tokens for _ in range(batch_size)]
        })
        # BUGFIX: the loop variable used to be named `sample`, shadowing the
        # imported `sample` module inside this function.
        for sample_tokens in out:
            text = enc.decode(sample_tokens[n_context:])
            result = seed + text
            results.append(result)
    return results


inspiration_lines = 16
all_results = {}
for seed in seeds:
    print(seed)
    cur = {}
    for basename in basenames:
        inspiration = random_chunk(all_poems[basename]['poem'], inspiration_lines)
        inspiration = '\n'.join(inspiration)
        results = generate(inspiration, seed)
        cur[basename] = results
    all_results[seed] = cur

import json
with open('poems.json', 'w') as f:
    json.dump(all_poems, f, separators=(',', ':'))
with open('generated.json', 'w') as f:
    json.dump(all_results, f, separators=(',', ':'))
APPLY = 1 ARRAY = 2 ASSIGN = 3 BREAK = 4 CASE = 5 CLASS_BEGIN = 6 CLASS_END = 7 CONTINUE = 8 FOR_BEGIN = 9 FOR_END = 10 FUNC_BEGIN = 11 FUNC_END = 12 IF_BEGIN = 13 IF_END = 14 RETURN = 15 SWITCH_BEGIN = 16 SWITCH_END = 17 TRANSPOSE = 18 WHILE_BEGIN = 19 WHILE_END = 20
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
"""Parsers that convert the string form of database column values into Python objects."""
from typing import Callable
import decimal
import datetime
import uuid
from dateutil import parser as date_parser    # noqa

from ossdbtoolsservice.parsers import datatypes

# Accepted case-insensitive spellings of boolean column values.
VALID_TRUE_VALUES = ['true', 't', 'y', 'yes', '1']
VALID_FALSE_VALUES = ['false', 'f', 'n', 'no', '0']


def parse_bool(value: str) -> bool:
    """Parse a boolean string; raise ValueError for unrecognized spellings."""
    bool_val = value.lower()
    if bool_val in VALID_TRUE_VALUES:
        return True
    elif bool_val in VALID_FALSE_VALUES:
        return False
    else:
        # BUGFIX: include the offending value instead of raising a bare
        # ValueError() with no message (exception type is unchanged).
        raise ValueError(f'"{value}" is not a recognized boolean value')


def parse_float(value: str) -> float:
    """Parse a floating-point string."""
    return float(value)


def parse_int(value: str) -> int:
    """Parse an integer string."""
    return int(value)


def parse_decimal(value: str) -> decimal.Decimal:
    """Parse an exact decimal string."""
    return decimal.Decimal(value)


def parse_str(value: str) -> str:
    """Identity parser for string-typed columns."""
    return value


def parse_char(value: str) -> str:
    """Parse a single character; raise ValueError for longer strings.

    NOTE(review): an empty string is currently accepted — confirm whether
    callers rely on that.
    """
    if len(value) > 1:
        raise ValueError('Value provided is not a character')
    return value


def parse_date(value: str) -> datetime.date:
    """Parse a date string (any format dateutil understands)."""
    date: datetime.datetime = date_parser.parse(value)
    return date.date()


def parse_time(value: str) -> datetime.time:
    """Parse a time-of-day string, discarding any timezone."""
    date: datetime.datetime = date_parser.parse(value)
    return date.time()


def parse_time_with_timezone(value: str) -> datetime.time:
    """Parse a time-of-day string, keeping its timezone."""
    date: datetime.datetime = date_parser.parse(value)
    return date.timetz()


def parse_datetime(value: str) -> datetime.datetime:
    """Parse a timestamp string; the literal 'now()' yields the current time."""
    if value == 'now()':
        return datetime.datetime.now()
    return date_parser.parse(value)


def parse_timedelta(value: str) -> datetime.timedelta:
    """Interval parsing is not implemented yet."""
    raise NotImplementedError()


def parse_uuid(value: str) -> uuid.UUID:
    """Parse a UUID string."""
    return uuid.UUID(value)


# Column datatype name -> parser callable.
DATATYPE_PARSER_MAP = {
    datatypes.DATATYPE_BOOL: parse_bool,
    datatypes.DATATYPE_REAL: parse_float,
    datatypes.DATATYPE_DOUBLE: parse_float,
    datatypes.DATATYPE_SMALLINT: parse_int,
    datatypes.DATATYPE_INTEGER: parse_int,
    datatypes.DATATYPE_BIGINT: parse_int,
    datatypes.DATATYPE_NUMERIC: parse_decimal,
    datatypes.DATATYPE_CHAR: parse_char,
    datatypes.DATATYPE_VARCHAR: parse_str,
    datatypes.DATATYPE_TEXT: parse_str,
    datatypes.DATATYPE_DATE: parse_date,
    datatypes.DATATYPE_TIME: parse_time,
    datatypes.DATATYPE_TIME_WITH_TIMEZONE: parse_time_with_timezone,
    datatypes.DATATYPE_TIMESTAMP: parse_datetime,
    datatypes.DATATYPE_TIMESTAMP_WITH_TIMEZONE: parse_datetime,
    datatypes.DATATYPE_INTERVAL: parse_timedelta,
    datatypes.DATATYPE_UUID: parse_uuid,
    datatypes.DATATYPE_NAME: parse_str
}


def get_parser(column_data_type: str) -> Callable[[str], object]:
    '''
    Returns a parser for the column_data_type provided. If not found returns None
    '''
    return DATATYPE_PARSER_MAP.get(column_data_type.lower())
# coding: utf-8

"""
    Strava API v3

    The [Swagger Playground](https://developers.strava.com/playground) is the easiest way to familiarize yourself with the Strava API by submitting HTTP requests and observing the responses before you write any client code. It will show what a response will look like with different endpoints depending on the authorization scope you receive from your athletes. To use the Playground, go to https://www.strava.com/settings/api and change your “Authorization Callback Domain” to developers.strava.com. Please note, we only support Swagger 2.0. There is a known issue where you can only select one scope at a time. For more information, please check the section “client code” at https://developers.strava.com/docs.  # noqa: E501

    OpenAPI spec version: 3.0.0
    Generated by: https://github.com/swagger-api/swagger-codegen.git
"""

import pprint
import re  # noqa: F401

import six


class ActivityStats(object):
    """Rolled-up activity statistics and totals for an athlete.

    NOTE: originally auto-generated by swagger-codegen.  Rewritten to
    replace ~300 lines of identical per-field property boilerplate with
    generated properties (see ``_stat_property`` below); the public
    interface — constructor keywords, the eleven properties,
    ``swagger_types``, ``attribute_map``, ``to_dict``/``to_str``/
    ``__repr__``/``__eq__``/``__ne__`` — is unchanged.
    """

    # Attribute name -> swagger type of that attribute.
    swagger_types = {
        'biggest_ride_distance': 'float',
        'biggest_climb_elevation_gain': 'float',
        'recent_ride_totals': 'ActivityTotal',
        'recent_run_totals': 'ActivityTotal',
        'recent_swim_totals': 'ActivityTotal',
        'ytd_ride_totals': 'ActivityTotal',
        'ytd_run_totals': 'ActivityTotal',
        'ytd_swim_totals': 'ActivityTotal',
        'all_ride_totals': 'ActivityTotal',
        'all_run_totals': 'ActivityTotal',
        'all_swim_totals': 'ActivityTotal'
    }

    # Attribute name -> JSON key; identical for every field of this model.
    attribute_map = {name: name for name in swagger_types}

    def __init__(self, biggest_ride_distance=None, biggest_climb_elevation_gain=None,
                 recent_ride_totals=None, recent_run_totals=None, recent_swim_totals=None,
                 ytd_ride_totals=None, ytd_run_totals=None, ytd_swim_totals=None,
                 all_ride_totals=None, all_run_totals=None, all_swim_totals=None):  # noqa: E501
        """ActivityStats - a model defined in Swagger"""  # noqa: E501
        self.discriminator = None
        supplied = {
            'biggest_ride_distance': biggest_ride_distance,
            'biggest_climb_elevation_gain': biggest_climb_elevation_gain,
            'recent_ride_totals': recent_ride_totals,
            'recent_run_totals': recent_run_totals,
            'recent_swim_totals': recent_swim_totals,
            'ytd_ride_totals': ytd_ride_totals,
            'ytd_run_totals': ytd_run_totals,
            'ytd_swim_totals': ytd_swim_totals,
            'all_ride_totals': all_ride_totals,
            'all_run_totals': all_run_totals,
            'all_swim_totals': all_swim_totals,
        }
        for attr, value in supplied.items():
            # Initialize the backing slot, then assign through the property
            # setter only when a value was actually supplied (matches the
            # generated code's `if x is not None:` pattern).
            setattr(self, '_' + attr, None)
            if value is not None:
                setattr(self, attr, value)

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        if issubclass(ActivityStats, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ActivityStats):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other


def _stat_property(attr):
    """Build a plain get/set property backed by the '_'-prefixed slot."""
    def _get(self):
        return getattr(self, '_' + attr)

    def _set(self, value):
        setattr(self, '_' + attr, value)

    return property(_get, _set,
                    doc="Gets or sets the %s of this ActivityStats." % attr)


# Attach one identical get/set property per swagger field.
for _attr in ActivityStats.swagger_types:
    setattr(ActivityStats, _attr, _stat_property(_attr))
del _attr
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler.  DO NOT EDIT!
# source: pyatv/protocols/mrp/protobuf/VolumeControlAvailabilityMessage.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()


from pyatv.protocols.mrp.protobuf import ProtocolMessage_pb2 as pyatv_dot_protocols_dot_mrp_dot_protobuf_dot_ProtocolMessage__pb2

# Register the serialized .proto file with the default descriptor pool; the
# bytes literal is the compiled FileDescriptorProto.
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\nCpyatv/protocols/mrp/protobuf/VolumeControlAvailabilityMessage.proto\x1a\x32pyatv/protocols/mrp/protobuf/ProtocolMessage.proto\"L\n\x12VolumeCapabilities\"6\n\x04\x45num\x12\x08\n\x04None\x10\x00\x12\x0c\n\x08Relative\x10\x01\x12\x0c\n\x08\x41\x62solute\x10\x02\x12\x08\n\x04\x42oth\x10\x03\"x\n VolumeControlAvailabilityMessage\x12\x1e\n\x16volumeControlAvailable\x18\x01 \x01(\x08\x12\x34\n\x12volumeCapabilities\x18\x02 \x01(\x0e\x32\x18.VolumeCapabilities.Enum:]\n volumeControlAvailabilityMessage\x12\x10.ProtocolMessage\x18\x16 \x01(\x0b\x32!.VolumeControlAvailabilityMessage')


# Field number of the volumeControlAvailabilityMessage extension on ProtocolMessage.
VOLUMECONTROLAVAILABILITYMESSAGE_FIELD_NUMBER = 22
volumeControlAvailabilityMessage = DESCRIPTOR.extensions_by_name['volumeControlAvailabilityMessage']
_VOLUMECAPABILITIES = DESCRIPTOR.message_types_by_name['VolumeCapabilities']
_VOLUMECONTROLAVAILABILITYMESSAGE = DESCRIPTOR.message_types_by_name['VolumeControlAvailabilityMessage']
_VOLUMECAPABILITIES_ENUM = _VOLUMECAPABILITIES.enum_types_by_name['Enum']
# Build the concrete message classes from their descriptors.
VolumeCapabilities = _reflection.GeneratedProtocolMessageType('VolumeCapabilities', (_message.Message,), {
  'DESCRIPTOR' : _VOLUMECAPABILITIES,
  '__module__' : 'pyatv.protocols.mrp.protobuf.VolumeControlAvailabilityMessage_pb2'
  # @@protoc_insertion_point(class_scope:VolumeCapabilities)
  })
_sym_db.RegisterMessage(VolumeCapabilities)

VolumeControlAvailabilityMessage = _reflection.GeneratedProtocolMessageType('VolumeControlAvailabilityMessage', (_message.Message,), {
  'DESCRIPTOR' : _VOLUMECONTROLAVAILABILITYMESSAGE,
  '__module__' : 'pyatv.protocols.mrp.protobuf.VolumeControlAvailabilityMessage_pb2'
  # @@protoc_insertion_point(class_scope:VolumeControlAvailabilityMessage)
  })
_sym_db.RegisterMessage(VolumeControlAvailabilityMessage)

if _descriptor._USE_C_DESCRIPTORS == False:
  # Pure-Python descriptors: attach the extension to ProtocolMessage and
  # record the byte offsets of each message/enum within the serialized file.
  pyatv_dot_protocols_dot_mrp_dot_protobuf_dot_ProtocolMessage__pb2.ProtocolMessage.RegisterExtension(volumeControlAvailabilityMessage)

  DESCRIPTOR._options = None
  _VOLUMECAPABILITIES._serialized_start=123
  _VOLUMECAPABILITIES._serialized_end=199
  _VOLUMECAPABILITIES_ENUM._serialized_start=145
  _VOLUMECAPABILITIES_ENUM._serialized_end=199
  _VOLUMECONTROLAVAILABILITYMESSAGE._serialized_start=201
  _VOLUMECONTROLAVAILABILITYMESSAGE._serialized_end=321
# @@protoc_insertion_point(module_scope)
""" Main entry module for easy CLI invocation """ from argparse import ArgumentParser from typing import Sequence, Union, Optional import asyncio import json import sys from portscanner.scanner import PortScanner from portscanner.types import ScanInfo, ScanState from portscanner.utils import cleanup from portscanner.parsing import port_range from portscanner.colors import Clr if sys.platform == "win32": asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy()) __all__ = [ "run_scanner", "run", ] TOP_PORTS = port_range( "1,3-4,6-7,9,13,17,19-26,30,32-33,37,42-43,49,53,70,79-85,88-90,99-100,106,109-111,113,119,125,135,139,143-144,146,161,163,179,199,211-212,222,254-256,259,264,280,301,306,311,340,366,389,406-407,416-417,425,427,443-445,458,464-465,481,497,500,512-515,524,541,543-545,548,554-555,563,587,593,616-617,625,631,636,646,648,666-668,683,687,691,700,705,711,714,720,722,726,749,765,777,783,787,800-801,808,843,873,880,888,898,900-903,911-912,981,987,990,992-993,995,999-1002,1007,1009-1011,1021-1100,1102,1104-1108,1110-1114,1117,1119,1121-1124,1126,1130-1132,1137-1138,1141,1145,1147-1149,1151-1152,1154,1163-1166,1169,1174-1175,1183,1185-1187,1192,1198-1199,1201,1213,1216-1218,1233-1234,1236,1244,1247-1248,1259,1271-1272,1277,1287,1296,1300-1301,1309-1311,1322,1328,1334,1352,1417,1433-1434,1443,1455,1461,1494,1500-1501,1503,1521,1524,1533,1556,1580,1583,1594,1600,1641,1658,1666,1687-1688,1700,1717-1721,1723,1755,1761,1782-1783,1801,1805,1812,1839-1840,1862-1864,1875,1900,1914,1935,1947,1971-1972,1974,1984,1998-2010,2013,2020-2022,2030,2033-2035,2038,2040-2043,2045-2049,2065,2068,2099-2100,2103,2105-2107,2111,2119,2121,2126,2135,2144,2160-2161,2170,2179,2190-2191,2196,2200,2222,2251,2260,2288,2301,2323,2366,2381-2383,2393-2394,2399,2401,2492,2500,2522,2525,2557,2601-2602,2604-2605,2607-2608,2638,2701-2702,2710,2717-2718,2725,2800,2809,2811,2869,2875,2909-2910,2920,2967-2968,2998,3000-3001,3003,3005-3007,3011,3013,3017,3030-3031,3052,3071,3077,3128,316
8,3211,3221,3260-3261,3268-3269,3283,3300-3301,3306,3322-3325,3333,3351,3367,3369-3372,3389-3390,3404,3476,3493,3517,3527,3546,3551,3580,3659,3689-3690,3703,3737,3766,3784,3800-3801,3809,3814,3826-3828,3851,3869,3871,3878,3880,3889,3905,3914,3918,3920,3945,3971,3986,3995,3998,4000-4006,4045,4111,4125-4126,4129,4224,4242,4279,4321,4343,4443-4446,4449,4550,4567,4662,4848,4899-4900,4998,5000-5004,5009,5030,5033,5050-5051,5054,5060-5061,5080,5087,5100-5102,5120,5190,5200,5214,5221-5222,5225-5226,5269,5280,5298,5357,5405,5414,5431-5432,5440,5500,5510,5544,5550,5555,5560,5566,5631,5633,5666,5678-5679,5718,5730,5800-5802,5810-5811,5815,5822,5825,5850,5859,5862,5877,5900-5904,5906-5907,5910-5911,5915,5922,5925,5950,5952,5959-5963,5987-5989,5998-6007,6009,6025,6059,6100-6101,6106,6112,6123,6129,6156,6346,6389,6502,6510,6543,6547,6565-6567,6580,6646,6666-6669,6689,6692,6699,6779,6788-6789,6792,6839,6881,6901,6969,7000-7002,7004,7007,7019,7025,7070,7100,7103,7106,7200-7201,7402,7435,7443,7496,7512,7625,7627,7676,7741,7777-7778,7800,7911,7920-7921,7937-7938,7999-8002,8007-8011,8021-8022,8031,8042,8045,8080-8090,8093,8099-8100,8180-8181,8192-8194,8200,8222,8254,8290-8292,8300,8333,8383,8400,8402,8443,8500,8600,8649,8651-8652,8654,8701,8800,8873,8888,8899,8994,9000-9003,9009-9011,9040,9050,9071,9080-9081,9090-9091,9099-9103,9110-9111,9200,9207,9220,9290,9415,9418,9485,9500,9502-9503,9535,9575,9593-9595,9618,9666,9876-9878,9898,9900,9917,9929,9943-9944,9968,9998-10004,10009-10010,10012,10024-10025,10082,10180,10215,10243,10566,10616-10617,10621,10626,10628-10629,10778,11110-11111,11967,12000,12174,12265,12345,13456,13722,13782-13783,14000,14238,14441-14442,15000,15002-15004,15660,15742,16000-16001,16012,16016,16018,16080,16113,16992-16993,17877,17988,18040,18101,18988,19101,19283,19315,19350,19780,19801,19842,20000,20005,20031,20221-20222,20828,21571,22939,23502,24444,24800,25734-25735,26214,27000,27352-27353,27355-27356,27715,28201,30000,30718,30951,31038,31337,32768-32785,33354,
33899,34571-34573,35500,38292,40193,40911,41511,42510,44176,44442-44443,44501,45100,48080,49152-49161,49163,49165,49167,49175-49176,49400,49999-50003,50006,50300,50389,50500,50636,50800,51103,51493,52673,52822,52848,52869,54045,54328,55055-55056,55555,55600,56737-56738,57294,57797,58080,60020,60443,61532,61900,62078,63331,64623,64680,65000,65129,65389" ) def style(info: ScanInfo, as_json: bool = False, first: bool = False): if not as_json: hstr = f"{str(info.host):<40} " pclr = { ScanState.OPEN: Clr.GRN, ScanState.CLOSED: Clr.RED, ScanState.TIMEOUT: Clr.PRP, ScanState.UNKNOWN: Clr.YLW, }.get(info.state) pstr = f"{pclr}{info.port:<6} {info.state.name:<8}{Clr.RST} " print(Clr.WHT + hstr + pstr + (info.banner or "")) else: data = dict( host=str(info.host), port=str(info.port), state=str(info.state.name), banner=info.banner, ) print(("" if first else ",\n") + json.dumps(data, indent=4), end="", flush=True) async def run_scanner( workers: int, hosts: Sequence[str], ports: Sequence[int], timeout: float, qtype: Union[str, Sequence[str]], open_only: bool, as_json: bool, resolve_all: bool, banner_buffer: Optional[int] = None, verbose: bool = False, ): with PortScanner(workers, timeout, banner_buffer) as scanner: first = True async for info in scanner.scan(hosts, ports, open=open_only, qtype=qtype, all=resolve_all, verbose=verbose): style(info, as_json, first) if first: first = False def run(): ap = ArgumentParser("portscanner", conflict_handler="resolve") ap.add_argument( "targets", type=str, nargs="+", help="Target hostnames, IP Addresses, or CIDRs to perform a port scan" ) ap.add_argument( "-p", "--ports", type=port_range, default=TOP_PORTS, help="Target TCP Ports to scan on each hostname, IP, or CIDR", ) ap.add_argument( "-t", "--timeout", default=1.0, type=float, help="Connection timeout for port scans and DNS resolution", ) ap.add_argument( "-d", "--dns", default=("1.1.1.1", "1.0.0.1", "8.8.8.8", "8.8.4.4"), type=str, nargs="+", help="DNS Servers to use for name 
resolution", ) ap.add_argument( "-w", "--workers", default=100, type=int, help="Number of concurrent asynchronous workers", ) ap.add_argument( "-b", "--banner-buffer", default=None, type=int, help="Buffer size for reading the target service banner (likely slows scan)", ) ap.add_argument( "--open", action="store_true", help="Show open ports only", ) ap.add_argument( "-f", "--first", action="store_true", help="Only use first resolved IP", ) ap.add_argument( "-j", "--json", action="store_true", help="JSON Output", ) ap.add_argument( "-6", "--ipv6", action="store_true", help="Use IPv6 name resolution", ) ap.add_argument( "-4", "--ipv4", action="store_true", help="Use IPv4 name resolution", ) ap.add_argument( "-v", "--verbose", action="store_true", help="Verbose Output", ) args = ap.parse_args() if not args.targets: raise ValueError("Invalid Targets") if not args.ports: raise ValueError("Invalid Ports") qtypes = [] if args.ipv4: qtypes.append("A") if args.ipv6: qtypes.append("AAAA") qtypes = qtypes or ["A"] task = run_scanner( workers=args.workers, hosts=args.targets, ports=args.ports, timeout=args.timeout, qtype=qtypes, open_only=args.open, as_json=args.json, resolve_all=not args.first, banner_buffer=args.banner_buffer, verbose=args.verbose, ) loop = asyncio.new_event_loop() asyncio.set_event_loop(loop) try: loop.run_until_complete(task) except KeyboardInterrupt: print("[+] Exiting...") except Exception as e: import traceback traceback.print_exc() finally: cleanup(loop) if __name__ == "__main__": Clr.init() run()
from model import Model from elements.create import Create from elements.process import Process from elements.dispose import Dispose # region Task 5 c = Create(2.0) p1 = Process(2.0, 3) p1.max_queue = 5 p2 = Process(1.0) p2.max_queue = 5 p3 = Process(1.0) p3.max_queue = 5 p4 = Process(1.0) p4.max_queue = 5 d1, d2 = Dispose('DISPOSE1'), Dispose('DISPOSE2') c.next_element = [p1] p1.next_element = [p2, p3] p1.probability = [0.5, 0.5] p2.next_element = [d1] p3.next_element = [p4] p4.next_element = [p1, d2] p4.probability = [0.5, 0.5] print('id0 = {0}, id1 = {1}, id2 = {2}, id3 = {3}, id4 = {4}'.format( c.id_el, p1.id_el, p2.id_el, p3.id_el, p4.id_el)) elements = [c, p1, p2, p3, p4, d1, d2] model = Model(elements) model.simulate(1000.0) # endregion # region Task from PR3 # c = Create(2.0) # p = Process(1.0, 1) # # print('id0 = ' + str(c.get_id_el()) + '\t\tid1 = ' + str(p.get_id_el())) # # c.set_next_element(p) # p.set_max_queue(5) # c.set_name('CREATOR') # p.set_name('PROCESSOR') # c.set_distribution('exp') # p.set_distribution('exp') # # el_list = [c, p] # model = Model(el_list) # model.simulate(1000.0) # endregion
from random import shuffle, randint


class HackProof(object):
    """Toy file-encryption tool based on a shifted substitution cipher.

    On construction it interactively either (1) encrypts a text file,
    prepending an (obfuscated) shift length and an encrypted
    ``user_password`` line, or (2) decrypts such a file after checking
    the credentials stored in it.

    NOTE(review): this is a Caesar-style cipher over a fixed character
    set -- it is a learning exercise, not real security.
    """

    def __init__(self):
        # Build the cipher character set.  Order is significant: the shift
        # is an index offset into this exact sequence, so it must stay
        # lowercase + uppercase + specials + digits.
        self.alphabet = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j',
                         'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't',
                         'u', 'v', 'w', 'x', 'y', 'z']
        self.alphabetCaps = []
        for char in self.alphabet:
            self.alphabetCaps.append(char.upper())
        self.specialChars = ['!', '@', '#', '$', '%', '^', '&', ',', '*',
                             '(', ')', '-', '+', '=', '[', ']', ':', ';',
                             '"', "'", " ", "_", '.']
        self.nums = []
        for x in range(0, 10):
            self.nums.append(str(x))
        self.cypherChars = (self.alphabet + self.alphabetCaps
                            + self.specialChars + self.nums)

        openFile = self.whileInput(
            "Would you like to convert a file or open an exisiting one? 1 or 2: ",
            ['1', '2'])
        if openFile == '1':
            self._encrypt_file()
        else:
            self._decrypt_file()

    def _encrypt_file(self):
        """Interactively encrypt a file in place (helper for __init__)."""
        fileName = self.formatFileName(input("File name: "))
        # Shift of 0 would be a no-op cipher, hence randint starts at 1.
        self.shiftLength = randint(1, len(self.cypherChars) - 1)
        fileUser = input("What is your userName: ")
        filePass = input("What is your password: ")
        with open(fileName, 'r') as file:
            data = file.readlines()
        # Line layout after encryption:
        #   line 0: obfuscated shift length, line 1: encrypted "user_pass",
        #   rest:   encrypted original content.
        data = self.joinList(["{}_{}\n".format(fileUser, filePass)], data)
        data = self.encrypt(data)
        data = self.joinList(
            ["{}\n".format(self.numEncrypt(self.shiftLength))], data)
        with open(fileName, 'w') as file:
            file.writelines(data)

    def _decrypt_file(self):
        """Interactively decrypt a file in place (helper for __init__)."""
        fileName = self.formatFileName(input("File name: "))
        fileUser = input("What is your userName: ")
        filePass = input("What is your password: ")
        with open(fileName, 'r') as file:
            data = file.readlines()
        # BUGFIX: decryptNum now returns an int (was a float), so the
        # shift arithmetic below stays in integer space.
        self.shiftLength = self.decryptNum(data[0].strip('\n'))
        passAndUser = self.decrytLine(data[1]).split('_')
        passAndUser[1] = passAndUser[1].strip('\n')
        if fileUser == passAndUser[0] and filePass == passAndUser[1]:
            del data[0]  # drop shift-length line
            del data[0]  # drop credentials line
            temp = [self.decrytLine(line) for line in data]
            with open(fileName, 'w') as file:
                file.writelines(temp)

    def genPassword(self):
        """Return a random 8-character password mixing all character classes."""
        password = []
        numLower = randint(1, 2)
        numUpper = randint(1, 2)
        numNum = randint(1, 2)
        numSpec = 8 - numLower - numUpper - numNum
        for x in range(numLower):
            password.append(self.alphabet[randint(0, len(self.alphabet) - 1)])
        for x in range(numUpper):
            password.append(
                self.alphabetCaps[randint(0, len(self.alphabetCaps) - 1)])
        for x in range(numNum):
            password.append(self.nums[randint(0, len(self.nums) - 1)])
        for x in range(numSpec):
            password.append(
                self.specialChars[randint(0, len(self.specialChars) - 1)])
        shuffle(password)
        return ''.join(password)

    def whileInput(self, inputText, correctAns):
        """Prompt repeatedly until the answer is one of ``correctAns``."""
        while True:
            temp = input(inputText)
            if temp in correctAns:
                return temp
            print("That is not a valid input\n")

    def encrypt(self, lst):
        """Return ``lst`` with every line passed through returnChar()."""
        tempLst = []
        for line in lst:
            tempLst.append(''.join(self.returnChar(char) for char in line))
        return tempLst

    def returnChar(self, char):
        """Shift ``char`` forward by shiftLength (newlines pass through)."""
        if char == '\n':
            return char
        pos = self.cypherChars.index(char) + self.shiftLength
        if pos >= len(self.cypherChars):
            pos -= len(self.cypherChars)  # wrap around the character set
        return self.cypherChars[pos]

    def decryptChar(self, char):
        """Shift ``char`` backward by shiftLength (newlines pass through)."""
        if char == '\n':
            return char
        pos = self.cypherChars.index(char) - self.shiftLength
        if pos < 0:
            pos += len(self.cypherChars)  # wrap around the character set
        return self.cypherChars[pos]

    def numEncrypt(self, num):
        """Obfuscate the shift length for storage (inverse of decryptNum)."""
        return int(num) * 5 - 15

    def decryptNum(self, num):
        """Recover the shift length stored by numEncrypt().

        BUGFIX: uses floor division so the result is an int, not a float.
        """
        return (int(num) + 15) // 5

    def joinList(self, ls1, ls2):
        """Return the concatenation of the two lists."""
        return ls1 + ls2

    def formatFileName(self, fileName):
        """Normalize a file name so it always ends in exactly '.txt'."""
        fileName = fileName.split('.txt')
        if len(fileName) == 1:
            fileName.append('.txt')
        elif len(fileName) == 2:
            fileName[1] = '.txt'
        return fileName[0] + fileName[1]

    def decrytLine(self, string):
        """Decrypt a whole line character by character."""
        return ''.join(self.decryptChar(char) for char in string)


# Guard the interactive session so importing this module has no side
# effects; running it as a script behaves exactly as before.
if __name__ == "__main__":
    H = HackProof()
from flask import Flask, session from flask_socketio import SocketIO socketio = SocketIO() games = {} def create_app(debug=False): app = Flask(__name__) app.debug = debug app.config['SECRET_KEY'] = 'SomeSecretKey' from .play import play app.register_blueprint(play) socketio.init_app(app) return app
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Dash web app visualizing crime predictions in Chicago: a choropleth map,
# a month/day heat map and an experiment summary, fed either from Azure
# blob storage (cloud_run=True) or from local CSV/shape files.

import calendar
import os
from glob import glob
from io import StringIO
from json import dumps, loads
from pathlib import Path

import dash_core_components as dcc
import dash_html_components as html
import geopandas as gpd
import src.dash_configs as dcf
import src.dash_helpers as dh
from azure.storage.blob import BlockBlobService
from dash import Dash, dependencies
from pandas import read_csv, read_json, to_datetime


def load_prep_geodata(gpd_path: str, da_choice: str) -> gpd.GeoDataFrame:
    """
    Load and preprocess geodata for dividing city
    Note: this function is the same as that found in
    pyviz_panel/app/src/visualization_helpers_altair.py
    """
    # 2. Load *.shp boundary file
    gdf_out = gpd.read_file(gpd_path)
    # 3. (for beat, sector and district only) Change dtype of columns,
    # that will be used in a merge, into integer dtype
    if da_choice in ["beat", "district"]:
        gdf_out[["beat", "beat_num", "sector", "district"]] = gdf_out[
            ["beat", "beat_num", "sector", "district"]
        ].astype(int)
    # 4. Calculate area of beat (since this is always the smallest
    # geographical region)
    # Project to EPSG:3395 (metric) so p.area is in m^2; divide to get km^2.
    gdf_out["area"] = (
        gdf_out["geometry"]
        .to_crs("epsg:3395")
        # .to_crs({"init": "epsg:3395"})
        .map(lambda p: p.area / 10 ** 6)
    )
    return gdf_out


# Flask
port = int(os.environ.get("PORT", 80))

# Dash options
external_stylesheets = ["https://codepen.io/chriddyp/pen/bWLwgP.css"]
four_cols = "four columns"
hidden_div_style = {"display": "none"}

# Azure -- blob names holding the three pre-computed CSV inputs.
blobs_dict = {
    "choro_map": "blobedesz4",
    "heat_map": "blobedesz5",
    "exp_summry": "blobedesz6",
}
az_storage_container_name = "myconedesx7"
blob_service = BlockBlobService(
    account_name=os.environ.get("AZURE_STORAGE_ACCOUNT"),
    account_key=os.environ.get("AZURE_STORAGE_KEY"),
)

# Script inputs (based on Notebook)
PROJECT_DIR = str(Path().cwd())
data_dir = str(Path(PROJECT_DIR) / "data")
heat_data_dir = str(Path(data_dir) / "processed" / "heat_mapping_inputs.csv")
choro_data_dir = str(Path(data_dir) / "processed" / "choro_mapping_inputs.csv")
es_fname_str = "experiment_summary_*.csv"
primary_types_all = [
    "CRIMINAL_DISTURBANCE",
    "VIOLENCE_TO_HUMAN",
    "PROPERTY_DAMAGE",
]
da_choices = ["district"]
pf = "Police Beats (current)"
ca = "Community Areas (current)"
nb = "Neighborhoods"
# Per-division-type column mapping used for merges and views.
da = {
    "neighbourhood": {
        "basic_view_cols": "pri|sec|geometry",
        "pre-post-explosition-compare": "pri_neigh",
        "left_join_col": "pri_neigh_x",
    },
    "district": {
        "basic_view_cols": "district|sect|geometry",
        "pre-post-explosition-compare": "district",
        "left_join_col": "district",
    },
    "community_area": {
        "basic_view_cols": "area_num_1|community|geometry",
        "pre-post-explosition-compare": "comarea",
        "left_join_col": "area_num_1_x",
    },
}
general_plot_specs = {
    "choromap_projectiontype": "mercator",
    "color_by_column": "datetime|count",
    "colorscheme": "YlOrRd",
    "choro_map_figsize": {"width": 600, "height": 535},
    "legend_title": ["Occurrences"],
    "heatmap_xy": {"x": "month:O", "y": "day:O", "yscale": "linear"},
    "heat_map_figsize": {"width": 400, "height": 535},
}
# Heat-map hover tooltip spec (value column, display title, dtype, decimals).
dt_hmap = {
    "x": {"value": "month", "title": "Month", "type": "int", "format": 0},
    "y": {"value": "day", "title": "Day", "type": "int", "format": 0},
    "z": {
        "value": "datetime|count",
        "title": "Occurrences",
        "type": "int",
        "format": 0,
    },
    "e1": {
        "value": "arrest|sum",
        "title": "Arrests",
        "type": "int",
        "format": 0,
    },
    "e2": {
        "value": "probability_of_max_class|mean",
        "title": "Probability (Avg.)",
        "type": "float",
        "format": 2,
    },
}
# Choropleth tooltip spec: column name -> display title.
dt_choro = {
    "district": {"title": "District"},
    "area": {"title": "Area (sq. km)"},
    "side": {"title": "Side"},
    "datetime|count": {"title": "Ocurrences"},
    "arrest|sum": {"title": "Arrests"},
    "probability_of_max_class|mean": {"title": "Probability (Avg.)"},
}
# Invert {side: [district numbers]} into {district number: side}.
district_to_side = {
    s: k
    for k, v in {
        "North": [11, 14, 15, 16, 17, 19, 20, 24, 25],
        "Central": [1, 2, 3, 8, 9, 10, 12, 13, 18],
        "South": [4, 5, 6, 7, 22],
    }.items()
    for s in v
}

# App-specific inputs
cloud_run = True
figs_dir = str(Path(PROJECT_DIR) / "reports" / "figures")
da_choice = da_choices[0]
years_wanted = [2018, 2019]
months_wanted = [1, 2, 3]
training_size = 48348
testing_size = 16117
naive_strategy_descr = {
    "uniform": "uniformly at random",
    "stratified": (
        "generates predictions by respecting the training set’s "
        "class distribution (classifier will predict a probability that "
        "each new observation encountered possesses the target property)"
    ),
    "most_frequent": "as the most frequent label in the training set",
    "prior": "by predicting the class that maximizes the class prior",
}
model_descr = {
    "LogisticRegression": (
        "https://www.sciencedirect.com/topics/medicine-and-dentistry/"
        "logistic-regression-analysis"
    )
}

# Instantiate variables for geospatial data
# NOTE(review): "eighborhoods" relies on the glob wildcard to match files
# named "*Neighborhoods*" / "*neighborhoods*" regardless of leading-case --
# presumably intentional; confirm against the raw data directory.
for geojson, blob_name, shp_dir_name, boundaryf in zip(
    ["Community", "eighborhoods", "Police_Beats_current"],
    ["blobedesz7", "blobedesz8", "blobedesz9"],
    [ca, nb, pf],
    ["community_area", "neighbourhood", "district"],
):
    if geojson == "Police_Beats_current":
        # The districts file is named differently in cloud vs local runs.
        if cloud_run:
            geojson = "CPD_Districts"
        else:
            geojson = "CPD districts"
    da[boundaryf]["geojson"] = glob(
        str(Path(data_dir) / "raw" / f"*{geojson}*.geojson")
    )[0]
    if cloud_run:
        # Cloud copies use underscores instead of spaces/parentheses.
        shp_dir_name = (
            shp_dir_name.replace("(", "").replace(")", "").replace(" ", "_")
        )
    da[boundaryf]["file"] = glob(
        str(Path(data_dir) / "raw" / shp_dir_name / "*.shp")
    )[0]

# Load data (from Azure blobs in cloud runs, local CSVs otherwise).
if cloud_run:
    df_h = read_csv(
        StringIO(
            blob_service.get_blob_to_text(
                container_name=az_storage_container_name,
                blob_name=blobs_dict["heat_map"],
            ).content
        )
    )
    df_ch = read_csv(
        StringIO(
            blob_service.get_blob_to_text(
                container_name=az_storage_container_name,
                blob_name=blobs_dict["choro_map"],
            ).content
        )
    )
    df_es = read_csv(
        StringIO(
            blob_service.get_blob_to_text(
                container_name=az_storage_container_name,
                blob_name=blobs_dict["exp_summry"],
            ).content
        )
    )
else:
    df_h = read_csv(heat_data_dir)
    df_ch = read_csv(choro_data_dir)
    # max() picks the lexicographically latest experiment summary file.
    df_es = read_csv(
        max(glob(str(Path(data_dir) / "processed" / es_fname_str)))
    )
data = dh.load_add_district_or_side_to_geojson(
    district_geojson_file_path=da[da_choice]["geojson"],
    key="dist_num",
    division_type=da_choice,
    district_to_side=district_to_side,
)
gdf_out = load_prep_geodata(da[da_choice]["file"], da_choice=da_choice)
df_choro_data = dh.filter_geodata_and_merge(
    gdf_out=gdf_out,
    df_ch=df_ch,
    da_choice=da_choice,
    district_to_side=district_to_side,
)

# Extract model and dummy classifier scores
best_naive_model = (
    df_es[(df_es["model"].str.contains("Dummy"))]
    .set_index("model")["Test"]
    .idxmax()
)
best_model = (
    df_es[~(df_es["model"].str.contains("Dummy"))]
    .set_index("model")["Test"]
    .idxmax()
)
best_naive_model_score = df_es[(df_es["model"] == best_naive_model)][
    "Test"
].values[0]
best_model_score = df_es[(df_es["model"] == best_model)]["Test"].values[0]
# Convert month numbers to names and probabilities to percentages for display.
df_h["month"] = to_datetime(df_h["month"], format="%m").dt.month_name()
df_h["probability_of_max_class|mean"] *= 100

markdown_text = f"""
##### Types of Crime Committed
Regarding assignment of crime categories: Crime types taken from topic
modeling literature ([Da Kuang et. al. 2017]
(https://link.springer.com/content/pdf/10.1186%2Fs40163-017-0074-0.pdf)).
For curent dataset, crime type clusters colored in purple and red (see
Fig. 7, pg 16/20) produced two classes each with significantly smaller
number of crime records than remaining two classes. Purple and red clusters
are closer to eachother than to other clusters and so, as a simplification
in order to improve balance of classes, purple and red clusters were
combined for modeling purposes.

##### Predictive Modeling Summary
AI/ML model was trained on {training_size:,} crime records across years
{', '.join([str(y) for y in years_wanted])} and months
{', '.join([str(calendar.month_name[y]) for y in months_wanted])}. Best model
([{best_model}]({model_descr[best_model]})) accuracy was
{best_model_score:.2f}, compared to baseline accuracy of
{best_naive_model_score:.2f} by generating predictions such that
{naive_strategy_descr[best_naive_model.split('__')[1]]}.
"""

app = Dash(__name__, external_stylesheets=external_stylesheets)
# To improve update speed
app.css.config.serve_locally = True
app.scripts.config.serve_locally = True
server = app.server
app.layout = html.Div(
    children=[
        # Heading
        dcf.gen_header(
            children="VISUALIZE CRIME PREDICTIONS IN CHICAGO",
            style={
                "backgroundColor": "darkred",
                "color": "white",
                "textAlign": "center",
            },
        ),
        # Hidden div inside the app that stores the intermediate value
        dcf.gen_hidden_div(id="intermediate-value", style=hidden_div_style),
        # Heading text to show number of selected records
        dcf.gen_h3_header_show_selected_records(
            id="total_recshown", style={"textAlign": "center"}
        ),
        # Dropdown menu to select type of crime
        html.Div(
            [
                html.Div(
                    [
                        dcf.gen_html_label(
                            c="CATEGORY",
                            style={
                                # "height": "45px",
                                "width": "100%",
                                "font-size": "180%",
                                "font-weight": "bold",
                                # "min-height": "45px",
                                "textAlign": "left",
                                # "vertical-align": "middle",
                                # "display": "inline-block",
                            },
                        ),
                        dcf.gen_dropdown(
                            id="primary_type",
                            value=primary_types_all[0],
                            multi=False,
                            options=[
                                {"label": i, "value": i}
                                for i in primary_types_all
                            ],
                            style={
                                "height": "45px",
                                "width": "100%",
                                "font-size": "115%",
                                "min-height": "45px",
                                "textAlign": "left",
                                "vertical-align": "middle",
                                # "display": "inline-block",
                            },
                        ),
                        dcf.gen_rule(type="horizontal"),
                        dcf.gen_markdown_text(children=[markdown_text]),
                    ],
                    className=four_cols,
                ),
                html.Div(
                    dcc.Loading(
                        id="loading-1",
                        children=[
                            html.Div(
                                [dcc.Graph(id="choro")], className=four_cols
                            )
                        ],
                        type="default",
                        fullscreen=False,
                    ),
                    className=four_cols,
                ),
                html.Div(
                    dcc.Loading(
                        id="loading-2",
                        children=[
                            html.Div(
                                [dcc.Graph(id="hmap")], className=four_cols
                            )
                        ],
                        type="default",
                        fullscreen=False,
                    ),
                    className=four_cols,
                ),
            ],
            className="row",
        ),
    ],
    style={"height": "75vh"},
)


@app.callback(
    dependencies.Output("intermediate-value", "children"),
    [dependencies.Input("primary_type", "value")],
)
def update_figure(primary_type: str) -> str:
    """Filter both dataframes by crime type; stash them as JSON in a
    hidden div so the other callbacks share one filtering pass."""
    df_mapping_heat = df_h.loc[df_h["primary_type"] == primary_type]
    df_mapping_choro = df_choro_data.loc[
        df_choro_data["primary_type"] == primary_type
    ]
    datasets = {
        "dfh": df_mapping_heat.to_json(orient="split", date_format="iso"),
        "dfch": df_mapping_choro.to_json(orient="split", date_format="iso"),
    }
    return dumps(datasets)


# Update totals text
@app.callback(
    dependencies.Output("total_recshown", "children"),
    [dependencies.Input("intermediate-value", "children")],
)
def update_selection_summary(jsonified_cleaned_data):
    """Render the 'Total Records Selected' header from the cached data."""
    datasets = loads(jsonified_cleaned_data)
    df_selected = read_json(datasets["dfh"], orient="split")
    distinct_observations = df_selected["datetime|count"].sum()
    arrests = df_selected["arrest|sum"].sum()
    return (
        f"Total Records Selected: {distinct_observations:,} "
        f"(Arrests: {arrests:,}) of {testing_size:,}"
    )


# Update heatmap
@app.callback(
    dependencies.Output("hmap", "figure"),
    [dependencies.Input("intermediate-value", "children")],
)
def update_heatmap(jsonified_cleaned_data):
    """Rebuild the month/day heat map from the cached heat-map data."""
    datasets = loads(jsonified_cleaned_data)
    df_hmap = read_json(datasets["dfh"], orient="split")
    fig = dh.plot_heatmap(
        df=df_hmap,
        x="month",
        y="day",
        z="datetime|count",
        xtitle="month",
        ytitle="day",
        xautorange=True,
        yautorange="reversed",
        c="YlOrRd",
        hover_data=dt_hmap,
        viz=True,
        margins={"r": 0, "t": 0, "l": 75, "b": 0, "pad": 0},
        fig_size=(600, 535),
    )
    return fig


# Update choromap
@app.callback(
    dependencies.Output("choro", "figure"),
    [dependencies.Input("intermediate-value", "children")],
)
def update_choromap(jsonified_cleaned_data):
    """Rebuild the choropleth from the cached choro data and save an HTML copy."""
    datasets = loads(jsonified_cleaned_data)
    df_mapping_choro = read_json(datasets["dfch"], orient="split")
    primary_type = df_mapping_choro["primary_type"].unique().tolist()[0]
    fig = dh.plot_choro(
        df=df_mapping_choro,
        geodata=data,
        color_by_col=general_plot_specs["color_by_column"],
        colorscheme=general_plot_specs["colorscheme"],
        da_choice=da_choice,
        choro_tooltip_dict=dt_choro,
        projection_type=general_plot_specs["choromap_projectiontype"],
        margins={"r": 0, "t": 0, "l": 0, "b": 0, "pad": 0},
        figsize=(
            general_plot_specs["choro_map_figsize"]["width"],
            general_plot_specs["choro_map_figsize"]["height"],
        ),
        file_path=Path(figs_dir) / f"choromap_{primary_type}_dash.html",
    )
    return fig


if __name__ == "__main__":
    app.run_server(host="0.0.0.0", debug=False, port=port)
import socket
from sqlite3_class import SQLite3_Class
from tkinter import *
import datetime
import os

"homework, output numbers to database (with timestamp)"


class Client:
    """Tkinter client that fetches random numbers from a server, shows
    them in a window, merge-sorts them on demand, and records every
    fetched/sorted list with a timestamp in a SQLite database.
    """

    def __init__(self, ip="127.0.0.1", port=5050, window_title="Server-Client Communication"):
        self.ip = ip
        self.port = port
        self.root = Tk()
        if window_title is not None:
            self.root.title(window_title)
        # BUGFIX: start each run with a fresh database, but do not crash
        # on the very first run when the file does not exist yet.
        if os.path.exists("numbers_database.db"):
            os.remove("numbers_database.db")
        self.database = SQLite3_Class("numbers_database.db")
        self.database.custom_execute_command(
            "CREATE TABLE IF NOT EXISTS numbers_table (numbers TEXT, date TEXT)",
            output=False)

    def return_time_formatted(self):
        """Return the current local time as a string.

        NOTE(review): format uses %I (12-hour clock) without %p -- looks
        like it should be %H; confirm before changing stored data.
        """
        return datetime.datetime.now().strftime("%Y-%m-%d %I:%M:%S")

    def get_random_numbers(self):
        """Request a fresh list of numbers from the server.

        Sends "GET /random_numbers" and parses a "<code> | <numbers>"
        reply into a list of ints stored on self.random_numbers.
        """
        self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.s.connect((self.ip, self.port))
        self.s.send("GET /random_numbers HTTP/1.1".encode())
        http_code, random_numbers = self.s.recv(1024).decode().split(" | ")
        self.random_numbers = list(map(int, random_numbers.split()))
        self.s.close()
        return self.random_numbers

    def sort(self, list):
        """Return a merge-sorted copy of ``list`` (input is not mutated).

        The parameter name shadows the builtin; it is kept for backward
        compatibility with existing keyword callers.
        """
        items = list.copy()
        # BUGFIX: the original recursed forever on an empty list (it only
        # stopped at length 1); treat length <= 1 as already sorted.
        if len(items) <= 1:
            return items
        middle = len(items) // 2
        merged_left = self.sort(items[:middle])
        merged_right = self.sort(items[middle:])
        # Merge the two sorted halves back into `items` in place.
        current, current_left, current_right = 0, 0, 0
        while current_left < len(merged_left) and current_right < len(merged_right):
            if merged_left[current_left] < merged_right[current_right]:
                items[current] = merged_left[current_left]
                current_left += 1
            else:
                items[current] = merged_right[current_right]
                current_right += 1
            current += 1
        while current_left < len(merged_left):
            items[current] = merged_left[current_left]
            current_left += 1
            current += 1
        while current_right < len(merged_right):
            items[current] = merged_right[current_right]
            current_right += 1
            current += 1
        return items

    def UI_setup(self):
        """Build the window: number labels plus fetch/sort buttons."""
        self.display_random_numbers()
        new_numbers = Button(self.root, text="New Numbers",
                             command=self.display_random_numbers)
        sort_button = Button(self.root, text="Sort Numbers",
                             command=self.display_sorted_numbers)
        new_numbers.grid(row=1, column=3)
        sort_button.grid(row=1, column=6)

    def display_random_numbers(self):
        """Fetch new numbers, log them to the database, and render them."""
        self.get_random_numbers()
        self.database.insert(
            "numbers_table",
            (' '.join(str(element) for element in self.random_numbers),
             self.return_time_formatted()))
        for index, random_number in enumerate(self.random_numbers):
            label = Label(self.root, text=random_number, font=["Arial", 12])
            label.grid(row=0, column=index, padx=20)

    def display_sorted_numbers(self):
        """Sort the current numbers, update the matching DB row, re-render."""
        sorted_numbers = self.sort(self.random_numbers)
        print("Updating, Previous Contents: ")
        print(self.database.select_specific(
            table_name="numbers_table",
            query=f"numbers='{' '.join(str(element) for element in self.random_numbers)}'"))
        self.database.update_specific(
            "numbers_table",
            {"numbers": ' '.join(str(element) for element in sorted_numbers),
             "date": self.return_time_formatted()},
            f"numbers='{' '.join(str(element) for element in self.random_numbers)}'")
        self.random_numbers = sorted_numbers
        for index, random_number in enumerate(self.random_numbers):
            label = Label(self.root, text=random_number, font=["Arial", 12])
            label.grid(row=0, column=index, padx=20)

    def UI_listen(self):
        """Enter the tkinter main loop (blocks until the window closes)."""
        self.root.mainloop()

    def close(self):
        """Commit pending database writes and close the connection."""
        self.database.commit()
        self.database.close()


# Guard the interactive driver so importing this module has no side effects.
if __name__ == "__main__":
    client1 = Client(ip=input("Enter IP: "))
    client1.UI_setup()
    client1.UI_listen()
    client1.close()
#################################################################################### # Copyright (c) 2022 TasteIt # # Author: Paolo Pertino # # # # Permission is hereby granted, free of charge, to any person obtaining a copy # # of this software and associated documentation files (the "Software"), to deal # # in the Software without restriction, including without limitation the rights # # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # # copies of the Software, and to permit persons to whom the Software is # # furnished to do so, subject to the following conditions: # # # # The above copyright notice and this permission notice shall be included in # # all copies or substantial portions of the Software. # # # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # # THE SOFTWARE. 
# #################################################################################### from telegram import Update from telegram.ext import CallbackContext from data import fetchLang, insertChat def verifyChatData(update: Update, context: CallbackContext) -> None: """Verify that all needed chat_data are correctly set, otherwise it fetches them from the database.\\ Currently the followings datas need to be stored in chat_data to make the bot work properly: * `lang` - language code for the current chat Args: update (Update): _description_ context (CallbackContext): _description_ """ if context.chat_data.get("lang") == None: chatLanguage = fetchLang(update.effective_chat.id) if chatLanguage: context.chat_data.update({"lang": chatLanguage[0]}) else: insertChat( chatId=update.effective_chat.id, language=update.effective_user.language_code, ) context.chat_data.update({"lang": update.effective_user.language_code}) # If other data needs to be refreshed or checked it will be set below.
from .version import VERSION __author__ = 'Tomoki Kozuru' __version__ = VERSION from .stream import BaseStreamEvent, StreamClient from .auth import BasicAuth, OAuth1 from .utils import Utils
#!/usr/bin/env python
# This script sets the attenuation values based on a csv file
# NOTE(review): Python 2 script (uses the `StringIO` module and the
# deprecated `np.int`/`np.str` aliases); do not modernize in isolation --
# the katcorelib platform it runs on is Python 2.
import numpy as np
import katconf
import time
import StringIO

from katcorelib import (
    user_logger, standard_script_options, verify_and_connect, colors)


def color_code_eq(value, test, errorv=0.01):
    """Return a terminal color code based on whether the values are within
    a range of each other.

    Example:
        color_code_eq(1., 2., errorv=0.01)     returns yellow color code
        color_code_eq(1., 1.0005, errorv=0.01) returns green color code

    value, test, errorv are floating point numbers;
    value and test are the 2 values tested and errorv is the equality range.
    """
    code_color = colors.Green
    # Outside the +/- errorv band -> warn (yellow) instead of green.
    if value >= test + errorv or value <= test - errorv:
        code_color = colors.Yellow
    return code_color


def measure_atten(ant, pol, atten_ref=None, band='l'):
    """Return the attenuation of an antenna, color-coding the log line
    when the reading differs from the reference value.

    Example:
        measure_atten('m064', 'h', atten_ref=5)
        returns 4 with log message:
        <<date time>> band l: m064 h Attenuation : <yellow> 4 <default color>

    ant is a katcp antenna object, pol is a polarisation string ('h'/'v'),
    and atten_ref is the expected value.
    """
    sensor = "dig_%s_band_rfcu_%spol_attenuation" % (band, pol)
    atten = ant.sensor[sensor].get_value()
    color_d = color_code_eq(atten, atten_ref)
    string = "'%s' band: %s %s Attenuation : %s %-2i %s " % (
        band, ant.name, pol, color_d, atten, colors.Normal)
    user_logger.info(string)
    return atten


def get_ant_band(ant):
    """Return the currently selected band of an antenna.

    Example:
        get_ant_band('m064') returns 'x'

    ant is a katcp antenna object.
    """
    sensor = "dig_selected_band"
    band = ant.sensor[sensor].get_value()
    return band


# Set up standard script options
usage = "%prog "  # <atten_ref.csv>
description = 'Sets the attenuation according to a attenuation reference file '
parser = standard_script_options(usage=usage, description=description)
# Set default value for any option (both standard and experiment-specific options)
parser.set_defaults(description='Set Attenuate', nd_params='off')
# Parse the command line
opts, args = parser.parse_args()

with verify_and_connect(opts) as kat:
    # atten_ref maps "<band>_<antenna>_<pol>" -> expected attenuation (int).
    atten_ref = {}
    for band in ['l', 'u']:  # ,'s','x'
        # Read in the bands
        # Positional args are no longer accepted; reference values now live
        # in katconfig, not in a user-supplied CSV.
        if not len(args) == 0:
            raise RuntimeError(
                'This script no longer takes in an attenuation file. '
                'Please raise an issue if you need this ')
        user_logger.info("This script used values found in katconf/katconfig")
        user_logger.info("Reading file katconf:'katconfig/user/attenuation/mkat/dig_attenuation_%s.csv'" % (band))
        file_string = katconf.resource_string(
            'katconfig/user/attenuation/mkat/dig_attenuation_%s.csv' % (band))
        tmp_data = np.loadtxt(StringIO.StringIO(file_string), dtype=np.str,
                              delimiter=',')
        for ant, value_h, value_v in tmp_data:
            try:
                atten_ref['%s_%s_%s' % (band, ant, 'h')] = np.int(value_h)
                atten_ref['%s_%s_%s' % (band, ant, 'v')] = np.int(value_v)
            except ValueError:
                # Non-integer entries (e.g. blanks) are logged and skipped.
                user_logger.warning(
                    "'%s' band %s: attenuation value '%s','%s' is not an integer " % (
                        band, ant, value_h, value_v))
    if not kat.dry_run:
        for pol in {'h', 'v'}:
            for ant in kat.ants:  # note ant is an katcp antenna object
                band = get_ant_band(ant)
                key_lookup = '%s_%s_%s' % (band, ant.name, pol)
                if key_lookup not in atten_ref:
                    user_logger.error("'%s' band %s %s: Has no attenuation value in the file " % (
                        band, ant.name, pol))
                    continue
                atten = measure_atten(
                    ant, pol, atten_ref=atten_ref[key_lookup], band=band)
                if atten != atten_ref[key_lookup]:
                    user_logger.info("'%s' band %s %s: Changing attenuation from %idB to %idB " % (
                        band, ant.name, pol, atten, atten_ref[key_lookup]))
                    ant.req.dig_attenuation(
                        pol, atten_ref[key_lookup])
                    user_logger.info("Sleeping for 30 seconds ")
                    time.sleep(30)
                    # The sleep is because there is a potential +/-30sec loop in the
                    # state machine in the digitiser and sending a second command
                    # would clobber the values.
"""Show Display information about the keys and users that have access to your account. """ import os import re import sys import socket import textwrap import promus.core.ssh as ssh import promus.core.git as git RE_LINE = re.compile('(?P<stuff>.*?)ssh-(?P<type>.*?) ' '(?P<key>.*?) (?P<desc>.*)') DESC = """ use this command to quickly view your ssh keys or the users that are allowed to connect to your account. """ def add_parser(subp, raw): "Add a parser to the main subparser. " tmpp = subp.add_parser('show', help='display account status', formatter_class=raw, description=textwrap.dedent(DESC)) tmpp.add_argument('type', type=str, choices=['users', 'keys'], help='information type') def show_keys(): """Display your public key and your git key. """ display = sys.stdout.write host = socket.gethostname() master = os.environ['USER'] alias = git.config('host.alias') id_key, git_key = ssh.get_keys() id_key = ssh.get_public_key(id_key) display('# ID_RSA:\n') display('%s %s@%s - %s\n' % (id_key, master, host, alias)) git_key = ssh.get_public_key(git_key) display('# GIT_KEY:\n') display('%s %s@%s - %s - git\n' % (git_key, master, host, alias)) def show_users(): """Display all the users that have access to your account. 
""" users, pending, unknown = ssh.read_authorized_keys() disp = sys.stdout.write disp('\n') emails = sorted(users.keys()) for user in emails: disp(' %s:\n' % user) for key, content in users[user].items(): disp(' ...%s: %s, %s, %s\n' % (key[-6:], content[1], content[0], content[2])) disp('\n') keys = pending.keys() if keys: disp('There are [%d] pending requests:\n\n' % len(keys)) data = [(key[-6:], pending[key][0]) for key in keys] data = sorted(data, key=lambda x: x[1]) for item in data: disp(' ...%s: %s\n' % item) disp('\n') if unknown: msg = 'There are [%d] unknown entries in ~/.ssh/authorized_keys:\n\n' disp(msg % len(unknown)) for item in unknown: match = RE_LINE.match(item) if match: disp(' ...%s: %s\n' % (match.group('key')[-6:], match.group('desc'))) else: disp(' NO MATCH ON: ...%s\n' % item[-10:]) disp('\n') def run(arg): """Run command. """ func = { 'keys': show_keys, 'users': show_users, } func[arg.type]()
import json
import os

# Read the generated descriptor from data/, enrich it, and write the final
# datapackage.json at the repository root.
with open("data/datapackage.json") as f:
    content = f.read()

json_data = json.loads(content)

print("Updating metadata...")

# Set correct metadata
json_data["name"] = "covid-19"
json_data["title"] = "Novel Coronavirus 2019"
# Pre-defined chart views rendered by the datahub frontend.
json_data["views"] = [
    {
        "title": "Total world to date",
        "resources": ["worldwide-aggregate"],
        "specType": "simple",
        "spec": {
            "group": "Date",
            "series": ["Confirmed", "Deaths"],
            "type": "line",
        },
    },
    {
        "title": "Number of confirmed cases in key countries",
        "resources": ["key-countries-pivoted"],
        "specType": "simple",
        "spec": {
            "group": "Date",
            "series": [
                "China",
                "US",
                "United_Kingdom",
                "Italy",
                "France",
                "Germany",
                "Spain",
                "Iran",
            ],
            "type": "line",
        },
    },
    {
        "title": "Mortality rate in percentage",
        "resources": [
            {
                "name": "worldwide-aggregate",
                # Derived series: deaths as a percentage of confirmed cases.
                "transform": [
                    {
                        "type": "formula",
                        "expressions": [
                            "data['Deaths'] / data['Confirmed'] * 100 + '%'"
                        ],
                        "asFields": ["Mortality rate"],
                    }
                ],
            }
        ],
        "specType": "simple",
        "spec": {
            "group": "Date",
            "series": ["Mortality rate"],
            "type": "bar",
        },
    },
    {
        "title": "Increase rate from previous day in confirmed cases worldwide",
        "resources": ["worldwide-aggregate"],
        "specType": "simple",
        "spec": {"group": "Date", "series": ["Increase rate"], "type": "bar",},
    },
]

# Set the correct format for dates
for resource in json_data["resources"]:
    for field in resource["schema"]["fields"]:
        if field.get("name") == "Date":
            field["format"] = "%Y-%m-%d"

with open("datapackage.json", "w") as f:
    json.dump(json_data, f, sort_keys=True, indent=2)

# The intermediate descriptor is no longer needed once the final one exists.
os.unlink("data/datapackage.json")
# Copyright 2018/2019 The RLgraph authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== from __future__ import absolute_import from __future__ import division from __future__ import print_function from rlgraph import get_backend from rlgraph.components.component import Component from rlgraph.utils.decorators import rlgraph_api if get_backend() == "tf": import tensorflow as tf elif get_backend() == "pytorch": import torch class Clipping(Component): """ Clipping utility (e.g. to clip rewards). API: clip(values) -> returns clipped values. """ def __init__(self, clip_value=0.0, scope="clipping", **kwargs): super(Clipping, self).__init__(scope=scope, **kwargs) self.clip_value = clip_value @rlgraph_api(must_be_complete=False) def _graph_fn_clip_if_needed(self, values): """ Clips values if cli pvalue specified, otherwise passes through. Args: values (SingleDataOp): Values to clip. Returns: SingleDataOp: Clipped values. """ if self.clip_value == 0.0: return values elif get_backend() == "tf": return tf.clip_by_value(t=values, clip_value_min=-self.clip_value, clip_value_max=self.clip_value) elif get_backend() == "pytorch": torch.clamp(values, min=-self.clip_value, max=-self.clip_value)
#!/usr/bin/env python3
"""Count how many digit triples (a, b, c), each in 0..9, sum to 10."""
from itertools import product

n = 0
for triple in product(range(0, 10), repeat=3):
    # A triple contributes exactly when its three digits add up to 10.
    if sum(triple) == 10:
        n += 1
print("réponse:", n)
'''
(0-1 Knapsack) example

The example solves the 0/1 Knapsack Problem: how we get the maximum value,
given our knapsack just can hold a maximum weight of w, while the value of
the i-th item is a1[i], and the weight of the i-th item is a2[i]?

i = total item
w = total weight the knapsack can carry
'''

# a1: item value
a1 = [100, 70, 50, 10]
# a2: item weight
a2 = [10, 4, 6, 12]


def knapsack01(items, weight):
    """Return the best total value using items a1[0..items] within *weight*.

    items  -- index of the last item still under consideration
    weight -- remaining knapsack capacity

    Bug fixes vs. the original: the function now uses its own parameters
    (it previously read the globals ``i``/``w``), and the final ``max``
    call compares the two alternatives instead of passing the second
    recursion as a third argument to ``knapsack01``.
    """
    if weight == 0 or items < 0:
        # No capacity left, or no items left to consider.
        return 0
    if a2[items] > weight:
        # Current item does not fit: it can only be skipped.
        return knapsack01(items - 1, weight)
    # Best of: taking the item (gain its value, lose its weight) vs. skipping.
    return max(a1[items] + knapsack01(items - 1, weight - a2[items]),
               knapsack01(items - 1, weight))


if __name__ == "__main__":
    i = 3
    w = 12
    print(knapsack01(items=i, weight=w))
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations import datetime from django.utils.timezone import utc class Migration(migrations.Migration): dependencies = [ ('threshold_value', '0004_auto_20141030_1248'), ] operations = [ migrations.AlterField( model_name='thresholdvalue', name='time', field=models.DateTimeField(default=datetime.datetime(2014, 11, 14, 11, 4, 11, 111000, tzinfo=utc)), preserve_default=True, ), ]
from selenium import webdriver
import time


class Scraper(object):
    """Open a fixed YouTube URL in Chrome, wait, and reload forever.

    NOTE(review): this looks like a view-generation loop; the sleep of 254
    seconds presumably matches the video length -- confirm.
    """

    def __init__(self):
        # Machine-specific path to the chromedriver binary; launching Chrome
        # happens here, so constructing a Scraper already opens a browser.
        self.path = r'/Users/brunopaes/Documents/OneDrive/Acadêmico/ESPM/misc/05.4 - Python_Playground/drivers/chromedriver'
        self.driver = webdriver.Chrome(self.path)
        self.url = 'https://www.youtube.com/watch?v=6YPd1Foae_Q'
        # Seconds to stay on the page before reloading.
        self.time = 254

    def search(self):
        """Load the URL, sleep, repeat -- never returns."""
        while True:
            self.driver.get(self.url)
            time.sleep(self.time)

    def main(self):
        self.search()


if __name__ == '__main__':
    obj = Scraper()
    obj.main()
import logging
import pathlib
import sys


class AssetLoader:
    """
    Load model for each element when render is needed.
    """
    # Panda3D loader shared by every element; set once in init_loader.
    loader = None
    # Root directory containing the bundled "assets" folder.
    asset_path = None

    @staticmethod
    def init_loader(pg_world):
        """
        Due to the feature of Panda3d, keep reference of loader in static variable
        """
        # On non-Windows platforms a PurePosixPath suffices; on Windows a
        # concrete resolved Path is used so it can later be converted to
        # Panda3D's path convention by windows_style2unix_style.
        root_path = pathlib.PurePosixPath(__file__).parent.parent if sys.platform != "win32" else pathlib.Path(
            __file__
        ).resolve().parent.parent
        AssetLoader.asset_path = root_path.joinpath("assets")
        if pg_world.win is None:
            # Headless physics-only world: nothing is rendered, so no loader.
            logging.debug("Physics world mode")
            return
        logging.debug("Onscreen/Offscreen mode, Render/Load Elements")
        AssetLoader.loader = pg_world.loader

    @classmethod
    def get_loader(cls):
        # Fails loudly if init_loader was never called (or ran headless).
        assert AssetLoader.loader, "Initialize AssetLoader before getting it"
        return cls.loader

    @staticmethod
    def windows_style2unix_style(win_path):
        """Convert a Windows path like 'C:/x' into Panda3D's '/c/x' form."""
        path = win_path.as_posix()
        panda_path = "/" + path[0].lower() + path[2:]
        return panda_path

    @staticmethod
    def file_path(*path_string):
        """Join *path_string* onto the asset root, platform-appropriately."""
        path = AssetLoader.asset_path.joinpath(*path_string)
        return AssetLoader.windows_style2unix_style(path) if sys.platform.startswith("win") else str(path)

    @classmethod
    def initialized(cls):
        # asset_path is assigned in init_loader, so it doubles as the flag.
        return cls.asset_path is not None


def initialize_asset_loader(pg_world):
    """Initialize the global AssetLoader exactly once for *pg_world*."""
    # Re-initialization is almost certainly a programming error: warn and
    # keep the existing loader instead of clobbering it.
    if AssetLoader.initialized():
        logging.warning(
            "AssetLoader is initialize to root path: {}! But you are initializing again!".format(
                AssetLoader.asset_path
            )
        )
        return
    AssetLoader.init_loader(pg_world)
#!/usr/bin/env python
#
# Author: Thamme Gowda [tg (at) isi (dot) edu]
# Created: 5/29/20
import logging as log
import collections as coll
from pathlib import Path

from rtg import TranslationExperiment as Experiment
import numpy as np
from scipy import stats
from functools import partial

log.basicConfig(level=log.INFO)


def get_training_frequencies(freqs_file, n_classes, has_header=True):
    """Read target-side training frequencies into a dense array.

    freqs_file -- Path to a TSV of (term_idx, term, freq) rows
    n_classes  -- vocabulary size; indices not present stay 0
    has_header -- skip the 3-line header emitted by rtg.eval.datastat
    """
    log.info(f"Reading tgt side freqs from {freqs_file}")
    # Fix: `np.int` was removed in NumPy 1.24; plain `int` is the exact alias.
    freqs = np.zeros(n_classes, dtype=int)
    with freqs_file.open() as rdr:
        term_freqs = [line.rstrip('\n') for line in rdr]
        if has_header:
            # format used by rtg.eval.datastat: two '#' lines and a blank one
            assert term_freqs[0].startswith("#")
            assert term_freqs[1].startswith("#")
            assert not term_freqs[2]
            term_freqs = term_freqs[3:]
        for line in term_freqs:
            term_idx, term, freq = line.split('\t')
            freqs[int(term_idx)] = int(freq)
    return freqs


def evaluate(sys_lines, ref_lines):
    """Compute per-type precision and recall between system and reference.

    Both arguments are parallel lists of token-id sequences. Types that never
    occur on one side get NaN for the corresponding metric.
    """
    assert len(sys_lines) == len(ref_lines)
    match_count = coll.defaultdict(int)
    sys_count = coll.defaultdict(int)
    ref_count = coll.defaultdict(int)
    for sys, ref in zip(sys_lines, ref_lines):
        sys = coll.Counter(sys)
        ref = coll.Counter(ref)
        # Clipped matches, as in n-gram precision: credit min(sys, ref).
        for key in sys.keys() | ref.keys():
            sys_count[key] += sys.get(key, 0)
            ref_count[key] += ref.get(key, 0)
            match_count[key] += min(sys.get(key, 0), ref.get(key, 0))
    # all keys seen on either side are present in all three maps
    assert match_count.keys() == sys_count.keys()
    assert match_count.keys() == ref_count.keys()
    precision, recall = {}, {}
    for key, count in match_count.items():
        assert 0 <= count <= sys_count[key]
        assert 0 <= count <= ref_count[key]
        precision[key] = (count / sys_count[key]) if sys_count[key] > 0 else np.nan
        recall[key] = (count / ref_count[key]) if ref_count[key] > 0 else np.nan
    return precision, recall


def frequency_bias(freqs, scores):
    """Pearson correlation between training frequency and a per-type score.

    freqs is list[idx]=freq; scores is map[idx]=score; frequencies should be
    exhaustive, scores can be only for a subset of classes.

    NOTE(review): scores produced by evaluate() may contain NaN (types absent
    on one side); pearsonr then returns NaN -- confirm whether NaN entries
    should be filtered first.
    """
    pairs = [(freqs[idx], sc) for idx, sc in scores.items()]
    # Sorting is by frequency (descending) with score as tie-breaker.
    sorted_pairs = list(sorted(pairs, reverse=True))
    sorted_freqs = np.array([f for f, s in sorted_pairs])
    sorted_scores = np.array([s for f, s in sorted_pairs])
    return stats.pearsonr(sorted_freqs, sorted_scores)


def main(args=None):
    """CLI entry point: report frequency bias of precision and recall."""
    args = args or parse_args()
    exp = Experiment(args.exp, read_only=True)
    n_classes = len(exp.tgt_vocab)
    freqs = get_training_frequencies(args.freq, n_classes=n_classes)
    # Tokenize with the experiment's target vocabulary, without BOS/EOS.
    tokr = partial(exp.tgt_vocab.encode_as_ids, add_bos=False, add_eos=False)
    sys = [tokr(line.strip()) for line in args.sys]
    ref = [tokr(line.strip()) for line in args.ref]
    assert len(sys) == len(ref)
    precision, recall = evaluate(sys, ref)
    # Fix: output label typo 'Precsion' -> 'Precision'.
    print('Precision bias: ', frequency_bias(freqs, precision))
    print('Recall bias: ', frequency_bias(freqs, recall))


def parse_args():
    import argparse
    import sys
    p = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    p.add_argument('exp', type=Path, help='Path to experiment')
    p.add_argument('-s', '--sys', type=argparse.FileType('r'), default=sys.stdin,
                   help='System outputs; Multiple systems are allowed.')
    p.add_argument('-r', '--ref', type=argparse.FileType('r'), required=True,
                   help='Reference; Multiple files are allowed - one per system with the matching order')
    p.add_argument('-f', '--freq', type=Path, required=True,
                   help='File that has training frequencies on tgt side. '
                        'Get this from "python -m rtg.eval.datastat <exp> tgt -o <freqs.tsv>"')
    return p.parse_args()


if __name__ == '__main__':
    main()
from .base import *


def build_renderer() -> BaseRenderer:
    """Construct and return the default renderer.

    ``BaseRenderer`` comes from the star-import of ``.base``.
    """
    return BaseRenderer()
#!/usr/bin/env python
# NOTE(review): this script targets Python 2 (`ConfigParser` module name and
# `readfp`); it will not run unmodified under Python 3.

### FUNCTIONS - START ###
def printHelp(scriptname):
    """Print usage information for this script."""
    print('\nUsage: ' + scriptname + ' [options]\n')
    print('\twhere options are:\n');
    print('\t\t-f --conf-file: parse the specified configuration file.\n')
    print('\t\t-h --help: print this help message.\n')
    print('\nAuthor: Daniele Linaro -- daniele.linaro@unige.it\n')
#### FUNCTIONS - END ####

from sys import argv
from os.path import isfile

if len(argv) == 2 and (argv[1] == '-h' or argv[1] == '--help'):
    printHelp(argv[0])
    exit(1)

# Default configuration file, overridable with -f/--config-file.
conffile = 'pll.cfg'
if len(argv) > 2:
    if argv[1] == '-f' or argv[1] == '--config-file':
        conffile = argv[2]
    else:
        printHelp(argv[0])
        exit(1)

from ConfigParser import ConfigParser

if not isfile(conffile):
    printHelp(argv[0])
    print(conffile + ' is not a valid configuration file. Aborting...')
    exit(1)

fid = open(conffile,'r')
config = ConfigParser()
config.readfp(fid)
fid.close()

from pybal import bal
from pybal import util

# the dynamical system
pll = bal.DynamicalSystem()
pll.create('PLL')

# the parameters: each section of the config names one bifurcation parameter
# with either a fixed 'value' (steps == 1) or a 'min'/'max' sweep (steps > 1).
par = bal.Parameters(pll.npar)
parnames = ['fref','r1','fvco','vdd','rho0','rhoap','k0','krho','kap','alpha','kvcoa','kvcob','kvcoc','tuning']
for k,p in enumerate(parnames):
    steps = config.getint(p,'steps')
    if steps == 1:
        pmin = config.getfloat(p,'value')
        pmax = pmin
    elif steps > 1:
        pmin = config.getfloat(p,'min')
        pmax = config.getfloat(p,'max')
    else:
        print('The number of steps for ' + p + ' must be equal to or greater than 1.')
        exit(1)
    par.bifpar(k,[pmin,pmax,steps])
    if p == 'vdd':
        # Remembered because the initial state below starts at the supply rail.
        vdd = pmin

# the solver
solver = bal.ODESolver(pll,par)
solver.x0 = [0,vdd,0,0]
solver.intersections = 1e7
solver.dt = 5e-11
solver.ttran = config.getfloat('Simulation','ttran')
solver.tstop = config.getfloat('Simulation','tout')
# Record the full trajectory only when requested; events are always recorded.
if config.getint('Simulation','trajectory'):
    solver.mode = 'trajectory + events'
else:
    solver.mode = 'events'

print('Starting simulation...')
solver.run()
print('Simulation finished...')
s = solver.solution()
util.saveH5file([s],'pll.h5')

# Plot the 4th state variable (w) against time; the state vector has 4
# components, hence the [3::4] stride over the flattened data.
from pylab import figure, plot, xlabel, ylabel, title, show, axis
figure()
plot(s.data['t'],s.data['x'][3::4],'k')
xlabel('t (s)')
ylabel('w (V)')
axis('tight')
show()
from click.testing import CliRunner

from envadmin.cli import cli
from tests.utilities import constants
from tests.utilities.fixtures import runner, temp_folder, temp_git_folder, temp_envadmin_folder  # noqa: F401


def test_e2e_namespace_create(runner, temp_git_folder, temp_envadmin_folder):  # noqa: F811
    """Creating a namespace makes it show up in `namespace list`."""
    # NOTE(review): the injected `runner` fixture is immediately replaced by a
    # fresh CliRunner in every test, so the fixture is only requested for its
    # setup side effects (if any) -- confirm this is intentional.
    runner = CliRunner()
    result = runner.invoke(cli, ["namespace", "-c", temp_git_folder, "create", "-n", constants.NAMESPACE2])
    assert result.exit_code == 0
    result = runner.invoke(cli, ["namespace", "-c", temp_git_folder, "list"])
    assert result.exit_code == 0
    assert constants.NAMESPACE2 in result.output


def test_e2e_namespace_delete_success(runner, temp_git_folder, temp_envadmin_folder):  # noqa: F811
    """Confirming deletion (input 'y') removes the namespace from the list.

    NOTE(review): deletes NAMESPACE2, which the create test above adds --
    these tests appear order-dependent; confirm the fixtures reset state.
    """
    runner = CliRunner()
    result = runner.invoke(cli, ["namespace", "-c", temp_git_folder, "delete", "-n", constants.NAMESPACE2], input="y")
    assert result.exit_code == 0
    result = runner.invoke(cli, ["namespace", "-c", temp_git_folder, "list"])
    assert result.exit_code == 0
    assert constants.NAMESPACE2 not in result.output


def test_e2e_namespace_delete_failure(runner, temp_git_folder, temp_envadmin_folder):  # noqa: F811
    """Declining the delete prompt (input 'n') leaves the namespace intact."""
    runner = CliRunner()
    result = runner.invoke(cli, ["namespace", "-c", temp_git_folder, "delete", "-n", constants.NAMESPACE1], input="n")
    assert result.exit_code == 0
    result = runner.invoke(cli, ["namespace", "-c", temp_git_folder, "list"])
    assert result.exit_code == 0
    assert constants.NAMESPACE1 in result.output
import warnings

import torchvision

try:
    # Prefer the compiled extension when available -- fastest implementation.
    import torch_extension

    _nms = torch_extension.nms
except ImportError:
    # Fix: compare numeric version components, not strings. Lexicographic
    # comparison ranks '0.10.0' below '0.3.0' and would wrongly pick the slow
    # Python fallback on newer torchvision. Local-version suffixes like
    # '0.10.0+cu113' and pre-release tails ('0a0') are stripped/skipped.
    _tv_version = tuple(
        int(piece)
        for piece in torchvision.__version__.split('+')[0].split('.')[:3]
        if piece.isdigit()
    )
    if _tv_version >= (0, 3):
        # torchvision ships a native NMS implementation since 0.3.0.
        _nms = torchvision.ops.nms
    else:
        from .python_nms import python_nms

        _nms = python_nms
        warnings.warn('You are using python version NMS, which is very very slow. Try compile c++ NMS '
                      'using `cd ext & python build.py build_ext develop`')


def boxes_nms(boxes, scores, nms_thresh, max_count=-1):
    """ Performs non-maximum suppression, run on GPU or CPU according to boxes's device.
    Args:
        boxes(Tensor): `xyxy` mode boxes, use absolute coordinates(or relative coordinates), shape is (n, 4)
        scores(Tensor): scores, shape is (n, )
        nms_thresh(float): thresh
        max_count (int): if > 0, then only the top max_proposals are kept after non-maximum suppression
    Returns:
        indices kept.
    """
    keep = _nms(boxes, scores, nms_thresh)
    if max_count > 0:
        # Indices come back sorted by decreasing score, so slicing keeps the best.
        keep = keep[:max_count]
    return keep
from django.contrib import admin

from .models import Reaction


@admin.register(Reaction)
class ReactionAdmin(admin.ModelAdmin):
    """Admin configuration for Reaction: show song, comment and date,
    newest first."""

    list_display = ['song', 'comment', 'date']
    ordering = ['-date']
import sys


class InvalidArgument(Exception):
    """Raised by anything under main() to propagate errors to user.

    The offending detail is kept on ``self.message`` for callers that want
    to display it without formatting the whole exception.
    """

    def __init__(self, message):
        self.message = message
        Exception.__init__(self, message)


class NoVirtualenvName(InvalidArgument):
    """No virtualenv name was given (insufficient arguments)."""


class NoVirtualenvsDirectory(InvalidArgument):
    """There is no directory to find named virtualenvs in."""


class OtherShell(InvalidArgument):
    """The given argument to --shell-config is not recognized."""


class UnknownArguments(InvalidArgument):
    """Unknown arguments were given on the command line.

    This is a byproduct of having to use parse_known_args.
    """


class InvalidVexrc(InvalidArgument):
    """config file specified or required but absent or unparseable."""


class InvalidVirtualenv(InvalidArgument):
    """No usable virtualenv was found."""


class InvalidCommand(InvalidArgument):
    """No runnable command was found."""


class InvalidCwd(InvalidArgument):
    """cwd specified or required but unusable."""


class BadConfig(InvalidArgument):
    """raised to halt on fatal conditions on the way to run."""


class VirtualenvAlreadyMade(InvalidArgument):
    """could not make virtualenv as one already existed."""


class VirtualenvNotMade(InvalidArgument):
    """could not make virtualenv."""


class VirtualenvNotRemoved(InvalidArgument):
    """raised when virtualenv could not be removed."""


# Python 3.3 split FileNotFoundError out of OSError; alias accordingly so
# callers can always catch CommandNotFoundError.
CommandNotFoundError = FileNotFoundError if sys.version_info > (3, 3) else OSError
#!/usr/bin/env python3
"""Demonstration of Python arithmetic, bitwise and comparison operators."""

a = 111
b = 20

print(a / b)   # true division
print(a % b)   # remainder
print(a ** b)  # exponentiation
print(a // b)  # floor division

# divmod returns quotient and remainder as a tuple
res, mod = divmod(a, b)
print(res, mod)

# Bitwise operators
print(~a)      # bitwise NOT
print(a & b)   # AND: bits set in both a and b
print(a | b)   # OR: bits set in a or b
print(a ^ b)   # XOR: bits set in exactly one of a, b
print(a << b)  # shift left by b bits
print(a >> b)  # shift right by b bits

# Comparison operators
print(a == b)  # a equals b
print(a != b)  # a differs from b
print(a < b)   # a is less than b
print(a > b)   # a is greater than b
print(a <= b)  # a is at most b
print(a >= b)  # a is at least b
print(a is b)      # identity (same object), not equality
print(a is not b)  # distinct objects

# Bug fix: membership tests need an iterable on the right-hand side --
# `a in b` with an int b raised TypeError in the original. Demonstrate the
# operators against a container instead.
seq = [a, b]
print(a in seq)      # a is contained in seq
print(a not in seq)  # a is not contained in seq
import hashlib, cProfile

# Wordlist of candidate passwords, one per line (machine-specific path).
f = open('C:/Users/Darius/Desktop/Projects/helloworld/Udel/AppliedCrypto/test.txt', 'r')
words = [word.strip() for word in f]
f.close()

# Fix: hashlib digests operate on bytes in Python 3, so the candidate
# strings must be encoded before hashing (the original passed str).
secretHash = hashlib.sha512('banana'.encode()).hexdigest()


def checkDictionary(secret):
    """Return every word whose SHA-512 hex digest equals *secret*."""
    return [word for word in words
            if hashlib.sha512(word.encode()).hexdigest() == secret]


# Profile the dictionary attack to see where the time goes.
cProfile.run('checkDictionary(secretHash)')
from typing import TypeVar, List, Optional, Generic

from pydantic import BaseModel
from pydantic.generics import GenericModel

# Type of a single resource in a paginated listing.
ResultT = TypeVar("ResultT")


class ListResponse(GenericModel, Generic[ResultT]):
    """One page of resources plus the token to fetch the following page."""

    resources: List[ResultT]
    # None when there are no further pages.
    next_page_token: Optional[str]


class ListRequest(BaseModel):
    """Common list-endpoint parameters: filtering and pagination."""

    filter: Optional[str]
    max_page_size: Optional[int] = 20
    page_token: Optional[str]
#!/usr/bin/env python # -*- coding: utf-8 -*- from .cos_client import CosClient from .cos_client import CosConfig from .cos_client import CredInfo from .cos_request import UploadFileRequest from .cos_request import UploadSliceFileRequest from .cos_request import UploadFileFromBufferRequest from .cos_request import UploadSliceFileFromBufferRequest from .cos_request import UpdateFileRequest from .cos_request import UpdateFolderRequest from .cos_request import DelFolderRequest from .cos_request import DelFileRequest from .cos_request import CreateFolderRequest from .cos_request import StatFileRequest from .cos_request import StatFolderRequest from .cos_request import ListFolderRequest from .cos_request import DownloadFileRequest from .cos_request import DownloadObjectRequest from .cos_request import MoveFileRequest from .cos_auth import Auth from .cos_cred import CredInfo import logging try: from logging import NullHandler except ImportError: class NullHandler(logging.Handler): def emit(self, record): pass logging.getLogger(__name__).addHandler(NullHandler())
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Builds mixed-speech feature files.

Mixes pairs of normalized recordings (speechmix.py), runs the external pole
extraction tool chain (poledistribv2-, maxima) and rasterizes the resulting
pole distributions into Gaussian-smoothed feature images.

NOTE(review): this module uses Python 2 `print` statements (see myshell),
so it must be run with a Python 2 interpreter.
"""
import pandas as pd  # for pandas see http://keisanbutsuriya.hateblo.jp/entry/201
import argparse
import numpy as np
import math
import subprocess
import glob
import os
from matplotlib import pylab as plt
from numpy.lib.stride_tricks import as_strided

# Corpus layout: speaker codes, session indices and spoken digits (Japanese).
S=['fhs', 'fms', 'mkk', 'mko','mmt','mnh','mym']
L=[1,2,3,4,5,6,7,8,9,10]
D=['zero','ichi','ni','san','si','go','roku','nana','hachi','kyu']
# Mixing weights (lambda values).
R=[1,0.8,0.6,0.4,0.2]

def myshell(cmd):
    """Run *cmd* through the shell and wait; log failures instead of raising."""
    #no stop even when error occured
    try:
        retcode=subprocess.Popen(cmd, shell=True)
        if retcode < 0:
            print "my Child was terminated by signal", -retcode
        else:
            print "my Child returned", retcode
    except OSError as e:
        print "Execution failed:", cmd, e
    return retcode.wait()

# Gaussian filter, cf. http://optie.hatenablog.com/entry/2018/03/21/185647#32
def norm2d(x,y,sigma):
    """2-D Gaussian density evaluated at (x, y)."""
    Z = np.exp(-(x**2 + y**2) / (2 * sigma**2)) / (2 * np.pi * sigma**2)
    return Z

def gaussian_kernel(size):
    """Return a Gaussian kernel of odd *size*, normalized to sum to 1."""
    if size%2==0:
        # Even sizes are silently bumped to the next odd value.
        size+=1
    sigma = (size-1)/2
    # Shift [0, size) to the symmetric range [-sigma, sigma].
    x = y = np.arange(0,size) - sigma
    X,Y = np.meshgrid(x,y)
    mat = norm2d(X,Y,sigma/2.)
    # Normalize so the kernel sums to one.
    kernel = mat / np.sum(mat)
    return kernel

def convolve2d_1ch(img, kernel, padding='0'):
    """Single-channel 2-D convolution implemented with stride tricks.

    padding: 'edge' | 'reflect' | anything else = zero padding.
    Returns None (after printing a message) on invalid input.
    """
    # Input validation
    if len(img.shape) != 2:
        print('img should be 2d-array')
        return
    if kernel.shape[0]%2 == 0:
        print('Kernel size shold be odd')
        return

    edge = int(kernel.shape[0]/2)
    # Border handling
    if padding=='edge':
        img = np.pad(img, [edge,edge], mode='edge')
    elif padding=='reflect':
        img = np.pad(img, [edge,edge], mode='reflect')
    else:
        img = np.pad(img, [edge,edge], mode='constant', constant_values=0)

    sub_shape = tuple(np.subtract(img.shape, kernel.shape) + 1)
    # (rows, cols, N, N)
    conv_shape = sub_shape + kernel.shape
    # img.strides is (bytes per row, bytes per element); duplicating it lets
    # as_strided expose every kernel-sized window without copying.
    strides = img.strides + img.strides
    submatrices = as_strided(img, conv_shape, strides)
    # Sum of element-wise products of the kernel with each (k, l) window.
    convolved_mat = np.einsum('ij, klij->kl', kernel, submatrices)
    return convolved_mat

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='speech normalize')
    parser.add_argument('-S', default='fhs:fms:mkk:mko:mmt:mnh:mym', type=str, help='speaker')
    parser.add_argument('-D', default='zero:ichi:ni:san:si:go:roku:nana:hachi:kyu', type=str, help='text')
    parser.add_argument('-L', default='1:2:3:4:5:6:7:8:9:10', type=str, help='datetime index')
    parser.add_argument('-R', default='1:0.8:0.6:0.4:0.2:0.1', type=str, help='lambda')
    parser.add_argument('-dl', default='../../12voicedata_ueki_all', type=str, help='folder involving original')
    parser.add_argument('-dn', default='../../12voicedata_ueki_all_n', type=str, help='folder involving normalized')
    parser.add_argument('-dm', default='../../12voicedata_ueki_all_m', type=str, help='folder involving mixed')
    parser.add_argument('-dq', default='../../12voicedata_ueki_all_q', type=str, help='folder involving q')
    parser.add_argument('-dp', default='../../12voicedata_ueki_all_p', type=str, help='folder involving p')
    parser.add_argument('-df', default='../../12voicedata_ueki_all_f', type=str, help='folder involving f')
    parser.add_argument('-N', default=24, type=int, help='Number of units')
    parser.add_argument('-sql', default='0', type=str, help='1 for search q lack')
    parser.add_argument('-DISP', default='10', type=str, help='DISP[0]==1 to make files, DISP[1]==1 to display')
    args = parser.parse_args()

    Sa=(args.S).split(':') #speakers
    Da=(args.D).split(':') #text
    La=(args.L).split(':') #index
    Ra=(args.R).split(':') #lambda
    dl=args.dl
    dn=args.dn
    dm=args.dm
    dq=args.dq
    dp=args.dp
    df=args.df
    N=args.N
    DISP=args.DISP
    # Recordings known to be bad; skipped everywhere below.
    fnerr=['mmt-san2', 'fhs-hachi5']
    nS=len(S)
    nL=len(L)
    nD=len(D)
    # Fixed seed so the random mixing partners are reproducible.
    np.random.seed(seed=32)

    # Make sure all output folders exist.
    for d in [dm, dq, dp, df]:
        if not os.path.exists(d):
            os.mkdir(d)

    if args.sql != '0':
        # "Search q lack" mode: for each weight, report speaker/digit/index
        # combinations for which no q file was produced.
        for r in Ra:
            fnq='{}/*R{}*'.format(dq,r)
            Fq=[(f.split('/')[-1])[:-6] for f in glob.glob(fnq)] #file list
            Fqb=[]
            for f in Fq:
                fl=f.split('-')
                Fqb.append(fl[0]+'-'+fl[1])
            for s in S:
                for d in D:
                    for l in L:
                        sm='{}-{}{}'.format(s,d,l)
                        if not sm in Fqb:
                            print('#no {} for R{}'.format(sm,r))
    else:
        # Feature-building mode.
        GK=gaussian_kernel(21)
        for r in Ra:
            fnq='{}/*R{}*'.format(dq,r)
            Fq=[(f.split('/')[-1])[:-6] for f in glob.glob(fnq)] #file list
            for s in Sa:
                for d in Da:
                    for l in La:
                        fn=s +'-'+ d + l
                        if not fn in fnerr:
                            myshell('make data-clean')
                            if r == '1':
                                # Weight 1 means no mixing: just copy the
                                # normalized recording into the mixed folder.
                                sm='{}-{}{}-R{}'.format(s,d,l,r)
                                cmd='cp {}/{}-{}{}.dat {}/{}.dat'.format(dn,s,d,l,dm,sm)
                            else:
                                # Draw a random partner recording (not broken,
                                # not already mixed for this weight).
                                errflag=1
                                while errflag == 1:
                                    s2=S[np.random.randint(nS)]
                                    d2=D[np.random.randint(nD)]
                                    l2=L[np.random.randint(nL)]
                                    fn2='{}-{}{}'.format(s2,d2,l2)
                                    sm='{}-{}{}-{}-{}{}-R{}'.format(s,d,l,s2,d2,l2,r)
                                    if (not fn2 in fnerr) and (not sm in Fq):
                                        errflag=0
                                cmd='python speechmix.py -y0 {}/{}-{}{}.dat -y1 {}/{}-{}{}.dat -r {} > {}/{}.dat'.format(dn,s,d,l,dn,s2,d2,l2,r,dm,sm)
                            # NOTE(review): the command built above is printed
                            # but never passed to myshell -- confirm whether it
                            # is meant to be executed.
                            print(cmd)
                            # NOTE(review): this format string interpolates the
                            # global list R instead of the loop variable r --
                            # likely a typo.
                            cmd='cp tmp/M.dat {}/s{}R{}d{}l{}-M.dat'.format(df,s,R,d,l)
                            myshell(cmd)
                            # poledistribv2- calls tspredv3 (creating
                            # tmp/*poles0.dat) and writes tmp/M.dat.
                            k=8;N=24;t0=0;t1=400
                            cmd='poledistribv2- st:{} N:{} k:{} m:poles4 dir:{} rsa:2:0.7:1:20 DiffMode:0 tt:{}:{} T:100 DISP:{}'.format(sm,N,k,dm,t0,t1,DISP)
                            myshell(cmd)
                            cmd='cp tmp/M.dat {}/M-s{}R{}.dat'.format(dq,s,R)
                            myshell(cmd);
                            # Reads tmp/M.dat and produces tmp/xyC.dat.
                            cmd='maxima < M2xyC.mac'
                            myshell(cmd);
                            cmd='cp tmp/M.dat {}/M-s{}R{}.dat'.format(dq,s,R)
                            myshell(cmd);
                            # NOTE(review): this copy command is built but
                            # never executed -- confirm intent.
                            cmd='cp tmp/xyC.dat {}/syC-s{}R{}.dat'.format(dq,s,R)
                            for n1 in range(N):
                                n=n1+1
                                # Extract the poles of unit n via fext.mac.
                                cmd='echo {}k:{};n:{};batch("fext.mac");quit();{}|maxima'.format("'",k,n,"'")
                                print(cmd);
                                myshell(cmd)
                                # Poles as (x, y, magnitude) rows.
                                fnxyC='tmp/poles-xyC.csv';
                                P=np.array(pd.read_csv(fnxyC,delim_whitespace=True,dtype=np.float32,header=None))
                                nP=P.shape[0] #=k
                                nW=100
                                nW0=nW/2
                                w=4.0 #[-2,2]
                                w=6.0 #[-3,3]
                                w0=w/2.0
                                wp=2. # width of pole
                                F=np.zeros((nW,nW), dtype=np.float32)
                                # Rasterize each pole onto the nW x nW grid.
                                for i in range(nP):
                                    X=int(nW0*(P[i,0]+w0)/w0)
                                    Y=int(nW0*(P[i,1]+w0)/w0)
                                    if X<0 or X>=nW or Y<0 or Y>=nW or P[i,1]==0:
                                        # Out-of-range or degenerate pole: skip.
                                        print('({},{}<={},{})'.format(X,Y,P[i,0],P[i,1]))
                                        pass
                                    else:
                                        F[Y,X]=P[i,2]
                                # Smooth with the Gaussian kernel, then
                                # L2-normalize the feature image.
                                FGK = np.zeros(F.shape)
                                FGK = convolve2d_1ch(F, GK, padding='edge')
                                FGK /=np.linalg.norm(FGK)
                                # NOTE(review): fpout is written here but its
                                # open() is commented out upstream -- as-is
                                # this raises NameError; confirm intent.
                                for i in range(nW):
                                    for j in range(nW):
                                        fpout.write('{:g} '.format(FGK[i,j]))
                                fpout.write('{}\n'.format(s))
                                plt.imshow(FGK);plt.title("Feature");plt.pause(0.05)
                            # NOTE(review): fnout is never assigned (its
                            # definition is commented out upstream).
                            print('#Saved {}.'.format(fnout))
import telebot
import time

# SECURITY: this bot token is hard-coded and has been exposed in source
# control -- it should be revoked and loaded from an environment variable.
bot_token = '1038832449:AAHG2kCCqUecwUW_1vn36FfC6kQg1lbvI7I'
bot = telebot.TeleBot(token=bot_token)


def find_at(msg):
    """Return the first whitespace-separated token containing '@', or None."""
    for text in msg:
        if '@' in text:
            return text


# Fix: all command handlers were previously named `send_welcome`, so every
# new `def` shadowed the previous one at module level. telebot registers
# each function at decoration time, so behavior was unaffected, but unique
# names keep the module introspectable and debuggable.

@bot.message_handler(commands=['start'])
def send_start(message):
    """Greet the user and ask for their matric number."""
    bot.reply_to(message, 'Welcome, My name is EduBot created to help you learn \n if you need help on how to command me, please select "/help" To start now, kindly Type your matric number starting with @ for Example @mat1121122.')


@bot.message_handler(commands=['help'])
def send_help(message):
    """Explain what the bot does and how to begin."""
    bot.reply_to(message, 'Hello, this is an interactive Bot created with the aim of helping you learn anytime with quiz. To start, simply select "/start". You can also check available course by selecting "/course"')


@bot.message_handler(func=lambda msg: msg.text is not None and '@' in msg.text)
def at_answer(message):
    """Acknowledge a message containing an @matric-number token."""
    texts = message.text.split()
    at_text = find_at(texts)
    # NOTE(review): the reply string contains no '{}' placeholder, so the
    # .format(at_text[1:]) call is a no-op -- the matric number was probably
    # meant to be interpolated into the greeting.
    bot.reply_to(message,'So nice to meet you. Please select /course to start learning right now'.format(at_text[1:]),)


@bot.message_handler(commands=['course'])
def send_courses(message):
    """List the available courses."""
    bot.reply_to(message, 'These are the available courses you can learn on this Bot\n /comp1 Introduction to computer \n /comp2 Cyber Security \n so which would you have me load for you righ now? \n remember you can always select /course to select another.')


@bot.message_handler(commands=['comp1'])
def send_comp1_outline(message):
    """Show the outline of the Introduction to Computer course."""
    bot.reply_to(message, 'Welcome to Introduction to Computer \n Course Outline \n 1.What is a computer \n 2.Your Personal Computer Hardware \n 3.The Processor and Memory \n 4.Internet and Networking \n /startcomp1')


@bot.message_handler(commands=['startcomp1'])
def send_comp1_lesson1(message):
    """Lesson 1: what a computer is."""
    bot.reply_to(message, 'What is a computer \n A computer is a machine that can be programmed to accept data (input), process it into useful information (output), and store it away (in a secondary storage device) for safekeeping or later reuse. ... Input devices accept data in a form that the computer can use; they then send the data to the processing unit. \n /startcomp2')


@bot.message_handler(commands=['startcomp2'])
def send_comp1_lesson2(message):
    """Lesson 2: personal computer hardware."""
    bot.reply_to(message, ' Your Personal Computer Hardware \n The input device, in this case, is a keyboard, which you use to type in the original essay and any changes you want to make to it. All computers, large and small, must have a central processing unit within the personal computer housing. The central processing unit under the direction of the word processing software accepts the data you input through the keyboard. Processed data from your personal computer is usually output in two forms: on a screen and eventually by a printer. As you key in the essay on the keyboard, it appears on the screen in front of you. After you examine the essay on the screen, make changes, and determine that it is acceptable, you can print the essay on the printer. Your secondary storage device in this case is a diskette, a magnetic medium that stores the essay until it is needed again. \n /startcomp3')


@bot.message_handler(commands=['startcomp3'])
def send_comp1_lesson3(message):
    """Lesson 3 (currently repeats lesson 2's text -- likely placeholder)."""
    bot.reply_to(message, ' Your Personal Computer Hardware \n The input device, in this case, is a keyboard, which you use to type in the original essay and any changes you want to make to it. All computers, large and small, must have a central processing unit within the personal computer housing. The central processing unit under the direction of the word processing software accepts the data you input through the keyboard. Processed data from your personal computer is usually output in two forms: on a screen and eventually by a printer. As you key in the essay on the keyboard, it appears on the screen in front of you. After you examine the essay on the screen, make changes, and determine that it is acceptable, you can print the essay on the printer. Your secondary storage device in this case is a diskette, a magnetic medium that stores the essay until it is needed again. \n /startcomp4')


@bot.message_handler(commands=['startcomp4'])
def send_comp1_lesson4(message):
    """Lesson 4: the processor and memory."""
    bot.reply_to(message, ' The Processor and Memory \n In a computer the processor is the center of activity. The processor, as we noted, is also called the central processing unit (CPU). The central processing unit consists of electronic circuits that interpret and execute program instructions, as well as communicate with the input, output, and storage devices. \n /compquiz')


@bot.message_handler(commands=['compquiz'])
def send_quiz_prompt(message):
    """Offer the end-of-course quiz or a restart of the course."""
    bot.reply_to(message, 'Hello, now that you are done with the four outlines for this course, its time to test if you really understand. \n To start the quiz select /compquiz \n To take the course again select /comp1 ')


# Keep polling; on any error back off briefly and reconnect.
while True:
    try:
        bot.polling()
    except Exception:
        time.sleep(15)
from redisgears import executeCommand as execute

# Well-known field/table names used by the simple hash backend.
SIMPLE_HASH_BACKEND_PK = 'SimpleHashBackendPK'
SIMPLE_HASH_BACKEND_TABLE = 'SimpleHashBackendTable'


class SimpleHashConnector():
    """Persist incoming records as Redis hashes under a configurable key prefix."""

    def __init__(self, newPefix):
        # 'newPefix' spelling preserved — it is part of the public interface.
        self.newPefix = newPefix

    def TableName(self):
        """Return the logical table name for this backend."""
        return SIMPLE_HASH_BACKEND_TABLE

    def PrimaryKey(self):
        """Return the field name that carries each record's primary key."""
        return SIMPLE_HASH_BACKEND_PK

    def WriteData(self, data):
        """
        Write each record in ``data`` to the hash '<prefix>:<pk>'.

        The primary-key and 'streamId' entries are stripped from the record,
        underscore-prefixed fields are skipped, and any HSET reply containing
        'ERR' raises.
        """
        for record in data:
            primary_key = record.pop(SIMPLE_HASH_BACKEND_PK)
            record.pop('streamId')  # stream bookkeeping is not persisted
            target_key = '%s:%s' % (self.newPefix, primary_key)
            field_pairs = [[name, value] for name, value in record.items()
                           if not name.startswith('_')]
            flat_args = [item for pair in field_pairs for item in pair]
            reply = execute('hset', target_key, *flat_args)
            if 'ERR' in str(reply):
                raise Exception(reply)
'''
Feature Extraction and Image Processing
Mark S. Nixon & Alberto S. Aguado
http://www.southampton.ac.uk/~msn/book/

Chapter 10
Reprojection: compute a projection from seven corresponding image and 3D
points, rotate the 3D cube points, and re-project the original image colours
to create a new view of the scene.
'''
# Set module functions
from ImageUtilities import imageReadRGB, imageReadL, showImageRGB, createImageRGB
from GeometricUtilities import projectionCubePoints, computeProjection, getPointColours, fillImageColours

# Math
from math import sin, cos

# Input locations: directory, image to re-project, and its mask.
image_dir = "../../Images/Chapter10/Input/"
image_name = "cube1.png"
mask_name = "mask1.png"

# Read image data and show the source image.
colour_image, width, height = imageReadRGB(image_dir + image_name)
mask_image, width, height = imageReadL(image_dir + mask_name)
showImageRGB(colour_image)

centre_x, centre_y = width / 2, height / 2

# Seven corresponding points: centred image coordinates and 3D cube corners.
image_points = [[131 - centre_x, 378 - centre_y], [110 - centre_x, 188 - centre_y],
                [200 - centre_x, 70 - centre_y], [412 - centre_x, 100 - centre_y],
                [410 - centre_x, 285 - centre_y], [349 - centre_x, 418 - centre_y],
                [345 - centre_x, 220 - centre_y]]
cube_corners = [[0, 0, 1], [0, 1, 1],
                [0, 1, 0], [1, 1, 0],
                [1, 0, 0], [1, 0, 1],
                [1, 1, 1]]

# Obtain the projection from the original correspondences.
projection = computeProjection(image_points, cube_corners)

# Sample the cube, project the samples, and pick up each point's colour.
samples_per_edge = 100
projected_xy = projectionCubePoints(samples_per_edge, projection, centre_x, centre_y)
point_colours = getPointColours(projected_xy, mask_image, colour_image)

# Rotate the cube corners about the cube centre (rotations about x and y axes).
rotated_corners = []
ang_y = .3
ang_x = -.2
cos_y, sin_y = cos(ang_y), sin(ang_y)
cos_x, sin_x = cos(ang_x), sin(ang_x)
for corner in cube_corners:
    sx, sy, sz = corner[0] - .5, corner[1] - .5, corner[2] - .5
    rx = .5 + cos_y * sx + sin_y * sz
    ry = .5 + sin_x * sin_y * sx + cos_x * sy - sin_x * cos_y * sz
    rz = .5 - cos_x * sin_y * sx + sin_x * sy + cos_x * cos_y * sz
    rotated_corners.append([rx, ry, rz])

# Projection for the rotated pose, and the cube point positions it yields.
projection = computeProjection(image_points, rotated_corners)
projected_xy = projectionCubePoints(samples_per_edge, projection, centre_x, centre_y)

# Paint the original colours at the transformed positions: the new view.
new_view = createImageRGB(width, height)
fillImageColours(point_colours, projected_xy, new_view)
showImageRGB(new_view)
# Core pvAccess (caproto.pva) protocol types: wire-level constants, the
# FieldDesc-related enums, the deserialization result containers, and the
# abstract serializer interfaces implemented elsewhere in the package.
import abc
import array
import ctypes
import dataclasses
import enum
import sys
import typing
from dataclasses import field
from typing import Dict, List, Optional, Tuple, Union

from ._utils import ChannelLifeCycle, _SimpleReprEnum

if typing.TYPE_CHECKING:
    from ._fields import BitSet, FieldDesc

# A (host, port) network address pair.
AddressTuple = Tuple[str, int]


class UserFacingEndian(str, _SimpleReprEnum):
    # Endianness markers reuse the struct-module prefix characters.
    LITTLE_ENDIAN = '<'
    BIG_ENDIAN = '>'


class StatusType(enum.IntEnum):
    # pvAccess status severities; OK (-1) carries no message, OK_VERBOSE does.
    OK = -1
    OK_VERBOSE = 0
    WARNING = 1
    ERROR = 2
    FATAL = 3


MAX_INT32 = 2 ** 31 - 1
# Default pvAccess server (TCP) and broadcast/search (UDP) ports.
PVA_SERVER_PORT, PVA_BROADCAST_PORT = 5075, 5076
LITTLE_ENDIAN = UserFacingEndian.LITTLE_ENDIAN
BIG_ENDIAN = UserFacingEndian.BIG_ENDIAN
SYS_ENDIAN = (LITTLE_ENDIAN if sys.byteorder == 'little' else BIG_ENDIAN)
QOS_PRIORITY_MASK = 0x7f

if sys.version_info >= (3, 8):
    # typing.Literal only exists on Python 3.8+; fall back to a NewType.
    Endian = typing.Literal['<', '>']
else:
    Endian = typing.NewType('Endian', str)


class TypeCode(enum.IntEnum):
    """
    Type code information for structures.

    Optionally used in FieldDesc descriptions, it allows for the
    synchronization of FieldDesc caches between client and server.
    """
    # No introspection data (also implies no data).
    NULL_TYPE_CODE = 0xFF
    # Serialization contains only an ID that was assigned by one of the
    # previous FULL_WITH_ID_TYPE_CODE or FULL_TAGGED_ID_TYPE_CODE descriptions.
    ONLY_ID_TYPE_CODE = 0xFE  # + ID
    # Serialization contains an ID (that can be used later, if cached) and full
    # interface description. Any existing definition with the same ID is
    # overriden.
    FULL_WITH_ID_TYPE_CODE = 0xFD  # + ID + FieldDesc
    # Not implemented:
    FULL_TAGGED_ID_TYPE_CODE = 0xFC  # + ID + tag + FieldDesc
    # RESERVED = 0xFB to 0xE0
    # FieldDesc
    FULL_TYPE_CODE = (0xDF - 0x00)


class FieldArrayType(enum.IntEnum):
    """
    The field array type information, indicating whether the associated field
    (of type :class:`FieldType`) is a scalar or array.
    """
    scalar = 0b00
    fixed_array = 0b11
    bounded_array = 0b10
    variable_array = 0b01

    @property
    def has_field_desc_size(self):
        'Fixed and bounded arrays have size information in FieldDesc'
        return self.value in (FieldArrayType.fixed_array,
                              FieldArrayType.bounded_array)

    @property
    def has_serialization_size(self):
        'Bounded and variable arrays serialize size information'
        return self.value in (FieldArrayType.bounded_array,
                              FieldArrayType.variable_array)

    def summary_with_size(self, size=None):
        # Compact textual marker for reprs: [n] fixed/variable, <n> bounded.
        if size is None:
            size = ''
        if self == FieldArrayType.fixed_array:
            return f'[{size}]'
        if self == FieldArrayType.bounded_array:
            return f'<{size}>'
        if self == FieldArrayType.variable_array:
            return f'[{size}]'
        return ''


class FieldDescByte(ctypes.Structure):
    """
    Field description first byte.

    Attributes
    ----------
    field_type : FieldType
        The field type information, indicating whether it's a structure,
        string, int, union, etc.
    array_type : FieldArrayType
        The array type - scalar, variable_array, etc.
    """
    # ctypes bit-fields, least-significant first: 3 type-specific bits,
    # 2 array-type bits, 3 basic-type bits — together one wire byte.
    _fields_ = [
        ('_type_specific', ctypes.c_ubyte, 3),
        ('_array_type', ctypes.c_ubyte, 2),
        ('_type', ctypes.c_ubyte, 3),
    ]

    def serialize(self, endian: Endian = None) -> typing.List[bytes]:
        # A single byte; endianness is irrelevant but kept for API symmetry.
        return [bytes(self)]

    @classmethod
    def deserialize(cls, data, endian: Endian = None) -> 'Deserialized':
        # Only the first byte is consumed.
        fd = cls.from_buffer(bytearray([data[0]]))
        return Deserialized(data=fd, buffer=data[1:], offset=1)

    @property
    def field_type(self) -> 'FieldType':
        # Recombine the split bit-fields into a FieldType value.
        return FieldType((self._type_specific << 5) | self._type)

    @property
    def array_type(self) -> FieldArrayType:
        return FieldArrayType(self._array_type)

    def __repr__(self):
        return (
            f'{self.__class__.__name__}(field_type={self.field_type!r}, '
            f'array_type={self.array_type!r})'
        )

    @classmethod
    def from_field(cls, field: 'FieldDesc') -> 'FieldDescByte':
        """Create a FieldDescByte from the given FieldDesc."""
        return cls(field.field_type._type_specific, field.array_type,
                   field.field_type._type)


class FieldType(enum.IntEnum):
    """
    The field type information, indicating whether it's a structure, string,
    int, union, etc.  Used in conjunction with FieldArrayType.
    """
    # complex_reserved1 = 11100100
    # complex_reserved2 = 11000100
    # complex_reserved3 = 10100100
    # complex_reserved4 = 10000100
    bounded_string = 0b01100100
    string = 0b00000011
    any = 0b01000100
    union = 0b00100100
    struct = 0b00000100
    float16 = 0b00100010
    float32 = 0b01000010
    float64 = 0b01100010
    float128 = 0b10000010
    uint64 = 0b11100001
    int64 = 0b01100001
    uint32 = 0b11000001
    int32 = 0b01000001
    uint16 = 0b10100001
    int16 = 0b00100001
    uint8 = 0b10000001
    int8 = 0b00000001
    boolean = 0b00000000

    @property
    def _type_specific(self) -> int:
        # Top 3 bits: size/signedness variant within the basic type.
        return (0b11100000 & self.value) >> 5

    @property
    def _type(self) -> int:
        # Bottom 3 bits: the basic type category.
        return 0b111 & self.value

    @property
    def is_complex(self) -> bool:
        """Is the FieldType a union, struct, or any field?"""
        return self in {FieldType.union, FieldType.struct, FieldType.any}

    @property
    def is_numeric(self) -> bool:
        """Is the FieldType integer or floating point?"""
        return self.is_integral or self.is_floating

    @property
    def is_integral(self) -> bool:
        """Is the FieldType integer-based?"""
        return self in {
            FieldType.uint64, FieldType.int64,
            FieldType.uint32, FieldType.int32,
            FieldType.uint16, FieldType.int16,
            FieldType.uint8, FieldType.int8,
        }

    @property
    def is_floating(self) -> bool:
        """Is the FieldType floating point?"""
        return self in {
            FieldType.float16, FieldType.float32,
            FieldType.float64, FieldType.float128,
        }

    @property
    def has_value(self) -> bool:
        'Can this field contain data directly?'
        return self not in {FieldType.union, FieldType.struct}


@dataclasses.dataclass
class CacheContext:
    """
    Per-VirtualCircuit cache context.

    Tracks Field Description information between clients and servers, and also
    those associated with specific I/O identifiers (ioids)

    Notes
    -----
    ``ours[fd_hash] = FieldDesc(..)``
    ``theirs[identifier] = fd_hash``
    ``ioid_interfaces[ioid] = FieldDesc(...)``
    """
    ours: Dict[int, 'FieldDesc'] = field(default_factory=dict)
    theirs: Dict[int, int] = field(default_factory=dict)
    # TODO: it may be possible to factor this out (I hope...)
    ioid_interfaces: Dict[int, 'FieldDesc'] = field(default_factory=dict)

    def clear(self):
        # Empty all three caches in place (references held elsewhere stay valid).
        for dct in (self.ours, self.theirs, self.ioid_interfaces):
            dct.clear()


class Deserialized(typing.Iterable):
    """
    Deserialization result container.

    Attributes
    ----------
    data : object
        The deserialized object.
    buffer : bytearray
        The remaining buffer contents, after consuming `data`.
    offset : int
        The number of bytes consumed in deserializing `data`, i.e., the offset
        to the buffer passed in to ``deserialize()``.
    """
    data: typing.Any
    buffer: Union[bytes, memoryview]
    offset: int

    # When True, __init__ additionally prints caller context for every
    # deserialized item — developer tracing only.
    SUPER_DEBUG = False

    if not SUPER_DEBUG:
        def __init__(self, data: typing.Any, buffer: bytes, offset: int):
            self.data = data
            self.buffer = buffer
            self.offset = offset
    else:
        def __init__(self, data: typing.Any, buffer: bytes, offset: int):
            self.data = data
            self.buffer = buffer
            self.offset = offset
            import inspect
            import textwrap
            # Print the two stack frames that triggered this deserialization.
            for idx in (3, 4):
                caller = inspect.stack()[idx]
                print(caller.filename, caller.lineno)
                print(textwrap.dedent(
                    '\n'.join(caller.code_context or [])).rstrip())
            print('------> deserialized', repr(self.data),
                  'next is', bytes(self.buffer)[:10], self.offset)

    def __repr__(self):
        return (
            f"{self.__class__.__name__}(data={self.data!r}, "
            f"buffer={self.buffer!r}, "
            f"offset={self.offset})"
        )

    def __iter__(self):
        # Allows ``data, buffer, offset = deserialized`` unpacking.
        return iter((self.data, self.buffer, self.offset))


class SegmentDeserialized(typing.Iterable):
    """
    Serialized messages may be segmented when sent over TCP with pvAccess.

    This class contains additional deserialization information necessary to
    track it, along with the usual :class:`Deserialized` information.

    Between segments, control messages can be interspersed according to the
    pvAccess specification.

    Attributes
    ----------
    data : Deserialized
        Contains the deserialized message, remaining data, bytes consumed.
    bytes_needed : int
        The number of bytes needed to finish the segment.
    segment_state : ChannelLifeCycle, bytes, or None
        Segmentation control information.
    """
    data: Deserialized
    bytes_needed: int
    segment_state: Optional[Union[ChannelLifeCycle, bytes]]

    def __init__(self, data: Deserialized, bytes_needed: int,
                 segment_state: Optional[Union[ChannelLifeCycle, bytes]]):
        self.data = data
        self.bytes_needed = bytes_needed
        self.segment_state = segment_state

    def __iter__(self):
        # Allows ``data, bytes_needed, segment_state = result`` unpacking.
        return iter((self.data, self.bytes_needed, self.segment_state))

    def __repr__(self):
        return (
            f"{self.__class__.__name__}(data={self.data!r}, "
            f"bytes_needed={self.bytes_needed!r}, "
            f"segment_state={self.segment_state})"
        )


class CoreSerializable(abc.ABC):
    """
    A serializable item.  May be instantiated (and hold state).
    """
    @abc.abstractmethod
    def serialize(self, endian: Endian) -> List[bytes]:
        ...

    # NOTE: declared with ``cls`` — implementations are expected to provide
    # this as a classmethod.
    @abc.abstractmethod
    def deserialize(cls, data: bytes, *, endian: Endian) -> Deserialized:
        ...


class CoreStatelessSerializable(abc.ABC):
    """
    A stateless, serializable item.

    Instance-level data may not be used as serialize and deserialize are class
    methods.
    """
    @abc.abstractmethod
    def serialize(self, value, endian: Endian) -> List[bytes]:
        ...

    @abc.abstractmethod
    def deserialize(cls, data: bytes, *, endian: Endian) -> Deserialized:
        ...


class CoreSerializableWithCache(abc.ABC):
    """
    A serializable item which uses the serialization cache context.

    May be instantiated (and hold state).  If additional state is necessary
    for deserialization, ``deserialize`` may be an instance method.
    """
    @abc.abstractmethod
    def serialize(self, endian: Endian, cache: CacheContext) -> List[bytes]:
        ...

    @abc.abstractmethod
    def deserialize(cls, data: bytes, *, endian: Endian,
                    cache: CacheContext) -> Deserialized:
        ...


class _DataSerializer(abc.ABC):
    """ABC for DataSerializer in caproto.pva._data."""

    @abc.abstractmethod
    def serialize(cls,
                  field: 'FieldDesc',
                  value: typing.Any,
                  endian: Endian,
                  bitset: 'BitSet' = None,
                  cache: CacheContext = None,
                  ) -> List[bytes]:
        ...

    @abc.abstractmethod
    def deserialize(cls,
                    field: 'FieldDesc',
                    data: bytes,
                    *,
                    endian: Endian,
                    bitset: 'BitSet' = None,
                    cache: CacheContext = None,
                    ) -> Deserialized:
        ...


class _ArrayBasedDataSerializer:
    """
    A data serializer which works not on an element-by-element basis, but
    rather with an array of elements.  Used for arrays of basic data types.
    """
    # NOTE(review): unlike _DataSerializer this class does not inherit from
    # abc.ABC, so the @abc.abstractmethod markers below are not enforced —
    # confirm whether that is intentional.

    @abc.abstractmethod
    def serialize(cls,
                  field: 'FieldDesc',
                  value: typing.Any,
                  endian: Endian,
                  bitset: 'BitSet' = None,
                  cache: CacheContext = None,
                  ) -> List[Union[bytes, array.array]]:
        ...

    @abc.abstractmethod
    def deserialize(cls,
                    field: 'FieldDesc',
                    data: bytes,
                    count: int,  # <-- note the count here
                    *,
                    endian: Endian,
                    bitset: Optional['BitSet'] = None,
                    cache: Optional[CacheContext] = None,
                    ) -> Deserialized:
        ...
# Iterative weakly-supervised text classification: seed words produce initial
# pseudo-labels, a BERT classifier is trained on them, high-confidence
# predictions feed the DPPred discriminative-pattern miner, and the mined
# rules produce the next round of pseudo-labels.
import pickle
from py.create_data import BOW, build_vocab, preprocess_df, dump_excel
from sklearn.metrics import classification_report
import numpy as np
import json
import subprocess
from py.calculate_coverage import process_rules, generate_mask
from py.bert_utils import train_bert, test
from py.util import get_distinct_labels, most_frequent
from scipy.stats import entropy
import sys
import pandas as pd
import os
import operator


def get_pseudo_labels_soft(df, rules, labels, label_to_index):
    """
    Soft-assign one label to every sample covered by at least one rule.

    Each covering rule adds its per-label probability vector to the sample's
    accumulated scores; the sample takes the argmax label.
    Returns (texts, assigned_labels, true_labels).
    """
    X = []
    y_true = []
    y = []
    ind_to_label_probs = {}
    for rule in rules:
        inds = rule["inds"]
        for i in inds:
            try:
                temp = ind_to_label_probs[i]
            except:
                ind_to_label_probs[i] = {}
            for lbl_ in labels:
                try:
                    ind_to_label_probs[i][lbl_] += rule["probs"][label_to_index[lbl_]]
                except:
                    ind_to_label_probs[i][lbl_] = rule["probs"][label_to_index[lbl_]]
    label_to_inds = {}
    for i in ind_to_label_probs:
        # argmax over accumulated scores for this sample
        lbl = max(ind_to_label_probs[i].items(), key=operator.itemgetter(1))[0]
        try:
            label_to_inds[lbl].append(i)
        except:
            label_to_inds[lbl] = [i]
    for l in label_to_inds:
        inds = list(label_to_inds[l])
        X += list(df.iloc[inds]["text"])
        y_true += list(df.iloc[inds]["label"])
        for index in inds:
            y.append(l)
    return X, y, y_true


def generate_pseudo_labels(df, labels, label_term_dict, tokenizer):
    """
    Pseudo-label each document by counting seed-word occurrences per label.

    A document gets the label whose seed words occur most often in it;
    documents containing no seed word at all are skipped.
    Returns (texts, assigned_labels, true_labels).
    """
    def argmax_label(count_dict):
        # Label with the largest total seed-word hit count.
        maxi = 0
        max_label = None
        for l in count_dict:
            count = 0
            for t in count_dict[l]:
                count += count_dict[l][t]
            if count > maxi:
                maxi = count
                max_label = l
        return max_label

    y = []
    X = []
    y_true = []
    index_word = {}
    for w in tokenizer.word_index:
        index_word[tokenizer.word_index[w]] = w
    for index, row in df.iterrows():
        line = row["text"]
        label = row["label"]
        # Map the document back to in-vocabulary words only.
        tokens = tokenizer.texts_to_sequences([line])[0]
        words = []
        for tok in tokens:
            words.append(index_word[tok])
        count_dict = {}
        flag = 0
        for l in labels:
            seed_words = set()
            for w in label_term_dict[l]:
                seed_words.add(w)
            int_labels = list(set(words).intersection(seed_words))
            if len(int_labels) == 0:
                continue
            for word in words:
                if word in int_labels:
                    flag = 1
                    try:
                        temp = count_dict[l]
                    except:
                        count_dict[l] = {}
                    try:
                        count_dict[l][word] += 1
                    except:
                        count_dict[l][word] = 1
        if flag:
            lbl = argmax_label(count_dict)
            if not lbl:
                continue
            y.append(lbl)
            X.append(line)
            y_true.append(label)
    return X, y, y_true


def associate_rules_to_labels(rules, word_index, bow_train, labels, label_to_index):
    """
    Annotate each rule in place with the samples it covers and label stats.

    Adds to each rule: 'inds' (covered sample indices), 'label' (majority
    label among them), 'probs' (label count vector), 'entropy'.
    """
    for rule in rules:
        mask = generate_mask(rule, word_index, bow_train)
        inds = list(np.where(mask)[0])
        sampled_labels = []
        for i in inds:
            sampled_labels.append(labels[i])
        count_arr = []
        for l in label_to_index:
            count_arr.append(sampled_labels.count(l))
        count_arr = np.array(count_arr)
        # NOTE(review): L2-normalised, not sum-normalised, so 'probs' is not a
        # probability distribution; scipy's entropy() re-normalises by the sum
        # internally, but downstream consumers of 'probs' should confirm.
        rule["probs"] = count_arr / np.linalg.norm(count_arr)
        rule["label"] = most_frequent(sampled_labels)
        rule["inds"] = set(inds)
        rule["entropy"] = entropy(rule["probs"], base=2)
    return rules


def get_conflict_pseudolabels(label_to_inds):
    """Return sample indices claimed by more than one label."""
    ints_inds = set()
    for l in label_to_inds:
        for j in label_to_inds:
            if l == j:
                continue
            ints_inds.update(label_to_inds[l].intersection(label_to_inds[j]))
    return ints_inds


def arrange_label_to_rules(rules):
    """Group rules by their majority label, sorted by ascending entropy."""
    label_to_rules = {}
    for rule in rules:
        try:
            label_to_rules[rule["label"]].append(rule)
        except:
            label_to_rules[rule["label"]] = [rule]
    for l in label_to_rules:
        label_to_rules[l] = sorted(label_to_rules[l], key=lambda x: x["entropy"])
    return label_to_rules


def normalize_entropy(label_to_rules):
    """Rescale each label's rule entropies into [0, 1] by the label's maximum."""
    for l in label_to_rules:
        max_ent = -1
        for r in label_to_rules[l]:
            max_ent = max(max_ent, r["entropy"])
        for i in range(len(label_to_rules[l])):
            label_to_rules[l][i]["entropy"] = label_to_rules[l][i]["entropy"] / max_ent
    return label_to_rules


def get_pseudo_labels(df, label_to_rules, intersection_threshold=50):
    """
    Build pseudo-labels by greedily consuming each label's lowest-entropy rules.

    Rules are taken round-robin across labels (one rule per label per pass);
    a label stops ('flagged') once its rules are exhausted or its next rule
    overlaps other labels' selections by more than ``intersection_threshold``
    samples.  Returns (texts, assigned_labels, true_labels).
    """
    X = []
    y_true = []
    y = []
    flagged = []
    label_to_inds = {}
    for l in label_to_rules:
        label_to_inds[l] = set([])
    i = 0
    while len(set(label_to_rules.keys()) - set(flagged)) > 0:
        for label in label_to_rules:
            if label in flagged:
                continue
            rules = label_to_rules[label]
            if i >= len(rules):
                flagged.append(label)
                continue
            rule = rules[i]
            intersection = 0
            inds = rule["inds"]
            intersection_inds = set()
            for l in label_to_inds:
                if l == label:
                    continue
                intersection_inds.update(inds.intersection(label_to_inds[l]))
                if len(intersection_inds) > intersection_threshold:
                    intersection = 1
                    break
            # compute intersection of rule with all other pseudo labels
            if intersection:
                flagged.append(label)
            else:
                # generate pseudo labels using inds
                selected_inds = inds - intersection_inds
                label_to_inds[label].update(selected_inds)
        i += 1
    for l in label_to_inds:
        inds = list(label_to_inds[l])
        X += list(df.iloc[inds]["text"])
        y_true += list(df.iloc[inds]["label"])
        for index in inds:
            y.append(l)
    return X, y, y_true


def filter_rules(label_to_rules, entropy_threshold=1.5):
    """Keep only rules with entropy below the threshold; return (rules, by-label)."""
    mod_rules = []
    mod_label_to_rules = {}
    for l in label_to_rules:
        mod_label_to_rules[l] = []
        for rule in label_to_rules[l]:
            if rule["entropy"] < entropy_threshold:
                mod_rules.append(rule)
                mod_label_to_rules[l].append(rule)
    return mod_rules, mod_label_to_rules


if __name__ == "__main__":
    # export PYTHONPATH="${PYTHONPATH}:/home/dheeraj/DPPred/py"
    home_path = "/home/dheeraj/DPPred/"
    # home_path = "/Users/dheerajmekala/Work/DPPred/"
    out_path = home_path + "output/"

    # use_gpu = 0
    # threshold = 0.4
    # gpu_id = 0
    use_gpu = int(sys.argv[1])
    threshold = float(sys.argv[2])  # min confidence for "high quality" samples
    gpu_id = int(sys.argv[3])

    base_path = "/data4/dheeraj/discpattern/"
    # base_path = "/Users/dheerajmekala/Work/DPPred/data/"
    dataset = "nyt_coarse"
    data_path = base_path + dataset + "/"
    dppred_data_path = base_path + "data/"  # Path from which DPPred model takes in data.

    df = pickle.load(open(data_path + "df.pkl", "rb"))
    # df = df[~df.label.isin(["science"])]
    # df = df.reset_index(drop=True)
    df = preprocess_df(df)
    tokenizer = pickle.load(open(data_path + "tokenizer.pkl", "rb"))
    word_index, index_word = build_vocab(tokenizer)
    label_term_dict = json.load(open(data_path + "seedwords.json", "r"))
    # label_term_dict.pop("science", None)
    labels, label_to_index, index_to_label = get_distinct_labels(df)
    bow_train = BOW(df["text"], tokenizer, index_word)

    it = 5  # number of self-training iterations
    rules = []
    for iteration in range(it):
        # i = 1
        # high_quality_inds = range(len(df))
        print("Iteration: ", iteration, flush=True)
        if iteration == 0:
            print("Generating pseudo labels from seed words")
            X, y, y_true = generate_pseudo_labels(df, labels, label_term_dict, tokenizer)
            print("****************** CLASSIFICATION REPORT FOR Seedwords Pseudolabels ********************")
            print(classification_report(y_true, y), flush=True)
        else:
            # get high probs predictions for every class
            # if iteration == 1:
            #     high_quality_inds = pickle.load(open(data_path + "high_quality_inds_first_it.pkl", "rb"))
            #     pred_labels = pickle.load(open(data_path + "pred_labels_first_it.pkl", "rb"))
            # high_quality_inds / pred_labels carry over from the previous
            # iteration's BERT predictions below.
            dic = {"text": [], "label": []}
            for high_qual_index in high_quality_inds:
                dic["text"].append(df["text"][high_qual_index])
                dic["label"].append(pred_labels[high_qual_index])
            df_tmp = pd.DataFrame.from_dict(dic)

            # create data for DPPred tmp
            tmp_path = dppred_data_path + dataset + "/"
            os.makedirs(tmp_path, exist_ok=True)
            print(df_tmp.label.value_counts())
            dump_excel(df_tmp, tmp_path, tokenizer, mode="all", is_categorical=True)

            print("Getting discriminative patterns", flush=True)
            rc = subprocess.call(home_path + "run.sh " + dataset + " classification", shell=True)
            print("End of DPPred", flush=True)

            f = open(out_path + dataset + "/rules.txt", "r")
            lines = f.readlines()
            f.close()
            rules = process_rules(lines)
            rules = associate_rules_to_labels(rules, word_index, bow_train, pred_labels, label_to_index)
            label_to_rules = arrange_label_to_rules(rules)
            label_to_rules = normalize_entropy(label_to_rules)
            if len(label_to_rules) != len(labels):
                raise Exception("Rules missing for labels: ", set(labels) - set(label_to_rules.keys()))
            pickle.dump(rules, open(data_path + "rules_" + str(iteration) + ".pkl", "wb"))
            rules, label_to_rules = filter_rules(label_to_rules, entropy_threshold=0.5)
            # X, y, y_true = get_pseudo_labels_soft(df, rules, labels, label_to_index)
            X, y, y_true = get_pseudo_labels(df, label_to_rules, intersection_threshold=10)
            # # Get the intersection ones and remove them
            # ints_inds = get_conflict_pseudolabels(label_to_inds)
            # print("Size of conflicting samples: ", len(ints_inds))
            #
            # X = []
            # y = []
            # y_true = []
            #
            # for l in label_to_inds:
            #     inds = list(label_to_inds[l] - ints_inds)
            #     X += list(df.iloc[inds]["text"])
            #     y_true += list(df.iloc[inds]["label"])
            #     for i in inds:
            #         y.append(l)
            print("****************** CLASSIFICATION REPORT FOR Rules Pseudolabels ********************", flush=True)
            print(classification_report(y_true, y), flush=True)

        # Train BERT on the current pseudo-labels and predict on all data.
        y_vec = []
        for lbl_ in y:
            y_vec.append(label_to_index[lbl_])
        model = train_bert(X, y_vec, use_gpu, gpu_id)

        y_true_all = []
        for lbl_ in df.label:
            y_true_all.append(label_to_index[lbl_])
        predictions = test(model, df["text"], y_true_all, use_gpu, gpu_id)
        # Concatenate the per-batch prediction arrays into one.
        for i, p in enumerate(predictions):
            if i == 0:
                pred = p
            else:
                pred = np.concatenate((pred, p))

        # Keep predictions above the confidence threshold for the next round.
        pred_labels = []
        high_quality_inds = []
        for i, p in enumerate(pred):
            pred_labels.append(index_to_label[p.argmax(axis=-1)])
            if (p.max(axis=-1) >= threshold):
                high_quality_inds.append(i)

        print("****************** CLASSIFICATION REPORT ON ALL DATA ********************", flush=True)
        print(classification_report(df["label"], pred_labels), flush=True)
        print("*" * 80, flush=True)

        if iteration == 0:
            pickle.dump(pred_labels, open(data_path + "pred_labels_first_it.pkl", "wb"))
            pickle.dump(high_quality_inds, open(data_path + "high_quality_inds_first_it.pkl", "wb"))
        else:
            pickle.dump(pred_labels, open(data_path + "pred_labels.pkl", "wb"))
            pickle.dump(high_quality_inds, open(data_path + "high_quality_inds.pkl", "wb"))

        # Per-sample rule coverage report for this iteration.
        res_dic = {"text": df["text"], "pred_label": pred_labels, "true_label": df["label"]}
        for l in labels:
            res_dic[l] = [0] * len(df["text"])
        for rule in rules:
            inds = rule["inds"]
            for ind in inds:
                res_dic[rule["label"]][ind] += 1
        df_res = pd.DataFrame.from_dict(res_dic)
        df_res.to_csv(data_path + "df_res_it_" + str(iteration) + ".csv")
        # generate pseudo labels from rules
        # train_classifier()
        # using predictions on whole dataset, get the high confidence predictions and get the rules.
# Find edges in an image with the Sobel operator (combined, horizontal, vertical).
from skimage.filters import sobel, sobel_h, sobel_v
from skimage import io, img_as_float
from skimage.morphology import disk
from skimage.color import rgb2gray

image = io.imread('/home/qiao/PythonProjects/Scikit-image_On_CT/Test_Img/4.jpg')
# image = rgb2gray(image)

# Show the source image first.
io.imshow(image)
io.show()

# Then each Sobel response in turn: full gradient, horizontal, vertical.
for edge_filter in (sobel, sobel_h, sobel_v):
    edges = edge_filter(image)
    io.imshow(edges)
    io.show()
class Solution:
    def rearrangeBarcodes(self, barcodes: List[int]) -> List[int]:
        """
        Reorder ``barcodes`` so that no two adjacent values are equal.

        The input is guaranteed (per the problem statement) to admit such an
        arrangement, i.e. the most frequent value occurs at most
        ceil(len(barcodes) / 2) times.

        Strategy: place values in descending frequency, filling the even
        indices (0, 2, 4, ...) first and then the odd indices (1, 3, 5, ...).
        Because the most frequent value fits entirely in the even slots, no
        value can end up adjacent to itself.  Runs in O(n log k) time for
        k distinct values and O(n) extra space.

        Parameters
        ----------
        barcodes : List[int]
            Barcode values to rearrange; must be non-empty.

        Returns
        -------
        List[int]
            A permutation of ``barcodes`` with no equal adjacent entries.
        """
        # Local import keeps the block self-contained; the enclosing file's
        # import section is outside this chunk.
        from collections import Counter

        n = len(barcodes)
        result = [0] * n
        slot = 0  # even positions first
        for value, count in Counter(barcodes).most_common():
            for _ in range(count):
                if slot >= n:
                    slot = 1  # even slots exhausted; switch to odd positions
                result[slot] = value
                slot += 2
        return result
import botocore.session import json import os import socket import pytest import mock from botocore.stub import Stubber from botocore.exceptions import ClientError from botocore.vendored.requests import ConnectionError as \ RequestsConnectionError from pytest import fixture from chalice import __version__ as chalice_version from chalice import Rate from chalice.app import Chalice from chalice.app import CORSConfig from chalice.awsclient import TypedAWSClient from chalice.awsclient import ResourceDoesNotExistError from chalice.awsclient import LambdaClientError from chalice.awsclient import DeploymentPackageTooLargeError from chalice.awsclient import LambdaErrorContext from chalice.config import Config, DeployedResources from chalice.policy import AppPolicyGenerator from chalice.deploy.deployer import ChaliceDeploymentError from chalice import constants from chalice.deploy.deployer import APIGatewayDeployer from chalice.deploy.deployer import ApplicationPolicyHandler from chalice.deploy.deployer import Deployer from chalice.deploy.deployer import LambdaDeployer from chalice.deploy.deployer import validate_configuration from chalice.deploy.deployer import validate_routes from chalice.deploy.deployer import validate_route_content_types from chalice.deploy.deployer import validate_python_version from chalice.deploy.deployer import validate_unique_function_names from chalice.deploy.packager import LambdaDeploymentPackager from chalice.utils import UI _SESSION = None class SimpleStub(object): def __init__(self, stubber): pass class InMemoryOSUtils(object): def __init__(self, filemap=None): if filemap is None: filemap = {} self.filemap = filemap def file_exists(self, filename): return filename in self.filemap def get_file_contents(self, filename, binary=True): return self.filemap[filename] def set_file_contents(self, filename, contents, binary=True): self.filemap[filename] = contents @fixture def stubbed_api_gateway(): return stubbed_client('apigateway') @fixture def 
stubbed_lambda(): return stubbed_client('lambda') @fixture def in_memory_osutils(): return InMemoryOSUtils() @fixture def app_policy(in_memory_osutils): return ApplicationPolicyHandler( in_memory_osutils, AppPolicyGenerator(in_memory_osutils)) def stubbed_client(service_name): global _SESSION if _SESSION is None: _SESSION = botocore.session.get_session() client = _SESSION.create_client(service_name, region_name='us-west-2') stubber = Stubber(client) return client, stubber @fixture def config_obj(sample_app): config = Config.create( chalice_app=sample_app, stage='dev', api_gateway_stage='api', ) return config @fixture def ui(): return mock.Mock(spec=UI) def test_api_gateway_deployer_initial_deploy(config_obj, ui): aws_client = mock.Mock(spec=TypedAWSClient, region_name='us-west-2') # The rest_api_id does not exist which will trigger # the initial import aws_client.get_rest_api_id.return_value = None aws_client.import_rest_api.return_value = 'rest-api-id' lambda_arn = 'arn:aws:lambda:us-west-2:account-id:function:func-name' d = APIGatewayDeployer(aws_client, ui) d.deploy(config_obj, None, {'api_handler_arn': lambda_arn}) # mock.ANY because we don't want to test the contents of the swagger # doc. That's tested exhaustively elsewhere. # We will do a basic sanity check to make sure it looks like a swagger # doc. 
aws_client.import_rest_api.assert_called_with(mock.ANY) first_arg = aws_client.import_rest_api.call_args[0][0] assert isinstance(first_arg, dict) assert 'swagger' in first_arg aws_client.deploy_rest_api.assert_called_with('rest-api-id', 'api') aws_client.add_permission_for_apigateway_if_needed.assert_called_with( 'func-name', 'us-west-2', 'account-id', 'rest-api-id', mock.ANY ) def test_api_gateway_deployer_redeploy_api(config_obj, ui): aws_client = mock.Mock(spec=TypedAWSClient, region_name='us-west-2') # The rest_api_id does not exist which will trigger # the initial import deployed = DeployedResources( None, None, None, 'existing-id', 'api', None, None, {}) aws_client.rest_api_exists.return_value = True lambda_arn = 'arn:aws:lambda:us-west-2:account-id:function:func-name' d = APIGatewayDeployer(aws_client, ui) d.deploy(config_obj, deployed, {'api_handler_arn': lambda_arn}) aws_client.update_api_from_swagger.assert_called_with('existing-id', mock.ANY) second_arg = aws_client.update_api_from_swagger.call_args[0][1] assert isinstance(second_arg, dict) assert 'swagger' in second_arg aws_client.deploy_rest_api.assert_called_with('existing-id', 'api') aws_client.add_permission_for_apigateway_if_needed.assert_called_with( 'func-name', 'us-west-2', 'account-id', 'existing-id', mock.ANY ) def test_api_gateway_deployer_delete(config_obj, ui): aws_client = mock.Mock(spec=TypedAWSClient, region_name='us-west-2') rest_api_id = 'abcdef1234' deployed = DeployedResources( None, None, None, rest_api_id, 'api', None, None, {}) aws_client.rest_api_exists.return_value = True d = APIGatewayDeployer(aws_client, ui) d.delete(deployed) aws_client.delete_rest_api.assert_called_with(rest_api_id) def test_api_gateway_deployer_delete_already_deleted(ui): rest_api_id = 'abcdef1234' aws_client = mock.Mock(spec=TypedAWSClient, region_name='us-west-2') aws_client.delete_rest_api.side_effect = ResourceDoesNotExistError( rest_api_id) deployed = DeployedResources( None, None, None, rest_api_id, 
'api', None, None, {}) aws_client.rest_api_exists.return_value = True d = APIGatewayDeployer(aws_client, ui) d.delete(deployed) output = [call[0][0] for call in ui.write.call_args_list] assert "No rest API with id %s found.\n" % rest_api_id in output aws_client.delete_rest_api.assert_called_with(rest_api_id) def test_policy_autogenerated_when_enabled(app_policy, in_memory_osutils): in_memory_osutils.filemap[os.path.join('.', 'app.py')] = '' config = Config.create(project_dir='.', autogen_policy=True) generated = app_policy.generate_policy_from_app_source(config) # We don't actually need to validate the exact policy, we'll just # check that it looks ok. assert 'Statement' in generated assert 'Version' in generated def test_can_load_non_stage_specific_name(app_policy, in_memory_osutils): # This is a test for backcompat loading of .chalice/policy.json # for the dev stage. The default name is suppose to include # the chalice stage name, e.g. policy-dev.json, but to support # existing use cases we'll look for .chalice/policy.json only # if you're in dev stage. 
previous_policy = '{"Statement": ["foo"]}' filename = os.path.join('.', '.chalice', 'policy-dev.json') in_memory_osutils.filemap[filename] = previous_policy config = Config.create(project_dir='.', autogen_policy=False) generated = app_policy.generate_policy_from_app_source(config) assert generated == json.loads(previous_policy) def test_can_provide_stage_specific_policy_file(app_policy, in_memory_osutils): policy_filename = 'my-custom-policy.json' config = Config.create(project_dir='.', autogen_policy=False, iam_policy_file=policy_filename, chalice_stage='dev') previous_policy = '{"Statement": ["foo"]}' filename = os.path.join('.', '.chalice', policy_filename) in_memory_osutils.filemap[filename] = previous_policy generated = app_policy.generate_policy_from_app_source(config) assert generated == json.loads(previous_policy) def test_can_provide_stage_specific_policy_for_other_stage(app_policy, in_memory_osutils): policy_filename = 'my-prod-filename.json' config = Config.create(project_dir='.', autogen_policy=False, iam_policy_file=policy_filename, chalice_stage='prod') previous_policy = '{"Statement": ["foo"]}' filename = os.path.join('.', '.chalice', policy_filename) in_memory_osutils.filemap[filename] = previous_policy generated = app_policy.generate_policy_from_app_source(config) assert generated == json.loads(previous_policy) def test_autogen_policy_for_non_dev_stage(app_policy, in_memory_osutils): in_memory_osutils.filemap[os.path.join('.', 'app.py')] = '' config = Config.create( project_dir='.', chalice_stage='prod', autogen_policy=True, ) generated = app_policy.generate_policy_from_app_source(config) assert 'Statement' in generated assert 'Version' in generated def test_no_policy_generated_when_disabled_in_config(app_policy, in_memory_osutils): previous_policy = '{"Statement": ["foo"]}' filename = os.path.join('.', '.chalice', 'policy-dev.json') in_memory_osutils.filemap[filename] = previous_policy config = Config.create(project_dir='.', autogen_policy=False) 
generated = app_policy.generate_policy_from_app_source(config) assert generated == json.loads(previous_policy) def test_load_last_policy_returns_policy_autogen_true_no_file(app_policy): expected_policy = {'Version': '2012-10-17', 'Statement': []} config = Config.create(project_dir='.', autogen_policy=True) loaded = app_policy.load_last_policy(config) assert expected_policy == loaded def test_load_last_policy_raises_error_when_file_does_not_exist(app_policy): with pytest.raises(RuntimeError): app_policy.load_last_policy(Config.create(project_dir='.')) def test_load_policy_raises_error_invalid_json(app_policy, in_memory_osutils): filename = os.path.join('.', '.chalice', 'policy-dev.json') in_memory_osutils.filemap[filename] = '{invalid json}' with pytest.raises(RuntimeError): app_policy.load_last_policy(Config.create(project_dir='.')) def test_load_policy_from_disk_when_file_exists(app_policy, in_memory_osutils): previous_policy = '{"Statement": ["foo"]}' config = Config.create(project_dir='.') filename = os.path.join('.', '.chalice', 'policy-dev.json') in_memory_osutils.filemap[filename] = previous_policy loaded = app_policy.load_last_policy(config) assert loaded == json.loads(previous_policy) def test_can_record_policy_to_disk(app_policy): latest_policy = {"Statement": ["policy"]} config = Config.create(project_dir='.') app_policy.record_policy(config, latest_policy) assert app_policy.load_last_policy(config) == latest_policy def test_trailing_slash_routes_result_in_error(): app = Chalice('appname') app.routes = {'/trailing-slash/': None} config = Config.create(chalice_app=app) with pytest.raises(ValueError): validate_configuration(config) def test_empty_route_results_in_error(): app = Chalice('appname') app.routes = {'': {}} config = Config.create(chalice_app=app) with pytest.raises(ValueError): validate_configuration(config) def test_validate_python_version_invalid(): config = mock.Mock(spec=Config) config.lambda_python_version = 'python1.0' with 
pytest.warns(UserWarning): validate_python_version(config) def test_python_version_invalid_from_real_config(): config = Config.create() with pytest.warns(UserWarning): validate_python_version(config, 'python1.0') def test_python_version_is_valid(): config = Config.create() with pytest.warns(None) as record: validate_python_version(config, config.lambda_python_version) assert len(record) == 0 def test_manage_iam_role_false_requires_role_arn(sample_app): config = Config.create(chalice_app=sample_app, manage_iam_role=False, iam_role_arn='arn:::foo') assert validate_configuration(config) is None def test_validation_error_if_no_role_provided_when_manage_false(sample_app): # We're indicating that we should not be managing the # IAM role, but we're not giving a role ARN to use. # This is a validation error. config = Config.create(chalice_app=sample_app, manage_iam_role=False) with pytest.raises(ValueError): validate_configuration(config) def test_validate_unique_lambda_function_names(sample_app): @sample_app.lambda_function() def foo(event, context): pass # This will cause a validation error because # 'foo' is already registered as a lambda function. 
@sample_app.lambda_function(name='foo') def bar(event, context): pass config = Config.create(chalice_app=sample_app, manage_iam_role=False) with pytest.raises(ValueError): validate_unique_function_names(config) def test_validate_names_across_function_types(sample_app): @sample_app.lambda_function() def foo(event, context): pass @sample_app.schedule('rate(1 hour)', name='foo') def bar(event): pass config = Config.create(chalice_app=sample_app, manage_iam_role=False) with pytest.raises(ValueError): validate_unique_function_names(config) def test_validate_names_using_name_kwarg(sample_app): @sample_app.authorizer(name='duplicate') def foo(auth_request): pass @sample_app.lambda_function(name='duplicate') def bar(event): pass config = Config.create(chalice_app=sample_app, manage_iam_role=False) with pytest.raises(ValueError): validate_unique_function_names(config) class TestChaliceDeploymentError(object): def test_general_exception(self): general_exception = Exception('My Exception') deploy_error = ChaliceDeploymentError(general_exception) deploy_error_msg = str(deploy_error) assert ( 'ERROR - While deploying your chalice application' in deploy_error_msg ) assert 'My Exception' in deploy_error_msg def test_lambda_client_error(self): lambda_error = LambdaClientError( Exception('My Exception'), context=LambdaErrorContext( function_name='foo', client_method_name='create_function', deployment_size=1024 ** 2 ) ) deploy_error = ChaliceDeploymentError(lambda_error) deploy_error_msg = str(deploy_error) assert ( 'ERROR - While sending your chalice handler code to ' 'Lambda to create function \n"foo"' in deploy_error_msg ) assert 'My Exception' in deploy_error_msg def test_lambda_client_error_wording_for_update(self): lambda_error = LambdaClientError( Exception('My Exception'), context=LambdaErrorContext( function_name='foo', client_method_name='update_function_code', deployment_size=1024 ** 2 ) ) deploy_error = ChaliceDeploymentError(lambda_error) deploy_error_msg = 
# NOTE(review): this file's newlines were collapsed during extraction; the
# token stream below is unchanged, only re-flowed onto conventional lines
# with review comments added.  The opening statements continue the test
# method begun on the previous source line, and the trailing parenthesised
# assert is completed on the next source line.
        str(deploy_error)
        assert (
            'sending your chalice handler code to '
            'Lambda to update function' in deploy_error_msg
        )

    def test_gives_where_and_suggestion_for_too_large_deployment_error(self):
        # The formatted error should say where the failure happened and
        # suggest shrinking the application.  (The 'pacakge' misspelling
        # is part of the exception text this test constructs.)
        too_large_error = DeploymentPackageTooLargeError(
            Exception('Too large of deployment pacakge'),
            context=LambdaErrorContext(
                function_name='foo',
                client_method_name='create_function',
                deployment_size=1024 ** 2,
            )
        )
        deploy_error = ChaliceDeploymentError(too_large_error)
        deploy_error_msg = str(deploy_error)
        assert (
            'ERROR - While sending your chalice handler code to '
            'Lambda to create function \n"foo"' in deploy_error_msg
        )
        assert 'Too large of deployment pacakge' in deploy_error_msg
        assert (
            'To avoid this error, decrease the size of your chalice '
            'application ' in deploy_error_msg
        )

    def test_include_size_context_for_too_large_deployment_error(self):
        # With a 58 MB package the message should name both the actual
        # size and the 50 MB bound.
        too_large_error = DeploymentPackageTooLargeError(
            Exception('Too large of deployment pacakge'),
            context=LambdaErrorContext(
                function_name='foo',
                client_method_name='create_function',
                deployment_size=58 * (1024 ** 2),
            )
        )
        deploy_error = ChaliceDeploymentError(
            too_large_error)
        deploy_error_msg = str(deploy_error)
        print(repr(deploy_error_msg))
        assert 'deployment package is 58.0 MB' in deploy_error_msg
        assert '50.0 MB or less' in deploy_error_msg
        assert 'To avoid this error' in deploy_error_msg

    def test_error_msg_for_general_connection(self):
        # A generic connection error keeps the 'Connection aborted.'
        # message but drops the vague socket-level reason.
        lambda_error = DeploymentPackageTooLargeError(
            RequestsConnectionError(
                Exception(
                    'Connection aborted.',
                    socket.error('Some vague reason')
                )
            ),
            context=LambdaErrorContext(
                function_name='foo',
                client_method_name='create_function',
                deployment_size=1024 ** 2
            )
        )
        deploy_error = ChaliceDeploymentError(lambda_error)
        deploy_error_msg = str(deploy_error)
        assert 'Connection aborted.' in deploy_error_msg
        assert 'Some vague reason' not in deploy_error_msg

    def test_simplifies_error_msg_for_broken_pipe(self):
        # A socket.error(32, 'Broken pipe') is rendered with the friendlier
        # 'Lambda closed the connection' wording.
        lambda_error = DeploymentPackageTooLargeError(
            RequestsConnectionError(
                Exception(
                    'Connection aborted.',
                    socket.error(32, 'Broken pipe')
                )
            ),
            context=LambdaErrorContext(
                function_name='foo',
                client_method_name='create_function',
                deployment_size=1024 ** 2
            )
        )
        deploy_error = ChaliceDeploymentError(lambda_error)
        deploy_error_msg = str(deploy_error)
        assert (
            'Connection aborted. Lambda closed the connection'
            in deploy_error_msg
        )

    def test_simplifies_error_msg_for_timeout(self):
        # A socket.timeout gets a dedicated timeout explanation; the assert
        # expression is closed on the next source line.
        lambda_error = DeploymentPackageTooLargeError(
            RequestsConnectionError(
                Exception(
                    'Connection aborted.',
                    socket.timeout('The write operation timed out')
                )
            ),
            context=LambdaErrorContext(
                function_name='foo',
                client_method_name='create_function',
                deployment_size=1024 ** 2
            )
        )
        deploy_error = ChaliceDeploymentError(lambda_error)
        deploy_error_msg = str(deploy_error)
        assert (
            'Connection aborted. Timed out sending your app to Lambda.'
in deploy_error_msg ) class TestDeployer(object): def test_can_deploy_apig_and_lambda(self, sample_app, ui): lambda_deploy = mock.Mock(spec=LambdaDeployer) apig_deploy = mock.Mock(spec=APIGatewayDeployer) lambda_deploy.deploy.return_value = { 'api_handler_name': 'lambda_function', 'api_handler_arn': 'my_lambda_arn', } apig_deploy.deploy.return_value = ('api_id', 'region', 'stage') d = Deployer(apig_deploy, lambda_deploy, ui) cfg = Config.create( chalice_stage='dev', chalice_app=sample_app, project_dir='.') d.deploy(cfg) lambda_deploy.deploy.assert_called_with(cfg, None, 'dev') apig_deploy.deploy.assert_called_with(cfg, None, { 'rest_api_id': 'api_id', 'chalice_version': chalice_version, 'region': 'region', 'api_gateway_stage': 'stage', 'api_handler_name': 'lambda_function', 'api_handler_arn': 'my_lambda_arn', 'backend': 'api' }) def test_deployer_returns_deployed_resources(self, sample_app, ui): cfg = Config.create( chalice_stage='dev', chalice_app=sample_app, project_dir='.', ) lambda_deploy = mock.Mock(spec=LambdaDeployer) apig_deploy = mock.Mock(spec=APIGatewayDeployer) apig_deploy.deploy.return_value = ('api_id', 'region', 'stage') lambda_deploy.deploy.return_value = { 'api_handler_name': 'lambda_function', 'api_handler_arn': 'my_lambda_arn', } d = Deployer(apig_deploy, lambda_deploy, ui) deployed_values = d.deploy(cfg) assert deployed_values == { 'dev': { 'backend': 'api', 'api_handler_arn': 'my_lambda_arn', 'api_handler_name': 'lambda_function', 'rest_api_id': 'api_id', 'api_gateway_stage': 'stage', 'region': 'region', 'chalice_version': chalice_version, } } def test_deployer_delete_calls_deletes(self, ui): # Check that the deployer class calls other deployer classes delete # methods. 
lambda_deploy = mock.Mock(spec=LambdaDeployer) apig_deploy = mock.Mock(spec=APIGatewayDeployer) cfg = mock.Mock(spec=Config) deployed_resources = DeployedResources.from_dict({ 'backend': 'api', 'api_handler_arn': 'lambda_arn', 'api_handler_name': 'lambda_name', 'rest_api_id': 'rest_id', 'api_gateway_stage': 'dev', 'region': 'us-west-2', 'chalice_version': '0', 'lambda_functions': {}, }) cfg.deployed_resources.return_value = deployed_resources d = Deployer(apig_deploy, lambda_deploy, ui) d.delete(cfg) lambda_deploy.delete.assert_called_with(deployed_resources) apig_deploy.delete.assert_called_with(deployed_resources) def test_deployer_does_not_call_delete_when_no_resources(self, ui): # If there is nothing to clean up the deployer should not call delete. lambda_deploy = mock.Mock(spec=LambdaDeployer) apig_deploy = mock.Mock(spec=APIGatewayDeployer) cfg = mock.Mock(spec=Config) deployed_resources = None cfg.deployed_resources.return_value = deployed_resources d = Deployer(apig_deploy, lambda_deploy, ui) d.delete(cfg) output = [call[0][0] for call in ui.write.call_args_list] assert 'No existing resources found for stage dev.\n' in output lambda_deploy.delete.assert_not_called() apig_deploy.delete.assert_not_called() def test_raises_deployment_error_for_botcore_client_error(self, sample_app, ui): lambda_deploy = mock.Mock(spec=LambdaDeployer) apig_deploy = mock.Mock(spec=APIGatewayDeployer) lambda_deploy.deploy.side_effect = ClientError( { 'Error': { 'Code': 'AccessDenied', 'Message': 'Denied' } }, 'CreateFunction' ) d = Deployer(apig_deploy, lambda_deploy, ui) cfg = Config.create( chalice_stage='dev', chalice_app=sample_app, project_dir='.', ) with pytest.raises(ChaliceDeploymentError) as excinfo: d.deploy(cfg) assert excinfo.match('ERROR - While deploying') assert excinfo.match('Denied') def test_raises_deployment_error_for_lambda_client_error(self, sample_app, ui): lambda_deploy = mock.Mock(spec=LambdaDeployer) apig_deploy = mock.Mock(spec=APIGatewayDeployer) 
lambda_deploy.deploy.side_effect = LambdaClientError( Exception('my error'), context=LambdaErrorContext( function_name='foo', client_method_name='create_function', deployment_size=1024 ** 2 ) ) d = Deployer(apig_deploy, lambda_deploy, ui) cfg = Config.create( chalice_stage='dev', chalice_app=sample_app, project_dir='.', ) with pytest.raises(ChaliceDeploymentError) as excinfo: d.deploy(cfg) assert excinfo.match('ERROR - While sending') assert excinfo.match('my error') def test_raises_deployment_error_for_apig_error(self, sample_app, ui): lambda_deploy = mock.Mock(spec=LambdaDeployer) apig_deploy = mock.Mock(spec=APIGatewayDeployer) lambda_deploy.deploy.return_value = { 'api_handler_name': 'lambda_function', 'api_handler_arn': 'my_lambda_arn', } apig_deploy.deploy.side_effect = ClientError( { 'Error': { 'Code': 'AccessDenied', 'Message': 'Denied' } }, 'CreateStage' ) d = Deployer(apig_deploy, lambda_deploy, ui) cfg = Config.create( chalice_stage='dev', chalice_app=sample_app, project_dir='.', ) with pytest.raises(ChaliceDeploymentError) as excinfo: d.deploy(cfg) assert excinfo.match('ERROR - While deploying') assert excinfo.match('Denied') def test_deployer_does_not_reuse_pacakge_on_python_version_change( app_policy, sample_app, ui): osutils = InMemoryOSUtils({'packages.zip': b'package contents'}) aws_client = mock.Mock(spec=TypedAWSClient) packager = mock.Mock(spec=LambdaDeploymentPackager) def write_deployment_file(*args, **kwargs): osutils.set_file_contents('package2.zip', b'changed contents') return 'package2.zip' packager.deployment_package_filename.return_value = 'packages2.zip' packager.create_deployment_package.side_effect = write_deployment_file # Given the lambda function already exists: aws_client.lambda_function_exists.return_value = True aws_client.update_function.return_value = {"FunctionArn": "myarn"} # And given we don't want chalice to manage our iam role for the lambda # function: cfg = Config.create( chalice_stage='dev', chalice_app=sample_app, 
manage_iam_role=False, app_name='appname', iam_role_arn='role-arn', project_dir='./myproject', environment_variables={"FOO": "BAR"}, lambda_timeout=120, lambda_memory_size=256, tags={'mykey': 'myvalue'} ) # Pick a fake python version that will not match our current runtime under # both 2.7 and 3.6. aws_client.get_function_configuration.return_value = { 'Runtime': 'python1.0', } ui.confirm.return_value = True d = LambdaDeployer(aws_client, packager, ui, osutils, app_policy) lambda_function_name = 'lambda_function_name' deployed = DeployedResources( 'api', 'api_handler_arn', lambda_function_name, None, 'dev', None, None, {}) d.deploy(cfg, deployed, 'dev') # Since the python version changed only the create_deployment_package # method should be called. Injecting the lastest app would only get called # if the python dependences could stay the same, so it must not be called # in this case. assert packager.create_deployment_package.called assert packager.inject_latest_app.called is False def test_lambda_deployer_repeated_deploy(app_policy, sample_app, ui): osutils = InMemoryOSUtils({'packages.zip': b'package contents'}) aws_client = mock.Mock(spec=TypedAWSClient) packager = mock.Mock(spec=LambdaDeploymentPackager) packager.deployment_package_filename.return_value = 'packages.zip' # Given the lambda function already exists: aws_client.lambda_function_exists.return_value = True aws_client.update_function.return_value = {"FunctionArn": "myarn"} # And given we don't want chalice to manage our iam role for the lambda # function: cfg = Config.create( chalice_stage='dev', chalice_app=sample_app, manage_iam_role=False, app_name='appname', iam_role_arn='role-arn', project_dir='./myproject', environment_variables={"FOO": "BAR"}, lambda_timeout=120, lambda_memory_size=256, tags={'mykey': 'myvalue'} ) aws_client.get_function_configuration.return_value = { 'Runtime': cfg.lambda_python_version, } d = LambdaDeployer(aws_client, packager, ui, osutils, app_policy) # Doing a lambda deploy: 
# NOTE(review): newlines were collapsed during extraction; the token stream
# below is unchanged, only re-flowed with review comments added.  The
# opening statements continue the repeated-deploy test begun on the
# previous source line, and the last test's final assertion appears on the
# next source line.
    lambda_function_name = 'lambda_function_name'
    deployed = DeployedResources(
        'api', 'api_handler_arn', lambda_function_name, None, 'dev',
        None, None, {})
    d.deploy(cfg, deployed, 'dev')
    # Should result in injecting the latest app code.
    packager.inject_latest_app.assert_called_with('packages.zip',
                                                  './myproject')
    # And should result in the lambda function being updated with the API.
    aws_client.update_function.assert_called_with(
        function_name=lambda_function_name,
        zip_contents=b'package contents',
        runtime=cfg.lambda_python_version,
        environment_variables={"FOO": "BAR"},
        tags={
            'aws-chalice': 'version=%s:stage=%s:app=%s' % (
                chalice_version, 'dev', 'appname'),
            'mykey': 'myvalue'
        },
        timeout=120,
        memory_size=256,
        role_arn='role-arn'
    )


def test_lambda_deployer_delete(ui):
    # Delete should remove the API handler, each recorded extra lambda
    # function, and the role named after the handler's role ARN suffix.
    aws_client = mock.Mock(spec=TypedAWSClient)
    aws_client.get_role_arn_for_name.return_value = 'arn_prefix/role_name'
    lambda_function_name = 'api-handler'
    deployed = DeployedResources(
        'api', 'api_handler_arn/lambda_name', lambda_function_name,
        None, 'dev', None, None,
        {'name': {'arn': 'auth-arn'}})
    ui.confirm.return_value = True
    d = LambdaDeployer(
        aws_client, None, ui, None, None)
    d.delete(deployed)
    aws_client.get_role_arn_for_name.assert_called_with(lambda_function_name)
    assert aws_client.delete_function.call_args_list == [
        mock.call('api-handler'),
        mock.call('auth-arn'),
    ]
    aws_client.delete_role.assert_called_with('role_name')


def test_lambda_deployer_delete_rule(ui):
    # A 'scheduled_event' function is deleted like any other, and its rule
    # is removed via delete_rule (asserted on the next source line).
    aws_client = mock.Mock(spec=TypedAWSClient)
    aws_client.get_role_arn_for_name.return_value = 'arn_prefix/role_name'
    lambda_function_name = 'api-handler'
    deployed = DeployedResources(
        'api', 'api_handler_arn/lambda_name', lambda_function_name,
        None, 'dev', None, None,
        {'name': {'arn': 'schedule-arn', 'type': 'scheduled_event'}})
    ui.confirm.return_value = True
    d = LambdaDeployer(
        aws_client, None, ui, None, None)
    d.delete(deployed)
    assert aws_client.delete_function.call_args_list == [
        mock.call('api-handler'),
        mock.call('schedule-arn'),
    ]
aws_client.delete_rule.assert_called_with(rule_name='name') def test_lambda_deployer_delete_already_deleted(ui): lambda_function_name = 'lambda_name' aws_client = mock.Mock(spec=TypedAWSClient) aws_client.get_role_arn_for_name.return_value = 'arn_prefix/role_name' aws_client.delete_function.side_effect = ResourceDoesNotExistError( lambda_function_name) deployed = DeployedResources( 'api', 'api_handler_arn/lambda_name', lambda_function_name, None, 'dev', None, None, {}) d = LambdaDeployer( aws_client, None, ui, None, None) d.delete(deployed) # check that we printed that no lambda function with that name was found output = [call[0][0] for call in ui.write.call_args_list] assert ("No lambda function named %s found.\n" % lambda_function_name in output) aws_client.delete_function.assert_called_with(lambda_function_name) def test_can_reject_policy_change(sample_app, ui): app_policy = mock.Mock(spec=ApplicationPolicyHandler) app_policy.generate_policy_from_app_source.return_value = { 'Statement': [{'policy': 1}, {'policy': 2}], } osutils = InMemoryOSUtils({'packages.zip': b'package contents'}) aws_client = mock.Mock(spec=TypedAWSClient) packager = mock.Mock(spec=LambdaDeploymentPackager) aws_client.get_role_arn_for_name.side_effect = ResourceDoesNotExistError( 'function-name') cfg = Config.create( chalice_stage='dev', chalice_app=sample_app, manage_iam_role=True, app_name='appname', project_dir='.', ) ui.confirm.side_effect = RuntimeError("Aborted") d = LambdaDeployer(aws_client, packager, ui, osutils, app_policy) with pytest.raises(RuntimeError): d.deploy(cfg, existing_resources=None, stage_name='dev') # Assert the policy was written to stdout ui.write.assert_called_with( '{\n' ' "Statement": [\n' ' {\n' ' "policy": 1\n' ' },\n' ' {\n' ' "policy": 2\n' ' }\n' ' ]\n' '}\n' ) def test_prompted_on_runtime_change_can_reject_change(app_policy, sample_app, ui): osutils = InMemoryOSUtils({'packages.zip': b'package contents'}) aws_client = mock.Mock(spec=TypedAWSClient) packager 
= mock.Mock(spec=LambdaDeploymentPackager) packager.deployment_package_filename.return_value = 'packages.zip' aws_client.lambda_function_exists.return_value = True aws_client.get_function_configuration.return_value = { 'Runtime': 'python1.0', } aws_client.update_function.return_value = {"FunctionArn": "myarn"} cfg = Config.create( chalice_stage='dev', chalice_app=sample_app, manage_iam_role=False, app_name='appname', iam_role_arn=True, project_dir='./myproject', environment_variables={"FOO": "BAR"}, ) ui.confirm.side_effect = RuntimeError("Aborted") d = LambdaDeployer(aws_client, packager, ui, osutils, app_policy) # Doing a lambda deploy with a different runtime: lambda_function_name = 'lambda_function_name' deployed = DeployedResources( 'api', 'api_handler_arn', lambda_function_name, None, 'dev', None, None, {}) with pytest.raises(RuntimeError): d.deploy(cfg, deployed, 'dev') assert not packager.inject_latest_app.called assert not aws_client.update_function.called assert ui.confirm.called message = ui.confirm.call_args[0][0] assert 'runtime will change' in message def test_lambda_deployer_initial_deploy(app_policy, sample_app, ui): osutils = InMemoryOSUtils({'packages.zip': b'package contents'}) aws_client = mock.Mock(spec=TypedAWSClient) aws_client.create_function.return_value = 'lambda-arn' packager = mock.Mock(spec=LambdaDeploymentPackager) packager.create_deployment_package.return_value = 'packages.zip' cfg = Config.create( chalice_stage='dev', app_name='myapp', chalice_app=sample_app, manage_iam_role=False, iam_role_arn='role-arn', project_dir='.', environment_variables={"FOO": "BAR"}, lambda_timeout=120, lambda_memory_size=256, tags={'mykey': 'myvalue'} ) d = LambdaDeployer(aws_client, packager, ui, osutils, app_policy) deployed = d.deploy(cfg, None, 'dev') assert deployed == { 'api_handler_arn': 'lambda-arn', 'api_handler_name': 'myapp-dev', 'lambda_functions': {}, } aws_client.create_function.assert_called_with( function_name='myapp-dev', 
# NOTE(review): newlines were collapsed during extraction; the token stream
# below is unchanged, only re-flowed with review comments added.  The
# opening keyword arguments complete the create_function assertion begun on
# the previous source line, and the final pytest.fail() call is closed on
# the next source line.
        role_arn='role-arn',
        zip_contents=b'package contents',
        environment_variables={"FOO": "BAR"},
        runtime=cfg.lambda_python_version,
        tags={
            'aws-chalice': 'version=%s:stage=dev:app=myapp' % chalice_version,
            'mykey': 'myvalue'
        },
        handler='app.app',
        timeout=120,
        memory_size=256,
    )


class TestValidateCORS(object):
    # Validation rules for per-route CORS configuration.

    def test_cant_have_options_with_cors(self, sample_app):
        # A route may not declare the OPTIONS method together with CORS.
        @sample_app.route('/badcors', methods=['GET', 'OPTIONS'], cors=True)
        def badview():
            pass

        with pytest.raises(ValueError):
            validate_routes(sample_app.routes)

    def test_cant_have_differing_cors_configurations(self, sample_app):
        # Two views sharing one route must agree on their CORS settings.
        custom_cors = CORSConfig(
            allow_origin='https://foo.example.com',
            allow_headers=['X-Special-Header'],
            max_age=600,
            expose_headers=['X-Special-Header'],
            allow_credentials=True
        )

        @sample_app.route('/cors', methods=['GET'], cors=True)
        def cors():
            pass

        @sample_app.route('/cors', methods=['PUT'], cors=custom_cors)
        def different_cors():
            pass

        with pytest.raises(ValueError):
            validate_routes(sample_app.routes)

    def test_can_have_same_cors_configurations(self, sample_app):
        # Identical cors=True settings on a shared route must validate.
        @sample_app.route('/cors', methods=['GET'], cors=True)
        def cors():
            pass

        @sample_app.route('/cors', methods=['PUT'], cors=True)
        def same_cors():
            pass

        try:
            validate_routes(sample_app.routes)
        except ValueError:
            pytest.fail(
                'A ValueError was unexpectedly thrown. Applications '
                'may have multiple view functions that share the same '
                'route and CORS configuration.'
            )

    def test_can_have_same_custom_cors_configurations(self, sample_app):
        # Two equal-but-distinct CORSConfig instances count as the same
        # configuration.
        custom_cors = CORSConfig(
            allow_origin='https://foo.example.com',
            allow_headers=['X-Special-Header'],
            max_age=600,
            expose_headers=['X-Special-Header'],
            allow_credentials=True
        )

        @sample_app.route('/cors', methods=['GET'], cors=custom_cors)
        def cors():
            pass

        same_custom_cors = CORSConfig(
            allow_origin='https://foo.example.com',
            allow_headers=['X-Special-Header'],
            max_age=600,
            expose_headers=['X-Special-Header'],
            allow_credentials=True
        )

        @sample_app.route('/cors', methods=['PUT'], cors=same_custom_cors)
        def same_cors():
            pass

        try:
            validate_routes(sample_app.routes)
        except ValueError:
            pytest.fail(
                'A ValueError was unexpectedly thrown. Applications '
                'may have multiple view functions that share the same '
                'route and CORS configuration.'
            )

    def test_can_have_one_cors_configured_and_others_not(self, sample_app):
        # A CORS-enabled view may share a route with a view that does not
        # configure CORS; the closing parenthesis of the final
        # pytest.fail() is on the next source line.
        @sample_app.route('/cors', methods=['GET'], cors=True)
        def cors():
            pass

        @sample_app.route('/cors', methods=['PUT'])
        def no_cors():
            pass

        try:
            validate_routes(sample_app.routes)
        except ValueError:
            pytest.fail(
                'A ValueError was unexpectedly thrown. Applications '
                'may have multiple view functions that share the same '
                'route but only one is configured for CORS.'
) def test_cant_have_mixed_content_types(sample_app): @sample_app.route('/index', content_types=['application/octet-stream', 'text/plain']) def index(): return {'hello': 'world'} with pytest.raises(ValueError): validate_route_content_types(sample_app.routes, sample_app.api.binary_types) def test_can_validate_updated_custom_binary_types(sample_app): sample_app.api.binary_types.extend(['text/plain']) @sample_app.route('/index', content_types=['application/octet-stream', 'text/plain']) def index(): return {'hello': 'world'} assert validate_route_content_types(sample_app.routes, sample_app.api.binary_types) is None class TestAuthHandlersAreAuthorized(object): def tests_apigateway_adds_auth_handler_policy(self, sample_app_with_auth, ui): # When we create authorizers in API gateway, we also need to # give the authorizers permission to invoke the lambda functions # we've created. aws_client = mock.Mock(spec=TypedAWSClient, region_name='us-west-2') cfg = Config.create( chalice_stage='dev', app_name='myapp', chalice_app=sample_app_with_auth, manage_iam_role=False, iam_role_arn='role-arn', project_dir='.' ) d = APIGatewayDeployer(aws_client, ui) deployed_resources = { 'api_handler_arn': ( 'arn:aws:lambda:us-west-2:1:function:myapp-dev' ), 'api_handler_name': 'myapp-dev', 'lambda_functions': { 'myapp-dev-myauth': {'arn': 'myauth:arn', 'type': 'authorizer'}, }, } aws_client.import_rest_api.return_value = 'rest-api-id' d.deploy(cfg, None, deployed_resources) # We should have add permission for the authorizer to invoke # the auth lambda function. 
aws_client.add_permission_for_authorizer.assert_called_with( 'rest-api-id', 'myauth:arn', mock.ANY) class TestLambdaInitialDeploymentWithConfigurations(object): @fixture(autouse=True) def setup_deployer_dependencies(self, app_policy, ui): # This autouse fixture is used instead of ``setup_method`` because it: # * Is ran for every test # * Allows additional fixtures to be passed in to reduce the number # of fixtures that need to be supplied for the test methods. # * ``setup_method`` is called before fixtures get applied so # they cannot be applied to the ``setup_method`` or you will # will get a TypeError for too few arguments. self.package_name = 'packages.zip' self.package_contents = b'package contents' self.lambda_arn = 'lambda-arn' self.osutils = InMemoryOSUtils( {self.package_name: self.package_contents}) self.aws_client = mock.Mock(spec=TypedAWSClient) self.aws_client.create_function.side_effect = [self.lambda_arn] # Return a python Runtime that will never match our local runtime so # the deployment package is not reused. self.aws_client.get_function_configuration.return_value = { 'Runtime': 'FakePythonVersion' } self.packager = mock.Mock(spec=LambdaDeploymentPackager) self.packager.create_deployment_package.return_value =\ self.package_name self.packager.deployment_package_filename.return_value =\ self.package_name self.app_policy = app_policy self.ui = ui def create_config_obj(self, sample_app): cfg = Config.create( chalice_stage='dev', app_name='myapp', chalice_app=sample_app, manage_iam_role=False, iam_role_arn='role-arn', project_dir='.' 
# NOTE(review): newlines were collapsed during extraction; the token stream
# below is unchanged, only re-flowed with review comments added.  The
# opening parenthesis and return complete the create_config_obj helper
# begun on the previous source line, and the final assertion's remaining
# keyword arguments continue on the next source line.
        )
        return cfg

    def test_can_create_auth_handlers(self, sample_app_with_auth):
        # The app's authorizer is deployed as its own lambda function; the
        # asserted name here is 'myapp-dev-myauth'.
        config = self.create_config_obj(sample_app_with_auth)
        deployer = LambdaDeployer(
            self.aws_client, self.packager, self.ui, self.osutils,
            self.app_policy)
        self.aws_client.lambda_function_exists.return_value = False
        self.aws_client.create_function.side_effect = [
            self.lambda_arn, 'arn:auth-function']
        deployed = deployer.deploy(config, None, stage_name='dev')
        assert 'lambda_functions' in deployed
        assert deployed['lambda_functions'] == {
            'myapp-dev-myauth': {'arn': 'arn:auth-function',
                                 'type': 'authorizer'}
        }
        self.aws_client.create_function.assert_called_with(
            environment_variables={},
            function_name='myapp-dev-myauth',
            handler='app.myauth',
            memory_size=constants.DEFAULT_LAMBDA_MEMORY_SIZE,
            role_arn='role-arn',
            # The python runtime versions are tested elsewhere.
            runtime=mock.ANY,
            # The tag format is tested elsewhere.
            tags=mock.ANY,
            timeout=constants.DEFAULT_LAMBDA_TIMEOUT,
            zip_contents=b'package contents',
        )

    def test_can_create_scheduled_events(self, sample_app):
        # A scheduled function deploys as 'myapp-dev-foo' with type
        # 'scheduled_event'; the create_function assertion's remaining
        # keyword arguments continue on the next source line.
        @sample_app.schedule('rate(1 hour)')
        def foo(event):
            pass

        config = self.create_config_obj(sample_app)
        deployer = LambdaDeployer(
            self.aws_client, self.packager, self.ui, self.osutils,
            self.app_policy)
        self.aws_client.lambda_function_exists.return_value = False
        self.aws_client.get_or_create_rule_arn.return_value = 'rule-arn'
        self.aws_client.create_function.side_effect = [
            self.lambda_arn, 'arn:event-function']
        deployed = deployer.deploy(config, None, stage_name='dev')
        assert 'lambda_functions' in deployed
        assert deployed['lambda_functions'] == {
            'myapp-dev-foo': {'arn': 'arn:event-function',
                              'type': 'scheduled_event'}
        }
        self.aws_client.create_function.assert_called_with(
            environment_variables={},
            function_name='myapp-dev-foo',
            handler='app.foo',
            memory_size=constants.DEFAULT_LAMBDA_MEMORY_SIZE,
            role_arn='role-arn',
            # The python runtime versions are tested elsewhere.
tags=mock.ANY, timeout=constants.DEFAULT_LAMBDA_TIMEOUT, zip_contents=b'package contents', ) self.aws_client.get_or_create_rule_arn.assert_called_with( 'myapp-dev-foo', 'rate(1 hour)') self.aws_client.connect_rule_to_lambda.assert_called_with( 'myapp-dev-foo', 'arn:event-function') self.aws_client.add_permission_for_scheduled_event.assert_called_with( 'rule-arn', 'arn:event-function') def test_can_create_scheduled_events_with_obj(self, sample_app): @sample_app.schedule(Rate(value=1, unit=Rate.HOURS)) def foo(event): pass config = self.create_config_obj(sample_app) self.aws_client.lambda_function_exists.return_value = False self.aws_client.get_or_create_rule_arn.return_value = 'rule-arn' self.aws_client.create_function.side_effect = [ self.lambda_arn, 'arn:event-function'] deployer = LambdaDeployer( self.aws_client, self.packager, self.ui, self.osutils, self.app_policy) deployer.deploy(config, None, stage_name='dev') # For this test we just want to double check that the # Rate object was properly converted to a string value. self.aws_client.get_or_create_rule_arn.assert_called_with( 'myapp-dev-foo', 'rate(1 hour)') def test_can_create_pure_lambda_functions(self, sample_app): @sample_app.lambda_function() def foo(event, context): pass config = self.create_config_obj(sample_app) deployer = LambdaDeployer( self.aws_client, self.packager, self.ui, self.osutils, self.app_policy) self.aws_client.lambda_function_exists.return_value = False self.aws_client.create_function.side_effect = [ self.lambda_arn, 'arn:foo-function'] deployed = deployer.deploy(config, None, stage_name='dev') assert 'lambda_functions' in deployed assert deployed['lambda_functions'] == { 'myapp-dev-foo': {'arn': 'arn:foo-function', 'type': 'pure_lambda'} } self.aws_client.create_function.assert_called_with( environment_variables={}, function_name='myapp-dev-foo', handler='app.foo', memory_size=constants.DEFAULT_LAMBDA_MEMORY_SIZE, role_arn='role-arn', # The python runtime versions are tested elsewhere. 
runtime=mock.ANY, # The tag format is tested elsewhere. tags=mock.ANY, timeout=constants.DEFAULT_LAMBDA_TIMEOUT, zip_contents=b'package contents', ) def test_can_update_auth_handlers(self, sample_app_with_auth): config = self.create_config_obj(sample_app_with_auth) deployer = LambdaDeployer( self.aws_client, self.packager, self.ui, self.osutils, self.app_policy) self.aws_client.lambda_function_exists.return_value = True self.aws_client.update_function.return_value = { 'FunctionArn': 'arn:auth-function' } deployed = deployer.deploy(config, None, stage_name='dev') assert 'lambda_functions' in deployed assert deployed['lambda_functions'] == { 'myapp-dev-myauth': {'arn': 'arn:auth-function', 'type': 'authorizer'} } self.aws_client.update_function.assert_called_with( environment_variables={}, function_name='myapp-dev-myauth', memory_size=constants.DEFAULT_LAMBDA_MEMORY_SIZE, role_arn='role-arn', # The python runtime versions are tested elsewhere. runtime=mock.ANY, # The tag format is tested elsewhere. tags=mock.ANY, timeout=constants.DEFAULT_LAMBDA_TIMEOUT, zip_contents=b'package contents', ) def test_can_create_auth_with_different_config(self, sample_app_with_auth): # We're not using create_config_obj because we want to approximate # loading config from disk which contains per-lambda configuration. 
        # (continuation of a test method that begins above this chunk)
        # On-disk config: stage-level defaults plus a per-function override
        # for the 'myauth' authorizer.
        disk_config = {
            'app_name': 'myapp',
            'iam_role_arn': 'role-arn',
            'manage_iam_role': False,
            'stages': {
                'dev': {
                    'lambda_timeout': 10,
                    'lambda_memory_size': 128,
                    'lambda_functions': {
                        'myauth': {
                            'lambda_timeout': 20,
                            'lambda_memory_size': 512,
                        }
                    }
                }
            }
        }
        config = Config(
            'dev', config_from_disk=disk_config,
            user_provided_params={'chalice_app': sample_app_with_auth,
                                  'project_dir': '.'}
        )
        deployer = LambdaDeployer(
            self.aws_client, self.packager, self.ui, self.osutils,
            self.app_policy)
        # No functions exist yet, so deploy() must create both the API
        # handler and the authorizer function.
        self.aws_client.lambda_function_exists.return_value = False
        self.aws_client.create_function.side_effect = [
            self.lambda_arn, 'arn:auth-function']
        deployer.deploy(config, None, stage_name='dev')
        create_function_calls = self.aws_client.create_function.call_args_list
        assert create_function_calls == [
            mock.call(
                environment_variables={},
                function_name='myapp-dev',
                handler='app.app',
                role_arn='role-arn',
                runtime=mock.ANY,
                tags=mock.ANY,
                zip_contents=b'package contents',
                # These come from the stage level config above.
                timeout=10,
                memory_size=128,
            ),
            mock.call(
                environment_variables={},
                function_name='myapp-dev-myauth',
                handler='app.myauth',
                role_arn='role-arn',
                runtime=mock.ANY,
                tags=mock.ANY,
                zip_contents=b'package contents',
                # These come from the 'lambda_functions.myauth' section
                # in the config above.
                timeout=20,
                memory_size=512,
            )
        ]

    # A previously-deployed function that is no longer referenced by the
    # app must be deleted on redeploy.
    def test_unreferenced_functions_are_deleted(self, sample_app_with_auth):
        # Existing resources is the set of resources that have
        # *previously* been deployed.
        existing_lambda_functions = {
            'old-function': 'arn:not-referenced-anymore',
        }
        existing = DeployedResources(
            'api', 'api-handler-arn', 'api-handler-name',
            'existing-id', 'dev', None, None,
            existing_lambda_functions)
        self.aws_client.lambda_function_exists.return_value = True
        self.aws_client.update_function.return_value = {
            'FunctionArn': 'arn:new-auth-function'
        }
        config = self.create_config_obj(sample_app_with_auth)
        self.aws_client.get_function_configuration.return_value = {
            'Runtime': config.lambda_python_version,
        }
        deployer = LambdaDeployer(
            self.aws_client, self.packager, self.ui, self.osutils,
            self.app_policy)
        deployed = deployer.deploy(config, existing, stage_name='dev')
        # Because the "old-function" was not referenced in the update
        # function calls, we should expect that it was deleted.
        self.aws_client.delete_function.assert_called_with(
            'arn:not-referenced-anymore')
        # And the old-arn is not in the deployed resources
        assert deployed['lambda_functions'] == {
            'api-handler-name-myauth': {
                'arn': 'arn:new-auth-function', 'type': 'authorizer'
            }
        }

    # With no explicit config, create_function gets the documented
    # defaults: timeout=60, memory_size=128, empty env vars.
    def test_lambda_deployer_defaults(self, sample_app):
        cfg = self.create_config_obj(sample_app)
        deployer = LambdaDeployer(
            self.aws_client, self.packager, self.ui, self.osutils,
            self.app_policy)
        deployer.deploy(cfg, None, 'dev')
        self.aws_client.create_function.assert_called_with(
            function_name='myapp-dev',
            role_arn='role-arn',
            zip_contents=b'package contents',
            runtime=cfg.lambda_python_version,
            tags={
                'aws-chalice': 'version=%s:stage=dev:app=myapp' % (
                    chalice_version)
            },
            environment_variables={},
            handler='app.app',
            timeout=60,
            memory_size=128
        )

    # environment_variables from config must be forwarded verbatim.
    def test_lambda_deployer_with_environment_vars(self, sample_app):
        cfg = Config.create(
            chalice_stage='dev',
            app_name='myapp',
            chalice_app=sample_app,
            manage_iam_role=False,
            iam_role_arn='role-arn',
            project_dir='.',
            environment_variables={'FOO': 'BAR'}
        )
        deployer = LambdaDeployer(
            self.aws_client, self.packager, self.ui, self.osutils,
            self.app_policy)
        deployer.deploy(cfg, None, 'dev')
        self.aws_client.create_function.assert_called_with(
            function_name='myapp-dev',
            role_arn='role-arn',
            zip_contents=b'package contents',
            runtime=cfg.lambda_python_version,
            tags={
                'aws-chalice': 'version=%s:stage=dev:app=myapp' % (
                    chalice_version)
            },
            environment_variables={'FOO': 'BAR'},
            handler='app.app',
            timeout=60,
            memory_size=128
        )

    # lambda_timeout from config overrides the 60s default.
    def test_lambda_deployer_with_timeout_configured(self, sample_app):
        cfg = Config.create(
            chalice_stage='dev',
            app_name='myapp',
            chalice_app=sample_app,
            manage_iam_role=False,
            iam_role_arn='role-arn',
            project_dir='.',
            lambda_timeout=120
        )
        deployer = LambdaDeployer(
            self.aws_client, self.packager, self.ui, self.osutils,
            self.app_policy)
        deployer.deploy(cfg, None, 'dev')
        self.aws_client.create_function.assert_called_with(
            function_name='myapp-dev',
            role_arn='role-arn',
            zip_contents=b'package contents',
            runtime=cfg.lambda_python_version,
            tags={
                'aws-chalice': 'version=%s:stage=dev:app=myapp' % (
                    chalice_version)
            },
            environment_variables={},
            handler='app.app',
            timeout=120,
            memory_size=128
        )

    # lambda_memory_size from config overrides the 128MB default.
    def test_lambda_deployer_with_memory_size_configured(self, sample_app):
        cfg = Config.create(
            chalice_stage='dev',
            app_name='myapp',
            chalice_app=sample_app,
            manage_iam_role=False,
            iam_role_arn='role-arn',
            project_dir='.',
            lambda_memory_size=256
        )
        deployer = LambdaDeployer(
            self.aws_client, self.packager, self.ui, self.osutils,
            self.app_policy)
        deployer.deploy(cfg, None, 'dev')
        self.aws_client.create_function.assert_called_with(
            function_name='myapp-dev',
            role_arn='role-arn',
            zip_contents=b'package contents',
            runtime=cfg.lambda_python_version,
            tags={
                'aws-chalice': 'version=%s:stage=dev:app=myapp' % (
                    chalice_version)
            },
            environment_variables={},
            handler='app.app',
            timeout=60,
            memory_size=256
        )

    # User-supplied tags are merged with the implicit aws-chalice tag.
    def test_lambda_deployer_with_tags(self, sample_app):
        cfg = Config.create(
            chalice_stage='dev',
            app_name='myapp',
            chalice_app=sample_app,
            manage_iam_role=False,
            iam_role_arn='role-arn',
            project_dir='.',
            tags={'mykey': 'myvalue'}
        )
        deployer = LambdaDeployer(
            self.aws_client, self.packager, self.ui, self.osutils,
            self.app_policy)
        deployer.deploy(cfg, None, 'dev')
        self.aws_client.create_function.assert_called_with(
            function_name='myapp-dev',
            role_arn='role-arn',
            zip_contents=b'package contents',
            runtime=cfg.lambda_python_version,
            tags={
                'aws-chalice': 'version=%s:stage=dev:app=myapp' % (
                    chalice_version),
                'mykey': 'myvalue'
            },
            environment_variables={},
            handler='app.app',
            timeout=60,
            memory_size=128
        )


# Same configuration matrix as above, but exercising the *update* path
# (the function already exists, so update_function is called).
class TestLambdaUpdateDeploymentWithConfigurations(object):
    @fixture(autouse=True)
    def setup_deployer_dependencies(self, app_policy, ui):
        # This autouse fixture is used instead of ``setup_method`` because it:
        # * Is ran for every test
        # * Allows additional fixtures to be passed in to reduce the number
        #   of fixtures that need to be supplied for the test methods.
        # * ``setup_method`` is called before fixtures get applied so
        #   they cannot be applied to the ``setup_method`` or you will
        #   will get a TypeError for too few arguments.
        self.package_name = 'packages.zip'
        self.package_contents = b'package contents'
        self.lambda_arn = 'lambda-arn'
        self.lambda_function_name = 'lambda_function_name'
        self.osutils = InMemoryOSUtils(
            {self.package_name: self.package_contents})
        self.aws_client = mock.Mock(spec=TypedAWSClient)
        # Function already exists -> the deployer takes the update path.
        self.aws_client.lambda_function_exists.return_value = True
        self.aws_client.update_function.return_value = {
            'FunctionArn': self.lambda_arn}
        self.aws_client.get_function_configuration.return_value = {
            'Runtime': 'python2.7',
        }
        self.ui = ui
        self.ui.confirm.return_value = True
        self.packager = mock.Mock(spec=LambdaDeploymentPackager)
        self.packager.create_deployment_package.return_value =\
            self.package_name
        self.app_policy = app_policy
        self.deployed_resources = DeployedResources(
            'api', 'api_handler_arn', self.lambda_function_name,
            None, 'dev', None, None, {})

    # Update path with no explicit config: documented defaults.
    def test_lambda_deployer_defaults(self, sample_app):
        cfg = Config.create(
            chalice_stage='dev',
            app_name='myapp',
            chalice_app=sample_app,
            manage_iam_role=False,
            iam_role_arn='role-arn',
            project_dir='.',
        )
        deployer = LambdaDeployer(
            self.aws_client, self.packager, self.ui, self.osutils,
            self.app_policy)
        deployer.deploy(cfg, self.deployed_resources, 'dev')
        self.aws_client.update_function.assert_called_with(
            function_name=self.lambda_function_name,
            zip_contents=self.package_contents,
            runtime=cfg.lambda_python_version,
            tags={
                'aws-chalice': 'version=%s:stage=dev:app=myapp' % (
                    chalice_version),
            },
            environment_variables={},
            timeout=60,
            memory_size=128,
            role_arn='role-arn'
        )

    # Update path: env vars forwarded verbatim.
    def test_lambda_deployer_with_environment_vars(self, sample_app):
        cfg = Config.create(
            chalice_stage='dev',
            app_name='myapp',
            chalice_app=sample_app,
            manage_iam_role=False,
            iam_role_arn='role-arn',
            project_dir='.',
            environment_variables={'FOO': 'BAR'}
        )
        deployer = LambdaDeployer(
            self.aws_client, self.packager, self.ui, self.osutils,
            self.app_policy)
        deployer.deploy(cfg, self.deployed_resources, 'dev')
        self.aws_client.update_function.assert_called_with(
            function_name=self.lambda_function_name,
            zip_contents=self.package_contents,
            runtime=cfg.lambda_python_version,
            tags={
                'aws-chalice': 'version=%s:stage=dev:app=myapp' % (
                    chalice_version),
            },
            environment_variables={'FOO': 'BAR'},
            timeout=60,
            memory_size=128,
            role_arn='role-arn'
        )

    # Update path: configured timeout overrides the default.
    def test_lambda_deployer_with_timeout_configured(self, sample_app):
        cfg = Config.create(
            chalice_stage='dev',
            app_name='myapp',
            chalice_app=sample_app,
            manage_iam_role=False,
            iam_role_arn='role-arn',
            project_dir='.',
            lambda_timeout=120
        )
        deployer = LambdaDeployer(
            self.aws_client, self.packager, self.ui, self.osutils,
            self.app_policy)
        deployer.deploy(cfg, self.deployed_resources, 'dev')
        self.aws_client.update_function.assert_called_with(
            function_name=self.lambda_function_name,
            zip_contents=self.package_contents,
            runtime=cfg.lambda_python_version,
            tags={
                'aws-chalice': 'version=%s:stage=dev:app=myapp' % (
                    chalice_version),
            },
            environment_variables={},
            timeout=120,
            memory_size=128,
            role_arn='role-arn'
        )

    # Update path: configured memory size overrides the default.
    def test_lambda_deployer_with_memory_size_configured(self, sample_app):
        cfg = Config.create(
            chalice_stage='dev',
            app_name='myapp',
            chalice_app=sample_app,
            manage_iam_role=False,
            iam_role_arn='role-arn',
            project_dir='.',
            lambda_memory_size=256
        )
        deployer = LambdaDeployer(
            self.aws_client, self.packager, self.ui, self.osutils,
            self.app_policy)
        deployer.deploy(cfg, self.deployed_resources, 'dev')
        self.aws_client.update_function.assert_called_with(
            function_name=self.lambda_function_name,
            zip_contents=self.package_contents,
            runtime=cfg.lambda_python_version,
            tags={
                'aws-chalice': 'version=%s:stage=dev:app=myapp' % (
                    chalice_version),
            },
            environment_variables={},
            timeout=60,
            memory_size=256,
            role_arn='role-arn'
        )

    # Update path: user tags merged with the implicit aws-chalice tag.
    def test_lambda_deployer_with_tags(self, sample_app):
        cfg = Config.create(
            chalice_stage='dev',
            app_name='myapp',
            chalice_app=sample_app,
            manage_iam_role=False,
            iam_role_arn='role-arn',
            project_dir='.',
            tags={'mykey': 'myvalue'}
        )
        deployer = LambdaDeployer(
            self.aws_client, self.packager, self.ui, self.osutils,
            self.app_policy)
        deployer.deploy(cfg, self.deployed_resources, 'dev')
        self.aws_client.update_function.assert_called_with(
            function_name=self.lambda_function_name,
            zip_contents=self.package_contents,
            runtime=cfg.lambda_python_version,
            tags={
                'aws-chalice': 'version=%s:stage=dev:app=myapp' % (
                    chalice_version),
                'mykey': 'myvalue'
            },
            environment_variables={},
            timeout=60,
            memory_size=128,
            role_arn='role-arn'
        )

    # When chalice manages the IAM role, the role policy must be written
    # exactly once per deploy.
    def test_update_lambda_updates_role_once(self, sample_app):
        app_policy = mock.Mock(spec=ApplicationPolicyHandler)
        app_policy.generate_policy_from_app_source.return_value = {
            'Version': '2012-10-17',
            'Statement': []
        }
        app_policy.load_last_policy.return_value = {
            'Version': '2012-10-17',
            'Statement': []
        }
        cfg = Config.create(
            chalice_stage='dev',
            app_name='myapp',
            chalice_app=sample_app,
            manage_iam_role=True,
            iam_role_arn='role-arn',
            project_dir='.',
            tags={'mykey': 'myvalue'}
        )
        deployer = LambdaDeployer(
            self.aws_client, self.packager, self.ui,
            self.osutils, app_policy)
        self.aws_client.get_role_arn_for_name.return_value = 'role-arn'
        deployer.deploy(cfg, self.deployed_resources, 'dev')
        self.aws_client.update_function.assert_called_with(
            function_name=self.lambda_function_name,
            zip_contents=self.package_contents,
            runtime=cfg.lambda_python_version,
            tags={
                'aws-chalice': 'version=%s:stage=dev:app=myapp' % (
                    chalice_version),
                'mykey': 'myvalue'
            },
            environment_variables={},
            timeout=60,
            memory_size=128,
            role_arn='role-arn'
        )
        self.aws_client.put_role_policy.assert_called_with(
            policy_document={'Version': '2012-10-17', 'Statement': []},
            policy_name='lambda_function_name',
            role_name='lambda_function_name'
        )
        assert self.aws_client.put_role_policy.call_count == 1
from vnpy.app.cta_strategy import (
    CtaTemplate,
    StopOrder,
    TickData,
    BarData,
    TradeData,
    OrderData,
    BarGenerator,
    ArrayManager,
)
from datetime import datetime as dt
from loguru import logger
import numpy as np


class MStragety1(CtaTemplate):
    """Intraday (day-session only) CTA strategy.

    Entry: a local-extreme "dip/peak" pattern on the last three bars,
    filtered by a 20-period EMA trend condition.
    Exit: fixed take-profit (+10) / stop-loss (-5) for longs, plus a
    time-stop 2 bars after the entry signal; all positions are flattened
    near the end of the day session.
    """

    author = "mc"

    fast_window = 10
    slow_window = 20

    # Declared strategy variables (shown in the vnpy UI); the MA values
    # below are not actually updated by on_bar in this implementation.
    fast_ma0 = 0.0
    fast_ma1 = 0.0
    slow_ma0 = 0.0
    slow_ma1 = 0.0
    count = 0                   # number of bars seen since start
    signal_count = 99999999     # bar index of the most recent entry signal
    last_trade_price = 0        # close price at the most recent entry

    parameters = ["fast_window", "slow_window"]
    variables = ["fast_ma0", "fast_ma1", "slow_ma0", "slow_ma1"]

    def __init__(self, cta_engine, strategy_name, vt_symbol, setting):
        """"""
        super().__init__(cta_engine, strategy_name, vt_symbol, setting)
        self.bg = BarGenerator(self.on_bar)
        self.am = ArrayManager()
        self.close_time = "1450"  # flatten positions after 14:50 (day session end)
        self.open_time = "0900"   # start trading at 09:00; day session only

    def trade_status(self, bar):
        """Classify the bar's time of day: 'end' (past close), 'start'
        (inside the trading window), or 'hold' (before the open, e.g.
        night-session bars)."""
        # NOTE: string comparison on "HHMM" works because the format is
        # fixed-width zero-padded.
        if bar.datetime.time().strftime("%H%M") > self.close_time:
            return "end"
        elif bar.datetime.time().strftime("%H%M") > self.open_time:
            return "start"
        else:
            return "hold"

    def on_init(self):
        """
        Callback when strategy is inited.
        """
        self.write_log("策略初始化")
        self.load_bar(10)

    def on_start(self):
        """
        Callback when strategy is started.
        """
        self.write_log("策略启动")
        self.put_event()

    def on_stop(self):
        """
        Callback when strategy is stopped.
        """
        self.write_log("策略停止")
        self.put_event()

    def on_tick(self, tick: TickData):
        """
        Callback of new tick data update.
        """
        self.bg.update_tick(tick)

    def on_bar(self, bar: BarData):
        """
        Callback of new bar data update.
        """
        self.count += 1
        am = self.am
        am.update_bar(bar)
        if not am.inited:
            return

        trade_status = self.trade_status(bar)
        # End of day: flatten longs.
        # NOTE(review): only self.pos > 0 is handled here, so a short
        # position held past close_time is never flattened — confirm
        # this is intended.
        if trade_status == "end" and self.pos>0:
            self.cut(bar.close_price, self.pos)
        if trade_status == "hold":
            if self.pos != 0:
                # Position carried into the night session — warn and flatten.
                logger.warning("夜盘持仓,仓位为{}".format(self.pos))
                self.cut(bar.close_price, self.pos)
        if trade_status == "start":
            high = am.high
            low = am.low
            # bar[-2] relative to its neighbours: negative diffs mean the
            # middle bar is a local dip; positive diffs a local peak.
            low_diff1 = low[-2] - low[-3]
            low_diff2 = low[-2] - low[-1]
            high_diff1 = high[-2] - high[-3]
            high_diff2 = high[-2] - high[-1]
            ema_20 = am.ema(20, array=True)[-1]
            # Long: local dip pattern with price above the 20-EMA.
            long_signal = low_diff1 < 0 and low_diff2 < 0 and high_diff1 < 0 and high_diff2 < 0 and bar.close_price > ema_20
            # Short: local peak pattern with price below the 20-EMA.
            short_signal = low_diff1 > 0 and low_diff2 > 0 and high_diff1 > 0 and high_diff2 > 0 and bar.close_price < ema_20
            if long_signal:
                self.signal_count = self.count
                if self.pos == 0:
                    self.buy(bar.close_price, 1)
                elif self.pos < 0:
                    # Reverse: cover the short, then open a long.
                    self.cover(bar.close_price, self.pos)
                    self.buy(bar.close_price, 1)
                self.last_trade_price = bar.close_price
            elif short_signal:
                self.signal_count = self.count
                if self.pos == 0:
                    self.short(bar.close_price, 1)
                elif self.pos > 0:
                    # Reverse: sell the long, then open a short.
                    self.sell(bar.close_price, self.pos)
                    self.short(bar.close_price, 1)
                self.last_trade_price = bar.close_price
            else:
                # No fresh signal: manage the open position.
                # NOTE(review): self.cut is not a CtaTemplate base method —
                # presumably a project-specific helper that closes the
                # position; confirm its semantics (esp. sign of self.pos
                # when short).
                if self.pos > 0:
                    ret = bar.close_price - self.last_trade_price
                    if ret >= 10:
                        # Take profit at +10 points.
                        self.cut(bar.close_price, self.pos)
                    elif ret <= -5:
                        # Stop loss at -5 points.
                        self.cut(bar.close_price, self.pos)
                # Time stop: no profit within 2 bars of the signal.
                if self.count - self.signal_count >= 2:
                    if bar.close_price <= self.last_trade_price:
                        self.cut(bar.close_price, self.pos)
        self.put_event()

    def on_order(self, order: OrderData):
        """
        Callback of new order data update.
        """
        pass

    def on_trade(self, trade: TradeData):
        """
        Callback of new trade data update.
        """
        self.put_event()

    def on_stop_order(self, stop_order: StopOrder):
        """
        Callback of stop order update.
        """
        pass
"""Poisson image editing. """ import numpy as np import os import cv2 import scipy.sparse import pickle from scipy.sparse.linalg import spsolve from os import path def laplacian_matrix(n, m): """Generate the Poisson matrix. Refer to: https://en.wikipedia.org/wiki/Discrete_Poisson_equation Note: it's the transpose of the wiki's matrix """ mat_D = scipy.sparse.lil_matrix((m, m)) mat_D.setdiag(-1, -1) mat_D.setdiag(4) mat_D.setdiag(-1, 1) mat_A = scipy.sparse.block_diag([mat_D] * n).tolil() mat_A.setdiag(-1, 1*m) mat_A.setdiag(-1, -1*m) return mat_A def poisson_edit(source, target, mask): """The poisson blending function. Refer to: Perez et. al., "Poisson Image Editing", 2003. """ # Assume: # target is not smaller than source. # shape of mask is same as shape of target. y_max, x_max = target.shape[:-1] y_min, x_min = 0, 0 x_range = x_max - x_min y_range = y_max - y_min mask = mask[y_min:y_max, x_min:x_max] mask[mask != 0] = 1 mat_A = laplacian_matrix(y_range, x_range) laplacian = mat_A.tocoo() # set the region outside the mask to identity for y in range(1, y_range - 1): for x in range(1, x_range - 1): if mask[y, x] == 0: k = x + y * x_range mat_A[k, k] = 1 mat_A[k, k + 1] = 0 mat_A[k, k - 1] = 0 mat_A[k, k + x_range] = 0 mat_A[k, k - x_range] = 0 mat_A = mat_A.tocoo() mask_flat = mask.flatten() for channel in range(source.shape[2]): source_flat = source[y_min:y_max, x_min:x_max, channel].flatten() target_flat = target[y_min:y_max, x_min:x_max, channel].flatten() # inside the mask: # \Delta f = div v = \Delta g alpha = 1 mat_b = laplacian.dot(source_flat)*alpha # outside the mask: # f = t mat_b[mask_flat==0] = target_flat[mask_flat==0] x = spsolve(mat_A, mat_b) x = x.reshape((y_range, x_range)) x[x > 255] = 255 x[x < 0] = 0 x = x.astype('uint8') target[y_min:y_max, x_min:x_max, channel] = x return target def main(): before_dir = '../Datasets_Makeup/before_aligned_600' test_imgs_tmp = '../Datasets_Makeup/test_imgs_tmp' poisson_res_dir = 
'../Datasets_Makeup/test_imgs_poisson' api_landmarks = pickle.load(open('../Datasets_Makeup/landmark_aligned_600.pk', 'rb')) for i, before_name in enumerate(os.listdir(before_dir)): lmks = api_landmarks['before_aligned_600/' + before_name].astype(int) before_img = cv2.imread(before_dir + '/' + before_name) prefix = before_name.split('.')[0] for target_name in os.listdir(test_imgs_tmp): source = cv2.imread(test_imgs_tmp + '/' + target_name + '/' + prefix + '_fake_after.png') target = cv2.imread(test_imgs_tmp + '/' + target_name + '/' + prefix + '_before.png') mask = cv2.imread(test_imgs_tmp + '/' + target_name + '/' + prefix + '_mask.png', cv2.IMREAD_GRAYSCALE) result = poisson_edit(source, target, mask) eye_area = [9, 10, 11, 19, 84, 29, 79, 28, 24, 73, 70, 75, 74, 13, 15, 14, 22] top_left = [min(lmks[eye_area, 0]), min(lmks[eye_area, 1])] top_right = [max(lmks[eye_area, 0]), max(lmks[eye_area, 1])] before_img[top_left[1]:top_right[1], top_left[0]:top_right[0], :] = result os.makedirs(path.join(poisson_res_dir, target_name), exist_ok=True) cv2.imwrite(path.join(poisson_res_dir, target_name, prefix + "_possion.png"), before_img) print('%d th image generated!' % (i)) if __name__ == '__main__': main()
# Copyright 2020 Google LLC, Stanislav Khrapov
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Client for the (unofficial) Garmin Connect web API.

Handles the Garmin SSO login dance, persists session state (cookies,
profile, preferences) via a refresh callback, and exposes helpers for
fetching daily stats / body composition and posting weight entries.
"""

from functools import wraps
import json
import logging
import re

import curlify
import requests

from shared.datastore.service import Service

URL_SSO_LOGIN = "https://sso.garmin.com/sso/signin"
URL_BASE = 'https://connect.garmin.com'
URL_MODERN = URL_BASE + '/modern'
URL_ACTIVITIES = URL_MODERN + '/proxy/usersummary-service/usersummary/daily/'
URL_HEARTRATES = URL_MODERN + '/proxy/wellness-service/wellness/dailyHeartRate/'
URL_BODY_COMPOSITION = URL_MODERN + '/proxy/weight-service/weight/daterangesnapshot'
URL_USER_WEIGHT = URL_BASE + '/proxy/weight-service/user-weight'

HEADERS = {
    'User-Agent': (
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
        + 'AppleWebKit/537.36 (KHTML, like Gecko) '
        + 'Chrome/79.0.3945.88 Safari/537.36'
    ),
    'origin': 'https://sso.garmin.com',
    'nk': 'NT',  # Needed for user-weight, for some reason.
}


class Error(Exception):
    """Base error for this module."""
    pass


class SessionExpiredError(Error):
    """Raised when Garmin rejects the session (login required again)."""
    pass


def create(service):
    """Build a Garmin client from a stored service entity.

    Restores any persisted session state; invalid state is discarded and
    a fresh login will happen lazily on first API call.
    """
    if not Service.has_credentials(service, required_key='password'):
        raise Exception('Cannot create Garmin client without creds: %s' % (service,))

    creds = service.get('credentials', {})
    session_state = creds.get('session_state', {})

    def refresh_callback(session_state):
        # Persist updated session state back to the datastore.
        logging.debug('Garmin creds refresh for: %s', service.key)
        Service.update_credentials(service, {'session_state': session_state})

    garmin = Garmin(
        creds['username'],
        Service.get_credentials_password(creds),
        refresh_callback=refresh_callback,
    )
    try:
        garmin.set_session_state(**session_state)
    except ValueError:
        logging.exception('Invalid session_state, ignoring')
        del creds['session_state']
    return garmin


def require_session(client_function):
    """Decorator: ensure a logged-in session; retry once after re-login."""
    @wraps(client_function)
    def check_session(*args, **kwargs):
        client_object = args[0]
        if not (client_object._session and client_object.profile):
            logging.debug('No session established. Logging in.')
            client_object.login()
        try:
            return client_function(*args, **kwargs)
        except SessionExpiredError:
            logging.debug('Retrying (once) after login.')
            client_object.login()
            return client_function(*args, **kwargs)
    return check_session


class Garmin(object):
    """Thin Garmin Connect API client bound to one user's credentials."""

    def __init__(self, username, password, refresh_callback=None):
        self._username = username
        self._password = password
        self._refresh_callback = refresh_callback
        self._session = None
        self._preferences = None
        self.profile = None

    def set_session_state(self, cookies=None, profile=None, preferences=None):
        """Install a previously saved session (all three parts) or reset.

        Raises:
            ValueError: if only a subset of the three parts is provided.
        """
        if cookies or profile or preferences:
            # BUGFIX: the original tested `None in (cookies and profile and
            # preferences)`, which evaluates the and-chain to a single
            # object and checks membership *inside* it (e.g. a dict's
            # keys), so partial state was never rejected. Test the tuple.
            if None in (cookies, profile, preferences):
                # BUGFIX: ValueError was built with logging-style extra
                # args; format the message properly instead.
                raise ValueError(
                    'Must pass all or nothing. '
                    'cookies: %s, profile: %s, preferences: %s'
                    % (cookies, profile, preferences)
                )
        self._session = requests.Session()
        self._session.headers.update(HEADERS)
        if cookies:
            self._session.cookies.update(cookies)
        self._preferences = preferences
        self.profile = profile

    def get_session_state(self):
        """Return a serializable snapshot of the session, or None."""
        if not self._session:
            return None
        return {
            'cookies': self._session.cookies.get_dict(),
            'preferences': self._preferences,
            'profile': self.profile,
        }

    def login(self):
        """(Re-)authenticate; always report final state via the callback."""
        logging.debug('Garmin Login')
        self.set_session_state()
        try:
            self._authenticate()
        except Exception as err:
            # Clear the session so a half-authenticated state is not kept.
            self.set_session_state()
            logging.debug('Clearing session and raising.')
            raise err
        finally:
            # Persist whatever state we ended up with (fresh or cleared).
            logging.debug('Finally calling refresh_callback.')
            if self._refresh_callback:
                self._refresh_callback(self.get_session_state())
        logging.debug('Login complete')

    def _authenticate(self):
        """Run the SSO form POST + ticket exchange; fill profile/prefs."""
        params = {
            # 'webhost': URL_BASE,
            'service': URL_MODERN,
            # 'source': URL_SSO_LOGIN,
            # 'redirectAfterAccountLoginUrl': URL_MODERN,
            # 'redirectAfterAccountCreationUrl': URL_MODERN,
            # 'gauthHost': URL_SSO_LOGIN,
            # 'locale': 'en_US',
            # 'id': 'gauth-widget',
            # 'cssUrl': 'https://static.garmincdn.com/com.garmin.connect/ui/css/gauth-custom-v1.2-min.css',
            # 'clientId': 'GarminConnect',
            # 'rememberMeShown': 'true',
            # 'rememberMeChecked': 'false',
            # 'createAccountShown': 'true',
            # 'openCreateAccount': 'false',
            # 'usernameShown': 'false',
            # 'displayNameShown': 'false',
            # 'consumeServiceTicket': 'false',
            # 'initialFocus': 'true',
            # 'embedWidget': 'false',
            # 'generateExtraServiceTicket': 'true',
        }
        data = {
            'username': self._username,
            'password': self._password,
            'embed': 'false',
            # 'lt': 'e1s1',
            # '_eventId': 'submit',
            # 'displayNameRequired': 'false',
        }
        login_response = self._session.post(URL_SSO_LOGIN, params=params, data=data)
        logging.debug('SSO Request: %s', curlify.to_curl(login_response.request))
        login_response.raise_for_status()

        auth_ticket_url = self._extract_auth_ticket_url(login_response.text)
        logging.debug("Extracted auth ticket url: %s", auth_ticket_url)

        auth_response = self._session.get(auth_ticket_url)
        logging.debug('Auth Request: %s', curlify.to_curl(auth_response.request))
        auth_response.raise_for_status()

        # There is auth info in here needed in order to fetch other services.
        self._preferences = self._extract_json(
            auth_response.text, 'VIEWER_USERPREFERENCES'
        )
        self.profile = self._extract_json(auth_response.text, 'SOCIAL_PROFILE')

    @staticmethod
    def _extract_json(html, key):
        """Find and return json data embedded in the page, or None."""
        found = re.search(key + r" = JSON.parse\(\"(.*)\"\);", html, re.M)
        if found:
            text = found.group(1).replace('\\"', '"')
            return json.loads(text)

    @staticmethod
    def _extract_auth_ticket_url(auth_response):
        """Extracts an authentication ticket URL from the response of an
        authentication form submission. The auth ticket URL is typically
        of form:

          https://connect.garmin.com/modern?ticket=ST-0123456-aBCDefgh1iJkLmN5opQ9R-cas

        :param auth_response: HTML response from an auth form submission.
        """
        match = re.search(r'response_url\s*=\s*"(https:[^"]+)"', auth_response)
        if not match:
            raise RuntimeError(
                "auth failure: unable to extract auth ticket URL. did you provide a correct username/password?"
            )
        auth_ticket_url = match.group(1).replace("\\", "")
        return auth_ticket_url

    @require_session
    def get_body_comp(self, start_date, end_date=None):  # 'YYY-mm-dd'
        """Fetch body composition between two ISO dates (inclusive)."""
        end_date = end_date if end_date else start_date
        url = URL_BODY_COMPOSITION + '?startDate=' + start_date + '&endDate=' + end_date
        return self._get(url)

    @require_session
    def get_stats(self, start_date):  # cDate = 'YYY-mm-dd'
        """Fetch the daily user summary for the given date."""
        url = (
            URL_ACTIVITIES
            + self.profile['displayName']
            + '?'
            + 'calendarDate='
            + start_date
        )
        return self._get(url)

    @require_session
    def set_weight(self, weight, weight_date):
        """Post a weight entry (kg) for a datetime (tz info is dropped)."""
        url = URL_USER_WEIGHT
        weight_date = weight_date.replace(tzinfo=None)
        return self._post(
            url,
            json={
                'value': weight,
                'unitKey': 'kg',
                'date': weight_date.date().isoformat(),
                'gmtTimestamp': weight_date.isoformat() + '.00',
            },
        )

    def _get(self, url):
        """GET with session-expiry detection; returns parsed JSON or None."""
        logging.debug('Fetching: %s', url)
        response = self._session.get(url)
        logging.info(
            'Response code %s, and json %s',
            response.status_code,
            response.text,
        )
        logging.debug('Request: %s', curlify.to_curl(response.request))
        if response.status_code == 403:
            raise SessionExpiredError('Login expired')
        elif response.status_code == 204:
            return None
        else:
            response.raise_for_status()

        # One last check: Its a weird behavior, this one...
        # Kind of like a 403, only not.
        if response.json().get('privacyProtected'):
            raise SessionExpiredError('Login expired')
        return response.json()

    def _post(self, url, json=None):
        """POST with session-expiry detection; always returns None.

        NOTE: the ``json`` parameter intentionally mirrors requests' kwarg
        name and shadows the module-level ``json`` import (unused here).
        """
        logging.debug('Posting: %s', url)
        response = self._session.post(url, json=json)
        logging.info('Response code %s, and %s', response.status_code, response.text)
        logging.debug('Request: %s', curlify.to_curl(response.request))
        if response.status_code == 403:
            raise SessionExpiredError('Login expired')
        elif response.status_code == 204:
            return None
        else:
            response.raise_for_status()
            return None
# -*- coding: utf-8 -*- """ Showcases reflectance recovery computations using *Jakob et al. (2019)* method. """ import numpy as np import colour from colour.utilities import message_box message_box('"Jakob et al. (2019)" - Reflectance Recovery Computations') illuminant = colour.SDS_ILLUMINANTS['D65'] XYZ = np.array([0.20654008, 0.12197225, 0.05136952]) message_box('Recovering reflectance using "Jakob et al. (2019)" method from ' 'given "XYZ" tristimulus values:\n' '\n\tXYZ: {0}'.format(XYZ)) sd = colour.XYZ_to_sd(XYZ, method='Jakob 2019') print(sd) print(colour.recovery.XYZ_to_sd_Jakob2019(XYZ)) print(colour.sd_to_XYZ(sd, illuminant=illuminant) / 100) print('\n') message_box('Generating a LUT according to "Jakob et al. (2019)" method ' 'for "sRGB" colourspace:') LUT = colour.recovery.LUT3D_Jakob2019() LUT.generate(colour.models.RGB_COLOURSPACE_sRGB, size=5) RGB = np.array([0.70573936, 0.19248266, 0.22354169]) print(LUT.RGB_to_sd(RGB))
"""empty message Revision ID: cc3b0f0860d5 Revises: 5871655bb18f Create Date: 2020-07-23 18:41:02.015852 """ from alembic import op import sqlalchemy as sa from sqlalchemy.dialects import mysql # revision identifiers, used by Alembic. revision = 'cc3b0f0860d5' down_revision = '5871655bb18f' branch_labels = None depends_on = None def upgrade(): # ### commands auto generated by Alembic - please adjust! ### op.alter_column('home_truths', 'created_at', existing_type=mysql.DATETIME(), nullable=True) op.alter_column('policy', 'created_at', existing_type=mysql.DATETIME(), nullable=True) # ### end Alembic commands ### def downgrade(): # ### commands auto generated by Alembic - please adjust! ### op.alter_column('policy', 'created_at', existing_type=mysql.DATETIME(), nullable=False) op.alter_column('home_truths', 'created_at', existing_type=mysql.DATETIME(), nullable=False) # ### end Alembic commands ###
# Objetos en Python por modulos # # Debido a que Pyside emplea solo objetos en su estructura profundizare un # poco en el empleo de objetos. Y ahora voy a partir el archivo en varios para # tener una especie de libreria de clases. # # Notas: # - Ver que el archivo Clases se llama como si fuera un modulo cualquiera # - Ver que para poder crear los objetos me toca usar la sintaxis # Modulo.Clase(args) import os import Clases # Borro la pantalla os.system('clear') # Creo mi objeto print "PROBANDO LA PRIMER CLASE: Calculadora\n" num1 = raw_input("Ingrese el primer numero: ") num2 = raw_input("Ingrese el segundo numero: ") calculo1 = Clases.Calculadora(num1,num2) # Realizo operaciones calculo1.suma() calculo1.resta() calculo1.multiplica() calculo1.divide() calculo1.potencia() calculo1.raiz() # Creo un objeto de constantes print "\n\nPROBANDO LA SEGUNDA CLASE: Constantes\n" const1 = Clases.Constantes() print const1.pi print const1.e # Creo un objeto de la clase Calculadora2 que hereda de Calculadora1 todos los metodos print "\n\nPROBANDO LA TERCER CLASE: Calculadora2\n" num1 = raw_input("Ingrese el primer numero: ") num2 = raw_input("Ingrese el segundo numero: ") calculodecolor1 = Clases.Calculadora2("calculodecolor1", num1, num2, "verde") calculodecolor1.presentarcolor() calculodecolor1.suma() raw_input("\nPresione Enter para salir...")
''' Get the highest answer rate question from a table survey_log with these columns: uid, action, question_id, answer_id, q_num, timestamp. uid means user id; action has these kind of values: "show", "answer", "skip"; answer_id is not null when action column is "answer", while is null for "show" and "skip"; q_num is the numeral order of the question in current session. Write a sql query to identify the question which has the highest answer rate. Example: Input: +------+-----------+--------------+------------+-----------+------------+ | uid | action | question_id | answer_id | q_num | timestamp | +------+-----------+--------------+------------+-----------+------------+ | 5 | show | 285 | null | 1 | 123 | | 5 | answer | 285 | 124124 | 1 | 124 | | 5 | show | 369 | null | 2 | 125 | | 5 | skip | 369 | null | 2 | 126 | +------+-----------+--------------+------------+-----------+------------+ Output: +-------------+ | survey_log | +-------------+ | 285 | +-------------+ Explanation: question 285 has answer rate 1/1, while question 369 has 0/1 answer rate, so output 285. Note: The highest answer rate meaning is: answer number's ratio in show number in the same question. ''' # Write your MySQL query statement below select question_id as survey_log from ( select question_id, sum(case when action="show" then 1 else 0 end) as num_show, sum(case when action="answer" then 1 else 0 end) as num_answer from survey_log group by question_id ) as tbl order by (num_answer / num_show) desc limit 1
''' BEGIN GPL LICENSE BLOCK This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. END GPL LICENSE BLOCK ''' from copy import deepcopy from math import degrees, radians, pi import bpy import bmesh import bgl import blf from mathutils import geometry, Euler, Quaternion, Vector from bpy_extras import view3d_utils from bpy_extras.view3d_utils import location_3d_to_region_2d as loc3d_to_reg2d from bpy_extras.view3d_utils import region_2d_to_vector_3d as reg2d_to_vec3d from bpy_extras.view3d_utils import region_2d_to_location_3d as reg2d_to_loc3d from bpy_extras.view3d_utils import region_2d_to_origin_3d as reg2d_to_org3d # "Constant" values ( X, Y, Z, CLICK_CHECK, WAIT_FOR_POPUP, GET_0_OR_180, DO_TRANSFORM, MOVE, SCALE, ROTATE, ) = range(10) # globals curr_meas_stor = 0.0 new_meas_stor = None popup_active = False #print("Loaded add-on.\n") # debug class Colr: red = 1.0, 0.0, 0.0, 0.6 green = 0.0, 1.0, 0.0, 0.6 blue = 0.0, 0.0, 1.0, 0.6 white = 1.0, 1.0, 1.0, 1.0 grey = 1.0, 1.0, 1.0, 0.4 black = 0.0, 0.0, 0.0, 1.0 yellow = 1.0, 1.0, 0.0, 0.6 brown = 0.15, 0.15, 0.15, 0.20 class RotDat: placeholder = True # Refreshes mesh drawing in 3D view and updates mesh coordinate # data so ref_pts are drawn at correct locations. 
# Using editmode_toggle to do this seems hackish, but editmode_toggle seems # to be the only thing that updates both drawing and coordinate info. def editmode_refresh(): if bpy.context.mode == "EDIT_MESH": bpy.ops.object.editmode_toggle() bpy.ops.object.editmode_toggle() def backup_blender_settings(): backup = [ deepcopy(bpy.context.tool_settings.use_snap), deepcopy(bpy.context.tool_settings.snap_element), deepcopy(bpy.context.tool_settings.snap_target), deepcopy(bpy.context.space_data.pivot_point), deepcopy(bpy.context.space_data.transform_orientation), deepcopy(bpy.context.space_data.show_manipulator), deepcopy(bpy.context.scene.cursor.location)] return backup def init_blender_settings(): bpy.context.tool_settings.use_snap = False bpy.context.tool_settings.snap_element = 'VERTEX' bpy.context.tool_settings.snap_target = 'CLOSEST' bpy.context.space_data.pivot_point = 'ACTIVE_ELEMENT' bpy.context.space_data.transform_orientation = 'GLOBAL' bpy.context.space_data.show_manipulator = False return def restore_blender_settings(backup): bpy.context.tool_settings.use_snap = deepcopy(backup[0]) bpy.context.tool_settings.snap_element = deepcopy(backup[1]) bpy.context.tool_settings.snap_target = deepcopy(backup[2]) bpy.context.space_data.pivot_point = deepcopy(backup[3]) bpy.context.space_data.transform_orientation = deepcopy(backup[4]) bpy.context.space_data.show_manipulator = deepcopy(backup[5]) bpy.context.scene.cursor.location = deepcopy(backup[6]) return def flts_alm_eq(flt_a, flt_b): tol = 0.0001 return flt_a > (flt_b - tol) and flt_a < (flt_b + tol) # todo : replace with flt_lists_alm_eq? def vec3s_alm_eq(vec_a, vec_b): X, Y, Z = 0, 1, 2 if flts_alm_eq(vec_a[X], vec_b[X]): if flts_alm_eq(vec_a[Y], vec_b[Y]): if flts_alm_eq(vec_a[Z], vec_b[Z]): return True return False # assume both float lists are same size? 
def flt_lists_alm_eq(ls_a, ls_b):
    """True when two equal-length float lists match element-wise within
    tolerance (see flts_alm_eq)."""
    for i in range(len(ls_a)):
        if not flts_alm_eq(ls_a[i], ls_b[i]):
            return False
    return True


class MenuStore:
    """Per-menu storage for the on-screen menu: item texts, colors,
    draw coordinates, and selection-arrow geometry."""
    def __init__(self):
        self.cnt = 0
        self.active = 0  # unused ?
        # todo : replace above with self.current ?
        self.txtcolrs = []
        self.tcoords = []
        self.texts = []
        self.arrows = []  # arrow coordinates


class MenuHandler:
    """Draws the title and the currently active menu (one MenuStore per
    ref-point count) in the 3D View, and tracks the active item."""
    def __init__(self, title, tsize, act_colr, dis_colr, toolwid, reg):
        self.dpi = bpy.context.preferences.system.dpi
        self.title = title
        # todo : better solution than None "magic numbers"
        self.menus = [None]  # no menu for 0
        self.menu_cnt = len(self.menus)
        self.current = 0  # current active menu
        self.tsize = tsize  # text size
        self.act_colr = act_colr
        self.dis_colr = dis_colr  # disabled color
        self.reg = reg  # region

        view_offset = 36, 45  # box left top start
        self.box_y_pad = 8  # vertical space between boxes

        fontid = 0
        blf.size(fontid, tsize, self.dpi)
        lcase_wid, lcase_hgt = blf.dimensions(fontid, "n")
        ucase_wid, ucase_hgt = blf.dimensions(fontid, "N")
        bot_space = blf.dimensions(fontid, "gp")[1] - lcase_hgt
        self.full_hgt = blf.dimensions(fontid, "NTgp")[1]

        # Triangle marker drawn next to the active menu item.
        arr_wid, arr_hgt = 12, 16
        arrow_base = (0, 0), (0, arr_hgt), (arr_wid, arr_hgt / 2)
        aw_adj, ah_adj = arr_wid * 1.5, (arr_hgt - ucase_hgt) / 2
        self.arrow_pts = []
        for a in arrow_base:
            self.arrow_pts.append((a[0] - aw_adj, a[1] - ah_adj))

        self.blef = view_offset[0] + toolwid  # box left start
        self.titlco = self.blef // 2, self.reg.height - view_offset[1]
        self.btop = self.titlco[1] - (self.full_hgt // 1.5)
        self.txt_y_pad = bot_space * 2

    def add_menu(self, strings):
        """Append a new menu whose items are the given strings."""
        self.menus.append(MenuStore())
        new = self.menus[-1]
        btop = self.btop
        tlef = self.blef  # text left
        new.cnt = len(strings)
        for i in range(new.cnt):
            new.txtcolrs.append(self.dis_colr)
            new.texts.append(strings[i])
            bbot = btop - self.full_hgt
            new.tcoords.append((tlef, bbot))
            btop = bbot - self.box_y_pad
            new.arrows.append((
                (self.arrow_pts[0][0] + tlef, self.arrow_pts[0][1] + bbot),
                (self.arrow_pts[1][0] + tlef, self.arrow_pts[1][1] + bbot),
                (self.arrow_pts[2][0] + tlef, self.arrow_pts[2][1] + bbot)))
        new.txtcolrs[new.active] = self.act_colr
        self.menu_cnt += 1

    def update_active(self, change):
        """Move the active-item highlight by `change` (wraps around)."""
        menu = self.menus[self.current]
        if menu is None:
            return
        menu.txtcolrs[menu.active] = self.dis_colr
        menu.active = (menu.active + change) % menu.cnt
        menu.txtcolrs[menu.active] = self.act_colr

    def change_menu(self, new):
        """Switch which menu (by index) is the active one."""
        self.current = new

    def get_mode(self):
        """Return the text of the active item in the current menu."""
        menu = self.menus[self.current]
        return menu.texts[menu.active]

    #def rebuild_menus(self)  # add in case blender window size changes?
    #    return

    def draw(self, menu_visible):
        """Draw the title and, when visible, the current menu and arrow."""
        menu = self.menus[self.current]
        # prepare to draw text
        font_id = 0
        blf.size(font_id, self.tsize, self.dpi)

        # draw title
        bgl.glColor4f(*self.dis_colr)
        blf.position(font_id, self.titlco[0], self.titlco[1], 0)
        blf.draw(font_id, self.title)

        # draw menu
        if menu_visible and menu is not None:
            for i in range(menu.cnt):
                bgl.glColor4f(*menu.txtcolrs[i])
                blf.position(font_id, menu.tcoords[i][0], menu.tcoords[i][1], 0)
                blf.draw(font_id, menu.texts[i])

            # draw arrow
            bgl.glEnable(bgl.GL_BLEND)
            bgl.glColor4f(*self.act_colr)
            bgl.glBegin(bgl.GL_LINE_LOOP)
            for p in menu.arrows[menu.active]:
                bgl.glVertex2f(*p)
            bgl.glEnd()


# === 3D View mouse location and button code ===

class ViewButton():
    """A clickable text button drawn with bgl/blf in the 3D View."""
    def __init__(self, colr_on, colr_off, txt_sz, txt_colr, offs=(0, 0)):
        self.dpi = bpy.context.preferences.system.dpi
        self.is_drawn = False
        self.ms_over = False  # mouse over button
        self.wid = 0
        self.coords = None
        #self.co_outside_btn = None
        self.co2d = None
        self.colr_off = colr_off  # colr when mouse not over button
        self.colr_on = colr_on  # colr when mouse over button
        self.txt = ""
        self.txt_sz = txt_sz
        self.txt_colr = txt_colr
        self.txt_co = None
        self.offset = Vector(offs)

        # Set button height and text offsets (to determine where text would
        # be placed within button). Done in __init__ as this will not change
        # during program execution and prevents having to recalculate these
        # values every time text is changed.
        font_id = 0
        blf.size(font_id, self.txt_sz, self.dpi)
        samp_txt_max = "Tgp"  # text with highest and lowest pixel values
        x, max_y = blf.dimensions(font_id, samp_txt_max)
        y = blf.dimensions(font_id, "T")[1]  # T = sample text
        y_diff = (max_y - y)
        self.hgt = int(max_y + (y_diff * 2))
        self.txt_x_offs = int(x / (len(samp_txt_max) * 2))
        self.txt_y_offs = int((self.hgt - y) / 2) + 1
        # added 1 to txt_y_offs to compensate for possible int rounding

    # replace text string and update button width
    def set_text(self, txt):
        font_id = 0
        self.txt = txt
        blf.size(font_id, self.txt_sz, self.dpi)
        w = blf.dimensions(font_id, txt)[0]  # get text width
        self.wid = w + (self.txt_x_offs * 2)
        return

    def set_btn_coor(self, co2d):
        """Compute box corners, text position, and the mouse hit-test
        bounds (ms_chk) from a 2D anchor coordinate."""
        #offs_2d = Vector((-self.wid / 2, 25))
        offs_2d = Vector((-self.wid / 2, 0))
        new2d = co2d + offs_2d

        # co_bl == coordinate bottom left, co_tr == coordinate top right
        co_bl = new2d[0], new2d[1]
        co_tl = new2d[0], new2d[1] + self.hgt
        co_tr = new2d[0] + self.wid, new2d[1] + self.hgt
        co_br = new2d[0] + self.wid, new2d[1]
        self.coords = co_bl, co_tl, co_tr, co_br
        self.txt_co = new2d[0] + self.txt_x_offs, new2d[1] + self.txt_y_offs
        self.ms_chk = co_bl[0], co_tr[0], co_bl[1], co_tr[1]

    def pt_inside_btn2(self, mouse_co):
        """True when mouse_co lies inside the last-computed button box.
        NOTE(review): assumes set_btn_coor ran first (draw_btn does this)."""
        mx, my = mouse_co[0], mouse_co[1]
        if mx < self.ms_chk[0] or mx > self.ms_chk[1]:
            return False
        if my < self.ms_chk[2] or my > self.ms_chk[3]:
            return False
        return True

    def draw_btn(self, btn_loc, mouse_co, highlight_mouse=False):
        """Draw the button anchored at btn_loc (2D) and update ms_over."""
        if btn_loc is not None:
            offs_loc = btn_loc + self.offset
            font_id = 0
            colr = self.colr_off
            self.set_btn_coor(offs_loc)
            if self.pt_inside_btn2(mouse_co):
                colr = self.colr_on
                self.ms_over = True
            else:
                self.ms_over = False

            # draw button box
            bgl.glColor4f(*colr)
            bgl.glBegin(bgl.GL_LINE_STRIP)
            for coord in self.coords:
                bgl.glVertex2f(coord[0], coord[1])
            bgl.glVertex2f(self.coords[0][0], self.coords[0][1])
            bgl.glEnd()

            # draw outline around button box
            if highlight_mouse and self.ms_over:
                bgl.glColor4f(*self.colr_off)
                HO = 4  # highlight_mouse offset
                offs = (-HO, -HO), (-HO, HO), (HO, HO), (HO, -HO)
                bgl.glBegin(bgl.GL_LINE_STRIP)
                for i, coord in enumerate(self.coords):
                    bgl.glVertex2f(coord[0] + offs[i][0], coord[1] + offs[i][1])
                bgl.glVertex2f(self.coords[0][0] + offs[0][0],
                        self.coords[0][1] + offs[0][1])
                bgl.glEnd()

            # draw button text
            bgl.glColor4f(*self.txt_colr)
            blf.size(font_id, self.txt_sz, self.dpi)
            blf.position(font_id, self.txt_co[0], self.txt_co[1], 0)
            blf.draw(font_id, self.txt)
        else:
            self.ms_over = False


# Used for mod_pt mode
class TempPoint():
    """Accumulates up to max_cnt 3D coordinates and exposes their average
    (used while editing a "mid point" made of several selections)."""
    def __init__(self):
        self.ls = []  # point list
        self.cnt = 0
        self.co3d = None
        self.max_cnt = 50

    def average(self):
        vsum = Vector()
        for p in self.ls:
            vsum += p
        self.co3d = vsum / self.cnt

    def find_pt(self, co3d):
        """Return index of co3d in the stored list, or None."""
        found_idx = None
        for i in range(self.cnt):
            if self.ls[i] == co3d:
                found_idx = i
                break
        return found_idx

    def rem_pt(self, idx):
        """Remove point at idx and refresh the average (None when empty)."""
        self.ls.pop(idx)
        self.cnt -= 1
        if self.cnt > 0:
            self.average()
        else:
            self.co3d = None

    def try_add(self, co3d):
        """Add co3d if not already stored and capacity remains."""
        found_idx = self.find_pt(co3d)
        if found_idx is None:
            if len(self.ls) < self.max_cnt:
                self.ls.append(co3d.copy())
                self.cnt += 1
                self.average()

    def reset(self, co3d):
        """Restart the accumulator with co3d as the only point."""
        self.co3d = co3d.copy()
        self.ls = [co3d.copy()]
        self.cnt = 1

    def get_co(self):
        return self.co3d.copy()

    def print_vals(self):  # debug
        print("self.cnt:", self.cnt)
        # Bug fix: this line previously printed self.cnt under the
        # "self.ls" label; print the list itself.
        print("self.ls:", self.ls)
        print("self.co3d:", self.co3d)
        for i in range(self.cnt):
            print("  [" + str(i) + "]:", [self.ls[i]])


# Basically this is just a "wrapper" around a 3D coordinate (Vector type)
# to centralize certain Reference Point features and make them easier to
# work with.
# note: if co3d is None, point does not "exist"
class ReferencePoint:
    def __init__(self, ptype, colr, co3d=None):
        self.ptype = ptype  # debug?
        self.colr = colr  # color (tuple), for displaying point in 3D view
        self.co3d = co3d  # 3D coordinate (Vector)

    # use this method to get co2d because "non-existing" points
    # will lead to a function call like this and throw an error:
    # loc3d_to_reg2d(reg, rv3d, None)
    def get_co2d(self):
        co2d = None
        if self.co3d is not None:
            reg = bpy.context.region
            rv3d = bpy.context.region_data
            co2d = loc3d_to_reg2d(reg, rv3d, self.co3d)
        return co2d

    def copy(self):
        return ReferencePoint(self.ptype, self.colr, self.co3d.copy())

    def print_vals(self):  # debug
        print("self.ptype:", self.ptype)
        print("self.colr :", self.colr)
        print("self.co3d :", self.co3d)


def init_ref_pts(self):
    """Create the three (initially empty) reference points and reset all
    shared rotation state on RotDat."""
    self.pts = [
        ReferencePoint("fre", Colr.green),
        ReferencePoint("anc", Colr.red),
        ReferencePoint("piv", Colr.yellow)
    ]
    # todo : move this part of initialization elsewhere?
    RotDat.piv_norm = None
    RotDat.new_ang_r = None
    RotDat.ang_diff_r = None
    RotDat.axis_lock = None
    RotDat.lock_pts = None
    RotDat.rot_pt_pos = None
    RotDat.rot_pt_neg = None
    RotDat.arc_pts = None


def set_piv(self):
    """With 3 ref points placed, store the normal of their plane as the
    rotation axis (pivot normal)."""
    #if self.pt_cnt == 2:
    if self.pt_cnt == 3:
        rpts = [p.co3d for p in self.pts]
        RotDat.piv_norm = geometry.normal(*rpts)


def set_mouse_highlight(self):
    """Show the mouse-follow point only while more points can be added."""
    if self.pt_cnt < 3:
        self.highlight_mouse = True
    else:
        self.highlight_mouse = False


def in_ref_pts(self, co3d, skip_idx=None):
    """True when co3d (almost) equals an existing ref point. Side effect:
    stores the matching index in self.swap_pt for grab/swap handling."""
    p_idxs = [0, 1, 2][:self.pt_cnt]
    # skip_idx so co3d is not checked against itself
    if skip_idx is not None:
        p_idxs.remove(skip_idx)
    found = False
    for i in p_idxs:
        if vec3s_alm_eq(self.pts[i].co3d, co3d):
            found = True
            self.swap_pt = i  # todo : better solution than this
            break
    return found


def add_pt(self, co3d):
    """Add co3d as the next reference point (ignored if it overlaps an
    existing point) and refresh menu / lock points / highlight."""
    if not in_ref_pts(self, co3d):
        self.pts[self.pt_cnt].co3d = co3d
        self.pt_cnt += 1
        self.menu.change_menu(self.pt_cnt)
        if self.pt_cnt > 1:
            updatelock_pts(self, self.pts)
        set_mouse_highlight(self)
        ''' Begin Debug
        cnt = self.pt_cnt - 1
        pt_fnd_str = str(self.pts[cnt].co3d)
        pt_fnd_str = pt_fnd_str.replace("<Vector ", "Vector(")
        pt_fnd_str = pt_fnd_str.replace(">", ")")
        print("ref_pt_" + str(cnt) + ' =', pt_fnd_str)
        #print("ref pt added:", self.cnt, "cnt:", self.cnt+1)
        End Debug '''


def rem_ref_pt(self, idx):
    """Remove ref point idx, compacting the remaining points down so the
    live points always occupy the lowest slots."""
    # hackery or smart, you decide...
    if idx != self.pt_cnt - 1:
        keep_idx = [0, 1, 2][:self.pt_cnt]
        keep_idx.remove(idx)
        for i in range(len(keep_idx)):
            self.pts[i].co3d = self.pts[keep_idx[i]].co3d.copy()
    self.pt_cnt -= 1
    self.menu.change_menu(self.pt_cnt)
    # set "non-existing" points to None
    for j in range(self.pt_cnt, 3):
        self.pts[j].co3d = None
    if self.pt_cnt > 1:
        updatelock_pts(self, self.pts)
    else:
        RotDat.axis_lock = None
    self.highlight_mouse = True


def add_select(self):
    """Add the current selection (object origins in OBJECT mode, selected
    verts/edges/faces in EDIT_MESH mode) as reference points."""
    if self.pt_cnt < 3:
        if bpy.context.mode == "OBJECT":
            if len(bpy.context.selected_objects) > 0:
                for obj in bpy.context.selected_objects:
                    add_pt(self, obj.location.copy())
                    if self.pt_cnt > 2:
                        break
        elif bpy.context.mode == "EDIT_MESH":
            m_w = bpy.context.edit_object.matrix_world
            bm = bmesh.from_edit_mesh(bpy.context.edit_object.data)
            if len(bm.select_history) > 0:
                exit_loop = False  # simplify checking...
                for sel in bm.select_history:
                    sel_verts = []
                    if type(sel) is bmesh.types.BMVert:
                        sel_verts = [sel]
                    elif type(sel) is bmesh.types.BMEdge:
                        sel_verts = sel.verts
                    elif type(sel) is bmesh.types.BMFace:
                        sel_verts = sel.verts
                    for v in sel_verts:
                        v_co3d = m_w * v.co
                        add_pt(self, v_co3d)
                        if self.pt_cnt > 2:
                            exit_loop = True
                            break
                    if exit_loop:
                        break


# todo : find way to merge this with add_select ?
def add_select_multi(self):
    """In mod-point mode, feed the current selection into multi_tmp and
    move the edited ref point to the resulting average."""
    if self.multi_tmp.cnt < self.multi_tmp.max_cnt:
        if bpy.context.mode == "OBJECT":
            if len(bpy.context.selected_objects) > 0:
                for obj in bpy.context.selected_objects:
                    self.multi_tmp.try_add(obj.location)
                    if self.multi_tmp.cnt == self.multi_tmp.max_cnt:
                        break
        elif bpy.context.mode == "EDIT_MESH":
            m_w = bpy.context.edit_object.matrix_world
            bm = bmesh.from_edit_mesh(bpy.context.edit_object.data)
            if len(bm.select_history) > 0:
                exit_loop = False  # simplify checking...
                for sel in bm.select_history:
                    sel_verts = []
                    if type(sel) is bmesh.types.BMVert:
                        sel_verts = [sel]
                    elif type(sel) is bmesh.types.BMEdge:
                        sel_verts = sel.verts
                    elif type(sel) is bmesh.types.BMFace:
                        sel_verts = sel.verts
                    for v in sel_verts:
                        v_co3d = m_w * v.co
                        self.multi_tmp.try_add(v_co3d)
                        if self.multi_tmp.cnt == self.multi_tmp.max_cnt:
                            exit_loop = True
                            break
                    if exit_loop:
                        break
        if in_ref_pts(self, self.multi_tmp.get_co(), self.mod_pt):
            self.report({'WARNING'}, 'Points overlap.')
        self.pts[self.mod_pt].co3d = self.multi_tmp.get_co()


def swap_ref_pts(self, pt1, pt2):
    """Exchange the coordinates of two reference points."""
    temp = self.pts[pt1].co3d.copy()
    self.pts[pt1].co3d = self.pts[pt2].co3d.copy()
    self.pts[pt2].co3d = temp


# For adding multi point without first needing a reference point
# todo : clean up TempPoint so this function isn't needed
# todo : find way to merge this with add_select_multi
def new_select_multi(self):
    """Enter mod-point mode on a fresh point (or the grabbed point) and
    seed multi_tmp from the current selection."""
    def enable_multi_mode(self):
        # Reuse the grabbed point slot if one is active, otherwise claim
        # the next free ref-point slot.
        if self.grab_pt is not None:
            self.multi_tmp.__init__()
            self.multi_tmp.co3d = Vector()
            self.mod_pt = self.grab_pt
            self.grab_pt = None
        elif self.mod_pt is None:
            self.multi_tmp.__init__()
            self.multi_tmp.co3d = Vector()
            self.mod_pt = self.pt_cnt
            self.pt_cnt += 1

    if bpy.context.mode == "OBJECT":
        if len(bpy.context.selected_objects) > 0:
            enable_multi_mode(self)
            for obj in bpy.context.selected_objects:
                self.multi_tmp.try_add(obj.location)
                if self.multi_tmp.cnt == self.multi_tmp.max_cnt:
                    break
        else:
            return
    elif bpy.context.mode == "EDIT_MESH":
        m_w = bpy.context.edit_object.matrix_world
        bm = bmesh.from_edit_mesh(bpy.context.edit_object.data)
        if len(bm.select_history) > 0:
            enable_multi_mode(self)
            exit_loop = False  # simplify checking...
            for sel in bm.select_history:
                sel_verts = []
                if type(sel) is bmesh.types.BMVert:
                    sel_verts = [sel]
                elif type(sel) is bmesh.types.BMEdge:
                    sel_verts = sel.verts
                elif type(sel) is bmesh.types.BMFace:
                    sel_verts = sel.verts
                for v in sel_verts:
                    v_co3d = m_w * v.co
                    self.multi_tmp.try_add(v_co3d)
                    if self.multi_tmp.cnt == self.multi_tmp.max_cnt:
                        exit_loop = True
                        break
                if exit_loop:
                    break
        else:
            return


def exit_multi_mode(self):
    """Leave mod-point mode, committing the averaged point (or removing
    it when it would overlap another ref point)."""
    m_co3d = self.multi_tmp.get_co()
    if in_ref_pts(self, m_co3d, self.mod_pt):
        self.report({'ERROR'}, "Point overlapped another and was removed.")
        rem_ref_pt(self, self.mod_pt)
    else:
        self.pts[self.mod_pt].co3d = m_co3d
        if self.pt_cnt > 1:
            updatelock_pts(self, self.pts)
        set_mouse_highlight(self)
    self.mod_pt = None
    set_help_text(self, "CLICK")


def get_axis_line_co(p1, p2, x_max, y_max):
    """Given two 2D points, return two points where the infinite line
    through them crosses the (0,0)-(x_max,y_max) region border; None when
    either input is None or fewer than 2 border crossings were found."""
    if None not in (p1, p2):
        x_min, y_min = 0.0, 0.0
        x1, y1 = p1
        x2, y2 = p2
        # vertical / horizontal lines are trivial
        if flts_alm_eq(x1, x2):
            return Vector((x1, y_min)), Vector((x1, y_max))
        elif flts_alm_eq(y1, y2):
            return Vector((x_min, y1)), Vector((x_max, y1))

        tol = 0.0001
        xb_min, xb_max = x_min - tol, x_max + tol
        yb_min, yb_max = y_min - tol, y_max + tol
        ln_pts = []
        slope = (y2 - y1) / (x2 - x1)
        # test each of the four region edges for a crossing
        x_bot = ((y_min - y1) / slope) + x1
        if x_bot > xb_min and x_bot < xb_max:
            ln_pts.append(Vector((x_bot, y_min)))
        x_top = ((y_max - y1) / slope) + x1
        if x_top > xb_min and x_top < xb_max:
            ln_pts.append(Vector((x_top, y_max)))
        if len(ln_pts) > 1:
            return ln_pts
        y_lef = (slope * (x_min - x1)) + y1
        if y_lef > yb_min and y_lef < yb_max:
            ln_pts.append(Vector((x_min, y_lef)))
        if len(ln_pts) > 1:
            return ln_pts
        y_rgt = (slope * (x_max - x1)) + y1
        if y_rgt > yb_min and y_rgt < yb_max:
            ln_pts.append(Vector((x_max, y_rgt)))
        if len(ln_pts) > 1:
            return ln_pts


# Returns the closest object origin or vertex to the supplied 2D location
# as 3D Vector.
# Returns None if no found coordinate closer than minimum distance.
def find_closest_point(loc):
    """Scan all scene objects (origins, plus every vertex of mesh objects)
    and return the 3D coordinate whose 2D projection is nearest to loc,
    or None when nothing projects within 40 pixels."""
    region = bpy.context.region
    rv3d = bpy.context.region_data
    shortest_dist = 40.0  # minimum distance from loc
    closest = None
    for obj in bpy.context.scene.objects:
        o_co2d = loc3d_to_reg2d(region, rv3d, obj.location)
        if o_co2d is None:
            continue
        dist2d = (loc - o_co2d).length
        if dist2d < shortest_dist:
            shortest_dist = dist2d
            closest = obj.location.copy()
        if obj.type == 'MESH':
            if len(obj.data.vertices) > 0:
                for v in obj.data.vertices:
                    v_co3d = obj.matrix_world * v.co
                    v_co2d = loc3d_to_reg2d(region, rv3d, v_co3d)
                    if v_co2d is not None:
                        dist2d = (loc - v_co2d).length
                        if dist2d < shortest_dist:
                            shortest_dist = dist2d
                            closest = v_co3d
    return closest


def draw_pt_2d(pt_co, pt_color, pt_size):
    """Draw a single 2D point (no-op when pt_co is None)."""
    if pt_co is not None:
        bgl.glEnable(bgl.GL_BLEND)
        bgl.glPointSize(pt_size)
        bgl.glColor4f(*pt_color)
        bgl.glBegin(bgl.GL_POINTS)
        bgl.glVertex2f(*pt_co)
        bgl.glEnd()
    return


def draw_line_2d(pt_co_1, pt_co_2, pt_color):
    """Draw a 2D line segment (no-op when either endpoint is None)."""
    if None not in (pt_co_1, pt_co_2):
        bgl.glEnable(bgl.GL_BLEND)
        bgl.glPointSize(15)
        bgl.glColor4f(*pt_color)
        bgl.glBegin(bgl.GL_LINE_STRIP)
        bgl.glVertex2f(*pt_co_1)
        bgl.glVertex2f(*pt_co_2)
        bgl.glEnd()
    return


def closest_to_point(pt, pts):
    """Return (point, index) of the entry in pts nearest to pt within
    15 px; (None, None) when nothing is close enough. None entries in
    pts are skipped."""
    smallest_dist = 15.0
    closest, pt_idx = None, None
    for p in range(len(pts)):
        if pts[p] is not None:
            tmp_d = (pt - pts[p]).length
            if tmp_d < smallest_dist:
                smallest_dist = tmp_d
                closest = pts[p]
                pt_idx = p
    return closest, pt_idx


def set_arc_pts(ref_pts):
    """Build the list of 3D points tracing the rotation arc preview
    (free -> anchor around the pivot, or a full circle when an axis lock
    is active) and store it in RotDat.arc_pts."""
    fre, anc, piv = ref_pts[0].co3d, ref_pts[1].co3d, ref_pts[2].co3d
    arc_pts = []
    ang = (fre - piv).angle(anc - piv)
    deg_ang = degrees(ang)
    if deg_ang > 0.01 and deg_ang < 179.99:
        piv_norm = geometry.normal(fre, piv, anc)
        rot_val = Quaternion(piv_norm, ang)
        rotated = fre - piv
        rotated.rotate(rot_val)
        rotated += piv
        # if rotating +ang doesn't land on the anchor, the arc runs the
        # other way round: flip the sign
        rot_ang = (anc - piv).angle(rotated - piv)
        if not flts_alm_eq(rot_ang, 0.0):
            ang = -ang
        dis_p_f = (piv - fre).length
        dis_p_a = (piv - anc).length
        # draw the arc at half the shorter pivot distance
        if dis_p_f < dis_p_a:
            ratio = 0.5
        else:  # dis_p_a < dis_p_f:
            ratio = dis_p_a / dis_p_f * 0.5
        mid_piv_free = piv.lerp(fre, ratio)
        arc_pts = [mid_piv_free]
        steps = abs(int(degrees(ang) // 10))
        if steps == 0:
            # Bug fix: for angles between 0.01 and 10 degrees the floor
            # division yielded 0 steps and "ang / steps" raised
            # ZeroDivisionError. Use at least one step; the endpoint
            # append below still closes the arc.
            steps = 1
        ang_step = ang / steps
        mid_align = mid_piv_free - piv
        for a in range(1, steps):
            rot_val = Quaternion(piv_norm, ang_step * a)
            temp = mid_align.copy()
            temp.rotate(rot_val)
            arc_pts.append(temp + piv)
        # in case steps <= 1
        rot_val = Quaternion(piv_norm, ang)
        temp = mid_align.copy()
        temp.rotate(rot_val)
        arc_pts.append(temp + piv)

    elif RotDat.axis_lock is not None:
        #if RotDat.axis_lock == 'X':
        #    rot_val = Euler((pi*2, 0.0, 0.0), 'XYZ')
        if RotDat.axis_lock == 'X':
            piv_norm = 1.0, 0.0, 0.0
        elif RotDat.axis_lock == 'Y':
            piv_norm = 0.0, 1.0, 0.0
        elif RotDat.axis_lock == 'Z':
            piv_norm = 0.0, 0.0, 1.0
        dis_p_f = (piv - fre).length
        dis_p_a = (piv - anc).length
        if dis_p_f < dis_p_a:
            ratio = 0.5
        else:  # dis_p_a < dis_p_f:
            ratio = dis_p_a / dis_p_f * 0.5
        mid_piv_free = piv.lerp(fre, ratio)
        # full 360 degree circle in 10 degree steps
        arc_pts = [mid_piv_free]
        steps = 36
        ang_step = pi * 2 / steps
        mid_align = mid_piv_free - piv
        for a in range(1, steps + 1):
            rot_val = Quaternion(piv_norm, ang_step * a)
            temp = mid_align.copy()
            temp.rotate(rot_val)
            arc_pts.append(temp + piv)

    RotDat.arc_pts = arc_pts


# Takes a ref_pts (ReferencePoints class) argument and modifies its member
# variable lp_ls (lock pt list). The lp_ls variable is assigned a modified list
# of 3D coordinates (if an axis lock was provided), the contents of the
# ref_pts' rp_ls var (if no axis lock was provided), or an empty list (if there
# wasn't enough ref_pts or there was a problem creating the modified list).
# todo : move inside ReferencePoints class ?
def set_lock_pts(ref_pts, pt_cnt):
    # Fewer than 2 ref points cannot define a lock line/plane.
    if pt_cnt < 2:
        RotDat.lock_pts = []
    elif RotDat.axis_lock is None:
        RotDat.lock_pts = ref_pts
    # With all 3 points placed, refresh the rotation arc preview
    # (set_arc_pts itself branches on RotDat.axis_lock).
    if pt_cnt == 3:
        set_arc_pts(ref_pts)


# end_a, piv_pt, and end_b are Vector based 3D coordinates
# coordinates must share a common center "pivot" point (piv_pt)
def get_line_ang_3d(end_a, piv_pt, end_b):
    # Angle (radians) between the two legs meeting at piv_pt.
    algn_a = end_a - piv_pt
    algn_b = end_b - piv_pt
    return algn_a.angle(algn_b)


# Checks if the 3 Vector coordinate arguments (end_a, piv_pt, end_b)
# will create an angle with a measurement matching the value in the
# argument exp_ang (expected angle measurement).
def ang_match3d(end_a, piv_pt, end_b, exp_ang):
    ang_meas = get_line_ang_3d(end_a, piv_pt, end_b)
    #print("end_a", end_a)  # debug
    #print("piv_pt", piv_pt)  # debug
    #print("end_b", end_b)  # debug
    #print("exp_ang ", exp_ang)  # debug
    #print("ang_meas ", ang_meas)  # debug
    return flts_alm_eq(ang_meas, exp_ang)


# Updates lock points and changes curr_meas_stor to use measure based on
# lock points instead of ref_pts (for axis constrained transformations).
def updatelock_pts(self, ref_pts):
    global curr_meas_stor
    set_lock_pts(ref_pts, self.pt_cnt)
    if RotDat.lock_pts == []:
        # Axis lock produced no usable points (e.g. identical coordinates
        # after projection): report and fall back to unconstrained mode.
        if RotDat.axis_lock is not None:
            self.report({'ERROR'}, 'Axis lock \'' + RotDat.axis_lock +
                    '\' creates identical points')
        RotDat.lock_pts = ref_pts
        RotDat.axis_lock = None


# See if key was pressed that would require updating the axis lock info.
# If one was, update the lock points to use new info.
def axis_key_check(self, new_axis):
    # NOTE(review): only acts with exactly one ref point placed; with 2+
    # points the axis comes from the points themselves.
    if self.pt_cnt == 1:
        if new_axis != RotDat.axis_lock:
            RotDat.axis_lock = new_axis


def draw_rot_arc(colr):
    # Draw the precomputed RotDat.arc_pts polyline, projected to 2D.
    reg = bpy.context.region
    rv3d = bpy.context.region_data
    len_arc_pts = len(RotDat.arc_pts)
    if len_arc_pts > 1:
        last = loc3d_to_reg2d(reg, rv3d, RotDat.arc_pts[0])
        for p in range(1, len_arc_pts):
            p2d = loc3d_to_reg2d(reg, rv3d, RotDat.arc_pts[p])
            draw_line_2d(last, p2d, Colr.white)
            last = p2d


# Called when add-on mode changes and every time point is added or removed.
def set_help_text(self, mode):
    """Update the 3D View header with key hints for the given add-on mode
    ("CLICK", "MULTI", "GRAB", or "POPUP")."""
    exit_hint = "ESC/LMB+RMB - exits add-on, "
    if mode == "CLICK":
        # hints vary with how many reference points are already placed
        if self.pt_cnt == 0:
            text = exit_hint + "LMB - add ref point"
        elif self.pt_cnt == 1:
            text = exit_hint + "LMB - add/remove ref points, X/Y/Z - set axis lock, C - clear axis lock, G - grab point, SHIFT+LMB enter mid point mode"
        elif self.pt_cnt == 2:
            text = exit_hint + "LMB - add/remove ref points, G - grab point, SHIFT+LMB enter mid point mode"
        else:  # self.pt_cnt == 3
            text = exit_hint + "LMB - remove ref points, G - grab point, SHIFT+LMB enter mid point mode"
    elif mode == "MULTI":
        text = exit_hint + "SHIFT+LMB exit mid point mode, LMB - add/remove point"
    elif mode == "GRAB":
        text = exit_hint + "G - cancel grab, LMB - place/swap ref points"
    elif mode == "POPUP":
        text = exit_hint + "LMB/RMB (outside pop-up) - cancel pop-up input"
    else:
        text = ""
    bpy.context.area.header_text_set(text)


# todo : move most of below to mouse_co update in modal?
def draw_callback_px(self, context): reg = bpy.context.region rv3d = bpy.context.region_data ptsz_lrg = 20 ptsz_sml = 10 add_rm_co = Vector((self.rtoolsw, 0)) self.add_rm_btn.draw_btn(add_rm_co, self.mouse_co, self.shift_held) self.rotate_btn.is_drawn = False # to-do : cleaner btn activation # allow appending None so indexing does not get messed up # causing potential false positive for overlap pts2d = [p.get_co2d() for p in self.pts] ms_colr = Colr.yellow if self.pt_cnt < 3: ms_colr = self.pts[self.pt_cnt].colr if self.grab_pt is not None: # not enabled if mod_pt active line_beg = pts2d[self.grab_pt] # backup orignal co for move line pts2d[self.grab_pt] = None # prevent check on grabbed pt closest_pt, self.overlap_idx = closest_to_point(self.mouse_co, pts2d) pts2d[self.grab_pt] = self.mouse_co ms_colr = self.pts[self.grab_pt].colr if not self.shift_held: draw_line_2d(line_beg, self.mouse_co, self.pts[self.grab_pt].colr) draw_pt_2d(closest_pt, Colr.white, ptsz_lrg) elif self.mod_pt is not None: ms_colr = self.pts[self.mod_pt].colr m_pts2d = [loc3d_to_reg2d(reg, rv3d, p) for p in self.multi_tmp.ls] closest_pt, self.overlap_idx = closest_to_point(self.mouse_co, m_pts2d) draw_pt_2d(pts2d[self.mod_pt], Colr.white, ptsz_lrg) if self.shift_held: draw_pt_2d(self.mouse_co, Colr.black, ptsz_lrg) if len(m_pts2d) > 1: for mp in m_pts2d: draw_pt_2d(mp, Colr.black, ptsz_lrg) else: draw_pt_2d(closest_pt, Colr.black, ptsz_lrg) if len(m_pts2d) > 1: for p in m_pts2d: draw_pt_2d(p, ms_colr, ptsz_sml) last_mod_pt = loc3d_to_reg2d(reg, rv3d, self.multi_tmp.ls[-1]) draw_line_2d(last_mod_pt, self.mouse_co, self.pts[self.mod_pt].colr) else: # "Normal" mode closest_pt, self.overlap_idx = closest_to_point(self.mouse_co, pts2d) if self.shift_held: draw_pt_2d(closest_pt, Colr.white, ptsz_lrg) else: draw_pt_2d(closest_pt, Colr.black, ptsz_lrg) rwid = context.region.width rhgt = context.region.height if self.pt_cnt == 1: if RotDat.axis_lock is not None: if self.running_transf is False: 
self.rotate_btn.draw_btn(pts2d[0], self.mouse_co) self.rotate_btn.is_drawn = True if RotDat.axis_lock == 'X': test = self.pts[0].co3d + Vector((1, 0, 0)) colr = Colr.red if RotDat.axis_lock == 'Y': test = self.pts[0].co3d + Vector((0, 1, 0)) colr = Colr.green if RotDat.axis_lock == 'Z': test = self.pts[0].co3d + Vector((0, 0, 1)) colr = Colr.blue t2d = loc3d_to_reg2d(reg, rv3d, test) axis_pts = get_axis_line_co(pts2d[0], t2d, rwid, rhgt) if axis_pts is not None: draw_line_2d(axis_pts[0], axis_pts[1], colr) dpi = bpy.context.preferences.system.dpi font_id, txt_sz = 0, 32 x_pos, y_pos = self.rtoolsw + 80, 36 bgl.glColor4f(*colr) blf.size(font_id, txt_sz, dpi) blf.position(font_id, x_pos, y_pos, 0) blf.draw(font_id, RotDat.axis_lock) elif self.pt_cnt == 2: axis_pts = get_axis_line_co(pts2d[0], pts2d[1], rwid, rhgt) #draw_line_2d(pts2d[0], pts2d[1], Colr.white) if axis_pts is not None: draw_line_2d(axis_pts[0], axis_pts[1], Colr.white) #draw_line_2d(pts2d[0], self.mouse_co, Colr.white) btn_co = pts2d[0].lerp(pts2d[1], 0.5) #self.meas_btn.draw_btn(btn_co, self.mouse_co) #self.meas_btn.active = True if not self.running_transf: self.rotate_btn.draw_btn(btn_co, self.mouse_co) self.rotate_btn.is_drawn = True elif self.pt_cnt == 3: test = self.pts[2].co3d + RotDat.piv_norm t2d = loc3d_to_reg2d(reg, rv3d, test) axis_pts = get_axis_line_co(pts2d[2], t2d, rwid, rhgt) if axis_pts is not None: draw_line_2d(axis_pts[0], axis_pts[1], Colr.white) #btn_co = pts2d[2] + Vector((0, 20)) draw_line_2d(pts2d[0], pts2d[2], Colr.white) draw_line_2d(pts2d[1], pts2d[2], Colr.white) #self.meas_btn.draw_btn(pts2d[2], self.mouse_co) #self.meas_btn.active = True #draw_btn(self, btn_loc, mouse_co): if not self.running_transf: self.rotate_btn.draw_btn(pts2d[2], self.mouse_co) self.rotate_btn.is_drawn = True # todo : figure out reason for weirdness below cnt = 0 for p in pts2d: draw_pt_2d(p, self.pts[cnt].colr, ptsz_sml) cnt += 1 if self.highlight_mouse and not self.running_transf: 
draw_pt_2d(self.mouse_co, ms_colr, ptsz_sml) self.menu.draw(self.rotate_btn.is_drawn) def exit_addon(self): restore_blender_settings(self.settings_backup) bpy.context.area.header_text_set() # todo : reset openGL settings? #bgl.glColor4f() #blf.size() #blf.position() #print("\n\nAdd-On Exited\n") # debug # Sees if "use_region_overlap" is enabled and X offset is needed. def get_reg_overlap(): rtoolsw = 0 # region tools (toolbar) width #ruiw = 0 # region ui (Number/n-panel) width system = bpy.context.preferences.system if system.use_region_overlap: area = bpy.context.area for r in area.regions: if r.type == 'TOOLS': rtoolsw = r.width #elif r.type == 'UI': # ruiw = r.width #return rtoolsw, ruiw return rtoolsw class XEditFreeRotate(bpy.types.Operator): bl_idname = "view3d.xedit_free_rotate_op" bl_label = "XEdit Free Rotate" # Only launch Add-On from OBJECT or EDIT modes @classmethod def poll(self, context): return context.mode == 'OBJECT' or context.mode == 'EDIT_MESH' def modal(self, context, event): context.area.tag_redraw() if event.type in {'A', 'MIDDLEMOUSE', 'WHEELUPMOUSE', 'WHEELDOWNMOUSE', 'NUMPAD_1', 'NUMPAD_2', 'NUMPAD_3', 'NUMPAD_4', 'NUMPAD_6', 'NUMPAD_7', 'NUMPAD_8', 'NUMPAD_9', 'NUMPAD_0', 'TAB'}: return {'PASS_THROUGH'} if event.type == 'MOUSEMOVE': self.mouse_co = Vector((event.mouse_region_x, event.mouse_region_y)) if event.type in {'LEFT_SHIFT', 'RIGHT_SHIFT'}: if event.value == 'PRESS': self.shift_held = True #print("\nShift pressed") # debug elif event.value == 'RELEASE': self.shift_held = False #print("\nShift released") # debug if event.type == 'RIGHTMOUSE': if event.value == 'PRESS': if self.lmb_held: bpy.types.SpaceView3D.draw_handler_remove(self._handle, 'WINDOW') exit_addon(self) return {'CANCELLED'} elif event.value == 'RELEASE': self.running_transf = False set_mouse_highlight(self) set_help_text(self, "CLICK") return {'PASS_THROUGH'} elif event.type == 'SPACE' and event.value == 'RELEASE': # Safely exit transform if self.running_transf: 
self.running_transf = False elif event.type in {'RET', 'LEFTMOUSE'} and event.value == 'PRESS': self.mouse_co = Vector((event.mouse_region_x, event.mouse_region_y)) if event.type == 'LEFTMOUSE': self.lmb_held = True #print("LEFTMOUSE PRESS") # debug elif event.type in {'RET', 'LEFTMOUSE'} and event.value == 'RELEASE': # prevent click/enter that launched add-on from doing anything if self.first_run: self.first_run = False return {'RUNNING_MODAL'} if event.type == 'LEFTMOUSE': self.lmb_held = False #print("LeftMouse released") # debug self.mouse_co = Vector((event.mouse_region_x, event.mouse_region_y)) #=========================== # Safely exit transform #=========================== if self.running_transf is True: self.running_transf = False #=================================== # Check for click on Rotate Button #=================================== elif self.rotate_btn.is_drawn is True and \ self.rotate_btn.ms_over is True: #print("Button Clicked") curs_loc = None #bpy.ops.object.ms_input_dialog_op('INVOKE_DEFAULT') if self.pt_cnt == 1: if RotDat.axis_lock == 'X': rot_axis = 1.0, 0.0, 0.0 elif RotDat.axis_lock == 'Y': rot_axis = 0.0, 1.0, 0.0 elif RotDat.axis_lock == 'Z': # -1 because it is assumed most rotations # will have negative z pointing down rot_axis = 0.0, 0.0, -1.0 curs_loc = self.pts[0].co3d.copy() elif self.pt_cnt == 2: #if RotDat.axis_lock is None: rot_vec = self.pts[1].co3d - self.pts[0].co3d rot_axis = rot_vec.normalized() curs_loc = self.pts[0].co3d.lerp(self.pts[1].co3d, 0.5) elif self.pt_cnt == 3: #if RotDat.axis_lock is None: rot_axis = RotDat.piv_norm curs_loc = self.pts[2].co3d.copy() self.running_transf = True bpy.context.space_data.pivot_point = 'CURSOR' bpy.context.scene.cursor.location = curs_loc bpy.ops.transform.rotate('INVOKE_DEFAULT',axis=rot_axis) #=========================================== # Check for click on "Add Selected" Button #=========================================== elif self.add_rm_btn.ms_over: if self.mod_pt is not None: if 
not self.shift_held: add_select_multi(self) else: if self.pt_cnt < 3: new_select_multi(self) exit_multi_mode(self) self.menu.change_menu(self.pt_cnt) elif self.grab_pt is not None: co3d = None if bpy.context.mode == "OBJECT": if len(bpy.context.selected_objects) > 0: if not self.shift_held: co3d = bpy.context.selected_objects[0].location else: new_select_multi(self) exit_multi_mode(self) self.menu.change_menu(self.pt_cnt) elif bpy.context.mode == "EDIT_MESH": m_w = bpy.context.edit_object.matrix_world bm = bmesh.from_edit_mesh(bpy.context.edit_object.data) if len(bm.select_history) > 0: if not self.shift_held: for sel in bm.select_history: if type(sel) is bmesh.types.BMVert: co3d = m_w * sel.co break elif type(sel) is bmesh.types.BMEdge or \ type(sel) is bmesh.types.BMFace: co3d = Vector() for v in sel.verts: co3d += m_w * v.co co3d = co3d / len(sel.verts) break else: new_select_multi(self) exit_multi_mode(self) self.menu.change_menu(self.pt_cnt) if co3d is not None: if not in_ref_pts(self, co3d): self.pts[self.grab_pt].co3d = co3d else: swap_ref_pts(self, self.grab_pt, self.swap_pt) self.swap_pt = None self.grab_pt = None updatelock_pts(self, self.pts) set_piv(self) else: # no grab or mod point if self.shift_held: if self.pt_cnt < 3: new_select_multi(self) if in_ref_pts(self, self.multi_tmp.get_co(), self.mod_pt): self.report({'WARNING'}, 'Points overlap.') self.pts[self.mod_pt].co3d = self.multi_tmp.get_co() self.menu.change_menu(self.pt_cnt) else: add_select(self) # todo : see if this is really a good solution... 
if self.mod_pt is None: set_help_text(self, "CLICK") else: set_help_text(self, "MULTI") #=========================== # Point Place or Grab Mode #=========================== elif self.mod_pt is None: if self.overlap_idx is None: # no point overlap if not self.shift_held: if self.grab_pt is not None: found_pt = find_closest_point(self.mouse_co) if found_pt is not None: if not in_ref_pts(self, found_pt): self.pts[self.grab_pt].co3d = found_pt self.grab_pt = None if self.pt_cnt > 1: updatelock_pts(self, self.pts) set_mouse_highlight(self) set_piv(self) set_help_text(self, "CLICK") elif self.pt_cnt < 3: found_pt = find_closest_point(self.mouse_co) if found_pt is not None: if not in_ref_pts(self, found_pt): self.pts[self.pt_cnt].co3d = found_pt self.pt_cnt += 1 self.menu.change_menu(self.pt_cnt) if self.pt_cnt > 1: RotDat.axis_lock = None updatelock_pts(self, self.pts) set_piv(self) #if self.pt_cnt set_mouse_highlight(self) set_help_text(self, "CLICK") ''' Begin Debug cnt = self.pt_cnt - 1 pt_fnd_str = str(self.pts[cnt].co3d) pt_fnd_str = pt_fnd_str.replace("<Vector ", "Vector(") pt_fnd_str = pt_fnd_str.replace(">", ")") print("ref_pt_" + str(cnt) + ' =', pt_fnd_str) #print("ref pt added:", self.cnt, "cnt:", self.cnt+1) End Debug ''' else: # overlap if self.grab_pt is not None: if not self.shift_held: if self.grab_pt != self.overlap_idx: swap_ref_pts(self, self.grab_pt, self.overlap_idx) self.grab_pt = None if self.pt_cnt > 1: updatelock_pts(self, self.pts) set_mouse_highlight(self) set_piv(self) set_help_text(self, "CLICK") elif not self.shift_held: # overlap and shift not held == remove point rem_ref_pt(self, self.overlap_idx) set_help_text(self, "CLICK") else: # shift_held # enable multi point mode self.mod_pt = self.overlap_idx self.multi_tmp.reset(self.pts[self.mod_pt].co3d) self.highlight_mouse = True set_help_text(self, "MULTI") #=========================== # Mod Ref Point Mode #=========================== else: # mod_pt exists if self.overlap_idx is None: # no 
point overlap if not self.shift_held: # attempt to add new point to multi_tmp found_pt = find_closest_point(self.mouse_co) if found_pt is not None: self.multi_tmp.try_add(found_pt) mult_co3d = self.multi_tmp.get_co() if in_ref_pts(self, mult_co3d, self.mod_pt): self.report({'WARNING'}, 'Points overlap.') self.pts[self.mod_pt].co3d = mult_co3d else: # shift_held, exit multi_tmp exit_multi_mode(self) set_piv(self) else: # overlap multi_tmp if not self.shift_held: # remove multi_tmp point self.multi_tmp.rem_pt(self.overlap_idx) # if all multi_tmp points removed, # exit multi mode, remove edited point if self.multi_tmp.co3d is None: rem_ref_pt(self, self.mod_pt) self.mod_pt = None set_help_text(self, "CLICK") elif in_ref_pts(self, self.multi_tmp.co3d, self.mod_pt): self.report({'WARNING'}, 'Points overlap.') self.pts[self.mod_pt].co3d = self.multi_tmp.get_co() else: self.pts[self.mod_pt].co3d = self.multi_tmp.get_co() else: # shift_held exit_multi_mode(self) if event.type == 'C' and event.value == 'PRESS': #print("Pressed C\n") # debug axis_key_check(self, None) elif event.type == 'X' and event.value == 'PRESS': #print("Pressed X\n") # debug axis_key_check(self, 'X') elif event.type == 'Y' and event.value == 'PRESS': #print("Pressed Y\n") # debug axis_key_check(self, 'Y') elif event.type == 'Z' and event.value == 'PRESS': #print("Pressed Z\n") # debug axis_key_check(self, 'Z') ''' elif event.type == 'D' and event.value == 'RELEASE': # open debug console __import__('code').interact(local=dict(globals(), **locals())) ''' elif event.type == 'G' and event.value == 'RELEASE': # if already in grab mode, cancel grab if self.grab_pt is not None: self.grab_pt = None set_mouse_highlight(self) set_help_text(self, "CLICK") # else enable grab mode (if possible) elif self.mod_pt is None: if self.overlap_idx is not None: self.grab_pt = self.overlap_idx self.highlight_mouse = False set_help_text(self, "GRAB") elif event.type in {'ESC'} and event.value == 'RELEASE': 
# NOTE(review): the first statements below are the tail of modal() — the body
# of the 'ESC'-released branch begun on the previous line; indentation was
# reconstructed and should be confirmed against the original file.
            bpy.types.SpaceView3D.draw_handler_remove(self._handle, 'WINDOW')
            exit_addon(self)
            return {'CANCELLED'}

        # External quit request (set elsewhere on self): clean up and finish.
        if self.force_quit:
            bpy.types.SpaceView3D.draw_handler_remove(self._handle, 'WINDOW')
            exit_addon(self)
            return {'FINISHED'}

        return {'RUNNING_MODAL'}

    def invoke(self, context, event):
        """Initialize all operator state and start the modal loop.

        Only runs inside a VIEW_3D area; otherwise reports a warning and
        cancels.
        """
        if context.area.type == 'VIEW_3D':
            args = (self, context)
            # Add the region OpenGL drawing callback
            # draw in view space with 'POST_VIEW' and 'PRE_VIEW'
            self._handle = bpy.types.SpaceView3D.draw_handler_add(draw_callback_px,
                    args, 'WINDOW', 'POST_PIXEL')
            self.settings_backup = backup_blender_settings()
            self.mouse_co = Vector((event.mouse_region_x, event.mouse_region_y))
            self.rtoolsw = get_reg_overlap()  # region tools (toolbar) width
            self.highlight_mouse = True  # draw ref point on mouse
            self.pts = []
            self.running_transf = False
            self.pt_cnt = 0
            self.lk_pts = []
            self.multi_tmp = TempPoint()
            self.rotate_btn = ViewButton(Colr.red, Colr.white, 18, Colr.white, (0.0, 20))
            self.rotate_btn.set_text("Rotate")
            self.add_rm_btn = ViewButton(Colr.red, Colr.white, 18, Colr.white, (190, 36))
            self.overlap_idx = None
            self.shift_held = False
            #self.debug_flag = False
            self.mod_pt = None
            # True when the key/click that launched the operator is still held.
            self.first_run = event.type in {'RET', 'LEFTMOUSE'} and event.value != 'RELEASE'
            self.force_quit = False
            self.grab_pt = None
            self.new_free_co = ()
            self.swap_pt = None
            #self.addon_mode = CLICK_CHECK
            self.lmb_held = False
            self.menu = MenuHandler("Free Rotate", 18, Colr.yellow, Colr.white, \
                    self.rtoolsw, context.region)
            self.menu.add_menu(["Axis Lock Rotate"])
            self.menu.add_menu(["Axis Rotate"])
            self.menu.add_menu(["Planar Rotate"])
            context.window_manager.modal_handler_add(self)
            init_ref_pts(self)
            init_blender_settings()
            editmode_refresh()
            #print("Add-on started")  # debug
            self.add_rm_btn.set_text("Add Selected")
            set_help_text(self, "CLICK")
            return {'RUNNING_MODAL'}
        else:
            self.report({'WARNING'}, "View3D not found, cannot run operator")
            return {'CANCELLED'}
# Package metadata: distribution version and author.
__version__ = "2.0.3"
__author__ = "decoxviii"
# -*- coding: utf-8 -*-
"""Diogenes, reto-06: clase Configurator."""

# Copyright (c) 2022 José Lorenzo Nieto Corral <a.k.a. jlnc> <a.k.a. JoseLo>

# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:

# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.

# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NON INFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

import os
from pathlib import Path

import toml

from typing import Dict, Union, MutableMapping, Any


class Configurator:
    """Generate and verify the application configuration.

    The configuration is a TOML file with one top-level section,
    ``[directorios]``, whose entries may declare ``in``/``out`` directory
    paths that must exist on disk.
    """

    HEADER = "directorios"   # name of the top-level TOML section
    DIR_INPUT = "in"         # key of an entry's input directory
    DIR_OUTPUT = "out"       # key of an entry's output directory

    def __init__(self, path: Path, config: str) -> None:
        """Store the input parameters and check the configuration.

        Parameters
        ----------
        path : Path
            Path of the configuration directory.
        config : str
            Name of the configuration file.

        Returns
        -------
        None
        """
        self.directory = path
        self.filename = config
        self.check()

    def check(self) -> None:
        """Check the integrity of the configuration.

        1) If the configuration directory does not exist, create it.
        2) If the configuration file does not exist, create it and write
           the main section header: '["directorios"]'.
        3) If the configuration file exists, read it and create every
           directory defined in it.

        Paths such as '~/dir' or '$HOME/dir' are expanded before creation,
        so they are no longer created relative to the current directory
        (e.g. '${PWD}/~/dir').

        Returns
        -------
        None.
        """
        # exist_ok avoids the separate exists() test (no TOCTOU window).
        os.makedirs(self.directory, exist_ok=True)
        config_file = self.directory / self.filename
        if not config_file.exists():
            conf = {self.__class__.HEADER: {}}
            with open(config_file, 'w') as fwrite:
                toml.dump(conf, fwrite)
        conf = toml.load(config_file)
        dirs_inout = (self.__class__.DIR_INPUT, self.__class__.DIR_OUTPUT)
        for item in conf[self.__class__.HEADER].values():
            for raw in (item[d] for d in item if d in dirs_inout):
                # FIX: expand environment variables and '~' first — the
                # original created literal '~/...' directories under $PWD.
                path = Path(os.path.expandvars(raw)).expanduser()
                os.makedirs(path, exist_ok=True)

    def read(self) -> MutableMapping[str, Any]:
        """Read the configuration.

        Accesses the configuration file through the attributes defined in
        the constructor (self.directory and self.filename).

        Returns
        -------
        MutableMapping[str, Any]
        """
        config_file = self.directory / self.filename
        return toml.load(config_file)

    def save(self, conf: Union[Dict, MutableMapping[str, Any]]) -> None:
        """Save the configuration in TOML format.

        Parameters
        ----------
        conf : Union[Dict, MutableMapping[str, Any]]
            The mapping used to hold the configuration internally.

        Returns
        -------
        None
        """
        config_file = self.directory / self.filename
        with open(config_file, 'w') as fw:
            toml.dump(conf, fw)
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import pandas as pd
import time
from copy import copy
import datetime

print("")
print("Ziel ist es diese Seiten zu Scrapen: ")
print("# LINKS:https://www.tuifly.com/flugangebote "
      "https://www.kayak.de/flugangebote")
time.sleep(3)

# LINKS:
# https://www.tuifly.com/flugangebote?tts=GVABCNc~20191119-VY6198-NonX3~20191126-VY6201-NonX3|STRPMIc~20191201-X32172-X3Pure~20191208-VY3440-NonX3
# https://www.tuifly.com/flugangebote
# https://www.kayak.de/flugangebote
# https://www.statravel.de/aktuelle-flugangebote.htm
# https://www.lufthansa.com/de/de/fluege

# FIX: the original said `df = pd.DataFrame` (binding the class itself);
# the constructor must be called to create an (empty) DataFrame.
df = pd.DataFrame()

# self.tui_dep_time = []
# self.tui_ret_time = []
# Listen bsp: self.dep_times_list = []


def compile_data_tui():
    """Scrape destination, price and departure texts from the TUI offer tiles."""
    j = 0
    while j < 61:
        j += 1
        try:
            t_destiny = driver.find_elements_by_xpath(
                f"/html/body/div[2]/main/section[2]/div/div/div[3]/div[{j}]/label[1]/div[2]/div[1]/span[2]")
            tui_destiny = [element.text for element in t_destiny]
            print(" Ziel ")
            print(tui_destiny)
            t_price = driver.find_elements_by_xpath(f"/html/body/div[2]/main/section[2]/div/div/div[3]/div[{j}]/label[1]/div[2]/div[2]/div")
            tui_price = [element.text for element in t_price]
            print(tui_price)
            t_departure = driver.find_elements_by_xpath(f"/html/body/div[2]/main/section[2]/div/div/div[3]/div[{j}]/label[1]/div[2]/div[1]/span[1]")
            tui_departure = [element.text for element in t_departure]
            print("Abflugort ist: ")
            print(tui_departure)
        except Exception:  # FIX: was a bare `except:` — keep best-effort, but
            print("Konnte nicht durchgeführt werden")  # never mask SystemExit/KeyboardInterrupt


def tui_chooser():
    """Expand every TUI offer tile, falling back to a CSS selector + JS click."""
    i = 0
    while i < 61:
        try:
            i += 1
            try:
                show_more = driver.find_element_by_xpath(f"/html/body/div[2]/main/section[2]/div/div/div[3]/div[{i}]/label[2]")
                show_more.click()
                time.sleep(3)
                # driver.execute_script("window.scrollTo(500, 1000)")
                time.sleep(3)
            except Exception:  # FIX: bare except
                # print("jetzt wird nach css gesucht")
                show_more2 = driver.find_element_by_css_selector(f"body > div.page.underlay-spacer.js-underlay-spacer > main > section.box > div > div > div.js-trip-tile-list > div:nth-child({i}) > label.trip-tile__button")
                driver.execute_script("arguments[0].click();", show_more2)
        except Exception:  # FIX: bare except
            print("Alles wurde geöffnet")

########################################################################################################################
########################################################################################################################


def kayak_chooser():
    """Dismiss the Kayak cookie dialog, then click 'show more' twice."""
    time.sleep(3)
    try:
        cookies_kacke = driver.find_element_by_xpath('/html/body/div[6]/div/div[3]/div/div/div/div/div[1]/div/div[2]/div[2]/div[2]/button')
        print(" C1 ")
    except Exception:  # FIX: bare except — fall back to the CSS selector
        cookies_kacke = driver.find_element_by_css_selector('#d9Mu-cookie-consent-dialog-body > div > div > div.page-1._i57._jht._jhn > div > div._iEG._itL > div._iaB._iDa > div._iEG')
        print(" C2 ")
    webdriver.ActionChains(driver).move_to_element(cookies_kacke).click(cookies_kacke).perform()
    time.sleep(5)
    print("Mehr anzeigen")
    time.sleep(2)
    for i in range(2):
        time.sleep(2)
        driver.find_element_by_xpath('/html/body/div[1]/div/div[1]/main/div/div[1]/div/div[2]/div/div/div[3]/div[2]/div/span').click()

# Mehr Ergebnisse:
# /html/body/div[1]/div/div[1]/main/div/div[1]/div/div[2]/div/div/div[3]/div[2]/div/span


def compile_data_kayak():
    """Scrape price, destination, departure and dates from the Kayak result tiles."""
    k = 0
    while k < 15:
        k += 1
        print(k)
        try:
            k_price = driver.find_elements_by_xpath(f'/html/body/div[1]/div/div[1]/main/div/div[1]/div/div[2]/div/div/div[3]/div[2]/div/div[1]/div/div[{k}]/div/a/div[7]/div[2]')
            # "//div[@class='price']" /// f"/html/body/div[1]/div/div[1]/main/div/div[1]/div/div[2]/div/div/div[3]/div[2]/div/div[1]/div/div[{k}]/div/a/div[7]/div[2]"
            kayak_price = [element.text for element in k_price]
            print(kayak_price)
        except Exception:  # FIX: bare except
            print("Kayak hat nicht funktioniert!")
        try:
            k_destiny = driver.find_elements_by_xpath(f'/html/body/div[1]/div/div[1]/main/div/div[1]/div/div[2]/div/div/div[3]/div[2]/div/div[1]/div/div[{k}]/div/a/div[5]')
            kayak_destiny = [element.text for element in k_destiny]
            print(kayak_destiny)
        except Exception:  # FIX: bare except
            print("Error")
        try:
            k_depart = driver.find_elements_by_xpath("/html/body/div[1]/div/div[1]/main/div/div[1]/div/div[2]/div/div/h2/span")
            kayak_departure = [element.text for element in k_depart]
            print(kayak_departure)
        except Exception:  # FIX: bare except
            print("Abflugort nicht da")
        try:
            k_dates = driver.find_elements_by_xpath(f'/html/body/div[1]/div/div[1]/main/div/div[1]/div/div[2]/div/div/div[3]/div[2]/div/div[1]/div/div[{k}]/div/a/div[3]')
            # k_dates = driver.find_elements_by_xpath('')
            print(k_dates)
            kayak_dates = [element.text for element in k_dates]
            print(kayak_dates)
        except Exception:  # FIX: bare except
            print("Keine Daten gefunden")

# /html/body/div[1]/div/div[1]/main/div/div[1]/div/div[2]/div/div/div[3]/div[2]/div/div[1]/div/div[1]/div/a/div[3]
# '//div[@class ="datesWrapper"]' #//*[@id="MnWc"]
# #gaa2 > a > div.datesWrapper
# #Gi9A > a > div.datesWrapper
# #xKww > a > div.datesWrapper
# #iCZV > a > div.datesWrapper
# /html/body/div[1]/div/div[1]/main/div/div[1]/div/div[2]/div/div/div[3]/div[2]/div/div[1]/div/div[2]/div/a/div[3]
# /html/body/div[1]/div/div[1]/main/div/div[1]/div/div[2]/div/div/div[3]/div[2]/div/div[1]/div/div[1]/div/a/div[3]
# f'/html/body/div[1]/div/div[1]/main/div/div[1]/div/div[2]/div/div/div[3]/div[2]/div/div[1]/div/div[{k}]/div/a/div[3]'
# #v9T5 > a:nth-child(1) > div:nth-child(3)

links = ["https://www.tuifly.com/flugangebote", "https://www.kayak.de/flugangebote"]  #, "https://www.statravel.de/aktuelle-flugangebote.htm"

for item in links:
    chromedriver = "/Users/Fabi/Downloads/chromedriver"
    driver = webdriver.Chrome(chromedriver)
    print(item)
    time.sleep(2)
    driver.get(item)
    time.sleep(2)

    # TUI
    if item == links[0]:
        # tui_chooser()
        # time.sleep(3)
        print("**********************")
        # time.sleep(3)
        compile_data_tui()
    else:
        print("ist kein Tui-link")

    # KAYAK
    if item == links[1]:
        time.sleep(3)
        kayak_chooser()
        time.sleep(3)
        compile_data_kayak()
    else:
        print("ist kein kayak-link")

    # if item == links[3]:
    time.sleep(5)
    driver.close()
    time.sleep(2)
from typing import List, Union

from vortexasdk.endpoints.geographies import Geographies

from vortexasdk.api import ID

from vortexasdk.conversions.conversions import _convert_to_ids


def convert_to_geography_ids(
    ids_or_names_list: List[Union[ID, str]]
) -> List[ID]:
    """
    Convert a mixed list of names or IDs to geography ids.

    # Example

    ```
    >>> convert_to_geography_ids(["Rotterdam [NL]"])
    [...]
    ```

    # Example

    ```
    >>> convert_to_geography_ids(["Rotterdam [NL]", "b514a3bfd0b87d91984f43d9ee5071fb3a063ec309f2fe486ca3d2e58b61d683"])
    [...]
    ```
    """
    # Delegates to the shared converter, resolving names against the
    # Geographies endpoint; entries that already look like IDs are
    # presumably passed through unchanged — see _convert_to_ids.
    return _convert_to_ids(ids_or_names_list, Geographies())
import io
import time
from datetime import datetime, timedelta
import requests as reqs
import json

import AirQualityReader

# Ecobee REST API endpoints and the JSON file holding the OAuth tokens/API key.
ecobee_base_url = "https://api.ecobee.com/"
api_versioni_url = "1/"  # (sic) API version path segment
thermostat_request_url = "thermostat"
tokenFileName = "tokens.json"


def readJsonDataFromFile(filename):
    """Return the parsed JSON content of *filename*."""
    with open(filename, 'r') as token_file:
        data = json.load(token_file)
    return data


def writeTokenFile(tokens, filename):
    """Persist the token dict to *filename* as pretty-printed JSON."""
    with open(filename, 'w') as token_file:
        json.dump(tokens, token_file, sort_keys=True, indent=4)


def refreshTokenRequest(tokens):
    """Exchange the refresh token for a fresh access token.

    Mutates *tokens* in place with the new token pair and expiry timestamps.
    Returns True on success, False on any non-200 response.
    """
    req_data = {'grant_type': 'refresh_token', \
                'refresh_token': tokens['REFRESH_TOKEN'], \
                'client_id': tokens['API_KEY']}
    t = datetime.utcnow()
    # timedelta(0,0,0,0,0,1,0) is one hour (the 6th positional arg is hours).
    access_token_expire = t + timedelta(0,0,0,0,0,1,0)
    refresh_token_expire = t + timedelta(365)  # refresh token: one year
    r = reqs.post(ecobee_base_url + 'token', req_data)
    if r.status_code != 200:
        return False
    rj = r.json()
    tokens['REFRESH_TOKEN'] = rj['refresh_token']
    tokens['ACCESS_TOKEN'] = rj['access_token']
    # NOTE(review): naive utcnow() datetimes round-tripped via timestamp();
    # consistent within this module, but confirm if the file is shared.
    tokens['ACCESS_EXPIRE'] = str(access_token_expire.timestamp())
    tokens['REFRESH_EXPIRE'] = str(refresh_token_expire.timestamp())
    return True


def updateAccessToken(tokens):
    """Refresh the access token if expired; persist new tokens on success.

    Returns True when a valid access token is available, False when the
    refresh request failed.
    """
    access_token_expire = datetime.fromtimestamp(float(tokens['ACCESS_EXPIRE']))
    if datetime.utcnow() > access_token_expire:
        if refreshTokenRequest(tokens):
            writeTokenFile(tokens, tokenFileName)
            return True
        return False
    return True


def controlThermostat(tokens, controlJsonFileName):
    """POST the request body stored in *controlJsonFileName* to the API.

    Returns True on HTTP 200, False otherwise.
    """
    full_url = ecobee_base_url + api_versioni_url + thermostat_request_url
    auth_headers = {'Content-Type': 'application/json;charset=UTF-8',
                    'Authorization': 'Bearer ' + tokens['ACCESS_TOKEN']}
    req_data = readJsonDataFromFile(controlJsonFileName)
    r = reqs.post(full_url, headers = auth_headers, json=req_data)
    if r.status_code != 200:
        return False
    return True


def ecobeeControl(iaq: float, iaq_accuracy: int):
    """Callback for AirQualityReader: toggle the ecobee fan from IAQ readings.

    State lives in function attributes (systemReady, fanIsOn), initialized
    at module bottom.  Fan turns on above IAQ 100 and off below 50
    (hysteresis band); readings with iaq_accuracy == 0 are ignored.
    """
    # Retry until the access token is valid (blocks on persistent failure).
    while (updateAccessToken(myTokens) == False):
        time.sleep(1)
    if not ecobeeControl.systemReady :
        # First accurate reading after burn-in marks the system ready.
        if iaq_accuracy > 0 :
            ecobeeControl.systemReady = True
            print("System is ready to use")
    else:
        if iaq > 100.0 and iaq_accuracy > 0 and not ecobeeControl.fanIsOn :
            while (controlThermostat(myTokens, "setHoldFanOn.json") == False):
                time.sleep(1)
            ecobeeControl.fanIsOn = True
            print(f'IAQ {iaq:.0f}, IAQ Accuracy {iaq_accuracy:d}, fan is turned on')
            return
        if iaq < 50.0 and iaq_accuracy > 0 and ecobeeControl.fanIsOn :
            while (controlThermostat(myTokens, "resumeProgram.json") == False):
                time.sleep(1)
            ecobeeControl.fanIsOn = False
            print(f'IAQ {iaq:.0f}, IAQ Accuracy {iaq_accuracy:d}, fan is turned off')
            return


myTokens = readJsonDataFromFile(tokenFileName)

print("System is in 5 minute burn-in time")
# System is ready after 5 minute burn-in time and IAQ accurary is greater than 0
ecobeeControl.systemReady = False
ecobeeControl.fanIsOn = False

AirQualityReader.run(ecobeeControl)
import pytest
import datetime

from acondbs import ops

##__________________________________________________________________||
@pytest.fixture
def app(app_users):
    """App fixture pre-populated with product types, products and relations.

    Relation types:
        parent <-> child
        plaintiff <-> defendant
    Relations:
        map1 -> beam1
         |       |
         +-------+---> beam2
    """
    application = app_users

    relation_type_pairs = [
        (
            dict(type_id=1, name="parent", indef_article="a",
                 singular="parent", plural="parents"),
            dict(type_id=2, name="child", indef_article="a",
                 singular="child", plural="children"),
        ),
        (
            dict(type_id=3, name="plaintiff", indef_article="a",
                 singular="plaintiff", plural="plaintiffs"),
            dict(type_id=4, name="defendant", indef_article="a",
                 singular="defendant", plural="defendants"),
        ),
    ]
    with application.app_context():
        for forward, backward in relation_type_pairs:
            ops.create_product_relation_type(type_=forward, reverse=backward)
        ops.create_product_type(type_id=1, name="map")
        ops.create_product_type(type_id=2, name="beam")
        ops.commit()

    products = [
        (1, 1, "map1", datetime.date(2020, 2, 1)),
        (4, 2, "beam1", datetime.date(2020, 2, 5)),
        (5, 2, "beam2", datetime.date(2020, 3, 4)),
    ]
    with application.app_context():
        for product_id, type_id, name, produced in products:
            ops.create_product(
                product_id=product_id,
                type_id=type_id,
                name=name,
                date_produced=produced,
            )
        ops.commit()

    # (relation type, self product, other product)
    relations = [(1, 4, 1), (1, 5, 1), (1, 5, 4)]
    with application.app_context():
        for type_id, self_id, other_id in relations:
            ops.create_product_relation(
                type_id=type_id,
                self_product_id=self_id,
                other_product_id=other_id,
            )
        ops.commit()

    yield application

##__________________________________________________________________||
'''
Copyright 2017 Rafael Alves Ribeiro

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
'''
from browsermobproxy import Server
from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.firefox.options import Options
from simplejson.scanner import JSONDecodeError


class Firefox(webdriver.Firefox):
    """webdriver.Firefox routed through a BrowserMob proxy that records HARs.

    Exposes the last navigation's request/response/headers/status_code via
    the captured HAR.
    """

    def __init__(self, browsermob_binary=None, firefox_binary=None,
                 firefox_profile=None, capabilities=None,
                 allow_insecure_certs=False, timeout=30, headless=False,
                 firefox_options=None):
        self.browsermob_binary = browsermob_binary
        self.blacklist = {}
        self.set_browsermob_proxy()
        self.add_proxy_to_profile(firefox_profile)
        self.firefox_binary = firefox_binary
        self.capabilities = capabilities
        # FIX: honor the `allow_insecure_certs` parameter.  The original
        # tested `self.capabilities is True`, which ignored this flag and
        # raised TypeError when capabilities=True was actually passed
        # (it would then index into a bool).
        if allow_insecure_certs:
            self.allow_insecure_certs()
        self.firefox_options = Options()
        if headless is True:
            self.firefox_options.add_argument('-headless')
        if firefox_options:
            # Plain loop — the original built a throwaway list comprehension
            # purely for its side effects.
            for arg in firefox_options:
                self.firefox_options.add_argument(arg)
        # NOTE(review): `timeout` is accepted but currently unused.
        webdriver.Firefox.__init__(self,
                                   firefox_binary=self.firefox_binary,
                                   firefox_profile=self.profile,
                                   capabilities=self.capabilities,
                                   firefox_options=self.firefox_options)

    def add_proxy_to_profile(self, profile):
        """Attach the BrowserMob proxy to *profile* (a fresh one if None)."""
        if profile is None:
            self.profile = webdriver.FirefoxProfile()
        else:
            self.profile = profile
        self.profile.set_proxy(self.proxy.selenium_proxy())

    def set_browsermob_proxy(self):
        """Start the BrowserMob server and create a proxy on it."""
        server = Server(self.browsermob_binary)
        server.start()
        self.proxy = server.create_proxy()

    def add_blacklist(self, blacklist_urls):
        '''
        Sets a list of URL patterns to blacklist
        :param blacklist_urls: dict of urls to blacklist(key) and the HTTP
            status code to return for URLs(value)
        '''
        for pattern in blacklist_urls:
            self.blacklist[pattern] = blacklist_urls[pattern]
        for url in self.blacklist:
            self.proxy.blacklist(url, self.blacklist[url])

    @property
    def har(self):
        """The current HAR, or None if it cannot be decoded."""
        try:
            har = self.proxy.har
        except JSONDecodeError:
            har = None
        return har

    def get(self, url):
        """Navigate to *url* recording a new HAR; return the HTTP status code."""
        self.proxy.new_har(options={'captureHeaders': True})
        super(webdriver.Firefox, self).get(url)
        return self.status_code

    def allow_insecure_certs(self):
        """Enable `acceptInsecureCerts` in the (possibly default) capabilities."""
        if self.capabilities is None:
            self.capabilities = DesiredCapabilities.FIREFOX.copy()
        self.capabilities['acceptInsecureCerts'] = True

    @property
    def request(self):
        """First request entry of the recorded HAR, or None."""
        if self.har is not None:
            return self.har['log']['entries'][0]['request']
        else:
            return None

    @property
    def response(self):
        """First response entry of the recorded HAR, or None."""
        if self.har is not None:
            return self.har['log']['entries'][0]['response']
        else:
            return None

    @property
    def headers(self):
        """Response headers as a name->value dict, or None."""
        if self.response is not None:
            headers = {}
            for header_field in self.response['headers']:
                headers[header_field['name']] = header_field['value']
            return headers
        else:
            return None

    @property
    def status_code(self):
        '''
        Integer Code of responded HTTP Status, e.g. 404 or 200.
        '''
        if self.response is not None:
            return self.response['status']
        else:
            return None
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :

"""A few checks at the VERA Fingervein database.
"""

import os
import numpy

from . import Database, PADDatabase
from .create import VERAFINGER_PATH

import nose.tools
from nose.plugins.skip import SkipTest


def sql3_available(test):
    """Decorator for detecting if the sql3 file is available"""
    from bob.io.base.test_utils import datafile
    from nose.plugins.skip import SkipTest
    import functools

    @functools.wraps(test)
    def wrapper(*args, **kwargs):
        dbfile = datafile("db.sql3", __name__, None)
        if os.path.exists(dbfile):
            return test(*args, **kwargs)
        else:
            raise SkipTest("The interface SQL file (%s) is not available; did you forget to run 'bob_dbmanage.py %s create' ?" % (dbfile, 'vera'))

    return wrapper


def db_available(path):
    """Decorator for detecting if the database files are available"""

    def decorator(test):
        from bob.io.base.test_utils import datafile
        from nose.plugins.skip import SkipTest
        import functools

        @functools.wraps(test)
        def wrapper(*args, **kwargs):
            if os.path.exists(path):
                return test(*args, **kwargs)
            else:
                raise SkipTest("The database path (%s) is not available" % (path,))

        return wrapper

    return decorator


@sql3_available
def test_counts_bio():
    # test whether the correct number of clients is returned
    db = Database()

    nose.tools.eq_(db.groups(), ('train', 'dev'))

    protocols = db.protocol_names()
    nose.tools.eq_(len(protocols), 8)
    for name in ('Nom', 'Full', 'Fifty', 'B',
                 'Cropped-Nom', 'Cropped-Full', 'Cropped-Fifty', 'Cropped-B'):
        assert name in protocols

    nose.tools.eq_(db.purposes(), ('train', 'enroll', 'probe', 'attack'))
    nose.tools.eq_(db.genders(), ('M', 'F'))
    nose.tools.eq_(db.sides(), ('L', 'R'))

    # test model ids (all protocols)
    nose.tools.eq_(len(db.model_ids()), 440)

    # Per-protocol expectations, identical for the 'Cropped-' variants:
    # protocol -> (#models, #train objects, #dev objects).  In every
    # protocol the dev enroll/probe/attack subsets each match #models.
    expected = {
        'Nom': (220, 0, 660),
        'Fifty': (100, 240, 300),
        'B': (216, 224, 432),
        'Full': (440, 0, 880),
    }
    for base, (n_models, n_train, n_dev) in expected.items():
        for proto in (base, 'Cropped-' + base):
            nose.tools.eq_(len(db.model_ids(protocol=proto)), n_models)
            nose.tools.eq_(len(db.objects(protocol=proto, groups='train')),
                           n_train)
            nose.tools.eq_(len(db.objects(protocol=proto, groups='dev')),
                           n_dev)
            for purpose in ('enroll', 'probe', 'attack'):
                nose.tools.eq_(len(db.objects(protocol=proto, groups='dev',
                                              purposes=purpose)), n_models)

    # make sure that we can filter by model ids
    model_ids = db.model_ids(protocol='Cropped-Full')
    for proto in ('Full', 'Cropped-Full'):
        nose.tools.eq_(len(db.objects(protocol=proto, groups='dev',
            purposes='enroll', model_ids=model_ids[:10])), 10)
        # filtering by model ids on probes, returns all
        nose.tools.eq_(len(db.objects(protocol=proto, groups='dev',
            purposes='probe', model_ids=model_ids[0])), 440)
        # filtering by model ids on attacks, returns all with matching finger
        nose.tools.eq_(len(db.objects(protocol=proto, groups='dev',
            purposes='attack', model_ids=model_ids[0])), 2)


@sql3_available
@db_available(VERAFINGER_PATH)
def test_driver_api():
    from bob.db.base.script.dbmanage import main
    nose.tools.eq_(main('verafinger dumplist --self-test'.split()), 0)
    nose.tools.eq_(main('verafinger dumplist --protocol=Full --group=dev --purpose=enroll --model=101_L_1 --self-test'.split()), 0)
    nose.tools.eq_(main('verafinger dumplist --protocol=Cropped-Full --group=dev --purpose=attack --model=101_L_1 --self-test'.split()), 0)
    nose.tools.eq_(main('verafinger dumplist --protocol=Full --group=dev --purpose=attack --model=101_L_1 --self-test'.split()), 0)
    nose.tools.eq_(main('verafinger dumplist --protocol=Cropped-Full --group=dev --purpose=enroll --model=101_L_1 --self-test'.split()), 0)
    nose.tools.eq_(main('verafinger checkfiles --self-test'.split()), 0)


def _check_images_and_rois(db):
    '''Loads every object of *db*, sanity-checking image and ROI data.

    Shared by test_load and test_load_pad, whose bodies were duplicated.
    '''
    for f in db.objects():

        # loads an image from the database
        image = f.load(VERAFINGER_PATH)
        assert isinstance(image, numpy.ndarray)
        nose.tools.eq_(len(image.shape), 2)  # it is a 2D array
        nose.tools.eq_(image.dtype, numpy.uint8)

        roi = f.roi(VERAFINGER_PATH)
        assert isinstance(roi, numpy.ndarray)
        nose.tools.eq_(len(roi.shape), 2)  # it is a 2D array
        nose.tools.eq_(roi.shape[1], 2)  # two columns
        nose.tools.eq_(roi.dtype, numpy.uint16)
        if f.size == 'full':
            assert len(roi) > 10  # at least 10 points
        else:
            assert len(roi) == 4

        # ensures all annotation points are within image boundary
        Y, X = image.shape
        for y, x in roi:
            assert y < Y, 'Annotation (%d, %d) for %s surpasses the image size (%d, %d)' % (y, x, f.path, Y, X)
            assert x < X, 'Annotation (%d, %d) for %s surpasses the image size (%d, %d)' % (y, x, f.path, Y, X)


@sql3_available
@db_available(VERAFINGER_PATH)
def test_load():
    _check_images_and_rois(Database())


@sql3_available
def test_model_id_to_finger_name_conversion():
    db = Database()
    for f in db.objects():
        assert len(db.finger_name_from_model_id(f.model_id)) == 5


@sql3_available
@db_available(VERAFINGER_PATH)
def test_load_pad():
    _check_images_and_rois(PADDatabase())


@sql3_available
@db_available(VERAFINGER_PATH)
def test_driver_api_pad():
    from bob.db.base.script.dbmanage import main
    nose.tools.eq_(main('verafinger dumppadlist --self-test'.split()), 0)
    nose.tools.eq_(main('verafinger dumppadlist --protocol=full --group=dev --self-test'.split()), 0)
    nose.tools.eq_(main('verafinger dumppadlist --protocol=cropped --group=eval --self-test'.split()), 0)


@sql3_available
def test_counts_pad():
    # test whether the correct number of clients is returned
    db = PADDatabase()

    nose.tools.eq_(db.groups(), ('train', 'dev', 'eval'))

    protocols = db.protocol_names()
    nose.tools.eq_(len(protocols), 2)
    assert 'full' in protocols
    assert 'cropped' in protocols

    nose.tools.eq_(db.genders(), ('M', 'F'))
    nose.tools.eq_(db.sides(), ('L', 'R'))

    def _fingers_in_group(protocol, group):
        '''Returns a unique list of clients/fingers in the group as a set'''
        files = db.objects(protocol=protocol, groups=group)
        return set([k.finger.client.id for k in files]), \
            set([k.finger.unique_name for k in files])

    def _check_proto(name):
        '''Runs a full check on a given protocol'''

        # test database sizes: half 'real', half 'attack' in every group
        expected = {'train': 240, 'dev': 240, 'eval': 400}
        for group, total in expected.items():
            nose.tools.eq_(len(db.objects(protocol=name, groups=group)),
                           total)
            nose.tools.eq_(len(db.objects(protocol=name, groups=group,
                purposes='real')), total // 2)
            nose.tools.eq_(len(db.objects(protocol=name, groups=group,
                purposes='attack')), total // 2)

        train_clients, train_fingers = _fingers_in_group(name, 'train')
        dev_clients, dev_fingers = _fingers_in_group(name, 'dev')
        eval_clients, eval_fingers = _fingers_in_group(name, 'eval')

        # Test individual counts on clients and fingers
        nose.tools.eq_(len(train_clients), 30)
        nose.tools.eq_(len(train_fingers), 60)
        nose.tools.eq_(len(dev_clients), 30)
        nose.tools.eq_(len(dev_fingers), 60)
        nose.tools.eq_(len(eval_clients), 50)
        nose.tools.eq_(len(eval_fingers), 100)

        # groups must not share clients or fingers
        nose.tools.eq_(train_clients.intersection(dev_clients), set())
        nose.tools.eq_(train_clients.intersection(eval_clients), set())
        nose.tools.eq_(dev_clients.intersection(eval_clients), set())
        nose.tools.eq_(train_fingers.intersection(dev_fingers), set())
        nose.tools.eq_(train_fingers.intersection(eval_fingers), set())
        # FIX: the original compared dev *fingers* with eval *clients*
        # (always disjoint by construction), hiding possible finger overlap.
        nose.tools.eq_(dev_fingers.intersection(eval_fingers), set())

    _check_proto('full')
    _check_proto('cropped')
from typing import TYPE_CHECKING

from sqlalchemy import BigInteger, Column, ForeignKey, Integer

from .base import Base

if TYPE_CHECKING:  # pragma: no cover
    from .game import Game  # noqa
    from .user import User  # noqa


class Play(Base):
    """Records of a user's game plays.

    Association table between users and games: the composite primary key
    (user_xid, game_id) means one row per (user, game) pair.  Rows are
    removed automatically when the referenced user or game is deleted
    (ondelete="CASCADE" on both foreign keys).
    """

    __tablename__ = "plays"
    # Part 1 of the composite primary key: the player's Discord snowflake ID.
    user_xid = Column(
        BigInteger,
        ForeignKey("users.xid", ondelete="CASCADE"),
        primary_key=True,
        nullable=False,
        doc="The external Discord ID of the user who played a game",
    )
    # Part 2 of the composite primary key; indexed for lookups by game.
    game_id = Column(
        Integer,
        ForeignKey("games.id", ondelete="CASCADE"),
        primary_key=True,
        nullable=False,
        index=True,
        doc="The SpellBot game ID of the game the user played",
    )
    # Nullable: a play may exist before any points have been reported.
    points = Column(
        Integer,
        nullable=True,
        doc="The number of points reported by the user",
    )
def area(largura, comprimeto):
    """Print the area of a rectangular plot of land.

    Args:
        largura: width of the plot in meters.
        comprimeto: length of the plot in meters (name kept as in the
            original positional interface).

    Returns:
        None; the result is printed, not returned.
    """
    # BUG FIX: the original computed ``a = l * c`` using the module-level
    # globals instead of the function's own parameters, so the printed
    # dimensions and the computed area could disagree.
    a = largura * comprimeto
    print(f'O tamanho do terreno de {largura}x{comprimeto} é de {a} m²')


if __name__ == '__main__':
    # Interactive entry point guarded so importing this module does not
    # block on input() prompts.
    print('-' * 20)
    print('Tamanho do terreno')
    l = float(input('Largura[m]: '))
    c = float(input('Comprimento[m]: '))
    area(l, c)
#python 3.5.2
import sys


def bounding_square_area(points):
    """Return the area of the smallest axis-aligned square covering points.

    Args:
        points: non-empty iterable of (x, y) integer pairs.

    Returns:
        The square of the larger bounding-box side, i.e.
        max(max_x - min_x, max_y - min_y) ** 2.
    """
    xs = [p[0] for p in points]
    ys = [p[1] for p in points]
    side = max(max(xs) - min(xs), max(ys) - min(ys))
    return side ** 2


def main():
    """Read "<n>" then n "x y" lines per test case from stdin; print areas."""
    while True:
        line = sys.stdin.readline()
        if not line:  # EOF terminates the loop
            break
        num = int(line.split()[0])
        points = []
        for _ in range(num):
            coords = list(map(int, sys.stdin.readline().split()))
            points.append((coords[0], coords[1]))
        # NOTE(review): like the original, assumes num >= 1 per test case.
        print(bounding_square_area(points))


if __name__ == '__main__':
    # Guarded so the computation can be imported and tested without
    # consuming stdin.
    main()
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import numpy as np
from op_test import OpTest
import math
import random
import paddle
import paddle.fluid as fluid
from paddle.fluid import core
from paddle.fluid import Program, program_guard


def stable_softmax_comm(x):
    """Return the numerically-stable log-softmax of 1-D array x.

    Shifts by max(x) before exponentiating to avoid overflow; the result is
    log(softmax(x)), reused by margin_cross_entropy below.
    """
    shiftx = (x - np.max(x))
    deno = np.log(np.sum(np.exp(shiftx)))
    comm = shiftx - deno
    return comm


def margin_cross_entropy(logits, label, axis, margin1, margin2, margin3,
                         scale, reduction=None):
    """NumPy reference implementation of margin-based softmax cross entropy.

    Applies the combined-margin formula cos(margin1 * theta + margin2) -
    margin3 to the target-class logit (which must already be a cosine
    similarity in [-1, 1]), scales, and computes softmax cross entropy.

    Args:
        logits: (batch, num_class) cosine logits.
        label: (batch,) integer class indices.
        axis: softmax axis (tests use -1).
        margin1, margin2, margin3: ArcFace/SphereFace/CosFace margins.
        scale: logit scale factor applied after the margin.
        reduction: None (per-sample loss), 'mean', or 'sum'.

    Returns:
        (loss, softmax) as NumPy arrays (loss is scalar when reduced).
    """
    one_hot_label = np.zeros_like(logits, dtype=logits.dtype)
    for i, lb in enumerate(label):
        one_hot_label[i, lb] = 1.0

    # add arcface margin to logit
    theta = np.arccos(logits)
    if margin1 != 1.0:
        theta = margin1 * theta
    if margin2 != 0.0:
        theta = theta + margin2
    margin_cos = np.cos(theta)
    if margin3 != 0.0:
        margin_cos = margin_cos - margin3
    # Only the target-class entry differs from the raw logits.
    diff = one_hot_label * (margin_cos - logits)
    arc_logits = (logits + diff) * scale

    comm = np.apply_along_axis(stable_softmax_comm, axis, arc_logits)
    loss = (-one_hot_label * comm).sum(axis=axis, keepdims=True)
    softmax = np.exp(comm)
    if reduction == 'mean':
        loss = np.mean(loss)
    elif reduction == 'sum':
        loss = np.sum(loss)
    return loss, softmax


# Baseline OpTest case: float64, ArcFace margins (m1=1, m2=0.5, m3=0).
@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestMarginCrossEntropyOp(OpTest):
    def initParams(self):
        self.op_type = "margin_cross_entropy"
        self.axis = -1
        self.batch_dim = 5
        self.feat_dim = 41
        self.num_class = 37

    def init_loss_params(self):
        self.margin1 = 1.0
        self.margin2 = 0.5
        self.margin3 = 0.0
        self.scale = 2.0

    def init_dtype(self):
        self.dtype = np.float64

    def setUp(self):
        self.initParams()
        self.init_loss_params()
        self.init_dtype()

        # L2-normalize features and weights so logits are cosines in [-1, 1],
        # as required by the arccos in the reference implementation.
        datas = np.random.uniform(
            -0.99, 0.99, [self.batch_dim, self.feat_dim]).astype(self.dtype)
        datas = datas / np.sqrt(np.sum(np.square(datas), axis=1,
                                       keepdims=True))
        weights = np.random.uniform(
            -0.99, 0.99, [self.feat_dim, self.num_class]).astype(self.dtype)
        weights = weights / np.sqrt(
            np.sum(np.square(weights), axis=0, keepdims=True))
        logits = np.matmul(datas, weights)

        labels = np.random.randint(0, self.num_class, (self.batch_dim, ),
                                   dtype="int64")

        loss, softmax = margin_cross_entropy(logits, labels, self.axis,
                                             self.margin1, self.margin2,
                                             self.margin3, self.scale)

        self.inputs = {"Logits": logits, "Label": labels}
        self.outputs = {
            "Softmax": softmax.astype(self.dtype),
            "Loss": loss.astype(self.dtype)
        }
        self.attrs = {
            'margin1': self.margin1,
            'margin2': self.margin2,
            'margin3': self.margin3,
            'scale': self.scale,
        }

    def test_check_output(self):
        self.check_output_with_place(core.CUDAPlace(0), atol=1e-5)

    def test_check_grad(self):
        self.check_grad_with_place(core.CUDAPlace(0), ["Logits"], "Loss")


# float32 variant: looser gradient tolerances.
@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestMarginCrossEntropyOpFP32(TestMarginCrossEntropyOp):
    def init_dtype(self):
        self.dtype = np.float32

    def test_check_grad(self):
        self.check_grad_with_place(core.CUDAPlace(0), ["Logits"],
                                   "Loss",
                                   numeric_grad_delta=5e-2,
                                   max_relative_error=5e-2)


# float16 variant: very loose tolerances.
@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestMarginCrossEntropyOpFP16(TestMarginCrossEntropyOp):
    def init_dtype(self):
        self.dtype = np.float16

    def test_check_output(self):
        self.check_output_with_place(core.CUDAPlace(0), atol=5e-2)

    def test_check_grad(self):
        self.check_grad_with_place(core.CUDAPlace(0), ["Logits"],
                                   "Loss",
                                   numeric_grad_delta=6e-1,
                                   max_relative_error=6e-1)


# CosFace margins: additive cosine margin m3 only.
@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestMarginCrossEntropyOpCosFace(TestMarginCrossEntropyOp):
    def init_loss_params(self):
        self.margin1 = 1.0
        self.margin2 = 0.0
        self.margin3 = 0.35
        self.scale = 2.0


# SphereFace margins: multiplicative angular margin m1 only.
@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestMarginCrossEntropyOpSphereFace(TestMarginCrossEntropyOp):
    def init_loss_params(self):
        self.margin1 = 1.35
        self.margin2 = 0.0
        self.margin3 = 0.0
        self.scale = 2.0


# CPU is expected to be unsupported: a RuntimeError is tolerated.
class TestMarginCrossEntropyOpCPU(TestMarginCrossEntropyOp):
    def test_check_output(self):
        try:
            self.check_output_with_place(core.CPUPlace(), atol=1e-5)
        except RuntimeError:
            pass

    def test_check_grad(self):
        try:
            self.check_grad_with_place(core.CPUPlace(), ["Logits"], "Loss")
        except RuntimeError:
            pass


# Functional-API tests: compare paddle.nn.functional.margin_cross_entropy
# against the NumPy reference, in both static-graph and dygraph modes.
@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestMarginCrossEntropyOpV2(unittest.TestCase):
    def setUp(self):
        self.initParams()
        np.random.seed(self.seed)
        paddle.framework.random._manual_program_seed(self.seed)
        self.places = []
        if core.is_compiled_with_cuda():
            self.places.append(paddle.fluid.CUDAPlace(0))

    def initParams(self):
        self.seed = 2021
        self.axis = -1
        self.batch_dim = 5
        self.feat_dim = 41
        self.num_class = 37
        self.init_loss_params()
        self.init_dtype()
        self.init_reduction()

    def init_loss_params(self):
        self.margin1 = 1.0
        self.margin2 = 0.5
        self.margin3 = 0.0
        self.scale = 2.0

    def init_dtype(self):
        self.dtype = np.float64

    def init_reduction(self):
        self.reduction = None

    def test_static(self):
        for place in self.places:
            self.check_static_result(place=place)

    def check_static_result(self, place):
        with program_guard(Program(), Program()):
            # Same normalized-cosine setup as the OpTest cases above.
            datas = np.random.uniform(
                -0.99, 0.99,
                [self.batch_dim, self.feat_dim]).astype(self.dtype)
            datas = datas / np.sqrt(
                np.sum(np.square(datas), axis=1, keepdims=True))
            weights = np.random.uniform(
                -0.99, 0.99,
                [self.feat_dim, self.num_class]).astype(self.dtype)
            weights = weights / np.sqrt(
                np.sum(np.square(weights), axis=0, keepdims=True))

            logits_np = np.matmul(datas, weights)
            labels_np = np.random.randint(0, self.num_class,
                                          (self.batch_dim, ), dtype="int64")

            loss_np, softmax_np = margin_cross_entropy(
                logits_np, labels_np, self.axis, self.margin1, self.margin2,
                self.margin3, self.scale, self.reduction)

            logits = paddle.static.data(
                name='logits',
                shape=[self.batch_dim, self.num_class],
                dtype=self.dtype)
            label = paddle.static.data(
                name='label', shape=[self.batch_dim], dtype="int64")
            loss, softmax = paddle.nn.functional.margin_cross_entropy(
                logits,
                label,
                margin1=self.margin1,
                margin2=self.margin2,
                margin3=self.margin3,
                scale=self.scale,
                return_softmax=True,
                reduction=self.reduction)

            exe = paddle.fluid.Executor(place)
            [loss_res, softmax_res] = exe.run(
                paddle.fluid.default_main_program(),
                feed={
                    'logits': logits_np,
                    'label': labels_np
                },
                fetch_list=[loss, softmax])
            np.testing.assert_allclose(loss_res, loss_np)
            np.testing.assert_allclose(softmax_res, softmax_np)

    def test_dynamic(self):
        for place in self.places:
            self.check_dynamic_result(place=place)

    def check_dynamic_result(self, place):
        with paddle.fluid.dygraph.guard(place):
            datas = np.random.uniform(
                -0.99, 0.99,
                [self.batch_dim, self.feat_dim]).astype(self.dtype)
            datas = datas / np.sqrt(
                np.sum(np.square(datas), axis=1, keepdims=True))
            weights = np.random.uniform(
                -0.99, 0.99,
                [self.feat_dim, self.num_class]).astype(self.dtype)
            weights = weights / np.sqrt(
                np.sum(np.square(weights), axis=0, keepdims=True))

            logits_np = np.matmul(datas, weights)
            labels_np = np.random.randint(0, self.num_class,
                                          (self.batch_dim, ), dtype="int64")

            loss_np, softmax_np = margin_cross_entropy(
                logits_np, labels_np, self.axis, self.margin1, self.margin2,
                self.margin3, self.scale, self.reduction)

            logits = paddle.to_tensor(logits_np, dtype=self.dtype)
            labels = paddle.to_tensor(labels_np, dtype="int64")

            loss, softmax = paddle.nn.functional.margin_cross_entropy(
                logits,
                labels,
                margin1=self.margin1,
                margin2=self.margin2,
                margin3=self.margin3,
                scale=self.scale,
                return_softmax=True,
                reduction=self.reduction)

            loss_res = loss.numpy()
            softmax_res = softmax.numpy()
            np.testing.assert_allclose(loss_res, loss_np)
            np.testing.assert_allclose(softmax_res, softmax_np)


# Same functional tests with 'mean' reduction.
@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestMarginCrossEntropyOpV3(TestMarginCrossEntropyOpV2):
    def init_reduction(self):
        self.reduction = 'mean'


# Same functional tests with 'sum' reduction.
@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestMarginCrossEntropyOpV4(TestMarginCrossEntropyOpV2):
    def init_reduction(self):
        self.reduction = 'sum'


# Error-path tests: wrong label rank, wrong label dtype, invalid group.
@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestMarginCrossEntropyOpAPIError(unittest.TestCase):
    def setUp(self):
        self.initParams()
        np.random.seed(self.seed)
        paddle.framework.random._manual_program_seed(self.seed)
        self.places = []
        if core.is_compiled_with_cuda():
            self.places.append(paddle.fluid.CUDAPlace(0))

    def initParams(self):
        self.seed = 2021
        self.axis = -1
        self.batch_dim = 10
        self.feat_dim = 41
        self.num_class = 37
        self.init_loss_params()
        self.init_dtype()

    def init_loss_params(self):
        self.margin1 = 1.0
        self.margin2 = 0.5
        self.margin3 = 0.0
        self.scale = 2.0

    def init_dtype(self):
        self.dtype = np.float64

    def test_dynamic_errors(self):
        def test_dim():
            # 2-D labels are invalid: expect ValueError.
            for place in self.places:
                with paddle.fluid.dygraph.guard(place):
                    labels_np = np.random.randint(0, self.num_class,
                                                  (self.batch_dim, 2),
                                                  dtype="int64")
                    logits_np = np.random.uniform(
                        -0.99, 0.99,
                        [self.batch_dim, self.num_class]).astype(self.dtype)
                    labels = paddle.to_tensor(labels_np)
                    logits = paddle.to_tensor(logits_np)

                    loss, softmax = paddle.nn.functional.margin_cross_entropy(
                        logits,
                        labels,
                        margin1=self.margin1,
                        margin2=self.margin2,
                        margin3=self.margin3,
                        scale=self.scale,
                        return_softmax=True,
                        reduction=None)

        def test_label_type():
            # Floating-point labels are invalid: expect NotImplementedError.
            for place in self.places:
                with paddle.fluid.dygraph.guard(place):
                    labels_np = np.random.uniform(
                        0, self.num_class,
                        (self.batch_dim, 1)).astype(self.dtype)
                    logits_np = np.random.uniform(
                        -0.99, 0.99,
                        [self.batch_dim, self.num_class]).astype(self.dtype)
                    labels = paddle.to_tensor(labels_np)
                    logits = paddle.to_tensor(logits_np)

                    loss, softmax = paddle.nn.functional.margin_cross_entropy(
                        logits,
                        labels,
                        margin1=self.margin1,
                        margin2=self.margin2,
                        margin3=self.margin3,
                        scale=self.scale,
                        return_softmax=True,
                        reduction=None)

        def test_group_value():
            # group=True without a parallel group is invalid: expect ValueError.
            for place in self.places:
                with paddle.fluid.dygraph.guard(place):
                    labels_np = np.random.randint(0, self.num_class,
                                                  (self.batch_dim, ),
                                                  dtype="int64")
                    logits_np = np.random.uniform(
                        -0.99, 0.99,
                        [self.batch_dim, self.num_class]).astype(self.dtype)
                    labels = paddle.to_tensor(labels_np)
                    logits = paddle.to_tensor(logits_np)

                    loss, softmax = paddle.nn.functional.margin_cross_entropy(
                        logits,
                        labels,
                        margin1=self.margin1,
                        margin2=self.margin2,
                        margin3=self.margin3,
                        scale=self.scale,
                        return_softmax=True,
                        reduction=None,
                        group=True)

        self.assertRaises(ValueError, test_dim)
        self.assertRaises(NotImplementedError, test_label_type)
        self.assertRaises(ValueError, test_group_value)


if __name__ == '__main__':
    unittest.main()
def igcd(a, b): if a > b: s = b if b > a: s = a for i in range(1, s + 1): if a % i == 0 and b % i == 0: gcd = i return gcd class Gcd: def __init__(self, a, b): self.a = a self.b = b def __call__(self): if self.a > self.b: s = self.b if self.b > self.a: s = self.a for i in range(1, s+1): if self.a % i == 0 and self.b % i == 0: gcd = i return gcd def tester(): num = input("Imperative [i] or OOP [o]") try: if num == 'i': print("The gcd of 60 and 48 is : ",end="") print(igcd(60,48)) elif num == 'o': f = Gcd(60,48) print("The gcd of 60 and 48 is : ",end="") print(f()) except: print("Sorry, something went wrong") if __name__ == "__main__": tester()
from selfusepy.log import Logger


def log_test():
    """Emit one info-level message through a freshly built selfusepy logger."""
    Logger().logger.info('sixth')
# -*-coding:utf-8-*-
import time
import logging
import logging.config
from logging.handlers import TimedRotatingFileHandler
import os
from uuid import uuid1
from flask import request, g
from flask_login import current_user
from apps.configs.sys_config import LOG_PATH, WEBLOG_NORMAL_FILENAME, WEBLOG_EXCEP_FILENAME, LOG_FORMATTER, \
    WEBLOG_EXCEP_LEVEL, WEBLOG_NORMAL_LEVEL, WEBLOG_START_FILENAME

__author__ = 'Allen Woo'


class WebLogger:
    """Per-request web logging for a Flask app.

    init_app() installs before/teardown request hooks that time each request
    and write one line per request to a 'normal' log, plus exceptions to an
    'error' log.  NOTE(review): request state is kept in the module-level
    global _weblog_g, which is shared between concurrent requests — confirm
    the deployment is single-threaded per worker.
    """

    def __init__(self):
        # Borrow the handler-construction helper from Logger below.
        self.set_logger = Logger().set_logger

    def init_app(self, app):
        """Register request-logging hooks on the given Flask app."""
        filename = os.path.abspath(
            "{}/{}".format(LOG_PATH, WEBLOG_NORMAL_FILENAME))
        normal_log, handler_normal = self.set_logger(
            WEBLOG_NORMAL_LEVEL, filename, 'web_normal', LOG_FORMATTER)
        filename = os.path.abspath(
            "{}/{}".format(LOG_PATH, WEBLOG_EXCEP_FILENAME))
        error_log, handler_error = self.set_logger(
            WEBLOG_EXCEP_LEVEL, filename, 'web_error', LOG_FORMATTER)

        @app.before_request
        def before_request_log():
            """
            BEFORE REQUEST: start timing and record request metadata.
            :return:
            """
            global _weblog_g
            _weblog_g = {"log": {}}
            st = time.time()
            # "{}{}".format(st, randint(1, 1000000))
            _weblog_g["log"]['request_id'] = uuid1()
            g.weblog_id = _weblog_g["log"]['request_id']
            _weblog_g["log"]['st'] = st
            _weblog_g["log"]['ip'] = request.remote_addr
            _weblog_g["log"]['url'] = request.url
            if current_user.is_authenticated:
                _weblog_g["log"]['user_id'] = current_user.str_id

        @app.teardown_request
        def teardown_request_log(exception):
            """
            Teardown request: log elapsed time; log the exception if any.
            :param exception:
            :return:
            """
            try:
                _weblog_g["log"]["method"] = request.c_method
                _weblog_g["log"]['u_t_m'] = "{} ms".format(
                    (time.time() - _weblog_g["log"]['st']) * 1000)
                normal_log.info("[api|view] {}".format(_weblog_g["log"]))
                if exception:
                    error_log.error(_weblog_g["log"])
                    error_log.exception(exception)
            except Exception as e:
                # Logging must never take the request down; record the
                # logger's own failure instead.
                _weblogger_error = {"type": "weblogger error",
                                    "exceptione": e}
                error_log.error(_weblogger_error)

    def start_log(self):
        """
        Build the application start-up logger.
        :return: logger obj
        """
        filename = os.path.abspath(
            "{}/{}".format(LOG_PATH, WEBLOG_START_FILENAME))
        sys_start_log, handler_start = self.set_logger(
            set_level=logging.INFO,
            logfile=filename,
            get_log_name='sys_start',
            formatter=LOG_FORMATTER)
        return sys_start_log


class Logger:
    """Factory for named loggers backed by daily-rotating file handlers."""

    def set_logger(
            self,
            set_level=logging.INFO,
            logfile="{}.log".format(
                time.time()),
            get_log_name='logger',
            formatter='%(asctime)s %(levelname)s %(message)s'):
        # Make sure the log directory exists before attaching the handler.
        if not os.path.exists(os.path.split(logfile)[0]):
            os.makedirs(os.path.split(logfile)[0])
        # Rotate at midnight every day; keep at most 7 backups.
        file_handler = TimedRotatingFileHandler(logfile, "midnight", 1, 7)
        file_handler.suffix = "%Y-%m-%d"
        # According to the size
        # file_handler = RotatingFileHandler(filename, maxBytes=10*1024*1024, backupCount=3)
        file_handler.setLevel(set_level)
        _formatter = logging.Formatter(formatter)
        file_handler.setFormatter(_formatter)
        logging.getLogger('{}'.format(get_log_name)).addHandler(file_handler)
        logging.getLogger('{}'.format(get_log_name)).setLevel(logging.INFO)
        logg = logging.getLogger(get_log_name)
        return logg, file_handler


# Module-level start-up logger, created on import.
web_start_log = WebLogger().start_log()
# Copyright (c) 2019 Ultimaker B.V.
# Cura is released under the terms of the LGPLv3 or higher.

from typing import Callable, Dict, List, Optional, TYPE_CHECKING

from PyQt5.QtCore import pyqtSlot, pyqtProperty, pyqtSignal, QObject, QTimer

from UM.i18n import i18nCatalog
from UM.Logger import Logger
from UM.Util import parseBool
from UM.OutputDevice.OutputDeviceManager import ManualDeviceAdditionAttempt

if TYPE_CHECKING:
    from UM.OutputDevice.OutputDevicePlugin import OutputDevicePlugin
    from cura.CuraApplication import CuraApplication
    from cura.PrinterOutput.NetworkedPrinterOutputDevice import NetworkedPrinterOutputDevice

catalog = i18nCatalog("cura")


class DiscoveredPrinter(QObject):
    """QObject wrapper for one printer found on the network.

    Exposes the printer's address, name, machine type, and output device as
    Qt properties for QML; create_callback adds the printer to Cura.
    """

    def __init__(self, ip_address: str, key: str, name: str, create_callback: Callable[[str], None], machine_type: str,
                 device: "NetworkedPrinterOutputDevice", parent: Optional["QObject"] = None) -> None:
        super().__init__(parent)

        self._ip_address = ip_address
        self._key = key
        self._name = name
        self.create_callback = create_callback
        self._machine_type = machine_type
        self._device = device

    nameChanged = pyqtSignal()

    def getKey(self) -> str:
        return self._key

    @pyqtProperty(str, notify = nameChanged)
    def name(self) -> str:
        return self._name

    def setName(self, name: str) -> None:
        # Only emit the changed signal on an actual change.
        if self._name != name:
            self._name = name
            self.nameChanged.emit()

    @pyqtProperty(str, constant = True)
    def address(self) -> str:
        return self._ip_address

    machineTypeChanged = pyqtSignal()

    @pyqtProperty(str, notify = machineTypeChanged)
    def machineType(self) -> str:
        return self._machine_type

    def setMachineType(self, machine_type: str) -> None:
        if self._machine_type != machine_type:
            self._machine_type = machine_type
            self.machineTypeChanged.emit()

    # Checks if the given machine type name in the available machine list.
    # The machine type is a code name such as "ultimaker_3", while the machine type name is the human-readable name of
    # the machine type, which is "Ultimaker 3" for "ultimaker_3".
    def _hasHumanReadableMachineTypeName(self, machine_type_name: str) -> bool:
        from cura.CuraApplication import CuraApplication
        results = CuraApplication.getInstance().getContainerRegistry().findDefinitionContainersMetadata(name = machine_type_name)
        return len(results) > 0

    # Human readable machine type string
    @pyqtProperty(str, notify = machineTypeChanged)
    def readableMachineType(self) -> str:
        from cura.CuraApplication import CuraApplication
        machine_manager = CuraApplication.getInstance().getMachineManager()
        # In NetworkOutputDevice, when it updates a printer information, it updates the machine type using the field
        # "machine_variant", and for some reason, it's not the machine type ID/codename/... but a human-readable string
        # like "Ultimaker 3". The code below handles this case.
        if self._hasHumanReadableMachineTypeName(self._machine_type):
            readable_type = self._machine_type
        else:
            readable_type = self._getMachineTypeNameFromId(self._machine_type)
            if not readable_type:
                readable_type = catalog.i18nc("@label", "Unknown")
        return readable_type

    @pyqtProperty(bool, notify = machineTypeChanged)
    def isUnknownMachineType(self) -> bool:
        # Unknown when neither the human-readable name nor the ID resolves.
        if self._hasHumanReadableMachineTypeName(self._machine_type):
            readable_type = self._machine_type
        else:
            readable_type = self._getMachineTypeNameFromId(self._machine_type)
        return not readable_type

    def _getMachineTypeNameFromId(self, machine_type_id: str) -> str:
        # Resolve a machine type ID (e.g. "ultimaker_3") to its display name.
        machine_type_name = ""
        from cura.CuraApplication import CuraApplication
        results = CuraApplication.getInstance().getContainerRegistry().findDefinitionContainersMetadata(id = machine_type_id)
        if results:
            machine_type_name = results[0]["name"]
        return machine_type_name

    @pyqtProperty(QObject, constant = True)
    def device(self) -> "NetworkedPrinterOutputDevice":
        return self._device

    @pyqtProperty(bool, constant = True)
    def isHostOfGroup(self) -> bool:
        return getattr(self._device, "clusterSize", 1) > 0

    @pyqtProperty(str, constant = True)
    def sectionName(self) -> str:
        # Section header shown in the QML list for this printer.
        if self.isUnknownMachineType or not self.isHostOfGroup:
            return catalog.i18nc("@label", "The printer(s) below cannot be connected because they are part of a group")
        else:
            return catalog.i18nc("@label", "Available networked printers")


#
# Discovered printers are all the printers that were found on the network, which provide a more convenient way
# to add networked printers (Plugin finds a bunch of printers, user can select one from the list, plugin can then
# add that printer to Cura as the active one).
#
class DiscoveredPrintersModel(QObject):

    def __init__(self, application: "CuraApplication", parent: Optional["QObject"] = None) -> None:
        super().__init__(parent)

        self._application = application
        # Keyed by IP address; one entry per discovered printer.
        self._discovered_printer_by_ip_dict = dict()  # type: Dict[str, DiscoveredPrinter]

        # State for the single in-flight "add by address" request.
        self._plugin_for_manual_device = None  # type: Optional[OutputDevicePlugin]
        self._manual_device_address = ""

        self._manual_device_request_timeout_in_seconds = 5  # timeout for adding a manual device in seconds
        self._manual_device_request_timer = QTimer()
        self._manual_device_request_timer.setInterval(self._manual_device_request_timeout_in_seconds * 1000)
        self._manual_device_request_timer.setSingleShot(True)
        self._manual_device_request_timer.timeout.connect(self._onManualRequestTimeout)

    discoveredPrintersChanged = pyqtSignal()

    @pyqtSlot(str)
    def checkManualDevice(self, address: str) -> None:
        # Hand the address to the most suitable output-device plugin; only
        # one manual request may be in flight at a time.
        if self.hasManualDeviceRequestInProgress:
            Logger.log("i", "A manual device request for address [%s] is still in progress, do nothing",
                       self._manual_device_address)
            return

        priority_order = [
            ManualDeviceAdditionAttempt.PRIORITY,
            ManualDeviceAdditionAttempt.POSSIBLE,
        ]  # type: List[ManualDeviceAdditionAttempt]

        all_plugins_dict = self._application.getOutputDeviceManager().getAllOutputDevicePlugins()

        can_add_manual_plugins = [item for item in filter(
            lambda plugin_item: plugin_item.canAddManualDevice(address) in priority_order,
            all_plugins_dict.values())]

        if not can_add_manual_plugins:
            Logger.log("d", "Could not find a plugin to accept adding %s manually via address.", address)
            return

        # PRIORITY beats POSSIBLE: pick the plugin with the strongest claim.
        plugin = max(can_add_manual_plugins, key = lambda p: priority_order.index(p.canAddManualDevice(address)))
        self._plugin_for_manual_device = plugin
        self._plugin_for_manual_device.addManualDevice(address, callback = self._onManualDeviceRequestFinished)
        self._manual_device_address = address
        self._manual_device_request_timer.start()
        self.hasManualDeviceRequestInProgressChanged.emit()

    @pyqtSlot()
    def cancelCurrentManualDeviceRequest(self) -> None:
        self._manual_device_request_timer.stop()

        if self._manual_device_address:
            if self._plugin_for_manual_device is not None:
                self._plugin_for_manual_device.removeManualDevice(self._manual_device_address, address = self._manual_device_address)
            self._manual_device_address = ""
            self._plugin_for_manual_device = None
            self.hasManualDeviceRequestInProgressChanged.emit()
            self.manualDeviceRequestFinished.emit(False)

    def _onManualRequestTimeout(self) -> None:
        Logger.log("w", "Manual printer [%s] request timed out. Cancel the current request.",
                   self._manual_device_address)
        self.cancelCurrentManualDeviceRequest()

    hasManualDeviceRequestInProgressChanged = pyqtSignal()

    @pyqtProperty(bool, notify = hasManualDeviceRequestInProgressChanged)
    def hasManualDeviceRequestInProgress(self) -> bool:
        # A non-empty address is the marker for an in-flight request.
        return self._manual_device_address != ""

    manualDeviceRequestFinished = pyqtSignal(bool, arguments = ["success"])

    def _onManualDeviceRequestFinished(self, success: bool, address: str) -> None:
        self._manual_device_request_timer.stop()
        if address == self._manual_device_address:
            self._manual_device_address = ""
            self.hasManualDeviceRequestInProgressChanged.emit()
            self.manualDeviceRequestFinished.emit(success)

    @pyqtProperty("QVariantMap", notify = discoveredPrintersChanged)
    def discoveredPrintersByAddress(self) -> Dict[str, DiscoveredPrinter]:
        return self._discovered_printer_by_ip_dict

    @pyqtProperty("QVariantList", notify = discoveredPrintersChanged)
    def discoveredPrinters(self) -> List["DiscoveredPrinter"]:
        # Temporary devices are excluded from the visible list.
        item_list = list(
            x for x in self._discovered_printer_by_ip_dict.values() if not parseBool(x.device.getProperty("temporary")))

        # Split the printers into 2 lists and sort them ascending based on names.
        available_list = []
        not_available_list = []
        for item in item_list:
            if item.isUnknownMachineType or getattr(item.device, "clusterSize", 1) < 1:
                not_available_list.append(item)
            else:
                available_list.append(item)

        available_list.sort(key = lambda x: x.device.name)
        not_available_list.sort(key = lambda x: x.device.name)

        return available_list + not_available_list

    def addDiscoveredPrinter(self, ip_address: str, key: str, name: str, create_callback: Callable[[str], None],
                             machine_type: str, device: "NetworkedPrinterOutputDevice") -> None:
        if ip_address in self._discovered_printer_by_ip_dict:
            Logger.log("e", "Printer with ip [%s] has already been added", ip_address)
            return

        discovered_printer = DiscoveredPrinter(ip_address, key, name, create_callback, machine_type, device, parent = self)
        self._discovered_printer_by_ip_dict[ip_address] = discovered_printer
        self.discoveredPrintersChanged.emit()

    def updateDiscoveredPrinter(self, ip_address: str,
                                name: Optional[str] = None,
                                machine_type: Optional[str] = None) -> None:
        if ip_address not in self._discovered_printer_by_ip_dict:
            Logger.log("w", "Printer with ip [%s] is not known", ip_address)
            return

        item = self._discovered_printer_by_ip_dict[ip_address]

        if name is not None:
            item.setName(name)
        if machine_type is not None:
            item.setMachineType(machine_type)

    def removeDiscoveredPrinter(self, ip_address: str) -> None:
        if ip_address not in self._discovered_printer_by_ip_dict:
            Logger.log("w", "Key [%s] does not exist in the discovered printers list.", ip_address)
            return

        del self._discovered_printer_by_ip_dict[ip_address]
        self.discoveredPrintersChanged.emit()

    # A convenience function for QML to create a machine (GlobalStack) out of the given discovered printer.
    # This function invokes the given discovered printer's "create_callback" to do this.
    @pyqtSlot("QVariant")
    def createMachineFromDiscoveredPrinter(self, discovered_printer: "DiscoveredPrinter") -> None:
        discovered_printer.create_callback(discovered_printer.getKey())
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # # MIT License # # Copyright (c) 2019 Tsutomu Furuse # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import thermal_zone

# Number of samples kept in the rolling plot window.
NUM_BUF_POINTS = 180
# Sampling/redraw period in milliseconds (FuncAnimation interval).
PLOT_INTERVAL = 1000

# Rolling buffer of samples, shape (n_samples, n_zones); initialized in main().
Data = None


def plot(i, zone_paths, zone_names):
    """Animation callback: sample all thermal zones and redraw the chart.

    Args:
        i: frame index supplied by FuncAnimation; used to detect when the
            rolling buffer is full.
        zone_paths: sysfs paths of the thermal zones to sample.
        zone_names: display names for the plot legend.
    """
    global Data
    zone_temps = thermal_zone.get_thermal_zone_temps(zone_paths)
    print(zone_temps)
    # Sysfs reports millidegrees Celsius; convert to degrees.
    zone_temps = [t / 1000.0 for t in zone_temps]
    Data = np.append(Data, np.array([zone_temps]), axis = 0)
    # Once the window is full, drop the oldest sample to keep a fixed width.
    if i >= NUM_BUF_POINTS:
        Data = np.delete(Data, 0, axis = 0)
    plt.cla()
    plt.plot(Data, marker = 'x')
    plt.xlim(0, NUM_BUF_POINTS)
    plt.ylim(0.0, 90.0)
    plt.title('Jetson Thermal Monitor', fontsize = 14)
    plt.xlabel('Sample', fontsize = 10)
    plt.ylabel('Temperature [C]', fontsize = 10)
    plt.tick_params(labelsize=10)
    plt.grid(True)
    plt.legend(labels = zone_names, loc = 'upper left', fontsize = 10)


def main():
    '''
    Plots real-time temperatures from the Jetson on-module thermal sensors
    '''
    global Data
    zone_paths = thermal_zone.get_thermal_zone_paths()
    zone_names = thermal_zone.get_thermal_zone_names(zone_paths)
    print(zone_names)
    # Start with an empty (0, n_zones) buffer; plot() appends rows to it.
    Data = np.empty((0, len(zone_names)), float)
    fig = plt.figure(figsize=(10, 4))
    ani = animation.FuncAnimation(fig, plot, \
        fargs = (zone_paths, zone_names), interval = PLOT_INTERVAL)
    plt.show()


if __name__ == "__main__":
    main()
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 1 2021

@author: growolff

Equations for the motion limits of the fingers.
"""

import numpy as np

# Length of cord that must be pulled to fully close a finger.  It depends on
# the maximum flexion angles of the phalanges and on the radius supporting
# the cord: for the extensor it is the arc radius of the joints (r*theta);
# for the flexor it is the anchor point of the cord on the moving phalanx.


def lArticulacion(th,r1,r2):
    """Return the flexor cord length (rounded to 0.1) for one joint.

    Args:
        th: maximum flexion angle of the joint in degrees.
        r1, r2: geometric radii (mm) of the cord anchor for this joint.
    """
    rad = th*np.pi/180
    return round(np.sqrt(r1**2 + (r2*np.cos(np.pi/2-rad))**2) - r2*np.sin(np.pi/2-rad),1)

# Maximum flexion angles of the phalanges: 1 = MCP, 2 = PIP, 3 = DIP.
# Prefixes: i = index finger, m = middle finger, p = thumb/pinky set.
it = [90, 90, 70]
# Radii used to compute the maximum extensor length.
ir = [7.5, 6.5, 4.5]
# Radii used to compute the maximum flexor length, per joint.
irMcp = [9.3, 7.75]
irPip = [6.75, 6.75]
irDip = [5.75, 5.75]

mt = [90, 90, 70]
mr = [7.5, 6.5, 4.5]
mrMcp = [9.3, 7.75]
mrPip = [6.75, 6.75]
mrDip = [5.75, 5.75]

pt = [70, 65, 0]
pr = [7.5, 6.5, 0]
prMcp = [7.75, 9.5]
prDip = [6.75, 7]

deg2rad = np.pi/180

iAext = []
iAflec= []
mAext = []
mAflec = []
pAext = []
pAflec = []

# Extensor contribution per joint: arc length r * theta (theta in degrees
# here; converted to radians when totals are computed below).
for i in range(len(it)):
    iAext.append(round(it[i]*ir[i],1))
    mAext.append(mt[i]*mr[i])
    pAext.append(pt[i]*pr[i])
    print(round(iAext[i]*deg2rad,1))
    #print(iAext[i]*deg2rad)

# Flexor cord length per joint and total, for each finger.
iAmcpflec = lArticulacion(it[0],irMcp[0],irMcp[1])
iApipflec = lArticulacion(it[1],irPip[0],irPip[1])
iAdipflec = lArticulacion(it[2],irDip[0],irDip[1])
iLtotalFlec = iAmcpflec + iApipflec + iAdipflec
print(iAmcpflec , iApipflec , iAdipflec,iLtotalFlec)

mAmcpflec = lArticulacion(mt[0],mrMcp[0],mrMcp[1])
mApipflec = lArticulacion(mt[1],mrPip[0],mrPip[1])
mAdipflec = lArticulacion(mt[2],mrDip[0],mrDip[1])
mLtotalFlec = mAmcpflec + mApipflec + mAdipflec
print(mAmcpflec,mApipflec,mAdipflec,mLtotalFlec)

pAmcpflec = lArticulacion(pt[0],prMcp[0],prMcp[1])
pAdipflec = lArticulacion(pt[1],prDip[0],prDip[1])
pLtotalFlec = pAmcpflec + pAdipflec
print(pAmcpflec,pAdipflec ,pLtotalFlec)

#print(iLtotalFlec, mLtotalFlec, pLtotalFlec)

# Total extensor length per finger: sum of r*theta terms, in radians.
iLtotalExt = (iAext[0]+iAext[1]+iAext[2])*deg2rad
mLtotalExt = (mAext[0]+mAext[1]+mAext[2])*deg2rad
pLtotalExt = (pAext[0]+pAext[1]+pAext[2])*deg2rad
print(iLtotalExt, mLtotalExt, pLtotalExt)
#coding=utf-8
# Fashion-MNIST classification with a SimpleRNN model (TF1-era Keras API).
# The script trains the network, evaluates top-1/top-2 accuracy on the test
# set, and saves loss/accuracy curves plus example predictions as PNGs.
import numpy as np
import functools
import tensorflow as tf
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
import matplotlib.pyplot as plt
plt.switch_backend('agg')  # headless backend: figures can only be saved, not shown


class LossHistory(keras.callbacks.Callback):
    """Keras callback recording loss/accuracy metrics per batch and per
    epoch so they can be plotted afterwards via ``loss_plot``."""

    def on_train_begin(self, logs={}):
        # NOTE(review): `logs={}` mutable defaults are a Python pitfall, but
        # Keras passes an explicit logs dict on every call, so the shared
        # default is never mutated in practice.
        # One history list per metric, keyed by granularity ('batch'/'epoch').
        self.losses = {'batch': [], 'epoch': []}
        self.accuracy = {'batch': [], 'epoch': []}
        self.top2_acc = {'batch': [], 'epoch': []}
        self.val_loss = {'batch': [], 'epoch': []}
        self.val_acc = {'batch': [], 'epoch': []}
        self.val_top2_acc = {'batch': [], 'epoch': []}

    def on_batch_end(self, batch, logs={}):
        # Record training metrics after every batch.  Validation metrics are
        # presumably absent from per-batch logs, in which case `logs.get`
        # appends None -- confirm before plotting batch-level val curves.
        self.losses['batch'].append(logs.get('loss'))
        self.accuracy['batch'].append(logs.get('acc'))
        self.top2_acc['batch'].append(logs.get('top2_acc'))
        self.val_loss['batch'].append(logs.get('val_loss'))
        self.val_acc['batch'].append(logs.get('val_acc'))
        self.val_top2_acc['batch'].append(logs.get('val_top2_acc'))

    def on_epoch_end(self, batch, logs={}):
        # Record both training and validation metrics after every epoch.
        self.losses['epoch'].append(logs.get('loss'))
        self.accuracy['epoch'].append(logs.get('acc'))
        self.top2_acc['epoch'].append(logs.get('top2_acc'))
        self.val_loss['epoch'].append(logs.get('val_loss'))
        self.val_acc['epoch'].append(logs.get('val_acc'))
        self.val_top2_acc['epoch'].append(logs.get('val_top2_acc'))

    def loss_plot(self, loss_type, model_type):
        """Plot the recorded curves for ``loss_type`` ('batch' or 'epoch')
        and save the figure under the result folder for ``model_type``."""
        iters = range(len(self.losses[loss_type]))
        plt.figure()
        # acc
        plt.plot(iters, self.accuracy[loss_type], 'r', label='train acc')
        # loss
        plt.plot(iters, self.losses[loss_type], 'g', label='train loss')
        # top2_acc
        plt.plot(iters, self.top2_acc[loss_type], 'c', label='train top2 acc')
        if loss_type == 'epoch':
            # Validation curves are only recorded per epoch (see above).
            # val_acc
            plt.plot(iters, self.val_acc[loss_type], 'b', label='val acc')
            # val_loss
            plt.plot(iters, self.val_loss[loss_type], 'k', label='val loss')
            # val_top2_acc
            plt.plot(iters, self.val_top2_acc[loss_type], 'm', label='val top2 acc')
        plt.grid(True)
        plt.xlabel(loss_type)
        plt.ylabel('acc-loss')
        plt.legend(loc="center right")
        # NOTE(review): plt.show() is a no-op under the 'agg' backend set
        # above; on interactive backends calling it before savefig may clear
        # the figure -- consider saving first if the backend ever changes.
        plt.show()
        plt.savefig('/home/user045/fws/JQXX/Fashion_MNIST_Classification/result/'+model_type+'/Loss_Acc_Curve.png')


# Load the dataset (60k train / 10k test grayscale 28x28 images).
fashion_mnist = keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
# Scale pixel values into [0, 1]; each 28x28 image is fed to the RNN as a
# sequence of 28 rows of 28 features.
train_images = train_images.reshape([-1,28,28]) / 255.0
test_images = test_images.reshape([-1,28,28]) / 255.0

# Preprocessing: preview a 5x5 grid of training images with their labels.
plt.figure(figsize=(10, 10))
for i in range(25):
    plt.subplot(5, 5, i+1)
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    plt.imshow(train_images[i], cmap=plt.cm.binary)
    plt.xlabel(class_names[train_labels[i]])

# Model: SimpleRNN encoder followed by a small dense classifier head.
model = keras.Sequential([
    #(-1,28,28)->(-1,100)
    keras.layers.SimpleRNN(
        # for batch_input_shape, if using tensorflow as the backend, we have to put None for the batch_size.
        # Otherwise, model.evaluate() will get error.
        input_shape=(28, 28),  # Or: input_dim=INPUT_SIZE, input_length=TIME_STEPS,
        units=512,
        unroll=True),
    keras.layers.Dropout(rate=0.5),
    #(-1,256)->(-1,10)
    keras.layers.Dense(256, activation=tf.nn.relu),
    keras.layers.Dropout(rate=0.5),
    keras.layers.Dense(128, activation=tf.nn.relu),
    keras.layers.Dropout(rate=0.5),
    keras.layers.Dense(10, activation=tf.nn.softmax)
])

# compile the model
model.summary()
#top2: sparse top-k accuracy with k=2, renamed so Keras logs it as 'top2_acc'.
top2_acc = functools.partial(keras.metrics.sparse_top_k_categorical_accuracy, k=2)
top2_acc.__name__ = 'top2_acc'
model.compile(optimizer=tf.train.AdamOptimizer(0.0001),loss='sparse_categorical_crossentropy',metrics=['accuracy', top2_acc])

# train the model; validation uses only the first 1000 test samples.
# NOTE(review): validation_data is conventionally a tuple, not a list --
# works on this Keras version but worth confirming on upgrades.
history = LossHistory()
model.fit(train_images,train_labels, epochs = 100,validation_data=[test_images[:1000],test_labels[:1000]],callbacks=[history])
test_score = model.evaluate(test_images, test_labels)

# evaluate the model (index 1 = accuracy, index 2 = top2_acc per metrics order)
print('top1_Test_accuracy:', test_score[1], 'top2_Test_accuracy:', test_score[2])
predictions = model.predict(test_images)


def plot_image(i, predictions_array, true_label, img):
    """Render test image ``i`` with its predicted class, confidence and true
    class; the label is blue when correct, red when wrong."""
    predictions_array, true_label, img = predictions_array[i], true_label[i], img[i]
    plt.grid(False)
    plt.xticks([])
    plt.yticks([])
    plt.imshow(img, cmap=plt.cm.binary)
    predicted_label = np.argmax(predictions_array)
    if predicted_label == true_label:
        color = 'blue'
    else:
        color = 'red'
    plt.xlabel("{} {:2.0f}% ({})".format(class_names[predicted_label], 100 * np.max(predictions_array), class_names[true_label]), color=color)


def plot_value_array(i, predictions_array, true_label):
    """Bar-plot the 10 class probabilities for test sample ``i``; the
    predicted bar is red and the true-label bar blue (blue wins if equal)."""
    predictions_array, true_label = predictions_array[i], true_label[i]
    plt.grid(False)
    plt.xticks([])
    plt.yticks([])
    thisplot = plt.bar(range(10), predictions_array, color="#777777")
    plt.ylim([0, 1])
    predicted_label = np.argmax(predictions_array)
    thisplot[predicted_label].set_color('red')
    thisplot[true_label].set_color('blue')


# Grid of the first 15 test samples: image + probability bars side by side.
num_rows = 5
num_cols = 3
num_images = num_rows*num_cols
plt.figure(figsize=(2*2*num_cols, 2*num_rows))
for i in range(num_images):
    plt.subplot(num_rows, 2*num_cols, 2*i+1)
    plot_image(i, predictions, test_labels, test_images)
    plt.subplot(num_rows, 2*num_cols, 2*i+2)
    plot_value_array(i, predictions, test_labels)
plt.show()
plt.savefig('/home/user045/fws/JQXX/Fashion_MNIST_Classification/result/RNN/Prediction.png')
history.loss_plot('epoch','RNN')
#!/usr/bin/env python3
"""Collates data files from prefix_url on s3 matching regex.

Uploads collated output to a programatically-generated s3 url.

Usage:
    ./collate.py [prefix_url] [regex]
"""

import boto3
from functools import reduce
from io import StringIO
from iterdub import iterdub as ib
import itertools
from keyname import keyname as kn
import math
import multiprocessing as mp
import os
import pandas as pd
import re
import sys
import tempfile
from tqdm.contrib.concurrent import process_map

################################################################################
print( )
print( 'running collate.py' )
print( '------------------' )
################################################################################

# Exactly two CLI arguments are required; anything else prints usage and exits.
try:
    prefix_url, regex = sys.argv[1:]
except ValueError:  # narrowed from bare except: only catch the unpack mismatch
    print(__doc__)
    sys.exit(1)

# Split s3://bucket/key-prefix into its two components.
bucket = re.search(r's3://(.+?)/', prefix_url).group(1)
prefix = re.search(f's3://{bucket}/(.+)', prefix_url).group(1)

print(f'prefix_url {prefix_url}')
print(f'regex {regex}')
print(f'bucket {bucket}')
print(f'prefix {prefix}')

# The prefix must carry exactly one keyname-packed segment with both a
# 'stage' and a 'what' annotation; 'stage' may hold several '~'-separated values.
assert prefix.count('stage=') == 1 and prefix.count('what=') == 1
assert any(
    'stage' in kn.unpack(segment) and 'what' in kn.unpack(segment)
    for segment in prefix.split('/')
)

stages, = [
    list(kn.unpack(segment)['stage'].split('~'))
    for segment in prefix.split('/')
    if 'stage' in kn.unpack(segment) and 'what' in kn.unpack(segment)
]
print(f'stages {stages}')

# One concrete prefix per stage: the annotated segment is re-packed with each
# single stage value, all other segments are passed through untouched.
prefixes = [
    '/'.join(
        kn.pack({
            **kn.unpack(segment),
            **{
                'stage' : stage,
            },
        })
        if 'stage' in kn.unpack(segment) and 'what' in kn.unpack(segment)
        else segment
        for segment in prefix.split('/')
    )
    for stage in stages
]
print(f'prefixes {prefixes}')

################################################################################
print( )
print( 'grepping for source files' )
print( '-------------------------' )
################################################################################

client = boto3.client('s3', region_name='us-west-2',)

# Anchor the user-supplied regex to the end of the key.
pattern = re.compile('.*' + regex + '$')
matches = [
    key['Key']
    for prefix in prefixes
    for page in client.get_paginator('list_objects').paginate(
        Bucket=bucket,
        Prefix=prefix,
    )
    # .get(): 'Contents' is absent from empty pages (consistent with the
    # stale-data listing below, which already guarded against this).
    for key in page.get('Contents', [])
    if pattern.match(key['Key'])
]

print(f'{len(matches)} matching source files')

# Bail out BEFORE touching matches[0]: the example print below would raise
# IndexError on an empty result set instead of showing this message.
if not matches:
    print('no matches detected')
    sys.exit(1)

print(f'for example, s3://{bucket}/{matches[0]}')

################################################################################
print( )
print( 'computing output path' )
print( '---------------- ----' )
################################################################################

# Output prefix: bump 'stage' past the largest input stage and mark the
# segment as collated output.
out_prefix = '/'.join(
    kn.pack({
        **kn.unpack(segment),
        **{
            'stage' : max(
                map(int, stages),
            ) + 1,
            'what' : 'collated',
        },
    })
    if 'stage' in kn.unpack(segment) and 'what' in kn.unpack(segment)
    else segment
    for segment in prefix.split('/')
)

# Keys present in every source filename survive into the collated filename,
# with their per-file values dubbed together.
common_keys = set.intersection(*[
    set(
        kn.unpack(match).keys()
    )
    for match in matches
])

out_filename = kn.pack({
    **{
        key : ib.dub(
            kn.unpack(match)[key]
            for match in matches
        )
        for key in common_keys
    },
    **{
        'ext' : '.csv.xz'
    },
})

out_path = (out_prefix + '/' + out_filename).replace('//', '/')
print(f'upload path will be s3://{bucket}/{out_path}')

################################################################################
print( )
print( 'downloading and concatenating' )
print( '-----------------------------' )
################################################################################

def getter(match):
    """Download one source csv from s3, warning when it is mostly nans."""
    res = pd.read_csv(f's3://{bucket}/{match}')
    num_nans = res.isna().sum().sum()
    num_cells = math.prod(map(len, res.axes))
    if num_nans * 2 > num_cells:
        print(f'WARNING: {match} is more than half nans')
    # print(match, len(res.index))
    return res

# Parallel download with a progress bar, then one big concat.
df = pd.concat(process_map(
    getter,
    matches,
    chunksize=10,
    max_workers=mp.cpu_count(),
))

print(f'concatenated dataframe has {len(df.index)} rows')
print(f'concatenated dataframe has {len(df.columns)} columns')

################################################################################
print( )
print( 'deleting stale collated data, if any' )
print( '------------------------------------' )
################################################################################

# Raw strings keep the regex bytes identical while silencing Python's
# invalid-escape-sequence warnings for '\/' and '\+'.
pattern = re.compile(
    r'.*/'                               # anything leading up to final /
    r'[^\/]*'                            # anything EXCEPT /
    rf'a={kn.unpack(out_path)["a"]}\+'   # title of collation output
    r'[^\/]*'                            # anything EXCEPT /
    r'$'                                 # end of line
)
matches = [
    key['Key']
    for page in client.get_paginator('list_objects').paginate(
        Bucket=bucket,
        Prefix=out_prefix,
    )
    for key in page.get('Contents', [])
    if pattern.match(key['Key'])
]

for match in matches:
    print(f'deleting stale collated data, {match}')
    client.delete_object(
        Bucket=bucket,
        Key=match,
    )

################################################################################
print( )
print( 'dumping and uploading' )
################################################################################
print( '---------------------' )

# have to work with filename or pandas compression doesn't work
with tempfile.TemporaryDirectory() as tmp:
    temp_path = os.path.join(tmp, 'data.xz')
    print(f'temp path is {temp_path}')

    df.to_csv(
        temp_path,
        index=False,
        compression='xz',
    )

    with open(temp_path, 'rb') as f:
        client.upload_fileobj(
            f,
            bucket,
            out_path,
        )

################################################################################
print( )
print( 'consolidation complete' )
print( '----------------------' )
################################################################################
from django.contrib.auth.models import User
from rest_framework.serializers import RelatedField

from .models import (
    Obra
)


class UserComentarioRelatedFields(RelatedField):
    """Read-only field rendering a comment author's minimal public identity."""

    def to_representation(self, value):
        attrs = ('id', 'first_name', 'last_name')
        return {name: getattr(value, name) for name in attrs}


class UserLoginRelatedFields(RelatedField):
    """Read-only field rendering the full user record used at login.

    NOTE(review): this exposes the stored ``password`` value in the
    representation -- confirm that is intentional for this API.
    """

    def to_representation(self, value):
        attrs = (
            'id',
            'username',
            'email',
            'password',
            'first_name',
            'last_name',
            'is_active',
            'last_login',
            'date_joined',
        )
        return {name: getattr(value, name) for name in attrs}


class ObraRelatedFields(RelatedField):
    # Placeholder: no custom representation for Obra yet.
    pass
"""Ensembling utilities for per-model prediction csvs: simple / power / rank
averaging plus simplex-based weight optimization against a dev ground truth."""
import pandas as pd
import numpy as np
import os
from sklearn.metrics import roc_auc_score, accuracy_score
from sklearn import metrics
from scipy.stats import rankdata
import math
import argparse


def parse_args():
    """Parse command-line options for ensembling runs."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--enspath", type=str, default="./data", help="Path to folder with all csvs")
    parser.add_argument("--enstype", type=str, default="loop", help="Type of ensembling to be performed - Current options: loop / sa")
    parser.add_argument("--exp", type=str, default="experiment", help="Name of experiment for csv's")
    parser.add_argument('--subdata', action='store_const', default=False, const=True)
    # Parse the arguments.
    args = parser.parse_args()
    return args

### FUNCTIONS IMPLEMENTING ENSEMBLE METHODS ###

### HELPERS ###
# Optimizing accuracy based on ROC AUC
# Source: https://albertusk95.github.io/posts/2019/12/best-threshold-maximize-accuracy-from-roc-pr-curve/
# ACC = (TP + TN)/(TP + TN + FP + FN) = (TP + TN) / P + N (= Correct ones / all)
# Senstivity / tpr = TP / P
# Specificity / tnr = TN / N

def get_acc_and_best_threshold_from_roc_curve(tpr, fpr, thresholds, num_pos_class, num_neg_class):
    """Given an ROC curve, return (best accuracy, threshold achieving it)."""
    tp = tpr * num_pos_class
    tn = (1 - fpr) * num_neg_class
    # Accuracy at every candidate threshold along the curve.
    acc = (tp + tn) / (num_pos_class + num_neg_class)
    best_threshold = thresholds[np.argmax(acc)]
    return np.amax(acc), best_threshold


def set_acc(row, threshold):
    """Binarize a row's 'proba' column against ``threshold`` (1 if >=)."""
    if row['proba'] >= threshold:
        val = 1
    else:
        val = 0
    return val

### AVERAGES ###

def simple_average(targets, example, weights=None, power=1, normalize=False):
    """
    targets: df with target values as columns
    example: output df example (e.g. including ID - make sure to adjust iloc below if target is not at 1)
    weights: per submission weights; default is equal weighting
    power: optional for power averaging
    normalize: Whether to normalize targets btw 0 & 1
    """
    if weights is None:
        weights = len(targets.columns) * [1.0 / len(targets.columns)]
    else:
        # Rescale arbitrary weights so they sum to 1.
        weights = weights / np.sum(weights)
    preds = example.copy()
    # Column 1 is assumed to be the prediction column (see docstring).
    preds.iloc[:, 1] = np.zeros(len(preds))
    if normalize:
        # Min-max scale each model's outputs into [0, 1] before averaging.
        targets = (targets - targets.min()) / (targets.max() - targets.min())
    for i in range(len(targets.columns)):
        preds.iloc[:, 1] = np.add(preds.iloc[:, 1], weights[i] * (targets.iloc[:, i].astype(float) ** power))
    return preds


def rank_average(subs, weights=None):
    """
    subs: list of submission dataframes with two columns (id, value)
    weights: per submission weights; default is equal weighting
    """
    if weights is None:
        weights = len(subs) * [1.0 / len(subs)]
    else:
        weights = weights / np.sum(weights)
    preds = subs[0].copy()
    preds.iloc[:, 1] = np.zeros(len(subs[0]))
    for i, sub in enumerate(subs):
        # rankdata maps each model's scores to ranks, normalized to (0, 1].
        preds.iloc[:, 1] = np.add(preds.iloc[:, 1], weights[i] * rankdata(sub.iloc[:, 1]) / len(sub))
    return preds

### SIMPLEX ###
### Similar to scipy optimize
# Taken & adapted from:
# https://github.com/chrisstroemel/Simple
# NOTE(review): these imports live mid-file because the section below is a
# vendored snippet; `math` and matplotlib are re-imported under other names.
from heapq import heappush, heappop, heappushpop
import numpy
import math
import time
import matplotlib.pyplot as plotter

CAPACITY_INCREMENT = 1000


class _Simplex:
    """One candidate simplex in the SimpleTuner search queue.

    Ordered by ``acquisitionValue`` (lower is more promising) so instances
    can live in heapq's min-heap.
    """

    def __init__(self, pointIndices, testCoords, contentFractions, objectiveScore, opportunityCost, contentFraction, difference):
        self.pointIndices = pointIndices
        self.testCoords = testCoords
        self.contentFractions = contentFractions
        self.contentFraction = contentFraction
        self.__objectiveScore = objectiveScore
        self.__opportunityCost = opportunityCost
        self.update(difference)

    def update(self, difference):
        # Negated so that the best acquisition sorts first in a min-heap.
        self.acquisitionValue = -(self.__objectiveScore + (self.__opportunityCost * difference))
        self.difference = difference

    def __eq__(self, other):
        return self.acquisitionValue == other.acquisitionValue

    def __lt__(self, other):
        return self.acquisitionValue < other.acquisitionValue


class SimpleTuner:
    """Derivative-free maximizer over a simplex-shaped domain (vendored from
    chrisstroemel/Simple, lightly adapted)."""

    def __init__(self, cornerPoints, objectiveFunction, exploration_preference=0.15):
        self.__cornerPoints = cornerPoints
        self.__numberOfVertices = len(cornerPoints)
        self.queue = []
        self.capacity = self.__numberOfVertices + CAPACITY_INCREMENT
        # Each row stores a test point's coordinates plus its objective value
        # in the final column.
        self.testPoints = numpy.empty((self.capacity, self.__numberOfVertices))
        self.objective = objectiveFunction
        self.iterations = 0
        self.maxValue = None
        self.minValue = None
        self.bestCoords = []
        self.opportunityCostFactor = exploration_preference  # / self.__numberOfVertices

    def optimize(self, maxSteps=10):
        """Run up to ``maxSteps`` evaluations of the objective."""
        for step in range(maxSteps):
            # print(self.maxValue, self.iterations, self.bestCoords)
            if len(self.queue) > 0:
                # Refine the most promising simplex by splitting it at its
                # interpolated test point.
                targetSimplex = self.__getNextSimplex()
                newPointIndex = self.__testCoords(targetSimplex.testCoords)
                for i in range(0, self.__numberOfVertices):
                    tempIndex = targetSimplex.pointIndices[i]
                    targetSimplex.pointIndices[i] = newPointIndex
                    newContentFraction = targetSimplex.contentFraction * targetSimplex.contentFractions[i]
                    newSimplex = self.__makeSimplex(targetSimplex.pointIndices, newContentFraction)
                    heappush(self.queue, newSimplex)
                    targetSimplex.pointIndices[i] = tempIndex
            else:
                # Bootstrap: evaluate each corner point first, then seed the
                # queue with the initial simplex.
                testPoint = self.__cornerPoints[self.iterations]
                testPoint.append(0)
                testPoint = numpy.array(testPoint, dtype=numpy.float64)
                self.__testCoords(testPoint)
                if self.iterations == (self.__numberOfVertices - 1):
                    initialSimplex = self.__makeSimplex(numpy.arange(self.__numberOfVertices, dtype=numpy.intp), 1)
                    heappush(self.queue, initialSimplex)
            self.iterations += 1

    def get_best(self):
        """Return (best objective value, coordinates achieving it)."""
        return (self.maxValue, self.bestCoords[0:-1])

    def __getNextSimplex(self):
        # Pop candidates, refreshing stale acquisition values until the heap
        # top is up to date with the current max-min spread.
        targetSimplex = heappop(self.queue)
        currentDifference = self.maxValue - self.minValue
        while currentDifference > targetSimplex.difference:
            targetSimplex.update(currentDifference)
            # if greater than because heapq is in ascending order
            if targetSimplex.acquisitionValue > self.queue[0].acquisitionValue:
                targetSimplex = heappushpop(self.queue, targetSimplex)
        return targetSimplex

    def __testCoords(self, testCoords):
        # Evaluate the objective at testCoords (last slot holds the value)
        # and store the augmented row; returns its index.
        objectiveValue = self.objective(testCoords[0:-1])
        if self.maxValue is None or objectiveValue > self.maxValue:
            self.maxValue = objectiveValue
            self.bestCoords = testCoords
        if self.minValue is None:
            self.minValue = objectiveValue
        elif objectiveValue < self.minValue:
            self.minValue = objectiveValue
        testCoords[-1] = objectiveValue
        if self.capacity == self.iterations:
            # Grow the backing store in fixed increments.
            self.capacity += CAPACITY_INCREMENT
            self.testPoints.resize((self.capacity, self.__numberOfVertices))
        newPointIndex = self.iterations
        self.testPoints[newPointIndex] = testCoords
        return newPointIndex

    def __makeSimplex(self, pointIndices, contentFraction):
        # Build a _Simplex whose test point is an inverse-distance-weighted
        # interpolation over the vertices (weighted toward the barycenter).
        vertexMatrix = self.testPoints[pointIndices]
        coordMatrix = vertexMatrix[:, 0:-1]
        barycenterLocation = numpy.sum(vertexMatrix, axis=0) / self.__numberOfVertices
        differences = coordMatrix - barycenterLocation[0:-1]
        distances = numpy.sqrt(numpy.sum(differences * differences, axis=1))
        totalDistance = numpy.sum(distances)
        barycentricTestCoords = distances / totalDistance
        euclideanTestCoords = vertexMatrix.T.dot(barycentricTestCoords)
        vertexValues = vertexMatrix[:, -1]
        testpointDifferences = coordMatrix - euclideanTestCoords[0:-1]
        testPointDistances = numpy.sqrt(numpy.sum(testpointDifferences * testpointDifferences, axis=1))
        inverseDistances = 1 / testPointDistances
        inverseSum = numpy.sum(inverseDistances)
        interpolatedValue = inverseDistances.dot(vertexValues) / inverseSum
        currentDifference = self.maxValue - self.minValue
        # Smaller sub-simplices pay a higher (more negative) exploration cost.
        opportunityCost = self.opportunityCostFactor * math.log(contentFraction, self.__numberOfVertices)
        return _Simplex(pointIndices.copy(), euclideanTestCoords, barycentricTestCoords, interpolatedValue, opportunityCost, contentFraction, currentDifference)

    def plot(self):
        """Visualize the search state; only meaningful for a 2D domain."""
        if self.__numberOfVertices != 3:
            raise RuntimeError('Plotting only supported in 2D')
        matrix = self.testPoints[0:self.iterations, :]
        x = matrix[:, 0].flat
        y = matrix[:, 1].flat
        z = matrix[:, 2].flat
        coords = []
        acquisitions = []
        for triangle in self.queue:
            coords.append(triangle.pointIndices)
            acquisitions.append(-1 * triangle.acquisitionValue)
        plotter.figure()
        plotter.tricontourf(x, y, coords, z)
        plotter.triplot(x, y, coords, color='white', lw=0.5)
        plotter.colorbar()
        plotter.figure()
        plotter.tripcolor(x, y, coords, acquisitions)
        plotter.triplot(x, y, coords, color='white', lw=0.5)
        plotter.colorbar()
        plotter.show()


def Simplex(devs, label, df_list=False, exploration=0.01, scale=1):
    """
    devs: list of dataframes with "proba" column
    label: list/np array of ground truths
    scale: By default we will get weights in the 0-1 range. Setting e.g. scale=50, gives weights in the 0-50 range.
    """
    predictions = []
    if df_list:
        for df in devs:
            predictions.append(df.proba)
        print(len(predictions[0]))
    else:
        # Single dataframe: one prediction series per column.
        for i, column in enumerate(devs):
            predictions.append(devs.iloc[:, i])
        print(len(predictions[0]))
    print("Optimizing {} inputs.".format(len(predictions)))

    def roc_auc(weights):
        ''' Will pass the weights as a numpy array '''
        final_prediction = 0
        for weight, prediction in zip(weights, predictions):
            final_prediction += weight * prediction
        return roc_auc_score(label, final_prediction)

    # This defines the search area, and other optimization parameters.
    # For e.g. 11 models, we have 12 corner points -- e.g. all none, only model 1, all others none, only model 2 all others none..
    # We concat an identity matrix & a zero array to create those
    zero_vtx = np.zeros((1, len(predictions)), dtype=int)
    optimization_domain_vertices = np.identity(len(predictions), dtype=int) * scale
    optimization_domain_vertices = np.concatenate((zero_vtx, optimization_domain_vertices), axis=0).tolist()
    number_of_iterations = 3000
    exploration = exploration  # optional, default 0.01  (NOTE(review): no-op self-assignment)

    # Optimize weights
    tuner = SimpleTuner(optimization_domain_vertices, roc_auc, exploration_preference=exploration)
    tuner.optimize(number_of_iterations)
    best_objective_value, best_weights = tuner.get_best()
    print('Optimized =', best_objective_value)  # same as roc_auc(best_weights)
    print('Weights =', best_weights)
    return best_weights

### APPLYING THE HELPER FUNCTIONS ###

def sa_wrapper(data_path="./data"):
    """
    Applies simple average.
    data_path: path to folder with X * (dev_seen, test_seen & test_unseen) .csv files
    """
    # NOTE(review): relies on the module-global `args` (set in __main__) and
    # on `data_path` ending with a separator (`data_path + csv` below).
    # Make sure the lists will be ordered, i.e. test[0] is the same model as devs[0]
    train, dev, test, test_unseen = [], [], [], []
    train_probas, dev_probas, test_probas, test_unseen_probas = {}, {}, {}, {}  # Never dynamically add to a pd Dataframe
    for csv in sorted(os.listdir(data_path)):
        if ".csv" in csv:
            print("Included in Simple Average: ", csv)
            # The varying slice lengths below presumably strip split-specific
            # filename suffixes (e.g. '_dev.csv') -- confirm naming scheme.
            if "train" in csv:
                train.append(pd.read_csv(data_path + csv))
                train_probas[csv[:-4]] = pd.read_csv(data_path + csv).proba.values
            elif ("dev" in csv) or ("val" in csv):
                dev.append(pd.read_csv(data_path + csv))
                dev_probas[csv[:-8]] = pd.read_csv(data_path + csv).proba.values
            elif "test_unseen" in csv:
                test_unseen.append(pd.read_csv(data_path + csv))
                test_unseen_probas[csv[:-14]] = pd.read_csv(data_path + csv).proba.values
            elif "test" in csv:
                test.append(pd.read_csv(data_path + csv))
                test_probas[csv[:-7]] = pd.read_csv(data_path + csv).proba.values

    train_probas = pd.DataFrame(train_probas)
    dev_probas = pd.DataFrame(dev_probas)
    test_probas = pd.DataFrame(test_probas)
    test_unseen_probas = pd.DataFrame(test_unseen_probas)

    train_SA = simple_average(train_probas, train[0])
    dev_SA = simple_average(dev_probas, dev[0])
    test_SA = simple_average(test_probas, test[0])
    test_unseen_SA = simple_average(test_unseen_probas, test_unseen[0])

    # Create output dir
    os.makedirs(os.path.join(data_path, args.exp), exist_ok=True)

    # Remove the per-model csvs and write one averaged csv per split.
    for csv in sorted(os.listdir(data_path)):
        if ".csv" in csv:
            if "train" in csv:
                os.remove(os.path.join(data_path, csv))
                train_SA.to_csv(os.path.join(data_path, args.exp, args.exp + "_train_SA.csv"), index=False)
            elif ("dev" in csv) or ("val" in csv):
                os.remove(os.path.join(data_path, csv))
                dev_SA.to_csv(os.path.join(data_path, args.exp, args.exp + "_dev_seen_SA.csv"), index=False)
            elif "test_unseen" in csv:
                os.remove(os.path.join(data_path, csv))
                test_unseen_SA.to_csv(os.path.join(data_path, args.exp, args.exp + "_test_unseen_SA.csv"), index=False)
            elif "test" in csv:
                os.remove(os.path.join(data_path, csv))
                test_SA.to_csv(os.path.join(data_path, args.exp, args.exp + "_test_seen_SA.csv"), index=False)


def main(path, gt_path="./data/"):
    """
    Loops through Averaging, Power Averaging, Rank Averaging, Optimization to find the best ensemble.
    path: String to directory with csvs of all models
    For each model there should be three csvs: dev, test, test_unseen
    gt_path: Path to folder with ground truth for dev
    """
    # Ground truth
    dev_df = pd.read_json(os.path.join(gt_path, 'dev_seen.jsonl'), lines=True)

    # Make sure the lists will be ordered, i.e. test[0] is the same model as devs[0]
    train, dev, test, test_unseen = [], [], [], []
    train_probas, dev_probas, test_probas, test_unseen_probas = {}, {}, {}, {}  # Never dynamically add to a pd Dataframe

    for csv in sorted(os.listdir(path)):
        print(csv)
        if ".csv" in csv:
            if "train" in csv:
                train.append(pd.read_csv(os.path.join(path, csv)))
                train_probas[csv[:-4]] = pd.read_csv(os.path.join(path, csv)).proba.values
            # NOTE(review): this is `if`, not `elif`, unlike sa_wrapper above;
            # a filename containing both "train" and "val" would be double
            # counted -- confirm whether that is intended.
            if ("dev" in csv) or ("val" in csv):
                dev.append(pd.read_csv(os.path.join(path, csv)))
                dev_probas[csv[:-8]] = pd.read_csv(os.path.join(path, csv)).proba.values
            elif "test_unseen" in csv:
                test_unseen.append(pd.read_csv(os.path.join(path, csv)))
                test_unseen_probas[csv[:-14]] = pd.read_csv(os.path.join(path, csv)).proba.values
            elif "test" in csv:
                test.append(pd.read_csv(os.path.join(path, csv)))
                test_probas[csv[:-7]] = pd.read_csv(os.path.join(path, csv)).proba.values

    train_probas = pd.DataFrame(train_probas)
    dev_probas = pd.DataFrame(dev_probas)
    test_probas = pd.DataFrame(test_probas)
    test_unseen_probas = pd.DataFrame(test_unseen_probas)

    # Keep the original (pre-ensembling) submissions for later rounds.
    train_or = train.copy()
    dev_or = dev.copy()
    test_or = test.copy()
    test_unseen_or = test_unseen.copy()

    if len(dev_df) > len(dev_probas):
        print("Your predictions do not include the full dev!")
        dev_df = dev[0][["id"]].merge(dev_df, how="left", on="id")

    loop, last_score, delta = 0, 0, 0.1

    # Iterate until the simplex dev score stops improving (or 2 rounds max).
    while (delta > 0.0001):
        # Individual Roc Aucs
        print("Individual RCs:\n")
        print("dev")
        for i, column in enumerate(dev_probas):
            score = roc_auc_score(dev_df.label, dev_probas.iloc[:, i])
            print(column, score)
        print('-' * 50)

        if loop > 0:
            # After the first round, prune the weakest models down to 5.
            while len(dev) > 5:
                lowest_score = 1
                drop = 0
                for i, column in enumerate(dev_probas):
                    score = roc_auc_score(dev_df.label, dev_probas.iloc[:, i])
                    if score < lowest_score:
                        lowest_score = score
                        col = column
                        drop = i
                column_numbers = [x for x in range(train_probas.shape[1])]  # list of columns' integer indices
                column_numbers.remove(drop)
                train_probas = train_probas.iloc[:, column_numbers]
                column_numbers = [x for x in range(dev_probas.shape[1])]  # list of columns' integer indices
                column_numbers.remove(drop)
                dev_probas = dev_probas.iloc[:, column_numbers]
                column_numbers = [x for x in range(test_probas.shape[1])]  # list of columns' integer indices
                column_numbers.remove(drop)
                test_probas = test_probas.iloc[:, column_numbers]
                column_numbers = [x for x in range(test_unseen_probas.shape[1])]  # list of columns' integer indices
                column_numbers.remove(drop)
                test_unseen_probas = test_unseen_probas.iloc[:, column_numbers]
                # NOTE(review): `i` here is the leftover loop index (last
                # column), not `drop`; verify these guards do what's intended.
                if i < len(dev_or):
                    train_or.pop(drop)
                    dev_or.pop(drop)
                    test_or.pop(drop)
                    test_unseen_or.pop(drop)
                if i < len(dev):
                    train.pop(drop)
                    dev.pop(drop)
                    test.pop(drop)
                    test_unseen.pop(drop)
                print("Dropped:", col)

        # Spearman Correlations:
        print("Spearman Corrs:")
        train_corr = train_probas.corr(method='spearman')
        dev_corr = dev_probas.corr(method='spearman')
        test_corr = test_probas.corr(method='spearman')
        test_unseen_corr = test_unseen_probas.corr(method='spearman')
        print(train_corr, '\n')
        print(dev_corr, '\n')
        print(test_corr)
        print(test_unseen_corr)
        print('-' * 50)

        ### SIMPLE AVERAGE ###
        train_SA = simple_average(train_probas, train[0], power=1, normalize=True)
        dev_SA = simple_average(dev_probas, dev[0], power=1, normalize=True)
        test_SA = simple_average(test_probas, test[0], power=1, normalize=True)
        test_unseen_SA = simple_average(test_unseen_probas, test_unseen[0], power=1, normalize=True)
        print(roc_auc_score(dev_df.label, dev_SA.proba), accuracy_score(dev_df.label, dev_SA.label))
        print('-' * 50)

        ### POWER AVERAGE ###
        train_PA = simple_average(train_probas, train[0], power=2, normalize=True)
        dev_PA = simple_average(dev_probas, dev[0], power=2, normalize=True)
        test_PA = simple_average(test_probas, test[0], power=2, normalize=True)
        test_unseen_PA = simple_average(test_unseen_probas, test_unseen[0], power=2, normalize=True)
        print(roc_auc_score(dev_df.label, dev_PA.proba), accuracy_score(dev_df.label, dev_PA.label))
        print('-' * 50)

        ### RANK AVERAGE ###
        train_RA = rank_average(train)
        dev_RA = rank_average(dev)
        test_RA = rank_average(test)
        test_unseen_RA = rank_average(test_unseen)
        print(roc_auc_score(dev_df.label, dev_RA.proba), accuracy_score(dev_df.label, dev_RA.label))
        print('-' * 50)

        ### SIMPLEX ###
        # Weights are fit on dev, then applied to every split.
        weights_dev = Simplex(dev_probas, dev_df.label)
        train_SX = simple_average(train_probas, train[0], weights_dev)
        dev_SX = simple_average(dev_probas, dev[0], weights_dev)
        test_SX = simple_average(test_probas, test[0], weights_dev)
        test_unseen_SX = simple_average(test_unseen_probas, test_unseen[0], weights_dev)
        print(roc_auc_score(dev_df.label, dev_SX.proba), accuracy_score(dev_df.label, dev_SX.label))
        print('-' * 50)

        # Prepare Next Round: the four ensembles join the originals as inputs.
        train = train_or + [train_SA, train_PA, train_RA, train_SX]
        dev = dev_or + [dev_SA, dev_PA, dev_RA, dev_SX]
        test = test_or + [test_SA, test_PA, test_RA, test_SX]
        test_unseen = test_unseen_or + [test_unseen_SA, test_unseen_PA, test_unseen_RA, test_unseen_SX]

        train_probas = pd.concat([df.proba for df in train], axis=1)
        dev_probas = pd.concat([df.proba for df in dev], axis=1)
        test_probas = pd.concat([df.proba for df in test], axis=1)
        test_unseen_probas = pd.concat([df.proba for df in test_unseen], axis=1)

        # Calculate Delta & increment loop
        delta = abs(roc_auc_score(dev_df.label, dev_SX.proba) - last_score)
        last_score = roc_auc_score(dev_df.label, dev_SX.proba)
        loop += 1
        # I found the loop to not add any value after 2 rounds.
        if loop == 2:
            break

    print("Currently at {} after {} loops.".format(last_score, loop))

    # Get accuracy thresholds & optimize (This does not add value to the roc auc, but just to also have an acc score)
    # NOTE(review): 250/250 hardcodes the dev class counts -- confirm they
    # match the actual ground truth.
    fpr, tpr, thresholds = metrics.roc_curve(dev_df.label, dev_SX.proba)
    acc, threshold = get_acc_and_best_threshold_from_roc_curve(tpr, fpr, thresholds, 250, 250)
    test_SX.label = test_SX.apply(set_acc, axis=1, args=[threshold])
    test_unseen_SX.label = test_unseen_SX.apply(set_acc, axis=1, args=[threshold])

    os.makedirs(os.path.join(path, args.exp), exist_ok=True)  # Set path instd of /k/w ; Remove all csv data / load the exact same 3 files again as put out
    # As Simplex at some point simply weighs the highest of all - lets take sx as the final prediction after x loops
    # NOTE(review): the train line appends ".csv" outside os.path.join,
    # unlike the three lines below -- the resulting path is the same, but the
    # inconsistency is worth tidying.
    train_SX.to_csv(os.path.join(path, args.exp, "FIN_train_" + args.exp + "_" + str(loop)) + ".csv", index=False)
    dev_SX.to_csv(os.path.join(path, args.exp, "FIN_dev_seen_" + args.exp + "_" + str(loop) + ".csv"), index=False)
    test_SX.to_csv(os.path.join(path, args.exp, "FIN_test_seen_" + args.exp + "_" + str(loop) + ".csv"), index=False)
    test_unseen_SX.to_csv(os.path.join(path, args.exp, "FIN_test_unseen_" + args.exp + "_" + str(loop) + ".csv"), index=False)

    print("Finished.")


if __name__ == "__main__":
    # `args` intentionally lives at module scope: sa_wrapper/main read it.
    args = parse_args()
    if args.enstype == "loop":
        main(args.enspath)
    elif args.enstype == "sa":
        sa_wrapper(args.enspath)
    else:
        print(args.enstype, " is not yet enabled. Feel free to add the code :)")
from django.test import TestCase
from robber import expect

from data.factories import AreaFactory, OfficerFactory, PoliceUnitFactory
from search_terms.term_builders import (
    AreaTermBuilder, PoliceDistrictsTermBuilder, CommunitiesTermBuilder,
    NeighborhoodsTermBuilder, PoliceBeatTermBuilder, SchoolGroundsTermBuilder,
    WardsTermBuilder, get_term_builders, OfficerRankTermBuilder,
    PoliceUnitTermBuilder
)


class AreaTermBuilderTestCase(TestCase):
    """Tests for the generic area-based term builder."""

    def test_build_terms(self):
        """build_terms() emits one name/link dict per area of the slug's type."""

        # Subclass pins the area type and URL query key under test.
        class MyAreaTermBuilder(AreaTermBuilder):
            slug = 'my_area_type'
            query_key = 'my_query_key'

        AreaFactory(area_type='my_area_type', name='1st')
        AreaFactory(area_type='my_area_type', name='2nd')

        expect(MyAreaTermBuilder.build_terms()).to.eq([
            {
                'name': '1st',
                'link': 'http://cpdb.lvh.me/url-mediator/session-builder?my_query_key=1st'
            },
            {
                'name': '2nd',
                'link': 'http://cpdb.lvh.me/url-mediator/session-builder?my_query_key=2nd'
            }
        ])


class GetTermBuildersTestCase(TestCase):
    """Tests for the slug -> builder-class lookup."""

    def test_call(self):
        # Each known slug must resolve to its dedicated builder class.
        expect(get_term_builders('police-districts')).to.eq(PoliceDistrictsTermBuilder)
        expect(get_term_builders('community')).to.eq(CommunitiesTermBuilder)
        expect(get_term_builders('neighborhoods')).to.eq(NeighborhoodsTermBuilder)
        expect(get_term_builders('police-beats')).to.eq(PoliceBeatTermBuilder)
        expect(get_term_builders('school-grounds')).to.eq(SchoolGroundsTermBuilder)
        expect(get_term_builders('wards')).to.eq(WardsTermBuilder)


class OfficerRankTermBuilderTestCase(TestCase):
    """Tests for rank-based term building."""

    def test_build_terms(self):
        OfficerFactory(rank='my-custom-rank')
        expect(OfficerRankTermBuilder.build_terms()).to.eq([{
            'name': 'my-custom-rank',
            'link': 'http://cpdb.lvh.me/url-mediator/session-builder?officer__rank=my-custom-rank'
        }])


class PoliceUnitTermBuilderTestCase(TestCase):
    """Tests for unit-based term building."""

    def test_build_terms(self):
        # The display name comes from the description; the query value from
        # the unit name.
        PoliceUnitFactory(unit_name='001', description='my unit description')
        expect(PoliceUnitTermBuilder.build_terms()).to.eq([{
            'name': 'my unit description',
            'link': 'http://cpdb.lvh.me/url-mediator/session-builder?officer__unit=001'
        }])
# -*- coding:utf-8 -*- from __future__ import print_function from __future__ import division import tensorflow as tf from common import IMAGE_HEIGHT, IMAGE_SIZE, IMAGE_WIDTH, CAPTCHA_LEN, CHAR_SET_LEN, NUM_LABELS def weight_variable(shape): initial = tf.random_normal(shape, stddev=0.01) return tf.Variable(initial) def bias_variable(shape): initial = tf.random_normal(shape, stddev=0.1) return tf.Variable(initial) def conv2d(x, W): return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME') def max_pool(x): return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') def load_model_nn(alpha=1e-3): # `cnn` up to now with tf.Graph().as_default() as graph: # Define the PlaceHolder x = tf.placeholder(tf.float32, shape=[None, IMAGE_SIZE]) y = tf.placeholder(tf.float32, shape=[None, NUM_LABELS]) keep_prob = tf.placeholder(tf.float32) x_image = tf.reshape(x, shape=[-1, IMAGE_WIDTH, IMAGE_HEIGHT, 1]) # First Convolutional Layer, input@(100, 40), output@(50, 20) conv_layer1_weight = weight_variable([5, 5, 1, 32]) conv_layer1_bias = bias_variable([32]) pool_layer1 = max_pool( tf.nn.relu( conv2d(x_image, conv_layer1_weight) + conv_layer1_bias ) ) # Second Convolutional Layer, input@(50, 20), output@(25, 10) conv_layer2_weight = weight_variable([5, 5, 32, 64]) conv_layer2_bias = bias_variable([64]) pool_layer2 = max_pool( tf.nn.relu( conv2d(pool_layer1, conv_layer2_weight) + conv_layer2_bias ) ) # Third Convolutional Layer, input@(25, 10), output@(13, 5) conv_layer3_weight = weight_variable([5, 5, 64, 64]) conv_layer3_bias = bias_variable([64]) pool_layer3 = max_pool( tf.nn.relu( conv2d(pool_layer2, conv_layer3_weight) + conv_layer3_bias ) ) # Fully Connected Layer fc_layer_weight = weight_variable([13 * 5 * 64, 1024]) fc_layer_bias = bias_variable([1024]) pool_layer3_flat = tf.reshape(pool_layer3, [-1, 13 * 5 * 64]) fc_layer = tf.nn.relu(tf.add(tf.matmul(pool_layer3_flat, fc_layer_weight), fc_layer_bias)) # Dropout fc_layer_drop = 
tf.nn.dropout(fc_layer, keep_prob) # Readout Layer output_layer_weight = weight_variable([1024, NUM_LABELS]) output_layer_bias = bias_variable([NUM_LABELS]) y_conv = tf.add(tf.matmul(fc_layer_drop, output_layer_weight), output_layer_bias) loss = tf.reduce_mean( tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=y_conv) ) optimizer = tf.train.AdamOptimizer(alpha).minimize(loss) prediction = tf.argmax(tf.reshape(y_conv, [-1, CAPTCHA_LEN, CHAR_SET_LEN]), 2) correct = tf.argmax(tf.reshape(y, [-1, CAPTCHA_LEN, CHAR_SET_LEN]), 2) correct_prediction = tf.equal(prediction, correct) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) saver = tf.train.Saver(max_to_keep=2) model = {'x': x, 'y': y, 'optimizer': optimizer, 'loss': loss, 'keep_prob': keep_prob, 'accuracy': accuracy, 'prediction': prediction, 'saver': saver, 'graph': graph } return model
#!/usr/bin/env python # -*- coding: utf-8 -*- import sys, os sys.path.append(os.pardir) # 親ディレクトリのファイルをインポートするための設定 import numpy as np from alifebook_lib.simulators import AntSimulator from ant_nn_utils import generate_nn_model, generate_action, decode_weights, CONTEXT_NEURON_NUM agent_num = [] agent_nn_model_list = [] agent_nn_context_val_list = [] for i in range(1, len(sys.argv), 2): gene = np.load(sys.argv[i]) num = int(sys.argv[i+1]) agent_num.append(num) for j in range(num): nn_model = generate_nn_model() decode_weights(nn_model, gene) context_val = np.zeros(CONTEXT_NEURON_NUM) agent_nn_model_list.append(nn_model) agent_nn_context_val_list.append(context_val) N = np.sum(agent_num) action = np.empty((N, 2)) # 各エージェントのアクションを収めるための (Nx2) の配列 simulator = AntSimulator(N, decay_rate=0.995, hormone_secretion=0.15) # エージェントの遺伝子ファイル毎に色をセットする idx = 0 if len(agent_num) > 1: for i, n in enumerate(agent_num): # xには0-1の間の等間隔の値が入る x = i / (len(agent_num) - 1) # xに応じてグラデーション色を生成 r = max(-2.0 * x + 1.0, 0.0) g = min(2.0 * x, -2.0 * x + 2.0) b = max(2.0 * x - 1.0, 0.0) color = (r, g, b) for j in range(n): simulator.set_agent_color(idx, color) idx += 1 while simulator: sensor_data = simulator.get_sensor_data() for i in range(N): a, c = generate_action(agent_nn_model_list[i], sensor_data[i], agent_nn_context_val_list[i]) action[i] = a agent_nn_context_val_list[i] = c simulator.update(action)
from typing import List

from grpc import Channel

from .execution_pb2_grpc import ExecutionServiceStub
from .message import Execution, GetRequest, GetManyRequest

__all__ = ["get_execution", "get_executions"]


def get_execution(channel: Channel, execution_id: str, with_payload: bool = False) -> Execution:
    """
    Get an execution

    :param channel: open gRPC channel to the execution service.
    :param execution_id: id of the execution to fetch.
    :param with_payload: when True, ask the service to include the payload.
    """
    stub = ExecutionServiceStub(channel)
    response = stub.Get(
        GetRequest(id=execution_id, with_payload=with_payload))
    # NOTE(review): this returns ``response.workspace`` from an *execution*
    # service response -- looks like a copy/paste from a workspace client.
    # Verify the Get response message; the field is presumably ``execution``.
    return response.workspace


def get_executions(channel: Channel, execution_ids: List[str], with_payload: bool = False) -> List[Execution]:
    """
    Get many executions

    :param channel: open gRPC channel to the execution service.
    :param execution_ids: ids of the executions to fetch.
    :param with_payload: when True, ask the service to include payloads.
    """
    stub = ExecutionServiceStub(channel)
    response = stub.GetMany(
        GetManyRequest(ids=execution_ids, with_payload=with_payload))
    # NOTE(review): same concern as above -- ``workspaces`` vs ``executions``;
    # confirm against the GetMany response definition.
    return response.workspaces
import os

from django.core.files.storage import FileSystemStorage

from BosvogelWebPlatform.settings import MEDIA_ROOT


class OverwriteOnSameNameStorage(FileSystemStorage):
    """Storage backend that overwrites colliding files instead of renaming.

    Django's default behaviour appends a random suffix when an uploaded
    name already exists; this variant deletes the existing file so the
    upload keeps a stable name.
    """

    media_root = MEDIA_ROOT  # to let the unit test change media_root

    def get_available_name(self, name, max_length=None):
        """Free up *name* by removing any existing file, then return it."""
        # NOTE(review): removal joins against the class-level ``media_root``
        # while ``self.exists`` resolves through the storage's own location;
        # these only agree when the storage is rooted at MEDIA_ROOT --
        # confirm, or consider ``self.delete(name)`` instead.
        if self.exists(name):
            os.remove(os.path.join(self.media_root, name))
        return name
from flask import Flask, jsonify, render_template, request, abort, current_app, make_response
from datetime import timedelta
from functools import update_wrapper
from flask_cors import CORS

import os
import pickle
import subprocess

# NOTE(review): ``static_path`` is the pre-1.0 Flask spelling (modern Flask
# uses ``static_url_path``).  Left unchanged to match the installed Flask
# version -- confirm before upgrading the dependency.
app = Flask(__name__, static_path='/static')
CORS(app)


@app.route("/", methods=['GET'])
def askApi():
    """Serve the landing page."""
    return render_template("index.html")


@app.route("/api/v2.0/<string:text>", methods=['GET'])
def api(text):
    """Run the analyzer subprocess on *text* and return its output as JSON.

    Responds 404 when the analyzer exits with a non-zero status.
    """
    try:
        # List-argv form: no shell, so *text* cannot inject shell syntax.
        new = subprocess.check_output(["python3", "saver.py", text])
    except subprocess.CalledProcessError:
        # Bug fix: check_output signals failure with CalledProcessError;
        # the original caught IndexError, which it never raises, so any
        # analyzer failure crashed the request with a 500.
        abort(404)
    # Bug fix: check_output returns bytes; str(bytes) would embed a
    # "b'...'" repr in the JSON payload.  Decode to plain text instead.
    # (Also dropped the dead ``newText = text.lower()`` local.)
    return jsonify({'analyze': new.decode('utf-8', errors='replace')})
from jinja2 import Markup

from db import export_sql


def render(vis, request, info):
    """Build the SQL for a 4-field (x, y, size, color) visualization.

    Reads query parameters from *request*, runs the export through
    ``export_sql`` (with caching controlled by ``reload``), and fills
    *info* with data-file path, axis labels, title and status messages.
    """
    info["message"] = []

    # user parameters
    table = request.args.get("table", '')
    field = request.args.get("field", '')
    where = request.args.get("where", '1=1')  # default clause keeps SQL valid
    reload = int(request.args.get("reload", 0))
    view = request.args.get("view", '')
    limit = request.args.get("limit", '1000')
    start = request.args.get("start", '0')

    groupby = request.args.get("groupBy", '')
    if groupby and len(groupby) > 0:
        groupby = ' group by %s' % groupby

    orderBy = request.args.get("orderBy", '1')
    if orderBy and len(orderBy) > 0:
        orderBy = ' order by %s ' % orderBy

    # NOTE(review): ``request.args.get`` returns a single string for a
    # repeated key; the list defaults suggest ``getlist`` semantics are
    # expected here -- confirm against the request wrapper in use.
    pfield = request.args.get("pfield", [])  # fields split into an array
    sfield = request.args.get("sfield", [])  # field captions split into an array

    if len(table) == 0 or not field:
        info["message"].append("Table or field missing.")
        info["message_class"] = "failure"
    elif len(sfield) < 2:
        info["message"].append("Not enough fields.")
        info["message_class"] = "failure"
    else:
        if len(sfield) > 4:
            info["message"].append("Too many fields. Only first 4 are used.")
            sfield = sfield[:4]
        info["xlabel"] = pfield[0]
        info["ylabel"] = pfield[1]
        # if z,c are not provided, pad both lists up to 4 entries with '1'
        sfield.extend(['1'] * (4 - len(sfield)))
        pfield.extend(['1'] * (4 - len(pfield)))
        info["field3"] = pfield[3 - 1]
        info["field4"] = pfield[4 - 1]

        sql = "select %s from %s where %s %s %s limit %s offset %s" \
            % (','.join(sfield), table, where, groupby, orderBy, limit, start)
        header = "x,y,z,c"
        (datfile, reload, result) = export_sql(sql, vis.config, reload, header, view)

        if len(result) > 0:
            info["message"].append(result)
            info["message_class"] = "failure"
        else:
            info["message_class"] = "success"
            if reload > 0:
                info["message"].append("Loaded fresh.")
            else:
                info["message"].append("Loading from cache. Use reload=1 to reload.")
            info["datfile"] = datfile

    # Re-read pfield so the title shows the raw (un-padded) request values.
    pfield = request.args.get("pfield", [])
    info["title"] = "FIELD_X: <em>%s</em>, <br />FIELD_Y: <em>%s</em>, <br />FIELD_Z(size): <em>%s</em>, <br />FIELD_C(color): <em>%s</em> from <br />TABLE: <em>%s</em>" \
        % (pfield[0], pfield[1], pfield[2], pfield[3], table)
    info["title"] = Markup(info["title"])
    info["message"] = Markup(''.join(['<p>%s</p>' % m for m in info["message"] if len(m) > 0]))
from simple_rest_client.resource import Resource


class SystemCMDB(Resource):
    """REST action map for ``/system`` configuration (CMDB) endpoints.

    Used with ``simple_rest_client``: each entry in ``actions`` becomes a
    callable method on the resource.  A ``{}`` placeholder in a URL is
    filled with a positional argument at call time (e.g. an object name
    or a query-string suffix).
    """

    actions = {
        "dns": {"method": "GET", "url": "/system/dns{}"},
        "new_dns": {"method": "PUT", "url": "/system/dns"},
        "interfaces": {"method": "GET", "url": "/system/interface{}"},
        "interface": {"method": "GET", "url": "/system/interface/{}"},
        "virtual_wan_link": {"method": "GET", "url": "/system/virtual-wan-link{}"},
        "new_virtual_wan_link": {"method": "PUT", "url": "/system/virtual-wan-link"}
    }
"""dtoolutils package.""" __version__ = "0.2.0"
import re

from pytchat.processors.chat_processor import ChatProcessor

# Matches "<currency symbol(s)><amount>", e.g. "$5.00" or "JPY 1,000":
# group(1) = leading non-digit prefix (currency symbol),
# group(2) = comma-grouped number with optional decimals.
superchat_regex = re.compile(r"^(\D*)(\d{1,3}(,\d{3})*(\.\d*)*\b)$")

# Key paths into a chat action for paid messages and paid stickers.
items_paid = [
    'addChatItemAction', 'item', 'liveChatPaidMessageRenderer'
]

items_sticker = [
    'addChatItemAction', 'item', 'liveChatPaidStickerRenderer'
]


class SuperchatCalculator(ChatProcessor):
    """
    Calculate the amount of SuperChat by currency.
    """

    def __init__(self):
        # "amount_sc" counts superchat events; every other key is a
        # currency symbol mapped to its running total.
        self.results = {"amount_sc": 0}

    def process(self, chat_components: list):
        """
        Accumulate superchat amounts from a batch of chat components.

        Return
        ------------
        results : dict :
            List of amount by currency.
            key: currency symbol, value: total amount.
        """
        if chat_components is None:
            return self.results
        for component in chat_components:
            chatdata = component.get('chatdata')
            if chatdata is None:
                continue
            for action in chatdata:
                # Paid messages and paid stickers both expose the purchase
                # amount through the same renderer shape.
                renderer = self._get_item(action, items_paid) or \
                    self._get_item(action, items_sticker)
                if renderer is None:
                    continue
                symbol, amount = self._parse(renderer)
                self.results.setdefault(symbol, 0)
                self.results[symbol] += amount
                self.results["amount_sc"] += 1
        return self.results

    def _parse(self, renderer):
        """Split "purchaseAmountText" into (currency symbol, float amount)."""
        purchase_amount_text = renderer["purchaseAmountText"]["simpleText"]
        m = superchat_regex.search(purchase_amount_text)
        if m:
            # Some locales insert a non-breaking space after the symbol.
            symbol = m.group(1).replace("\xa0", "")
            amount = float(m.group(2).replace(',', ''))
        else:
            # Unrecognized format: record under the empty symbol with 0.
            symbol = ""
            amount = 0.0
        return symbol, amount

    def _get_item(self, dict_body, items: list):
        """Walk *dict_body* along *items* (dict keys / list indices).

        Returns the nested value, or None when any step is missing or
        the container type does not match the key type.
        """
        for item in items:
            if dict_body is None:
                break
            if isinstance(dict_body, dict):
                dict_body = dict_body.get(item)
                continue
            if isinstance(item, int) and \
                    isinstance(dict_body, list) and \
                    len(dict_body) > item:
                dict_body = dict_body[item]
                continue
            return None
        return dict_body
from ndex.networkn import NdexGraph
import sys


def test_types():
    """Exercise every attribute value type NdexGraph serializes to CX.

    Builds a tiny 3-node graph, sets scalar and list attributes of each
    supported type on one node, and writes the result to a CX file.
    """
    G = NdexGraph()
    n = G.add_new_node('Node with Types')
    n1 = G.add_new_node('A')
    n2 = G.add_new_node('B')
    G.add_edge_between(n, n1)
    G.add_edge_between(n, n2)
    G.set_name('Test Types')
    G.node[n]['string'] = 'mystring'
    G.node[n]['bool'] = True
    G.node[n]['int'] = 5
    G.node[n]['double'] = 2.5
    # Python 3 has no separate ``long`` type; ints are arbitrary precision.
    # BUG FIX: the original did ``if major == 3: long = int`` inside this
    # function, which makes ``long`` a *local* name on every path -- under
    # Python 2 the assignment never runs and ``long(5)`` raises
    # UnboundLocalError, defeating the intended 2/3 compatibility.
    # Resolve the builtin explicitly instead (EAFP).
    try:
        long_type = long  # Python 2: the real long builtin
    except NameError:
        long_type = int  # Python 3: int already covers long
    # long_type(5) will be 5L in Python 2 and plain int 5 in Python 3.
    G.node[n]['long'] = long_type(5)
    G.node[n]['string_list'] = ['mystring', 'myotherstring']
    G.node[n]['bool_list'] = [False, True]
    G.node[n]['int_list'] = [5, -20]
    G.node[n]['double_list'] = [2.5, 3.7]
    G.node[n]['long_list'] = [long_type(5), long_type(75)]
    G.write_to('temp_test_type.cx')
    # G.upload_to('http://test.ndexbio.org', 'scratch', 'scratch')


def test_metadata():
    """Fetch a known network from the dev server and print its metadata."""
    G = NdexGraph(server="http://dev.ndexbio.org", uuid="317332f7-ade8-11e6-913c-06832d634f41")
    print(G.metadata_original)


if __name__ == "__main__":
    test_types()
    test_metadata()
from django.db import models


class ModerationQuerySet(models.QuerySet):
    """QuerySet that knows which moderated rows a given user may see."""

    def public(self, user):
        """Return the rows visible to *user*.

        Staff see everything; other authenticated users see public rows
        plus their own; anonymous users see only public rows.
        """
        if user.is_authenticated and user.is_staff:
            return self.all()
        visible = models.Q(public=True)
        if user.is_authenticated:
            visible = visible | models.Q(creator=user)
        return self.filter(visible)


class ModerationManager(models.Manager):
    """Default manager handing out ModerationQuerySet instances."""

    def get_queryset(self):
        return ModerationQuerySet(self.model, using=self._db)

    def public(self, user):
        # Delegate to the queryset so the same filter is chainable there.
        return self.get_queryset().public(user)
#     Copyright 2020, Kay Hayen, mailto:kay.hayen@gmail.com
#
#     Part of "Nuitka", an optimizing Python compiler that is compatible and
#     integrates with CPython, but also works on its own.
#
#     Licensed under the Apache License, Version 2.0 (the "License");
#     you may not use this file except in compliance with the License.
#     You may obtain a copy of the License at
#
#        http://www.apache.org/licenses/LICENSE-2.0
#
#     Unless required by applicable law or agreed to in writing, software
#     distributed under the License is distributed on an "AS IS" BASIS,
#     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#     See the License for the specific language governing permissions and
#     limitations under the License.
#
""" Namify constants.

This determines the identifier names of constants in the generated code. We
try to have readable names where possible, and resort to hash codes only when
it is really necessary.

"""

import hashlib
import math
import re
from types import BuiltinFunctionType

from nuitka.__past__ import (  # pylint: disable=I0021,redefined-builtin
    long,
    unicode,
    xrange,
)
from nuitka.Builtins import builtin_anon_values, builtin_named_values_list
from nuitka.Tracing import general


class ExceptionCannotNamify(Exception):
    """Raised when no identifier name can be derived for a constant."""

    pass


def namifyConstant(constant):
    """Return an identifier fragment naming *constant*.

    Readable names are produced where possible; overly long or non-ASCII
    values fall back to an MD5 digest of their repr.  Raises
    ExceptionCannotNamify for values with no naming policy.
    """
    # Many branches, statements and every case has a return, this is a huge case
    # statement, that encodes the naming policy of constants, with often complex
    # conditions, pylint: disable=too-many-branches,too-many-return-statements,too-many-statements

    if type(constant) is int:
        if constant == 0:
            result = "int_0"
        elif constant > 0:
            result = "int_pos_%d" % constant
        else:
            result = "int_neg_%d" % abs(constant)

        if len(result) > 32:
            result = _digest(result)

        return result
    elif type(constant) is long:
        if constant == 0:
            result = "long_0"
        elif constant > 0:
            result = "long_pos_%d" % constant
        else:
            result = "long_neg_%d" % abs(constant)

        if len(result) > 32:
            result = _digest(result)

        return result
    elif constant is None:
        return "none"
    elif constant is True:
        return "true"
    elif constant is False:
        return "false"
    elif constant is Ellipsis:
        return "ellipsis"
    elif type(constant) is str:
        return "str_" + _namifyString(constant)
    elif type(constant) is bytes:
        return "bytes_" + _namifyString(constant)
    elif type(constant) is unicode:
        if _isAscii(constant):
            return "unicode_" + _namifyString(str(constant))
        else:
            # Others are better digested to not cause compiler trouble
            return "unicode_digest_" + _digest(repr(constant))
    elif type(constant) is float:
        if math.isnan(constant):
            # Sign matters for NaN, use copysign to detect -nan.
            return "float_%s_nan" % (
                "minus" if math.copysign(1, constant) < 0 else "plus"
            )

        return "float_%s" % repr(constant).replace(".", "_").replace(
            "-", "minus_"
        ).replace("+", "")
    elif type(constant) is complex:
        value = "%s__%s" % (constant.real, constant.imag)
        value = value.replace("+", "p").replace("-", "m").replace(".", "_")

        if value.startswith("(") and value.endswith(")"):
            value = value[1:-1]

        return "complex_%s" % value
    elif type(constant) is dict:
        if constant == {}:
            return "dict_empty"
        else:
            return "dict_" + _digest(repr(constant))
    elif type(constant) is set:
        if constant == set():
            return "set_empty"
        else:
            return "set_" + _digest(repr(constant))
    elif type(constant) is frozenset:
        if constant == frozenset():
            return "frozenset_empty"
        else:
            return "frozenset_" + _digest(repr(constant))
    elif type(constant) is tuple:
        if constant == ():
            return "tuple_empty"
        else:
            try:
                result = "_".join(namifyConstant(value) for value in constant)

                if len(result) > 60:
                    result = _digest(repr(constant))

                return "tuple_" + result + "_tuple"
            except ExceptionCannotNamify:
                general.warning("Couldn't namify '%r'" % (constant,))

                return "tuple_" + _digest(repr(constant))
    elif type(constant) is list:
        if constant == []:
            return "list_empty"
        else:
            try:
                result = "_".join(namifyConstant(value) for value in constant)

                if len(result) > 60:
                    result = _digest(repr(constant))

                return "list_" + result + "_list"
            except ExceptionCannotNamify:
                # Bug fix: the original formatted with ``value``, the
                # generator-expression variable, which is not defined in
                # this scope on Python 3 (NameError) and stale on Python 2.
                # Report the whole constant, matching the tuple branch.
                general.warning("Couldn't namify '%r'" % (constant,))

                return "list_" + _digest(repr(constant))
    elif type(constant) is bytearray:
        return "bytearray_" + _digest(repr(constant))
    elif type(constant) is xrange:
        # Strip "xrange(" / "range(" prefix and ")" suffix from the repr.
        return "xrange_%s" % (
            str(constant)[7 if str is bytes else 6 : -1]
            .replace(" ", "")
            .replace(",", "_")
            .replace("-", "neg")
        )
    elif type(constant) is slice:
        return "slice_%s_%s_%s" % (
            namifyConstant(constant.start),
            namifyConstant(constant.stop),
            namifyConstant(constant.step),
        )
    elif constant in builtin_anon_values:
        return "anon_%s" % builtin_anon_values[constant]
    elif type(constant) is type:
        return "type_%s" % constant.__name__
    elif type(constant) is BuiltinFunctionType:
        assert constant in builtin_named_values_list

        return "builtin_%s" % constant.__name__
    elif constant is NotImplemented:
        return "type_notimplemented"
    else:
        raise ExceptionCannotNamify("%r" % constant, type(constant))


_re_str_needs_no_digest = re.compile(r"^([a-z]|[A-Z]|[0-9]|_){1,40}$", re.S)


def _namifyString(string):
    """Return a readable name fragment for a short string, else a digest."""
    # Many branches case has a return, encodes the naming policy of strings
    # constants, with often complex decisions to make, pylint: disable=too-many-return-statements

    if string in ("", b""):
        return "empty"
    elif string == " ":
        return "space"
    elif string == ".":
        return "dot"
    elif string == "\n":
        return "newline"
    elif (
        type(string) is str
        and _re_str_needs_no_digest.match(string)
        and "\n" not in string
    ):
        # Some strings can be left intact for source code readability.
        return "plain_" + string
    elif len(string) == 1:
        return "chr_%d" % ord(string)
    elif (
        len(string) > 2
        and string[0] == "<"
        and string[-1] == ">"
        and _re_str_needs_no_digest.match(string[1:-1])
        and "\n" not in string
    ):
        return "angle_" + string[1:-1]
    else:
        # Others are better digested to not cause compiler trouble
        return "digest_" + _digest(repr(string))


def _isAscii(string):
    """True when *string* converts to str without encoding errors."""
    try:
        _unused = str(string)
        return True
    except UnicodeEncodeError:
        return False


def _digest(value):
    """MD5 hex digest of *value*, encoding text first where needed."""
    if str is bytes:
        # Python2 is simple
        return hashlib.md5(value).hexdigest()
    else:
        # Python3 needs to encode the string if it is one.
        if type(value) is bytes:
            return hashlib.md5(value).hexdigest()
        else:
            return hashlib.md5(value.encode("utf-8")).hexdigest()
# Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

from tempest import test  # noqa
from tempest_lib.common.utils import data_utils  # noqa
from tempest_lib import exceptions as lib_exc  # noqa

from manila_tempest_tests import clients_share as clients
from manila_tempest_tests.tests.api import base


class ShareServersNegativeAdminTest(base.BaseSharesAdminTest):
    """Negative tests for the share-server admin API.

    Verifies that non-admin members are denied access and that listing
    with bogus filters / ids yields empty results or NotFound.
    """

    @classmethod
    def resource_setup(cls):
        super(ShareServersNegativeAdminTest, cls).resource_setup()
        # Separate, non-admin client to exercise the Forbidden paths.
        cls.member_shares_client = clients.Manager().shares_client

    @test.attr(type=["gate", "smoke", "negative", ])
    def test_try_list_share_servers_with_member(self):
        self.assertRaises(lib_exc.Forbidden,
                          self.member_shares_client.list_share_servers)

    @test.attr(type=["gate", "smoke", "negative", ])
    def test_try_show_share_server_with_member(self):
        self.assertRaises(lib_exc.Forbidden,
                          self.member_shares_client.show_share_server,
                          'fake_id')

    @test.attr(type=["gate", "smoke", "negative", ])
    def test_try_show_share_server_details_with_member(self):
        self.assertRaises(lib_exc.Forbidden,
                          self.member_shares_client.show_share_server_details,
                          'fake_id')

    @test.attr(type=["gate", "smoke", "negative", ])
    def test_show_share_server_with_inexistent_id(self):
        self.assertRaises(lib_exc.NotFound,
                          self.shares_client.show_share_server,
                          'fake_id')

    @test.attr(type=["gate", "smoke", "negative", ])
    def test_show_share_server_details_with_inexistent_id(self):
        self.assertRaises(lib_exc.NotFound,
                          self.shares_client.show_share_server_details,
                          'fake_id')

    @test.attr(type=["gate", "smoke", "negative", ])
    def test_list_share_servers_with_wrong_filter_key(self):
        # Unknown filter keys should match nothing, not error out.
        search_opts = {'fake_filter_key': 'ACTIVE'}
        servers = self.shares_client.list_share_servers(search_opts)
        self.assertEqual(len(servers), 0)

    @test.attr(type=["gate", "smoke", "negative", ])
    def test_list_share_servers_with_wrong_filter_value(self):
        search_opts = {'host': 123}
        servers = self.shares_client.list_share_servers(search_opts)
        self.assertEqual(len(servers), 0)

    @test.attr(type=["gate", "smoke", "negative", ])
    def test_list_share_servers_with_fake_status(self):
        search_opts = {"status": data_utils.rand_name("fake_status")}
        servers = self.shares_client.list_share_servers(search_opts)
        self.assertEqual(len(servers), 0)

    @test.attr(type=["gate", "smoke", "negative", ])
    def test_list_share_servers_with_fake_host(self):
        search_opts = {"host": data_utils.rand_name("fake_host")}
        servers = self.shares_client.list_share_servers(search_opts)
        self.assertEqual(len(servers), 0)

    @test.attr(type=["gate", "smoke", "negative", ])
    def test_list_share_servers_with_fake_project(self):
        search_opts = {"project_id": data_utils.rand_name("fake_project_id")}
        servers = self.shares_client.list_share_servers(search_opts)
        self.assertEqual(len(servers), 0)

    @test.attr(type=["gate", "smoke", "negative", ])
    def test_list_share_servers_with_fake_share_network(self):
        search_opts = {
            "share_network": data_utils.rand_name("fake_share_network"),
        }
        servers = self.shares_client.list_share_servers(search_opts)
        self.assertEqual(len(servers), 0)

    @test.attr(type=["gate", "smoke", "negative", ])
    def test_delete_share_server_with_nonexistent_id(self):
        self.assertRaises(lib_exc.NotFound,
                          self.shares_client.delete_share_server,
                          "fake_nonexistent_share_server_id")

    @test.attr(type=["gate", "smoke", "negative", ])
    def test_delete_share_server_with_member(self):
        self.assertRaises(lib_exc.Forbidden,
                          self.member_shares_client.delete_share_server,
                          "fake_nonexistent_share_server_id")
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division

import math
import time

import random
random.seed(67)

import numpy as np
np.random.seed(67)

import pandas as pd

from scipy.sparse import csc_matrix

from fastFM.als import FMClassification

from sklearn.decomposition import RandomizedPCA
from sklearn.metrics import log_loss
from sklearn.pipeline import make_pipeline, FeatureUnion
from sklearn.preprocessing import PolynomialFeatures, MinMaxScaler
from sklearn.pipeline import Pipeline, FeatureUnion, make_pipeline, make_union
from sklearn.feature_selection import SelectKBest

from transformers import ItemSelector


def main():
    """Train a factorization machine on the raw features plus t-SNE
    embeddings, report validation log-loss, and write test predictions.
    """
    # load data
    df_train = pd.read_csv('data/train_data.csv')
    df_valid = pd.read_csv('data/valid_data.csv')
    df_test = pd.read_csv('data/test_data.csv')

    feature_cols = list(df_train.columns[:-1])
    target_col = df_train.columns[-1]

    X_train = df_train[feature_cols].values
    # fastFM expects labels in {-1, +1}, so remap the zeros.
    y_train = df_train[target_col].values
    y_train[y_train == 0] = -1

    X_valid = df_valid[feature_cols].values
    y_valid = df_valid[target_col].values
    y_valid[y_valid == 0] = -1

    X_test = df_test[feature_cols].values

    # Precomputed t-SNE embeddings at several perplexities; each .npz holds
    # 'train'/'valid'/'test' splits.
    tsne_data_2d_5p = np.load('data/tsne_2d_5p.npz')
    tsne_data_2d_10p = np.load('data/tsne_2d_10p.npz')
    tsne_data_2d_15p = np.load('data/tsne_2d_15p.npz')
    tsne_data_2d_20p = np.load('data/tsne_2d_20p.npz')
    tsne_data_2d_30p = np.load('data/tsne_2d_30p.npz')
    tsne_data_2d_40p = np.load('data/tsne_2d_40p.npz')
    tsne_data_2d_50p = np.load('data/tsne_2d_50p.npz')
    tsne_data_3d_30p = np.load('data/tsne_3d_30p.npz')

    # concat features
    X_train_concat = {
        'X': X_train,
        'tsne_2d_5p': tsne_data_2d_5p['train'],
        'tsne_2d_10p': tsne_data_2d_10p['train'],
        'tsne_2d_15p': tsne_data_2d_15p['train'],
        'tsne_2d_20p': tsne_data_2d_20p['train'],
        'tsne_2d_30p': tsne_data_2d_30p['train'],
        'tsne_2d_40p': tsne_data_2d_40p['train'],
        'tsne_2d_50p': tsne_data_2d_50p['train'],
        'tsne_3d_30p': tsne_data_3d_30p['train'],
    }
    X_valid_concat = {
        'X': X_valid,
        'tsne_2d_5p': tsne_data_2d_5p['valid'],
        'tsne_2d_10p': tsne_data_2d_10p['valid'],
        'tsne_2d_15p': tsne_data_2d_15p['valid'],
        'tsne_2d_20p': tsne_data_2d_20p['valid'],
        'tsne_2d_30p': tsne_data_2d_30p['valid'],
        'tsne_2d_40p': tsne_data_2d_40p['valid'],
        'tsne_2d_50p': tsne_data_2d_50p['valid'],
        'tsne_3d_30p': tsne_data_3d_30p['valid'],
    }
    X_test_concat = {
        'X': X_test,
        'tsne_2d_5p': tsne_data_2d_5p['test'],
        'tsne_2d_10p': tsne_data_2d_10p['test'],
        'tsne_2d_15p': tsne_data_2d_15p['test'],
        'tsne_2d_20p': tsne_data_2d_20p['test'],
        'tsne_2d_30p': tsne_data_2d_30p['test'],
        'tsne_2d_40p': tsne_data_2d_40p['test'],
        'tsne_2d_50p': tsne_data_2d_50p['test'],
        'tsne_3d_30p': tsne_data_3d_30p['test'],
    }

    # build pipeline: select each named array, stack them, expand with
    # degree-2 polynomial interactions, and scale to [0, 1].
    pipeline = Pipeline(steps=[
        ('features', FeatureUnion(transformer_list=[
            ('X', ItemSelector('X')),
            ('tsne_2d_5p', ItemSelector('tsne_2d_5p')),
            ('tsne_2d_10p', ItemSelector('tsne_2d_10p')),
            ('tsne_2d_15p', ItemSelector('tsne_2d_15p')),
            ('tsne_2d_20p', ItemSelector('tsne_2d_20p')),
            ('tsne_2d_30p', ItemSelector('tsne_2d_30p')),
            ('tsne_2d_40p', ItemSelector('tsne_2d_40p')),
            ('tsne_2d_50p', ItemSelector('tsne_2d_50p')),
            ('tsne_3d_30p', ItemSelector('tsne_3d_30p')),
        ])),
        ('poly', PolynomialFeatures(degree=2)),
        ('scaler', MinMaxScaler()),
    ])

    fm = FMClassification(n_iter=300, rank=8, l2_reg_w=1e-2, l2_reg_V=1e-2)

    print('Fitting...')
    start_time = time.time()
    # fastFM's ALS solver requires a CSC sparse matrix.
    fm.fit(csc_matrix(pipeline.fit_transform(X_train_concat, y_train)), y_train)
    print('Fit: {}s'.format(time.time() - start_time))

    p_valid = fm.predict_proba(csc_matrix(pipeline.transform(X_valid_concat)))
    loss = log_loss(y_valid, p_valid)
    print('Loss: {}'.format(loss))

    p_test = fm.predict_proba(csc_matrix(pipeline.transform(X_test_concat)))
    df_pred = pd.DataFrame({
        't_id': df_test['t_id'],
        'probability': p_test
    })
    # Embed timestamp and validation loss in the file name for traceability.
    csv_path = 'predictions/predictions_{}_{}.csv'.format(int(time.time()), loss)
    df_pred.to_csv(csv_path, columns=('t_id', 'probability'), index=None)
    print('Saved: {}'.format(csv_path))


if __name__ == '__main__':
    main()
import sys
import json
import logging
import collections
import os
import re
import datetime

# Location where data gets stored after validation
database: str = "database.json"


def check_parameter_type(data, keys, validType):
    """Compare the type of each ``data[key]`` with its expected type name.

    Args:
        data: Mapping under validation; every key in *keys* must be present.
        keys: Ordered list of keys to check.
        validType: Expected ``type(...).__name__`` strings, parallel to *keys*.

    Returns:
        Empty string when every value matches; otherwise one "Fail: ..."
        sentence per mismatching key, concatenated.
    """
    message: str = ""
    actual_types = [type(data[key]).__name__ for key in keys]
    for index, (expected, actual) in enumerate(zip(validType, actual_types)):
        if expected != actual:
            message += "Fail: {} should be {} but is {} instead. ".format(
                keys[index], expected, actual)
    return message


# Validate patient info
def validate_patient_info(data):
    """Validate the patient-level fields of a reading.

    Returns:
        ``[True, message]`` when all fields are valid,
        ``[False, concatenated_error_messages]`` otherwise.
    """
    dict_validDataKeys = ["deviceId", "patientId", "patientName", "gender",
                         "dob", "phoneNumber"]
    dict_validDataValueType = ['int', 'int', 'str', 'str', 'str', 'str']
    message: str = ""
    message += check_parameter_type(data, dict_validDataKeys, dict_validDataValueType)

    # Check deviceId parameter: must be a 0..999999 integer.
    if type(data["deviceId"]).__name__ == 'int' and (data["deviceId"] < 0 or data["deviceId"] > 999999):
        message += "Fail: deviceId value should be between 0 and 999999. "

    # Check patientId parameter: must be a 0..999999 integer.
    # BUGFIX: this message previously (incorrectly) referred to deviceId.
    if type(data["patientId"]).__name__ == 'int' and (data["patientId"] < 0 or data["patientId"] > 999999):
        message += "Fail: patientId value should be between 0 and 999999. "

    # Check patientName parameter: first and last name, alphabetic,
    # each part at least two characters, separated by a single space.
    first_last = data["patientName"].split(' ')
    for name in first_last:
        if len(name) < 2 or not name.isalpha() or len(first_last) < 2:
            message += ("Fail: {} is invalid. System expects a first and last "
                        "name separated with a space. ").format(data["patientName"])
            break

    # Check gender parameter: only the two literals the devices emit.
    if data["gender"] != "Male" and data['gender'] != "Female":
        message += "Fail: {} is not a valid gender. Gender should either be Male or Female. ".format(data["gender"])

    # Check dob parameter: must parse as DD-MM-YYYY (%d-%m-%Y).
    # BUGFIX: the error text previously claimed MM/DD/YYYY although the
    # parser has always expected %d-%m-%Y.
    try:
        datetime.datetime.strptime(data["dob"], '%d-%m-%Y')
    except (ValueError, TypeError):
        message += "Fail: {} is not a valid dob. dob should be in DD-MM-YYYY format. ".format(data["dob"])

    # Check phoneNumber parameter: (XXX)XXX-XXXX or XXX-XXX-XXXX.
    if type(data["phoneNumber"]).__name__ == 'str' and not re.match(r'^(?:\(\d{3}\)|\d{3}-)\d{3}-\d{4}$', data["phoneNumber"]):
        message += "Fail: {} is not a valid phone number. Phone number should be in XXX-XXX-XXXX format. ".format(data["phoneNumber"])

    # Return False along with all the messages when errors were found
    if message != "":
        return [False, message]
    # Otherwise, return True and success message
    message = "Success: patient info is validated"
    logging.info(message)
    return [True, message]


# Validate all parent keys
def validate_parent_items(dataKeys, data):
    """Check the top-level key set, then validate the patient fields."""
    dict_dataKeys = ["deviceId", "patientId", "patientName", "gender", "dob",
                     "phoneNumber", "address", "measurements"]
    logging.info("Processing: validating json parent keys of {}".format(dict_dataKeys))

    # Check if all primary keys are there, order does not matter
    if collections.Counter(dict_dataKeys) != collections.Counter(dataKeys):
        missing_keys = set(dict_dataKeys) - set(dataKeys)
        extra_keys = set(dataKeys) - set(dict_dataKeys)
        message: str = "Fail: json file has missing primary key(s) {} and extra key(s) {}. It should only contain {}. ".format(missing_keys, extra_keys, dict_dataKeys)
        logging.error(message)
        return [False, message]

    # Validate values in primary keys
    validatePatientInfo = validate_patient_info(data)
    return [validatePatientInfo[0], validatePatientInfo[1]]


# Validate address keys
def validate_address_info(address):
    """Validate the street / city / state / zipcode sub-document."""
    dict_validAddressKeys = ["street", "city", "state", "zipcode"]
    dict_validAddressValueType = ['str', 'str', 'str', 'int']
    message: str = ""
    message += check_parameter_type(address, dict_validAddressKeys, dict_validAddressValueType)

    # Check street parameter
    if len(address["street"]) < 2:
        message += "Fail: {} is not a valid street. ".format(address["street"])
    # Check city parameter
    if len(address["city"]) < 2:
        message += "Fail: {} is not a valid city. ".format(address["city"])
    # Check state parameter: two-letter abbreviation.
    if len(address["state"]) != 2:
        message += "Fail: {} is not a valid state. State should be in XX format. ".format(address["state"])
    # Check zipcode parameter: exactly five digits.
    if len(str(address["zipcode"])) != 5:
        message += "Fail: {} is not a valid zipcode. Zipcode should be in 5-digit format. ".format(address["zipcode"])

    if message != "":
        return [False, message]
    message = "Success: patient address is validated. "
    logging.info(message)
    return [True, message]


def validate_address_items(addressKeys, address):
    """Check the address key set, then validate the address values."""
    dict_dataKeys = ["street", "city", "state", "zipcode"]
    logging.info("Processing: validating json address keys of {}".format(dict_dataKeys))

    # Check if all address keys are there, order does not matter
    if collections.Counter(dict_dataKeys) != collections.Counter(addressKeys):
        missing_keys = set(dict_dataKeys) - set(addressKeys)
        extra_keys = set(addressKeys) - set(dict_dataKeys)
        message: str = "Fail: json file has missing address key(s) {} and extra key(s) {}. It should only contain {}. ".format(missing_keys, extra_keys, dict_dataKeys)
        logging.error(message)
        return [False, message]

    validateAddressInfo = validate_address_info(address)
    return [validateAddressInfo[0], validateAddressInfo[1]]


# Validate temperature info
def validate_temperature_info(tempMeasurements):
    """Validate the temperature reading (50..200, Fahrenheit only)."""
    dict_validTempKeys = ["temperature", "unit"]
    dict_validTempValueType = ['int', 'str']
    message: str = ""
    message += check_parameter_type(tempMeasurements, dict_validTempKeys, dict_validTempValueType)

    # Check temperature parameter
    if type(tempMeasurements["temperature"]).__name__ == 'int' and (tempMeasurements["temperature"] < 50 or tempMeasurements["temperature"] > 200):
        message += "Fail: {} is not a valid temperature. ".format(tempMeasurements["temperature"])
    # Check unit parameter
    if tempMeasurements["unit"] != "F":
        message += "Fail: {} is not a valid temperature unit. Please use F. ".format(tempMeasurements["unit"])

    if message != "":
        return [False, message]
    message = "Success: patient temperature is validated. "
    logging.info(message)
    return [True, message]


# Validate blood pressure info
def validate_BP_info(bpMeasurements):
    """Validate systolic/diastolic blood pressure (0..300 mmHg)."""
    dict_validBPKeys = ["systolic", "diastolic", "unit"]
    dict_validBPValueType = ['int', 'int', 'str']
    message: str = ""
    message += check_parameter_type(bpMeasurements, dict_validBPKeys, dict_validBPValueType)

    # Check systolic parameter
    if type(bpMeasurements["systolic"]).__name__ == 'int' and (bpMeasurements["systolic"] < 0 or bpMeasurements["systolic"] > 300):
        message += "Fail: {} is not a valid systolic. ".format(bpMeasurements["systolic"])
    # Check diastolic parameter
    if type(bpMeasurements["diastolic"]).__name__ == 'int' and (bpMeasurements["diastolic"] < 0 or bpMeasurements["diastolic"] > 300):
        message += "Fail: {} is not a valid diastolic. ".format(bpMeasurements["diastolic"])
    # Check unit parameter
    if bpMeasurements["unit"] != "mmHg":
        message += "Fail: {} is not a valid blood pressure unit. Please use mmHg. ".format(bpMeasurements["unit"])

    if message != "":
        return [False, message]
    message = "Success: patient blood pressure is validated. "
    logging.info(message)
    return [True, message]


# Validate pulse info
def validate_pulse_info(pulseMeasurements):
    """Validate the pulse reading (0..300 bpm)."""
    dict_validPulseKeys = ["pulse", "unit"]
    dict_validPulseValueType = ['int', 'str']
    message: str = ""
    message += check_parameter_type(pulseMeasurements, dict_validPulseKeys, dict_validPulseValueType)

    # Check pulse parameter
    if type(pulseMeasurements["pulse"]).__name__ == 'int' and (pulseMeasurements["pulse"] < 0 or pulseMeasurements["pulse"] > 300):
        message += "Fail: {} is not a valid pulse. ".format(pulseMeasurements["pulse"])
    # Check unit parameter
    # BUGFIX: the message previously said "temperature unit" (copy-paste).
    if pulseMeasurements["unit"] != "bpm":
        message += "Fail: {} is not a valid pulse unit. Please use bpm. ".format(pulseMeasurements["unit"])

    if message != "":
        return [False, message]
    message = "Success: patient pulse is validated. "
    logging.info(message)
    return [True, message]


# Validate oximeter info
def validate_oximeter_info(oximeterMeasurements):
    """Validate the oxygen-saturation reading (non-negative, percent)."""
    dict_validOximeterKeys = ["oxygen", "unit"]
    dict_validOximeterValueType = ['int', 'str']
    message: str = ""
    message += check_parameter_type(oximeterMeasurements, dict_validOximeterKeys, dict_validOximeterValueType)

    # Check oxygen parameter
    if type(oximeterMeasurements["oxygen"]).__name__ == 'int' and oximeterMeasurements["oxygen"] < 0:
        message += "Fail: {} is not a valid oxygen level. ".format(oximeterMeasurements["oxygen"])
    # Check unit parameter
    if oximeterMeasurements["unit"] != "%":
        message += "Fail: {} is not a valid oxygen level unit. Please use %. ".format(oximeterMeasurements["unit"])

    if message != "":
        return [False, message]
    message = "Success: patient oximeter is validated. "
    logging.info(message)
    return [True, message]


# Validate weight info
def validate_weight_info(weightMeasurements):
    """Validate the weight reading (non-negative, pounds)."""
    dict_validWeightKeys = ["weight", "unit"]
    dict_validWeightValueType = ['int', 'str']
    message: str = ""
    message += check_parameter_type(weightMeasurements, dict_validWeightKeys, dict_validWeightValueType)

    # Check weight parameter
    if type(weightMeasurements["weight"]).__name__ == 'int' and weightMeasurements["weight"] < 0:
        message += "Fail: {} is not a valid weight number. ".format(weightMeasurements["weight"])
    # Check unit parameter
    if weightMeasurements["unit"] != "lb":
        message += "Fail: {} is not a valid weight unit. Please use lb. ".format(weightMeasurements["unit"])

    if message != "":
        return [False, message]
    message = "Success: patient weight is validated. "
    logging.info(message)
    return [True, message]


# Validate glucometer info
def validate_glucometer_info(glucometerMeasurements):
    """Validate the blood-sugar reading (non-negative, mg-per-dL)."""
    dict_validGlucometerKeys = ["bloodSugarLvl", "unit"]
    dict_validGlucometerValueType = ['int', 'str']
    message: str = ""
    message += check_parameter_type(glucometerMeasurements, dict_validGlucometerKeys, dict_validGlucometerValueType)

    # Check bloodSugarLvl parameter
    if type(glucometerMeasurements["bloodSugarLvl"]).__name__ == 'int' and glucometerMeasurements["bloodSugarLvl"] < 0:
        message += "Fail: {} is not a valid blood sugar level. ".format(glucometerMeasurements["bloodSugarLvl"])
    # Check unit parameter
    # BUGFIX: the message previously said "mg/dL" although the accepted
    # literal is (and remains) "mg-per-dL".
    if glucometerMeasurements["unit"] != "mg-per-dL":
        message += "Fail: {} is not a valid blood sugar unit. Please use mg-per-dL. ".format(glucometerMeasurements["unit"])

    if message != "":
        return [False, message]
    message = "Success: patient glucometer is validated"
    logging.info(message)
    return [True, message]


# Validate measurement keys
def validate_measurement_items(measurementKeys, measurements):
    """Check the measurements key set, then validate each sub-reading."""
    dict_dataKeys = ["temperature", "bloodPressure", "pulse", "oximeter",
                     "weight", "glucometer", "timestamp"]
    logging.info("Processing: validating json measurement keys of {}".format(dict_dataKeys))

    # Check if all measurement keys are there, order does not matter
    if collections.Counter(dict_dataKeys) != collections.Counter(measurementKeys):
        missing_keys = set(dict_dataKeys) - set(measurementKeys)
        extra_keys = set(measurementKeys) - set(dict_dataKeys)
        message: str = "Fail: json file has missing primary key(s) {} and extra key(s) {}. It should only contain {}. ".format(missing_keys, extra_keys, dict_dataKeys)
        logging.error(message)
        return [False, message]

    # Run every per-device validator; collect [ok, message] pairs.
    validateMeasurementInfo = [
        validate_temperature_info(measurements["temperature"]),
        validate_BP_info(measurements["bloodPressure"]),
        validate_pulse_info(measurements["pulse"]),
        validate_oximeter_info(measurements["oximeter"]),
        validate_weight_info(measurements["weight"]),
        validate_glucometer_info(measurements["glucometer"]),
    ]

    # Sum up all error messages if there are any failures
    message: str = ""
    for results in validateMeasurementInfo:
        if results[0] == False:
            message += results[1]

    # Validate timestamp parameter against ISO-8601 "%Y-%m-%dT%H:%M:%SZ"
    try:
        datetime.datetime.strptime(measurements["timestamp"], "%Y-%m-%dT%H:%M:%SZ")
    except (ValueError, TypeError):
        message += "Fail: {} is not a valid timestamp. Timestamp should be in %Y-%m-%dT%H:%M:%SZ format. ".format(measurements["timestamp"])

    if message != "":
        return [False, message]
    message = "Success: patient measurements are validated. "
    logging.info(message)
    return [True, message]


# Validate json file
def validate_json(inputFile: str):
    """Parse *inputFile* (a .json path or a raw JSON string) and validate it.

    Returns ``[True, message, data]`` on success, ``[False, message]`` on
    any failure (including unparsable JSON or missing sub-documents).
    """
    message: str = ""
    logging.info("Processing: validating json format")
    try:
        # A ".json" suffix means a file path; anything else is treated as
        # an inline JSON string.
        if inputFile[-5:] == ".json":
            with open(inputFile, "r") as f:
                if os.path.getsize(inputFile) == 0:
                    message = f"{inputFile} is empty"
                    logging.error(message)
                    return [False, message]
                data = json.load(f)
            logging.info("Processing input that's a json file")
        else:
            data = json.loads(inputFile)
            logging.info("Processing input that's not a json file")

        validateAllInfo = [
            validate_parent_items(data.keys(), data),
            validate_address_items(data["address"].keys(), data["address"]),
            validate_measurement_items(data["measurements"].keys(), data["measurements"]),
        ]

        # Sum up all error messages if there are any failures
        for results in validateAllInfo:
            if results[0] == False:
                message += results[1]
        if message != "":
            return [False, message]
        message = "Success: all of patient's information is validated. "
        logging.info(message)
        return [True, message, data]
    # Broad on purpose: a missing "address"/"measurements" key or a JSON
    # parse error both surface here as a format failure. (Was a bare
    # except:, which also swallowed SystemExit/KeyboardInterrupt.)
    except Exception:
        message = "Fail: json file format is incorrect. "
        logging.error(message)
        return [False, message]


# Validate input file is a valid json file
def validate(inputFile: str):
    """Top-level validation entry point: type-check then JSON-validate."""
    logging.info("Processing: began validating file")
    # Validate argument parameter is a string
    if not isinstance(inputFile, str):
        message: str = "Fail: input argument is not a string. "
        logging.error(message)
        return [False, message]
    logging.info("Success: file argument passed in is a string")
    validateJsonResult = validate_json(inputFile)
    logging.info(validateJsonResult)
    return validateJsonResult


def write_to_database(json_input):
    """Validate *json_input* and, on success, persist the data to ``database``.

    Returns ``[True, message]`` on success, ``[False, message]`` otherwise.
    """
    try:
        # Validate data
        json_results = validate(json_input)
        # Check validate results; refuse to write anything invalid.
        if json_results[0] == False:
            message: str = json_results[1] + "Therefore, no data is written to database. Please try again. "
            logging.error(message)
            return [False, message]
        # BUGFIX: the file handle used to shadow the json_input parameter;
        # the unreachable "else" branch (json_results[0] is known True here)
        # has been removed.
        with open(database, "w") as db_file:
            json.dump(json_results[2], db_file, indent=4)
        message: str = "Successfully written to database. "
        logging.info(message)
        return [True, message]
    except Exception:
        message: str = "Could not write result or data to database. "
        logging.error(message)
        return [False, message]


def main():
    """CLI entry point: validate the single file argument and report."""
    if len(sys.argv) != 2:
        message: str = "You must insert one file as an argument. Please try again."
        logging.error(message)
        sys.exit(1)  # was exit(1); sys.exit is the supported API
    result = write_to_database(sys.argv[1])
    print(result)


if __name__ == '__main__':
    main()
"""Functions for managing worker state. In general, one uses these by first calling init_* or set_* to create the attribute, then calling get_* to retrieve the corresponding value. """ from dask.distributed import get_worker from src.objectives import ObjectiveBase from src.utils.noise_table import NoiseTable # # Generic # def set_worker_state(key: str, val: object): """Sets worker_state[key] = val""" worker = get_worker() setattr(worker, key, val) def get_worker_state(key: str) -> object: """Retrieves worker_state[key]""" worker = get_worker() return getattr(worker, key) # # Noise table # NOISE_TABLE_ATTR = "noise_table" def init_noise_table(): """Initializes this worker's noise table.""" set_worker_state(NOISE_TABLE_ATTR, NoiseTable()) def get_noise_table() -> NoiseTable: """Retrieves this worker's copy of the noise table.""" return get_worker_state(NOISE_TABLE_ATTR) # # Objective module # OBJECTIVE_MOD_ATTR = "objective_module" def init_objective_module(module_class: ObjectiveBase, config: "config object"): """Initializes this worker's objective module.""" set_worker_state(OBJECTIVE_MOD_ATTR, module_class(config)) def get_objective_module() -> ObjectiveBase: """Retrieves this worker's objective module.""" return get_worker_state(OBJECTIVE_MOD_ATTR) def close_objective_module_env() -> ObjectiveBase: """Closes the env in the objective module. Mainly for GymControl modules. """ return get_worker_state(OBJECTIVE_MOD_ATTR).env.close()
from broker.base import AccountType

# Units per standard lot (1 lot = 100,000 units of the base currency).
UNIT_RATIO = 100000


class OrderType(object):
    """Every order type the broker API can create."""
    MARKET = "MARKET"  # A Market Order
    LIMIT = "LIMIT"  # A Limit Order
    STOP = "STOP"  # A Stop Order
    MARKET_IF_TOUCHED = "MARKET_IF_TOUCHED"  # A Market-if-touched Order
    TAKE_PROFIT = "TAKE_PROFIT"  # A Take Profit Order
    STOP_LOSS = "STOP_LOSS"  # A Stop Loss Order
    TRAILING_STOP_LOSS = "TRAILING_STOP_LOSS"  # A Trailing Stop Loss Order
    FIXED_PRICE = "FIXED_PRICE"  # A Fixed Price Order


class CancellableOrderType(object):
    """Subset of order types that can still be cancelled (no MARKET/FIXED_PRICE)."""
    LIMIT = 'LIMIT'  # A Limit Order
    STOP = 'STOP'  # A Stop Order
    MARKET_IF_TOUCHED = 'MARKET_IF_TOUCHED'  # A Market-if-touched Order
    TAKE_PROFIT = 'TAKE_PROFIT'  # A Take Profit Order
    STOP_LOSS = 'STOP_LOSS'  # A Stop Loss Order
    TRAILING_STOP_LOSS = 'TRAILING_STOP_LOSS'  # A Trailing Stop Loss Order


class OrderState(object):
    """Lifecycle states of a single order."""
    PENDING = 'PENDING'  # The Order is currently pending execution
    FILLED = 'FILLED'  # The Order has been filled
    TRIGGERED = 'TRIGGERED'  # The Order has been triggered
    CANCELLED = 'CANCELLED'  # The Order has been cancelled


class OrderStateFilter(object):
    """Filter values for querying orders by state; ALL matches every state."""
    PENDING = 'PENDING'  # The orders that are currently pending execution
    FILLED = 'FILLED'  # The orders that have been filled
    TRIGGERED = 'TRIGGERED'  # The orders that have been triggered
    CANCELLED = 'CANCELLED'  # The orders that have been cancelled
    ALL = 'ALL'  # Orders in any state: PENDING, FILLED, TRIGGERED, CANCELLED


class TimeInForce(object):
    """How long an order remains active before the broker cancels it."""
    GTC = 'GTC'  # The Order is "Good unTil Cancelled"
    GTD = 'GTD'  # "Good unTil Date": cancelled at the provided time
    GFD = 'GFD'  # "Good For Day": cancelled at 5pm New York time
    FOK = 'FOK'  # The Order must be immediately "Filled Or Killed"
    IOC = 'IOC'  # The Order must be "Immediately partially filled Or Killed"


class OrderPositionFill(object):
    """How an order's fill interacts with existing positions."""
    OPEN_ONLY = 'OPEN_ONLY'  # Only allow Positions to be opened or extended
    REDUCE_FIRST = 'REDUCE_FIRST'  # Fully reduce an existing Position before opening a new one
    REDUCE_ONLY = 'REDUCE_ONLY'  # Only reduce an existing Position
    DEFAULT = 'DEFAULT'  # REDUCE_FIRST for non-client-hedging Accounts, OPEN_ONLY for client-hedging Accounts


class OrderTriggerCondition(object):
    """Which price side triggers an order."""
    DEFAULT = 'DEFAULT'  # "Natural" trigger: ask for long Orders, bid for short Orders
    INVERSE = 'INVERSE'  # Opposite of natural: bid for long Orders, ask for short Orders
    BID = 'BID'  # Compare against the bid regardless of direction
    ASK = 'ASK'  # Compare against the ask regardless of direction
    MID = 'MID'  # Compare against the midpoint regardless of direction
class TransactionName(object):
    """Keys under which transactions appear in an order-response payload."""
    orderCreateTransaction = 'orderCreateTransaction'
    longOrderCreateTransaction = 'longOrderCreateTransaction'
    shortOrderCreateTransaction = 'shortOrderCreateTransaction'
    orderFillTransaction = 'orderFillTransaction'
    longOrderFillTransaction = 'longOrderFillTransaction'
    shortOrderFillTransaction = 'shortOrderFillTransaction'
    orderCancelTransaction = 'orderCancelTransaction'
    longOrderCancelTransaction = 'longOrderCancelTransaction'
    shortOrderCancelTransaction = 'shortOrderCancelTransaction'
    orderReissueTransaction = 'orderReissueTransaction'
    orderRejectTransaction = 'orderRejectTransaction'
    orderReissueRejectTransaction = 'orderReissueRejectTransaction'
    replacingOrderCancelTransaction = 'replacingOrderCancelTransaction'

    @classmethod
    def all(cls):
        """Return every transaction-name string defined on this class.

        Filters to public string attributes, which excludes dunders and
        this classmethod itself.
        """
        return [v for k, v in TransactionName.__dict__.items()
                if not k.startswith('_') and isinstance(v, str)]


class TransactionType(object):
    """Type list of transaction object (the ``type`` field values)."""
    CREATE = 'CREATE'  # Account Create Transaction
    CLOSE = 'CLOSE'  # Account Close Transaction
    REOPEN = 'REOPEN'  # Account Reopen Transaction
    CLIENT_CONFIGURE = 'CLIENT_CONFIGURE'  # Client Configuration Transaction
    CLIENT_CONFIGURE_REJECT = 'CLIENT_CONFIGURE_REJECT'  # Client Configuration Reject Transaction
    TRANSFER_FUNDS = 'TRANSFER_FUNDS'  # Transfer Funds Transaction
    TRANSFER_FUNDS_REJECT = 'TRANSFER_FUNDS_REJECT'  # Transfer Funds Reject Transaction
    # ORDER
    MARKET_ORDER = 'MARKET_ORDER'  # Market Order Transaction
    MARKET_ORDER_REJECT = 'MARKET_ORDER_REJECT'  # Market Order Reject Transaction
    FIXED_PRICE_ORDER = 'FIXED_PRICE_ORDER'  # Fixed Price Order Transaction
    LIMIT_ORDER = 'LIMIT_ORDER'  # Limit Order Transaction
    LIMIT_ORDER_REJECT = 'LIMIT_ORDER_REJECT'  # Limit Order Reject Transaction
    STOP_ORDER = 'STOP_ORDER'  # Stop Order Transaction
    STOP_ORDER_REJECT = 'STOP_ORDER_REJECT'  # Stop Order Reject Transaction
    MARKET_IF_TOUCHED_ORDER = 'MARKET_IF_TOUCHED_ORDER'  # Market if Touched Order Transaction
    MARKET_IF_TOUCHED_ORDER_REJECT = 'MARKET_IF_TOUCHED_ORDER_REJECT'  # Market if Touched Order Reject Transaction
    TAKE_PROFIT_ORDER = 'TAKE_PROFIT_ORDER'  # Take Profit Order Transaction
    TAKE_PROFIT_ORDER_REJECT = 'TAKE_PROFIT_ORDER_REJECT'  # Take Profit Order Reject Transaction
    STOP_LOSS_ORDER = 'STOP_LOSS_ORDER'  # Stop Loss Order Transaction
    STOP_LOSS_ORDER_REJECT = 'STOP_LOSS_ORDER_REJECT'  # Stop Loss Order Reject Transaction
    TRAILING_STOP_LOSS_ORDER = 'TRAILING_STOP_LOSS_ORDER'  # Trailing Stop Loss Order Transaction
    TRAILING_STOP_LOSS_ORDER_REJECT = 'TRAILING_STOP_LOSS_ORDER_REJECT'  # Trailing Stop Loss Order Reject Transaction
    ORDER_FILL = 'ORDER_FILL'  # Order Fill Transaction
    ORDER_CANCEL = 'ORDER_CANCEL'  # Order Cancel Transaction
    ORDER_CANCEL_REJECT = 'ORDER_CANCEL_REJECT'  # Order Cancel Reject Transaction
    ORDER_CLIENT_EXTENSIONS_MODIFY = 'ORDER_CLIENT_EXTENSIONS_MODIFY'  # Order Client Extensions Modify Transaction
    ORDER_CLIENT_EXTENSIONS_MODIFY_REJECT = 'ORDER_CLIENT_EXTENSIONS_MODIFY_REJECT'  # Order Client Extensions Modify Reject Transaction
    # Trade
    TRADE_CLIENT_EXTENSIONS_MODIFY = 'TRADE_CLIENT_EXTENSIONS_MODIFY'  # Trade Client Extensions Modify Transaction
    TRADE_CLIENT_EXTENSIONS_MODIFY_REJECT = 'TRADE_CLIENT_EXTENSIONS_MODIFY_REJECT'  # Trade Client Extensions Modify Reject Transaction
    MARGIN_CALL_ENTER = 'MARGIN_CALL_ENTER'  # Margin Call Enter Transaction
    MARGIN_CALL_EXTEND = 'MARGIN_CALL_EXTEND'  # Margin Call Extend Transaction
    MARGIN_CALL_EXIT = 'MARGIN_CALL_EXIT'  # Margin Call Exit Transaction
    DELAYED_TRADE_CLOSURE = 'DELAYED_TRADE_CLOSURE'  # Delayed Trade Closure Transaction
    DAILY_FINANCING = 'DAILY_FINANCING'  # Daily Financing Transaction
    RESET_RESETTABLE_PL = 'RESET_RESETTABLE_PL'  # Reset Resettable PL Transaction


# Hostnames for each OANDA environment, keyed first by channel
# (streaming vs. REST api), then by account type.
OANDA_ENVIRONMENTS = {
    "streaming": {
        AccountType.REAL: "stream-fxtrade.oanda.com",
        AccountType.DEMO: "stream-fxpractice.oanda.com",
        AccountType.SANDBOX: "stream-sandbox.oanda.com"
    },
    "api": {
        AccountType.REAL: "api-fxtrade.oanda.com",
        AccountType.DEMO: "api-fxpractice.oanda.com",
        AccountType.SANDBOX: "api-sandbox.oanda.com"
    }
}
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('lexicon', '0034_auto_20160419_1657'), ] operations = [ migrations.CreateModel( name='Clade', fields=[ ('id', models.AutoField( verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('family_ix', models.IntegerField(blank=True)), ('level1_branch_ix', models.IntegerField(default=0)), ('level2_branch_ix', models.IntegerField(default=0)), ('level3_branch_ix', models.IntegerField(default=0)), ('level1_branch_name', models.TextField( unique=True, blank=True)), ('hexColor', models.CharField(max_length=6, blank=True)), ('shortName', models.CharField(max_length=5, blank=True)), ('export', models.BooleanField(default=0)), ('exportDate', models.BooleanField(default=0)), ('taxonsetName', models.CharField(max_length=100, blank=True)), ('atMost', models.IntegerField(null=True)), ('atLeast', models.IntegerField(null=True)), ('distribution', models.CharField( default=b'_', max_length=1, choices=[(b'U', b'Uniform'), (b'N', b'Normal'), (b'L', b'Log normal'), (b'O', b'Offset log normal'), (b'_', b'None')])), ('logNormalOffset', models.IntegerField(null=True)), ('logNormalMean', models.IntegerField(null=True)), ('logNormalStDev', models.IntegerField(null=True)), ('normalMean', models.IntegerField(null=True)), ('normalStDev', models.IntegerField(null=True)), ('uniformUpper', models.IntegerField(null=True)), ('uniformLower', models.IntegerField(null=True)), ('cladeLevel0', models.IntegerField(default=0)), ('cladeLevel1', models.IntegerField(default=0)), ('cladeLevel2', models.IntegerField(default=0)), ('cladeLevel3', models.IntegerField(default=0)), ], ), ]
from typing import List
from project.car.car import Car
from project.car.muscle_car import MuscleCar
from project.car.sports_car import SportsCar
from project.driver import Driver
from project.race import Race

# Maps the type name accepted by create_car to the concrete car class.
all_valid_car_types = {"MuscleCar": MuscleCar, "SportsCar": SportsCar}


class Controller:
    """Coordinates registration of cars, drivers and races, and runs races."""

    def __init__(self):
        self.cars: List[Car] = []
        self.drivers: List[Driver] = []
        self.races: List[Race] = []

    # -- internal lookup helpers -------------------------------------------

    def _find_driver(self, driver_name: str) -> Driver:
        """Return the registered driver with this name, or raise."""
        found = next((d for d in self.drivers if d.name == driver_name), None)
        if found is None:
            raise Exception(f"Driver {driver_name} could not be found!")
        return found

    def _find_race(self, race_name: str) -> Race:
        """Return the registered race with this name, or raise."""
        found = next((r for r in self.races if r.name == race_name), None)
        if found is None:
            raise Exception(f"Race {race_name} could not be found!")
        return found

    # -- public API --------------------------------------------------------

    def create_car(self, car_type: str, model: str, speed_limit: int):
        """Register a new car; a duplicate model is rejected."""
        if car_type not in all_valid_car_types:
            # Unknown car types are silently ignored (returns None),
            # matching the established behaviour of this method.
            return None
        if any(existing.model == model for existing in self.cars):
            raise Exception(f"Car {model} is already created!")
        new_car = all_valid_car_types[car_type](model, speed_limit)
        self.cars.append(new_car)
        return f"{car_type} {model} is created."

    def create_driver(self, driver_name: str):
        """Register a new driver; a duplicate name is rejected."""
        if any(d.name == driver_name for d in self.drivers):
            raise Exception(f"Driver {driver_name} is already created!")
        self.drivers.append(Driver(driver_name))
        return f"Driver {driver_name} is created."

    def create_race(self, race_name: str):
        """Register a new race; a duplicate name is rejected."""
        if any(r.name == race_name for r in self.races):
            raise Exception(f"Race {race_name} is already created!")
        self.races.append(Race(race_name))
        return f"Race {race_name} is created."

    def add_car_to_driver(self, driver_name: str, car_type: str):
        """Assign the most recently added free car of *car_type* to a driver."""
        driver = self._find_driver(driver_name)
        free_cars = [c for c in self.cars
                     if c.__class__.__name__ == car_type and c.is_taken is False]
        if not free_cars:
            raise Exception(f"Car {car_type} could not be found!")
        car = free_cars[-1]  # most recently registered free car wins

        # If the driver already owns a car, release it before the swap.
        if driver.car:
            old_model = driver.car.model
            driver.car.free_the_car()
            driver.car = car
            car.take_the_car()
            return f"Driver {driver_name} changed his car from {old_model} to {car.model}."

        # Driver owns no car yet.
        driver.car = car
        car.take_the_car()
        return f"Driver {driver_name} chose the car {car.model}."

    def add_driver_to_race(self, race_name: str, driver_name: str):
        """Enroll a car-owning driver into a race, at most once."""
        race = self._find_race(race_name)
        driver = self._find_driver(driver_name)

        # A driver without a car cannot participate.
        if not driver.car:
            raise Exception(f"Driver {driver_name} could not participate in the race!")
        # A driver may only be enrolled once per race.
        if driver in race.drivers:
            return f"Driver {driver_name} is already added in {race_name} race."

        race.drivers.append(driver)
        return f"Driver {driver_name} added in {race_name} race."

    def start_race(self, race_name: str):
        """Run a race with at least 3 participants; report the three fastest."""
        race = self._find_race(race_name)
        if len(race.drivers) < 3:
            raise Exception(f"Race {race_name} cannot start with less than 3 participants!")

        winners = race.get_top_3_fastest_drivers()
        return "\n".join(
            f"Driver {w.name} wins the {race_name} race with a speed of {w.car.speed_limit}."
            for w in winners[:3]
        )
# Generated by Django 2.0.4 on 2018-10-29 17:21 from django.db import migrations import wagtail.core.blocks import wagtail.core.fields import wagtail.images.blocks class Migration(migrations.Migration): dependencies = [ ('core', '0040_auto_20181024_1536'), ] operations = [ migrations.AlterField( model_name='homepage', name='slider', field=wagtail.core.fields.StreamField((('slider_item', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(max_length=25, required=False)), ('summary', wagtail.core.blocks.TextBlock(max_length=60, required=False)), ('photo', wagtail.images.blocks.ImageChooserBlock(help_text='This image MUST BE EXACTLY 1400px by 550px')), ('page', wagtail.core.blocks.PageChooserBlock(required=False)), ('external_url', wagtail.core.blocks.URLBlock(required=False)), ('active', wagtail.core.blocks.BooleanBlock(required=False))))),), blank=True), ), ]
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#0.1.0
import lldb
import re
import os


# ``command`` is the symbol address typed by the user after the
# ``gMapSource`` command (may be empty).
def gMapSource(debugger, command, result, internal_dict):
    """LLDB command: map a binary's compile-time source path to a local
    source checkout via ``settings set target.source-map``.

    With no argument, replays the mapping previously saved in
    ``savedFilePath``. With an address argument, resolves the compile-time
    file for that address, locates the matching local file with ``mdfind``,
    saves both paths, and installs the mapping.
    """
    print('command: ' + command)
    # Where the (compile path, local path) pair is persisted between runs.
    savedFilePath = '/Users/guohongwei719/Desktop/GHWBinaryMapSource/script/path.txt'
    # Root directory searched for the matching local source file.
    localSourcePath = '/Users/guohongwei719/Desktop/GHWBinaryMapSource/localPods/MapSourceTest'
    if command == '':
        # No argument: reuse the two paths stored on a previous invocation.
        print('没参数')
        current_path = os.getcwd()
        print('当前所在路径:' + current_path)
        interpreter = lldb.debugger.GetCommandInterpreter()
        returnObject = lldb.SBCommandReturnObject()
        file_handler = open(savedFilePath, 'r')
        content = file_handler.readlines()
        if len(content) == 2:
            # Line 1: compile-time path; line 2: local source path.
            compileFilePath = content[0].replace('\n', '')
            localSourceFilePath = content[1].replace('\n', '')
            print('编译时文件路径 compileFilePath = ' + compileFilePath)
            print('本地对应源码文件路径 sourcePath = ' + localSourceFilePath)
            # Map the compile-time location onto the local checkout.
            interpreter.HandleCommand('settings set target.source-map ' + compileFilePath + ' ' + localSourceFilePath, returnObject)
            output = returnObject.GetOutput();
        else:
            # The saved file does not hold exactly the two expected lines.
            print('缺失路径信息')
    else:
        print('有参数')
        # Grab LLDB's interactive command interpreter so we can run
        # commands programmatically (like typing them at the prompt).
        interpreter = lldb.debugger.GetCommandInterpreter()
        # Command results are captured through this object.
        returnObject = lldb.SBCommandReturnObject()
        # Use `image lookup` to find which compiled module/file contains
        # the given address.
        interpreter.HandleCommand('image lookup -v --address ' + command, returnObject)
        output = returnObject.GetOutput();
        print('output: \n' + output)
        # Approach implemented below:
        #   1. From the address, extract the compile-time source path and
        #      file name out of the `image lookup` output.
        #   2. Search localSourcePath for a file with the same name.
        #   3. Map the compile-time path onto that local path.
        # (A production variant could instead resolve the owning module,
        #  e.g. SDWebImage, clone its source repo, and map module->repo.)
        # Pull the compile-time source path out of the lookup output.
        compileFilePath = re.match(r'(.|\n)*file = "(.*?)".*', output,re.M).group(2)
        print('编译时文件路径 compileFilePath = ' + compileFilePath)
        # Extract just the file name from that path.
        fileName = re.match(r'/.*/(.*)', compileFilePath).group(1)
        print('文件名称 fileName = ' + fileName)
        # Spotlight-search the local source tree for the same file name.
        localSourceFilePath = os.popen('mdfind -onlyin ' + localSourcePath + ' ' + fileName).read().replace('\n','')
        print('本地对应源码文件路径 localSourceFilePath = ' + localSourceFilePath)
        # Persist the pair so a later argument-less call can replay it.
        content = []
        content.append(compileFilePath)
        content.append('\n')
        content.append(localSourceFilePath)
        out = open(savedFilePath, 'w')
        out.writelines(content)
        out.close()
        # Install the source-map so breakpoints/backtraces show local files.
        interpreter.HandleCommand('settings set target.source-map ' + compileFilePath + ' ' + localSourceFilePath, returnObject)


# Registers the ``gMapSource`` command with LLDB; after importing this
# script, typing e.g. ``gMapSource 0x10803839`` runs gMapSource() above.
def __lldb_init_module(debugger, internal_dict):
    debugger.HandleCommand('command script add gMapSource -f GHWBinaryMapSource.gMapSource')
"""Train an APPO agent on the ConFormSim StorageEnv with visual observations.

Registers the custom environment and model with RLlib, builds the APPO
trainer configuration, runs the experiment via Ray Tune, and prints the
best hyperparameters found.
"""
import random
import numpy as np
from conform_agent.env.rllib.storage_env import RLLibConFormSimStorageEnv
from conform_agent.models.tf.simple_rcnn import SimpleRCNNModel
import ray
from ray import tune
from ray.tune.registry import register_env
from conform_agent.conform_callbacks import ConFormCallbacks
from ray.rllib.models import ModelCatalog
from ray.tune.schedulers import ASHAScheduler  # NOTE(review): imported but unused below
import experiments.storage_env_configs as StorageEnvConfig

# Seed the Python and NumPy RNGs for reproducibility of everything in this
# driver process (RLlib worker seeding is not configured here).
random.seed(42)
np.random.seed(42)

# ray initialization and stuff
ray.init(num_cpus=4, num_gpus=1)
# ray.init(address='auto')  # alternative: attach to an already running cluster
register_env("StorageEnv", RLLibConFormSimStorageEnv)
ModelCatalog.register_custom_model("SimpleRCNNModel", SimpleRCNNModel)

# APPO trainer configuration passed to tune.run below.
config={
    "env": "StorageEnv",
    "env_config": StorageEnvConfig.easy_visual_obs,
    "model":{
        "custom_model": "SimpleRCNNModel",
        "custom_model_config": {
            # Defines the convolutional layers. For each layer there has
            # to be [num_filters, kernel, stride].
            "conv_layers": [
                [16, [8, 8], 4],
                [16, [4, 4], 2]],
            # Defines the dense layers following the convolutional layers (if
            # any). For each layer the num_hidden units has to be defined.
            "dense_layers": [128, 128],
            # whether to use a LSTM layer after the dense layers.
            "use_recurrent": False,
        },
    },
    # Whether to use V-trace weighted advantages. If false, PPO GAE
    # advantages will be used instead.
    "vtrace": True,

    # == These two options only apply if vtrace: False ==
    # Should use a critic as a baseline (otherwise don't use value
    # baseline; required for using GAE).
    "use_critic": True,
    # If true, use the Generalized Advantage Estimator (GAE)
    # with a value function, see https://arxiv.org/pdf/1506.02438.pdf.
    "use_gae": True,
    # GAE(lambda) parameter
    "lambda": 0.95,

    # == PPO surrogate loss options ==
    "clip_param": 0.2,

    # == PPO KL Loss options ==
    "use_kl_loss": True,
    "kl_coeff": 0.56,
    "kl_target": 0.27,

    # System params.
    #
    # == Overview of data flow in IMPALA ==
    # 1. Policy evaluation in parallel across `num_workers` actors produces
    #    batches of size `rollout_fragment_length * num_envs_per_worker`.
    # 2. If enabled, the replay buffer stores and produces batches of size
    #    `rollout_fragment_length * num_envs_per_worker`.
    # 3. If enabled, the minibatch ring buffer stores and replays batches of
    #    size `train_batch_size` up to `num_sgd_iter` times per batch.
    # 4. The learner thread executes data parallel SGD across `num_gpus` GPUs
    #    on batches of size `train_batch_size`.
    #
    "rollout_fragment_length": 64,
    "train_batch_size": 2048,
    "min_iter_time_s": 10,
    "num_workers": 3,
    # number of GPUs the learner should use.
    "num_gpus": 1,
    # set >1 to load data into GPUs in parallel. Increases GPU memory usage
    # proportionally with the number of buffers.
    "num_data_loader_buffers": 1,
    # how many train batches should be retained for minibatching. This conf
    # only has an effect if `num_sgd_iter > 1`.
    "minibatch_buffer_size": 10,
    # number of passes to make over each train batch
    "num_sgd_iter": 10,
    # set >0 to enable experience replay. Saved samples will be replayed with
    # a p:1 proportion to new data samples.
    "replay_proportion": 0.0,
    # number of sample batches to store for replay. The number of transitions
    # saved total will be (replay_buffer_num_slots * rollout_fragment_length).
    "replay_buffer_num_slots": 0,
    # max queue size for train batches feeding into the learner
    "learner_queue_size": 16,
    # wait for train batches to be available in minibatch buffer queue
    # this many seconds. This may need to be increased e.g. when training
    # with a slow environment
    "learner_queue_timeout": 300,
    # level of queuing for sampling.
    "max_sample_requests_in_flight_per_worker": 2,
    # max number of workers to broadcast one set of weights to
    "broadcast_interval": 1,
    # use intermediate actors for multi-level aggregation. This can make sense
    # if ingesting >2GB/s of samples, or if the data requires decompression.
    "num_aggregation_workers": 0,

    # Learning params.
    "grad_clip": 40.0,
    # either "adam" or "rmsprop"
    "opt_type": "adam",
    "lr": 9e-4,
    "lr_schedule": [[0, 9e-4], [10e6, 0]],
    # rmsprop considered
    "decay": 0.99,
    "momentum": 0.0,
    "epsilon": 0.1,
    # balancing the three losses
    "vf_loss_coeff": 0.8,
    "entropy_coeff": 1.5e-3,
    "entropy_coeff_schedule": None,
    # Discount factor of the MDP.
    "gamma": 0.90,
    "callbacks": ConFormCallbacks,
}

# Tune stopping criteria; all conditions currently disabled, so the run
# continues until stopped externally or it fails `max_failures` times.
stopping_criteria = {
    # "training_iteration": 180,
    # # "time_total_s" : 1800,
}

# Launch the APPO experiment; checkpoints every 100 iterations and at the end.
result = tune.run(
    "APPO",
    name="appo_visual_obs",
    stop=stopping_criteria,
    reuse_actors=False,
    checkpoint_freq=100,
    checkpoint_at_end=True,
    config=config,
    num_samples=1,
    max_failures=3,
    # resume = True,
)
print("Best hyperparameters found were: ",
      result.get_best_config(metric="episode_reward_mean", mode="max"))
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# ---------------------------
# Author: deangao
# Copyright: 2016 deangao
# Version: v1.0.0
# Created: 2016/2/3
# ---------------------------
"""Demonstration of common dict operations.

Ported from Python 2 to Python 3: `print` statements became function
calls, and the view objects returned by items()/keys()/values() are
wrapped in list() so the printed output keeps the Python 2 list form.
"""
__author__ = 'deangao'

dict1 = {'name': 'deangao', 'address': ['sz', 'wh'], 'sex': 'm'}
print(dict1)

# --- other operations ---
# 1. pop: remove the entry with the given key and return its value
tmp = dict1.pop('name')
print(tmp)

# 2. clear: empty the whole dict in place
dict1.clear()
print(dict1)

dict1 = {'name': 'deangao', 'address': ['sz', 'wh'], 'sex': 'm'}

# 3. copy: shallow copy
dict2 = dict1.copy()
print(dict2)

# 4. fromkeys: build a new dict from a sequence of keys, values all None
tmp = dict1.fromkeys(['name', 'a', 'b'])
print(tmp)
print(dict1)

# 5. get: look up a value by key
print(dict1.get('name'))

# 6. items: the key/value pairs, as a list of tuples
print(list(dict1.items()))

# 7. update: merge another dict into this one
dict2 = {'name1': 'age1', 'age1': 22}
print(dict1)
dict1.update(dict2)
print(dict1)

# 8. keys: list all keys
print(list(dict1.keys()))

# 9. values: list all values
print(list(dict1.values()))
# Python - 2.7.6
# Codewars "Shortcut: remove vowels" kata check.  The original file called
# Test.assert_equals from the Codewars harness against a `shortcut` defined
# elsewhere; neither existed here, so the file could not run.  The kata
# implementation is provided and the harness call replaced by a plain assert.

def shortcut(s):
    """Return *s* with all lowercase vowels (a, e, i, o, u) removed."""
    return ''.join(ch for ch in s if ch not in 'aeiou')

assert shortcut('hello') == 'hll'