text
stringlengths
8
6.05M
from ..model import SimpleTZ
from datetime import datetime, timedelta

try:
    import vobject
except ImportError:
    # vobject is optional at import time; only an actual export needs it.
    pass


class ICalExporter:
    """Serializes a conference schedule as an iCalendar (.ics) document."""

    extension = 'ics'

    def write(self, fileobj, conf):
        """Write every active, room-assigned event of *conf* to *fileobj* as iCal."""
        timestamp = datetime.now(SimpleTZ())
        domain = conf.get_domain()
        calendar = vobject.iCalendar()
        calendar.add('prodid').value = '-//pretalx//{}//'.format(domain)
        for event in sorted(conf.events):
            # events without a room or that are inactive are not exported
            if event.active and event.room:
                self._append_event(calendar, event, conf, domain, timestamp)
        fileobj.write(calendar.serialize())

    def _append_event(self, calendar, event, conf, domain, timestamp):
        """Append one VEVENT entry describing *event* to *calendar*."""
        entry = calendar.add('vevent')
        speakers = ', '.join(sp.name for sp in event.speakers)
        entry.add('summary').value = '{} — {}'.format(event.title, speakers)
        entry.add('dtstamp').value = timestamp
        entry.add('location').value = event.room.name
        entry.add('uid').value = 'pretalx-{}-{}@{}'.format(
            conf.slug, event.guid, domain)
        entry.add('dtstart').value = event.start
        entry.add('dtend').value = event.start + timedelta(minutes=event.duration)
        entry.add('description').value = event.abstract or event.description or ''
        if event.url:
            entry.add('url').value = event.url
""" Run metrics to determine how well the ResNet model identify the same car This script will create a plot that will have * x-axis: the range for the number of times we have to go through in the set sorted by the shortest cosine distance to match the target car image * y-axis: cummulative sum of (the number of times we have to go through in the set to match the target car image) divided by (the number of run) Assumption: * FEATURE is a json file with imageName (filename, not filepath) and resNet50 (feature vectors) * FEATURE includes all the feature vectors for both test and training images Output: * cmc_metric.pdf: plot of CMC metric * str_metric.pdf: plot of STR metric * metric.log: log messages Reliance: The script will rely on ExperimentGenerator to build a list of sets (of images) for the experiment. Metric: 1. CMC: compare the target car with a set of 10 car images Step #1: Using the ExperimentGenerator, create a set of 10 car images with zero drop rate, meaning the target car will exist in the set. Step #2: Match each feature vector to the images in the set. Step #3: Calculate the cosine distance of the target car's feature vector and the feature vector of each image in the set, resulting in 10 computations. Step #4: Sort the list of images in the set by the shortest cosine distance. Step #5: Determine how many images it has to go through the sorted cosine distance list to find the target car. Step #6: Plot out the graph. 2. STR: compare a set of 10 car images with another set of 10 car images Step #1: Using the ExperimentGenerator, create two sets of 10 car images with zero drop rate, meaning the target car will exist in both set. Step #2: Match each feature vector to the images in the sets. Step #3: Calculate the cosine distance of each car's feature vector in set 1 and set 2, resulting in 100 computations. Step #4: Sort the list of images in the set by the shortest cosine distance. 
Step #5: Determine how many image pairs it has to go through the sorted cosine distance list to find the target car image pair. Step #6: Plot out the graph. Usage: metric.py [-hv] metric.py [-e <SEED> -c -s] -w <DATASET_TYPE> -y <SET_TYPE> -r <NUM_RUN> <DATASET_PATH> <FEATURE> Arguments: DATASET_PATH : Path to the dataset unzipped FEATURE : Path to the feature json file Make sure that feature is the same type as SET_TYPE Options: -h, --help : Show this help message. -v, --version : Show the version number. -c, --cmc : Run CMC (Cummulative Matching Curve) metric. -s, --str : Run STR (N^2) metric. -r NUM_RUN : How many iterations to run the ranking. -w DATASET_TYPE : Specify the datasets to use. ["CompcarsDataset", "StrDataset", "VeriDataset"] -y SET_TYPE : Determine which type of images and features to use. 0: all, 1: query, 2: test, 3: train -e SEED : Seed to be used for random number generator. (default: random [1-100]) """ import argparse import collections import logging import matplotlib.pyplot as plt import os import random import scipy import scipy.spatial import sys import pelops.utils as utils from .experiment import ExperimentGenerator class MetricRunner(object): CMC = 0 STR = 1 def __init__(self, dataset_path, feature_path, seed, dataset_type, num_run, set_type): # mandatory self.dataset_path = dataset_path self.feature_path = feature_path self.num_run = num_run self.set_type = set_type self.dataset_type = dataset_type # optional but initialized self.seed = seed # logging log_file = "metric.log" utils.remove_file(log_file) logging.basicConfig(filename=log_file, level=logging.DEBUG) # @timewrapper def run_str(self): """ STR will compare a set of 10 car images with another set of 10 car images """ logging.info("=" * 80) logging.info("Running STR metric") # instantiate ExperimentGenerator num_cams = 2 num_cars_per_cam = 10 drop_percentage = 0 logging.info("-" * 80) logging.info("Instantiate ExperimentGenerator") logging.info("num_cams = {}, num_cars_per_cam 
= {}, drop_percentage = {}".format(num_cams, num_cars_per_cam, drop_percentage)) logging.info("-" * 80) exp = ExperimentGenerator(self.dataset_path, self.dataset_type, num_cams, num_cars_per_cam, drop_percentage, self.seed, self.set_type) # run the metric self.__run(exp, MetricRunner.STR) logging.info("=" * 80) return # @timewrapper def run_cmc(self): """ CMC will compare target car image with a set of 10 car images """ logging.info("=" * 80) logging.info("Running CMC metric") # instantiate ExperimentGenerator num_cams = 1 num_cars_per_cam = 10 drop_percentage = 0 logging.info("-" * 80) logging.info("Instantiate ExperimentGenerator") logging.info("num_cams = {}, num_cars_per_cam = {}, drop_percentage = {}".format(num_cams, num_cars_per_cam, drop_percentage)) logging.info("-" * 80) exp = ExperimentGenerator(self.dataset_path, self.dataset_type, num_cams, num_cars_per_cam, drop_percentage, self.seed, self.set_type) # run the metric self.__run(exp, MetricRunner.CMC) logging.info("=" * 80) return def __run(self, exp, which_metric): # get feature vectors feature_vectors = self.__get_feature_vectors() # calculate which image in the set has the shortest cosine distance # and add the number of times we have to go through the sorted set to find the matching target car attempts = list() for i in range(0, self.num_run): logging.info("Run #{}".format(i + 1)) attempts.append(self.__get_attempt(exp, feature_vectors, which_metric)) logging.info("Adding the number of attempts to find the matching target car into the list:") logging.info(attempts) logging.info("-" * 80) # plot the output output_name = { MetricRunner.CMC: "cmc_metric.pdf", MetricRunner.STR: "str_metric.pdf", }.get(which_metric) self.__plot(collections.Counter(attempts), output_name) return def __get_attempt(self, exp, feature_vectors, which_metric): # index reference for set creation CMC = MetricRunner.CMC STR = MetricRunner.STR # index reference for cosine_distances which is in the format of [(car_id, 
cosine_distance)] CHOSEN_CAR_INDEX = 0 COMP_CAR_INDEX = 1 COSINE_DISTANCE_INDEX = 2 logging.info("Generate set of images") camsets = exp.generate() logging.info("Match target car to its respective vector") target_car_filename = utils.get_basename(exp.target_car.filepath) logging.info("target {}".format(target_car_filename)) target_car_vector = feature_vectors[target_car_filename] logging.info("Identify chosen set vs comparison set") # chosen chosen_set = { CMC: [exp.target_car], STR: camsets[0], }.get(which_metric) # comparison comp_sets = { CMC: camsets, STR: camsets[1:], }.get(which_metric) logging.info("Calculate cosine distances between the sets") cosine_distances = list() for chosen_car in chosen_set: logging.info(">> Match chosen car to its respective vector") chosen_car_filename = utils.get_basename(chosen_car.filepath) chosen_car_vector = feature_vectors[chosen_car_filename] for comp_set in comp_sets: for comp_car in comp_set: logging.info(">> Match comparison car to its respective vector") comp_car_filename = utils.get_basename(comp_car.filepath) comp_car_vector = feature_vectors[comp_car_filename] logging.info(">> Calculate the cosine distance") cosine_distance = scipy.spatial.distance.cosine(chosen_car_vector, comp_car_vector) cosine_distances.append((chosen_car.car_id, comp_car.car_id, cosine_distance)) logging.info(">> chosen {}, comp {}, cosine distance {}".format(chosen_car.filepath, comp_car.filepath, cosine_distance)) logging.info("Sort the cosine distances") cosine_distances = sorted(cosine_distances, key=lambda tupl:tupl[COSINE_DISTANCE_INDEX]) logging.info("Determine how many times we have to go through the sorted list to find the matching target car") attempt = { CMC: utils.get_index_of_tuple(cosine_distances, COMP_CAR_INDEX, exp.target_car.car_id), STR: utils.get_index_of_pairs(cosine_distances, CHOSEN_CAR_INDEX, COMP_CAR_INDEX, exp.target_car.car_id), }.get(which_metric) return attempt def __get_feature_vectors(self): # assume the json file 
has # imageName that references the image's name # resnet50 that references the image's feature vector feature_vectors = dict() objs = utils.read_json(self.feature_path) for obj in objs: feature_vectors[obj["imageName"]] = obj["resnet50"] return feature_vectors def __plot(self, num_per_index, output_name): def get_y(x): y = list() total = 0. for key in x: total = total + (float(num_per_index[key]) / float(self.num_run)) y.append(total) return y x = sorted(num_per_index.keys()) y = get_y(x) logging.info("x value: {}".format(x)) logging.info("y value: {}".format(y)) plt.plot(x, y, '-o') plt.xlabel("number of attempts to find the matching target car") plt.ylabel("cummulative sum") plt.axis([1, max(x), 0, max(y)]) """ # annotate all value points for i, y_val in enumerate(y): plt.annotate(y_val, xy=(x[i], y[i])) """ # annotate only the first point plt.annotate(y[0], xy=(x[0], y[0])) plt.savefig(output_name) return # ----------------------------------------------------------------------------- # Execution example # ----------------------------------------------------------------------------- def main(args): # extract arguments from command line dataset_path = args.dataset_path feature_path = args.feature is_cmc = args.cmc is_str = args.str num_run = args.num_run dataset_type = args.dataset_type set_type = args.set_type seed = args.seed # check that input_path points to a directory if not os.path.exists(dataset_path) or not os.path.isdir(dataset_path): sys.exit("ERROR: filepath to VeRi directory (%s) is invalid" % dataset_path) # check that a metric is selected to run if not is_cmc and not is_str: sys.exit("ERROR: you need to specify which metric to run") # create the metric runner runner = MetricRunner(dataset_path, feature_path, seed, dataset_type, num_run, set_type) # run the experiment if is_cmc: runner.run_cmc() if is_str: runner.run_str() # ----------------------------------------------------------------------------- # Entry # 
----------------------------------------------------------------------------- if __name__ == '__main__': parser = argparse.ArgumentParser(prog="metric.py", description="Run metrics to determine how well the ResNet model identify the same car. Outputs: ", formatter_class=argparse.RawTextHelpFormatter) # arguments parser.add_argument("dataset_path", default="dataset_path", action="store", type=str, help="Path to the dataset unzipped.") parser.add_argument("feature", default="feature", action="store", type=str, help="Path to the feature json file.\nMake sure that feature is the same type as TYPE.") # options parser.add_argument("-v", "--version", action="version", version="Metric Runner 1.0") parser.add_argument("-w", dest="dataset_type", action="store", choices=["CompcarDataset", "StrDataset", "VeriDataset"], type=str, help="Specify the datasets to use.") parser.add_argument("-c", "--cmc", dest="cmc", action="store_true", default=False, help="Run CMC metric.") parser.add_argument("-s", "--str", dest="str", action="store_true", default=False, help="Run STR metric.") parser.add_argument("-r", dest="num_run", action="store", type=int, help="NUM_RUN defines how many iterations to run the metric.") parser.add_argument("-y", dest="set_type", action="store", choices=[0, 1, 2, 3], type=int, help="TYPE determines which type of images to use.\n0: all, 1: query, 2: test, 3: train") parser.add_argument("-e", dest="seed", action="store", type=int, default=random.randint(1, 100), help="(OPTIONAL) SEED is used for random number generator.") main(parser.parse_args())
#!/usr/bin/python
import base64
import os
import sys

import httplib2
from apiclient import discovery
from apiclient import errors
from apiclient.http import MediaFileUpload
from email.mime.text import MIMEText
from oauth2client import client
from oauth2client import tools
from oauth2client.file import Storage

# all available auth scopes: https://developers.google.com/gmail/api/auth/scopes
SCOPES = [
    "https://www.googleapis.com/auth/gmail.readonly",
    "https://www.googleapis.com/auth/gmail.modify",
]
CLIENT_SECRET_FILE = 'client_secret.json'
APPLICATION_NAME = 'Drive API Python'


class GmailController(object):
    """Thin wrapper around the Gmail v1 API bound to one stored credential."""

    serviceName = "gmail"
    serviceVersion = "v1"

    def __init__(self, credentialName):
        """Authorize against Gmail, caching credentials under
        ~/.credentials/<credentialName>.json, and build the API service."""
        self.flags = None
        home_dir = os.path.expanduser('~')
        credential_dir = os.path.join(home_dir, '.credentials')
        if not os.path.exists(credential_dir):
            os.makedirs(credential_dir)
        self.credentialPath = os.path.join(credential_dir, "{}.json".format(credentialName))
        self.credentials = self.initCredentials()
        print('Storing credentials to ' + self.credentialPath)
        self.http = self.credentials.authorize(httplib2.Http())
        self.service = discovery.build(self.serviceName, self.serviceVersion, http=self.http)

    def initCredentials(self):
        """Gets valid user credentials from storage.

        If nothing has been stored, or if the stored credentials are invalid,
        the OAuth2 flow is completed to obtain the new credentials.

        Returns:
            Credentials, the obtained credential.
        """
        store = Storage(self.credentialPath)
        credentials = store.get()
        if not credentials or credentials.invalid:
            flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
            flow.user_agent = APPLICATION_NAME
            if self.flags:
                credentials = tools.run_flow(flow, store, self.flags)
            else:
                # Needed only for compatibility with Python 2.6
                credentials = tools.run(flow, store)
        return credentials

    @staticmethod
    def create_message_with_attachment(sender, to, subject, message_text, file):
        """Create a message for an email with one attachment.

        Args:
            sender: Email address of the sender.
            to: Email address of the receiver.
            subject: The subject of the email message.
            message_text: The text of the email message.
            file: The path to the file to be attached.

        Returns:
            An object containing a base64url encoded email object.
        """
        import mimetypes
        from email.mime.audio import MIMEAudio
        from email.mime.base import MIMEBase
        from email.mime.image import MIMEImage
        from email.mime.multipart import MIMEMultipart
        from email.mime.text import MIMEText

        message = MIMEMultipart()
        message['to'] = to
        message['from'] = sender
        message['subject'] = subject
        message.attach(MIMEText(message_text))

        content_type, encoding = mimetypes.guess_type(file)
        if content_type is None or encoding is not None:
            # unknown or compressed type: fall back to generic binary
            content_type = 'application/octet-stream'
        main_type, sub_type = content_type.split('/', 1)
        # use context managers so file handles are closed even on read errors
        if main_type == 'text':
            with open(file, 'r') as fp:
                msg = MIMEText(fp.read(), _subtype=sub_type)
        elif main_type == 'image':
            with open(file, 'rb') as fp:
                msg = MIMEImage(fp.read(), _subtype=sub_type)
        elif main_type == 'audio':
            with open(file, 'rb') as fp:
                msg = MIMEAudio(fp.read(), _subtype=sub_type)
        else:
            with open(file, 'rb') as fp:
                msg = MIMEBase(main_type, sub_type)
                msg.set_payload(fp.read())

        filename = os.path.basename(file)
        msg.add_header('Content-Disposition', 'attachment', filename=filename)
        message.attach(msg)

        return {'raw': base64.urlsafe_b64encode(message.as_bytes()).decode("utf-8")}

    @staticmethod
    def create_message(sender, to, subject, message_text):
        """Create a message for an email.

        Args:
            sender: Email address of the sender.
            to: Email address of the receiver.
            subject: The subject of the email message.
            message_text: The text of the email message.

        Returns:
            An object containing a base64url encoded email object.
        """
        message = MIMEText(message_text)
        message['to'] = to
        message['from'] = sender
        message['subject'] = subject
        return {'raw': base64.urlsafe_b64encode(message.as_bytes()).decode("utf-8")}

    # response structure: https://developers.google.com/gmail/api/v1/reference/users/messages#resource
    def sendMessage(self, message, user_id="me"):
        """Send an email message.

        Args:
            user_id: User's email address. The special value "me"
                     can be used to indicate the authenticated user.
            message: Message to be sent.

        Returns:
            Sent Message, or None when the API call fails.
        """
        try:
            message = (self.service.users().messages()
                       .send(userId=user_id, body=message).execute())
            return message
        except errors.HttpError as error:
            print('An error occurred:', error)
            return None

    # response structure: https://developers.google.com/gmail/api/v1/reference/users/messages#resource
    def listMessagesMatchingQuery(self, user_id="me", query='', messageLimit=10):
        """List all Messages of the user's mailbox matching the query.

        Args:
            user_id: User's email address. The special value "me"
                     can be used to indicate the authenticated user.
            query: String used to filter messages returned.
                   Eg.- 'from:user@some_domain.com' for Messages from a particular sender.
            messageLimit: stop paginating once this many message stubs are collected.

        Returns:
            List of Messages that match the criteria of the query. Note that
            the returned list contains Message IDs, you must use get with the
            appropriate ID to get the details of a Message.
        """
        try:
            response = self.service.users().messages().list(
                userId=user_id, q=query, maxResults=messageLimit).execute()
            messages = []
            if 'messages' in response:
                messages.extend(response['messages'])
            while 'nextPageToken' in response:
                if len(messages) >= messageLimit:
                    return messages
                page_token = response['nextPageToken']
                # BUG FIX: was `service.users()` (undefined name) — pagination
                # raised NameError on any result spanning multiple pages.
                response = self.service.users().messages().list(
                    userId=user_id, q=query, pageToken=page_token).execute()
                messages.extend(response['messages'])
            return messages
        except errors.HttpError as error:
            print('An error occurred: ', error)

    def getMailDetail(self, mail_id, user_id="me", type="full"):
        """Fetch one message by id.

        NOTE(review): the `type` parameter (kept for interface compatibility)
        is currently unused — it is never forwarded to the API call.
        """
        response = self.service.users().messages().get(userId=user_id, id=mail_id).execute()
        return response

    def listMessagesWithLabels(self, user_id="me", label_ids=[], messageLimit=10):
        """List all Messages of the user's mailbox with label_ids applied.

        Args:
            user_id: User's email address. The special value "me"
                     can be used to indicate the authenticated user.
            label_ids: Only return Messages with these labelIds applied.
            messageLimit: stop paginating once this many message stubs are collected.

        Returns:
            List of Messages that have all required Labels applied. Note that
            the returned list contains Message IDs, you must use get with the
            appropriate id to get the details of a Message.
        """
        try:
            response = self.service.users().messages().list(
                userId=user_id, labelIds=label_ids, maxResults=messageLimit).execute()
            messages = []
            if 'messages' in response:
                messages.extend(response['messages'])
            while 'nextPageToken' in response:
                # honor messageLimit, consistent with listMessagesMatchingQuery
                if len(messages) >= messageLimit:
                    return messages
                page_token = response['nextPageToken']
                # BUG FIX: was `service.users()` (undefined name) — pagination
                # raised NameError on any result spanning multiple pages.
                response = self.service.users().messages().list(
                    userId=user_id, labelIds=label_ids, pageToken=page_token).execute()
                messages.extend(response['messages'])
            return messages
        except errors.HttpError as error:
            print('An error occurred: ', error)


if __name__ == "__main__":
    try:
        import argparse
        flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()
    except ImportError:
        flags = None

    xianghangMailController = GmailController("xianghangmi")
    muxingMailController = GmailController("senmuxing")

    from datetime import datetime
    import time

    # send a few test messages and measure the send -> receive round trip
    number = 5
    for i in range(number):
        nowDate = datetime.now()
        nowStr = nowDate.strftime("%Y-%m-%d %H-%M-%S")
        message2Send = GmailController.create_message(
            to="senmuxing@gmail.com",
            sender="xianghangmi@gmail.com",
            subject="testMessage_{}".format(nowStr),
            message_text="welcome to automatic email sending {}".format(i))
        preMsgList = muxingMailController.listMessagesMatchingQuery()
        sendResult = xianghangMailController.sendMessage(message2Send)
        sentTime = datetime.now()
        # poll the receiving inbox until its newest message id changes
        while True:
            latestMsgList = muxingMailController.listMessagesMatchingQuery()
            latestMsgId = latestMsgList[0]["id"]
            preMsgId = preMsgList[0]["id"]
            if latestMsgId == preMsgId:
                time.sleep(0.1)
                continue
            else:
                break
        receiveTime = datetime.now()
        timeCostDelta = receiveTime - sentTime
        timeCost = timeCostDelta.seconds + timeCostDelta.microseconds / float(1000000)
        print("time cost between sending and recieve is {} seconds".format(timeCost))
# A dictionary is a series of key-value pairs enclosed in curly braces { }.
# Python dictionaries are unordered collections of items.

# Two ways to build an empty dictionary: a literal and the dict() constructor.
empty_dict = {}
print('Empty Dictionary ', empty_dict)
empty_dict = dict()
print('Empty Dictionary ', empty_dict)

# Values may be of any type; keys must be immutable (string, number, tuple)
# and unique within the dictionary.
capital_dict = {
    'India': 'New Delhi',
    'Bangladesh': 'Dhaka',
    'Srilanka': 'Columbo',
}
print ('\ncapital_dict ', capital_dict)

# A list of (key, value) tuples converts directly into a dictionary.
new_list = [(1, 'One'), (2, 'Two'), (3, 'Three')]
my_dict = dict(new_list)
print('\nmy_dict ', my_dict)

# --- Accessing elements ---
# values() yields a view over all values.
dict_values = capital_dict.values()
print('\nType of dict_values ', type(dict_values))
print('Values are ', dict_values)

# keys() yields a view over all keys.
dict_keys = capital_dict.keys()
print('\nType of dict_keys ', type(dict_keys))
print('Keys are ',dict_keys)

# get(key, default) looks a key up safely: missing keys return the default
# (None when no default is given) instead of raising KeyError.
print('\nCapital of India ', capital_dict.get('India'))
print('Capital of Australlia ', capital_dict.get('Australlia'))
print('Capital of Mayanmar ', capital_dict.get('Mayanmar', 'No Capital Found'))

# items() yields a view of (key, value) pairs.
dict_items = capital_dict.items()
print('\nItems are ', dict_items)

# --- Updating ---
# Assigning to a new key inserts it; assigning to an existing key overwrites.
capital_dict['Australia'] = 'Canbera'
print('\nCapital_dict ', capital_dict)
capital_dict['India'] = 'Delhi'
print('capital_dict ', capital_dict)

# --- Removing elements ---
# pop(key) removes the entry for the key and returns its value.
popped_value = capital_dict.pop('Australia')
print('\nPopped Item ', popped_value)
print('capital_dict ', capital_dict)

# clear() empties the dictionary in place.
capital_dict.clear()
print('capital_dict ', capital_dict)

# The del statement removes a single entry or the whole dictionary.
new_dict = {
    'Name': 'Rockey',
    'Work_Locations': ['Bangalore', 'Hyderabad'],
    'Age': 30,
}
print('new_dict ', new_dict)
del new_dict['Age']
print('new_dict after deleting an item ', new_dict)
del new_dict
# print('new_dict ', new_dict)  # NameError: name 'new_dict' is not defined

# --- Iterating ---
pow_dict = {1: 1, 2: 4, 3: 9, 4: 16, 5: 25}
# Iterating the dictionary itself walks its keys.
for key in pow_dict:
    print(key ,' -> ', pow_dict.get(key))
# items() walks (key, value) tuples.
for pair in pow_dict.items():
    print(pair)
from __future__ import print_function import sys def product(numbers): """Function to return the product of two numbers Params: numbers: List of two numbers to be multiplied Returns: product of two numbers """ #Write your solution here return float(numbers[0]) * float(numbers[1]) numbers = sys.argv[1:] # sys.argv contains the arguments passed to the program print(product(numbers))
from basketball import models as bmodels
from django_filters import rest_framework as drf_filters
import django_filters


class StatlineFilter(drf_filters.FilterSet):
    """Filters StatLine rows by a date range and by game type."""

    date = django_filters.DateFromToRangeFilter()
    game_type = django_filters.CharFilter()

    class Meta:
        model = bmodels.StatLine
        fields = ['date']


class DailyStatlineFilter(StatlineFilter):
    """Same filters as StatlineFilter, bound to the DailyStatline model."""

    class Meta:
        model = bmodels.DailyStatline
        fields = ['date']


class SeasonStatlineFilter(drf_filters.FilterSet):
    """Filters SeasonStatline rows by season and by game type."""

    game_type = django_filters.CharFilter()

    class Meta:
        model = bmodels.SeasonStatline
        fields = ['season']
# portage.py -- core Portage functionality # Copyright 1998-2012 Gentoo Foundation # Distributed under the terms of the GNU General Public License v2 import atexit import errno import platform import signal import sys import traceback from portage import os from portage import _encodings from portage import _unicode_encode import portage portage.proxy.lazyimport.lazyimport(globals(), 'portage.util:dump_traceback,writemsg', ) from portage.const import BASH_BINARY, SANDBOX_BINARY, FAKEROOT_BINARY from portage.exception import CommandNotFound try: import resource max_fd_limit = resource.getrlimit(resource.RLIMIT_NOFILE)[0] except ImportError: max_fd_limit = 256 if sys.hexversion >= 0x3000000: basestring = str if os.path.isdir("/proc/%i/fd" % os.getpid()): def get_open_fds(): return (int(fd) for fd in os.listdir("/proc/%i/fd" % os.getpid()) \ if fd.isdigit()) if platform.python_implementation() == 'PyPy': # EAGAIN observed with PyPy 1.8. _get_open_fds = get_open_fds def get_open_fds(): try: return _get_open_fds() except OSError as e: if e.errno != errno.EAGAIN: raise return range(max_fd_limit) else: def get_open_fds(): return range(max_fd_limit) sandbox_capable = (os.path.isfile(SANDBOX_BINARY) and os.access(SANDBOX_BINARY, os.X_OK)) fakeroot_capable = (os.path.isfile(FAKEROOT_BINARY) and os.access(FAKEROOT_BINARY, os.X_OK)) def spawn_bash(mycommand, debug=False, opt_name=None, **keywords): """ Spawns a bash shell running a specific commands @param mycommand: The command for bash to run @type mycommand: String @param debug: Turn bash debugging on (set -x) @type debug: Boolean @param opt_name: Name of the spawned process (detaults to binary name) @type opt_name: String @param keywords: Extra Dictionary arguments to pass to spawn @type keywords: Dictionary """ args = [BASH_BINARY] if not opt_name: opt_name = os.path.basename(mycommand.split()[0]) if debug: # Print commands and their arguments as they are executed. 
    args.append("-x")
    args.append("-c")
    args.append(mycommand)
    return spawn(args, opt_name=opt_name, **keywords)

def spawn_sandbox(mycommand, opt_name=None, **keywords):
    """Spawn mycommand under the sandbox wrapper; falls back to plain
    bash when sandbox support was not detected at import time."""
    if not sandbox_capable:
        return spawn_bash(mycommand, opt_name=opt_name, **keywords)
    args=[SANDBOX_BINARY]
    if not opt_name:
        opt_name = os.path.basename(mycommand.split()[0])
    args.append(mycommand)
    return spawn(args, opt_name=opt_name, **keywords)

def spawn_fakeroot(mycommand, fakeroot_state=None, opt_name=None, **keywords):
    """Spawn mycommand under fakeroot, optionally persisting the faked
    ownership/permission state in fakeroot_state (-s saves, -i loads)."""
    args=[FAKEROOT_BINARY]
    if not opt_name:
        opt_name = os.path.basename(mycommand.split()[0])
    if fakeroot_state:
        # Touch the state file so fakeroot's -i load does not fail on
        # the first run.
        open(fakeroot_state, "a").close()
        args.append("-s")
        args.append(fakeroot_state)
        args.append("-i")
        args.append(fakeroot_state)
    args.append("--")
    args.append(BASH_BINARY)
    args.append("-c")
    args.append(mycommand)
    return spawn(args, opt_name=opt_name, **keywords)

_exithandlers = []
def atexit_register(func, *args, **kargs):
    """Wrapper around atexit.register that is needed in order to track
    what is registered.  For example, when portage restarts itself via
    os.execv, the atexit module does not work so we have to do it
    manually by calling the run_exitfuncs() function in this module."""
    _exithandlers.append((func, args, kargs))

def run_exitfuncs():
    """This should behave identically to the routine performed by
    the atexit module at exit time.  It's only necessary to call this
    function when atexit will not work (because of os.execv, for
    example)."""

    # This function is a copy of the private atexit._run_exitfuncs()
    # from the python 2.4.2 sources.  The only difference from the
    # original function is in the output to stderr.
    exc_info = None
    while _exithandlers:
        func, targs, kargs = _exithandlers.pop()
        try:
            func(*targs, **kargs)
        except SystemExit:
            exc_info = sys.exc_info()
        except:
            # No idea what they called, so we need this broad except here.
            dump_traceback("Error in portage.process.run_exitfuncs", noiselevel=0)
            exc_info = sys.exc_info()

    if exc_info is not None:
        if sys.hexversion >= 0x3000000:
            raise exc_info[0](exc_info[1]).with_traceback(exc_info[2])
        else:
            # Python 2 three-argument raise has no Python 3 spelling,
            # so it is hidden inside exec().
            exec("raise exc_info[0], exc_info[1], exc_info[2]")

atexit.register(run_exitfuncs)

# We need to make sure that any processes spawned are killed off when
# we exit. spawn() takes care of adding and removing pids to this list
# as it creates and cleans up processes.
spawned_pids = []
def cleanup():
    """Reap or SIGTERM any children still recorded in spawned_pids."""
    while spawned_pids:
        pid = spawned_pids.pop()
        try:
            # With waitpid and WNOHANG, only check the
            # first element of the tuple since the second
            # element may vary (bug #337465).
            if os.waitpid(pid, os.WNOHANG)[0] == 0:
                os.kill(pid, signal.SIGTERM)
                os.waitpid(pid, 0)
        except OSError:
            # This pid has been cleaned up outside
            # of spawn().
            pass

atexit_register(cleanup)

def spawn(mycommand, env={}, opt_name=None, fd_pipes=None, returnpid=False,
          uid=None, gid=None, groups=None, umask=None, logfile=None,
          path_lookup=True, pre_exec=None):
    """
    Spawns a given command.

    @param mycommand: the command to execute
    @type mycommand: String or List (Popen style list)
    @param env: A dict of Key=Value pairs for env variables
    @type env: Dictionary
    @param opt_name: an optional name for the spawn'd process (defaults to the binary name)
    @type opt_name: String
    @param fd_pipes: A dict of mapping for pipes, { '0': stdin, '1': stdout } for example
    @type fd_pipes: Dictionary
    @param returnpid: Return the Process IDs for a successful spawn.
    NOTE: This requires the caller clean up all the PIDs, otherwise spawn will clean them.
    @type returnpid: Boolean
    @param uid: User ID to spawn as; useful for dropping privilages
    @type uid: Integer
    @param gid: Group ID to spawn as; useful for dropping privilages
    @type gid: Integer
    @param groups: Group ID's to spawn in: useful for having the process run in multiple group contexts.
    @type groups: List
    @param umask: An integer representing the umask for the process (see man chmod for umask details)
    @type umask: Integer
    @param logfile: name of a file to use for logging purposes
    @type logfile: String
    @param path_lookup: If the binary is not fully specified then look for it in PATH
    @type path_lookup: Boolean
    @param pre_exec: A function to be called with no arguments just prior to the exec call.
    @type pre_exec: callable

    logfile requires stdout and stderr to be assigned to this process (ie not pointed
    somewhere else.)
    """

    # mycommand is either a str or a list
    if isinstance(mycommand, basestring):
        mycommand = mycommand.split()

    if sys.hexversion < 0x3000000:
        # Avoid a potential UnicodeEncodeError from os.execve().
        env_bytes = {}
        for k, v in env.items():
            env_bytes[_unicode_encode(k, encoding=_encodings['content'])] = \
                _unicode_encode(v, encoding=_encodings['content'])
        env = env_bytes
        del env_bytes

    # If an absolute path to an executable file isn't given
    # search for it unless we've been told not to.
    binary = mycommand[0]
    if binary not in (BASH_BINARY, SANDBOX_BINARY, FAKEROOT_BINARY) and \
        (not os.path.isabs(binary) or not os.path.isfile(binary)
        or not os.access(binary, os.X_OK)):
        binary = path_lookup and find_binary(binary) or None
        if not binary:
            raise CommandNotFound(mycommand[0])

    # If we haven't been told what file descriptors to use
    # default to propagating our stdin, stdout and stderr.
    if fd_pipes is None:
        fd_pipes = {
            0:sys.__stdin__.fileno(),
            1:sys.__stdout__.fileno(),
            2:sys.__stderr__.fileno(),
        }

    # mypids will hold the pids of all processes created.
    mypids = []

    if logfile:
        # Using a log file requires that stdout and stderr
        # are assigned to the process we're running.
        if 1 not in fd_pipes or 2 not in fd_pipes:
            raise ValueError(fd_pipes)

        # Create a pipe
        (pr, pw) = os.pipe()

        # Create a tee process, giving it our stdout and stderr
        # as well as the read end of the pipe.
        mypids.extend(spawn(('tee', '-i', '-a', logfile),
            returnpid=True, fd_pipes={0:pr, 1:fd_pipes[1], 2:fd_pipes[2]}))

        # We don't need the read end of the pipe, so close it.
        os.close(pr)

        # Assign the write end of the pipe to our stdout and stderr.
        fd_pipes[1] = pw
        fd_pipes[2] = pw

    parent_pid = os.getpid()
    pid = None
    try:
        pid = os.fork()

        if pid == 0:
            # Child: _exec never returns on success (it calls execve).
            try:
                _exec(binary, mycommand, opt_name, fd_pipes,
                    env, gid, groups, uid, umask, pre_exec)
            except SystemExit:
                raise
            except Exception as e:
                # We need to catch _any_ exception so that it doesn't
                # propagate out of this function and cause exiting
                # with anything other than os._exit()
                writemsg("%s:\n %s\n" % (e, " ".join(mycommand)),
                    noiselevel=-1)
                traceback.print_exc()
                sys.stderr.flush()

    finally:
        if pid == 0 or (pid is None and os.getpid() != parent_pid):
            # Call os._exit() from a finally block in order
            # to suppress any finally blocks from earlier
            # in the call stack (see bug #345289). This
            # finally block has to be setup before the fork
            # in order to avoid a race condition.
            os._exit(1)

    if not isinstance(pid, int):
        raise AssertionError("fork returned non-integer: %s" % (repr(pid),))

    # Add the pid to our local and the global pid lists.
    mypids.append(pid)
    spawned_pids.append(pid)

    # If we started a tee process the write side of the pipe is no
    # longer needed, so close it.
    if logfile:
        os.close(pw)

    # If the caller wants to handle cleaning up the processes, we tell
    # it about all processes that were created.
    if returnpid:
        return mypids

    # Otherwise we clean them up.
    while mypids:

        # Pull the last reader in the pipe chain. If all processes
        # in the pipe are well behaved, it will die when the process
        # it is reading from dies.
        pid = mypids.pop(0)

        # and wait for it.
        retval = os.waitpid(pid, 0)[1]

        # When it's done, we can remove it from the
        # global pid list as well.
        spawned_pids.remove(pid)

        if retval:
            # If it failed, kill off anything else that
            # isn't dead yet.
            for pid in mypids:
                # With waitpid and WNOHANG, only check the
                # first element of the tuple since the second
                # element may vary (bug #337465).
                if os.waitpid(pid, os.WNOHANG)[0] == 0:
                    os.kill(pid, signal.SIGTERM)
                    os.waitpid(pid, 0)
                spawned_pids.remove(pid)

            # If it got a signal, return the signal that was sent.
            if (retval & 0xff):
                return ((retval & 0xff) << 8)

            # Otherwise, return its exit code.
            return (retval >> 8)

    # Everything succeeded
    return 0

def _exec(binary, mycommand, opt_name, fd_pipes, env, gid, groups, uid, umask,
    pre_exec):

    """
    Execute a given binary with options

    @param binary: Name of program to execute
    @type binary: String
    @param mycommand: Options for program
    @type mycommand: String
    @param opt_name: Name of process (defaults to binary)
    @type opt_name: String
    @param fd_pipes: Mapping pipes to destination; { 0:0, 1:1, 2:2 }
    @type fd_pipes: Dictionary
    @param env: Key,Value mapping for Environmental Variables
    @type env: Dictionary
    @param gid: Group ID to run the process under
    @type gid: Integer
    @param groups: Groups the Process should be in.
    @type groups: Integer
    @param uid: User ID to run the process under
    @type uid: Integer
    @param umask: an int representing a unix umask (see man chmod for umask details)
    @type umask: Integer
    @param pre_exec: A function to be called with no arguments just prior to the exec call.
    @type pre_exec: callable
    @rtype: None
    @return: Never returns (calls os.execve)
    """

    # If the process we're creating hasn't been given a name
    # assign it the name of the executable.
    if not opt_name:
        if binary is portage._python_interpreter:
            # NOTE: PyPy 1.7 will die due to "libary path not found" if argv[0]
            # does not contain the full path of the binary.
            opt_name = binary
        else:
            opt_name = os.path.basename(binary)

    # Set up the command's argument list.
    myargs = [opt_name]
    myargs.extend(mycommand[1:])

    # Use default signal handlers in order to avoid problems
    # killing subprocesses as reported in bug #353239.
    signal.signal(signal.SIGINT, signal.SIG_DFL)
    signal.signal(signal.SIGTERM, signal.SIG_DFL)

    # Quiet killing of subprocesses by SIGPIPE (see bug #309001).
    signal.signal(signal.SIGPIPE, signal.SIG_DFL)

    # Avoid issues triggered by inheritance of SIGQUIT handler from
    # the parent process (see bug #289486).
    signal.signal(signal.SIGQUIT, signal.SIG_DFL)

    _setup_pipes(fd_pipes)

    # Set requested process permissions.
    if gid:
        os.setgid(gid)
    if groups:
        os.setgroups(groups)
    if uid:
        os.setuid(uid)
    if umask:
        os.umask(umask)
    if pre_exec:
        pre_exec()

    # And switch to the new process.
    os.execve(binary, myargs, env)

def _setup_pipes(fd_pipes, close_fds=True):
    """Setup pipes for a forked process.

    WARNING: When not followed by exec, the close_fds behavior
    can trigger interference from destructors that close file
    descriptors. This interference happens when the garbage
    collector intermittently executes such destructors after their
    corresponding file descriptors have been re-used, leading
    to intermittent "[Errno 9] Bad file descriptor" exceptions in
    forked processes. This problem has been observed with PyPy 1.8,
    and also with CPython under some circumstances (as triggered
    by xmpppy in bug #374335). In order to close a safe subset of
    file descriptors, see portage.locks._close_fds().
    """
    my_fds = {}
    # To protect from cases where direct assignment could
    # clobber needed fds ({1:2, 2:1}) we first dupe the fds
    # into unused fds.
    for fd in fd_pipes:
        my_fds[fd] = os.dup(fd_pipes[fd])
    # Then assign them to what they should be.
    for fd in my_fds:
        os.dup2(my_fds[fd], fd)

    if close_fds:
        # Then close _all_ fds that haven't been explicitly
        # requested to be kept open.
        for fd in get_open_fds():
            if fd not in my_fds:
                try:
                    os.close(fd)
                except OSError:
                    pass

def find_binary(binary):
    """
    Given a binary name, find the binary in PATH

    @param binary: Name of the binary to find
    @type binary: String
    @rtype: None or string
    @return: full path to binary or None if the binary could not be located.
    """
    for path in os.environ.get("PATH", "").split(":"):
        filename = "%s/%s" % (path, binary)
        if os.access(filename, os.X_OK) and os.path.isfile(filename):
            return filename
    return None
from math import gcd

# Read the two integers whose least common multiple we want.
x = int(input())
y = int(input())

# lcm(x, y) = |x * y| / gcd(x, y).  Replaces the original brute-force
# loop that incremented a counter until it divided both numbers,
# which took O(lcm) iterations.
icm = abs(x * y) // gcd(x, y)
print(icm)
import models as m

# Build a peptide model from the PDB structure 227.pdb.
# NOTE(review): parameter semantics (cutoff presumably a distance cutoff
# for bonding, indx_cutoff a residue-index separation, potential and
# backbone_weight interaction strengths) inferred from names only --
# confirm against models.peptide.
pep149 = m.peptide('227.pdb', cutoff=15, indx_cutoff=2, potential=10.0, backbone_weight=90.0)
# pep149.calc_bonds_bounded(11, 15)
pep149.calc_bonds()
pep149.calc_a3s()

# Write the parameterised model out for simulation, keyed to the same PDB.
m.export_to_simulation(pep149, '227.pdb')
from django.test import TestCase, override_settings

from nautobot.core import checks


class CheckCacheopsDefaultsTest(TestCase):
    """Exercise the CACHEOPS_DEFAULTS system check."""

    @override_settings(CACHEOPS_DEFAULTS={"timeout": 0})
    def test_timeout_invalid(self):
        """A zero CACHEOPS_DEFAULTS['timeout'] must produce check error E001."""
        errors = checks.cache_timeout_check(None)
        self.assertEqual(errors, [checks.E001])
class InvalidInput(Exception):
    """Raised when request input fails validation.

    Attributes:
        status_code: HTTP status code associated with this error.
        description: Human-readable explanation of what was invalid.
    """

    # NOTE(review): 500 is "internal server error"; invalid *input* is
    # conventionally 400 -- left unchanged because callers may rely on it.
    status_code = 500

    def __init__(self, description):
        # Pass the description through to Exception so str(exc) and
        # exc.args carry the message (the original produced an empty str()).
        super().__init__(description)
        self.description = description

    def to_dict(self):
        """Return a JSON-serialisable representation of the error."""
        return {'description': self.description}
# Package version string; the ".dev" suffix marks an unreleased
# development build (PEP 440 developmental release).
version = '0.9.0.dev'
"""Absorb neighbouring zones; extract word data; overwrite zones as newly-defined table."""
import xml.etree.ElementTree as ET
import xmlStaticOperators


class xmlTableAbsorption(object):
    """Absorb zones that fall inside a newly expanded tableZone.

    On construction this extracts the word data from every absorbed
    column zone, removes the swallowed zones from the page zone list,
    and appends the words back as wordZone entries.

    Zone entries are lists of the form [kind, t, r, b, l, payload]
    (indices 1-4 are normalised page coordinates).
    """

    def __init__(self, zone_list, working_zones, page_zone_data, page_data, column_zones_total):
        # zone_list: the new (absorbing) tableZone entry.
        # column_zones_total: the column zones whose words are harvested.
        self.zone_list = zone_list
        self.working_zones = working_zones
        self.page_zone_data = page_zone_data
        self.page_data = page_data
        self.column_zones_total = column_zones_total
        self.zone_words = self.extract_words()
        self.remove_zones()
        self.manufacture_wordZones()

    def within_new_zone(self, object_in):
        """Identify if object_in is within new tableZone."""
        # The .002 slack tolerates rounding from the coordinate
        # normalisation below.
        within_new_zone = False
        if (object_in[1] <= self.zone_list[1] + .002 and
                object_in[2] <= self.zone_list[2] + .002 and
                object_in[3] >= self.zone_list[3] - .002 and
                object_in[4] >= self.zone_list[4] - .002):
            within_new_zone = True
        return within_new_zone

    def extract_words(self):
        """Extract words from all zones in newly absorbed tableZone."""
        zone_words = []
        for zone in self.column_zones_total:
            zone_element = zone[5]
            for word in zone_element.findall('.//wd'):
                word.text = xmlStaticOperators.none_to_empty(word.text)
                # Convert the word's pixel box to page-normalised
                # coordinates.  NOTE(review): the (x * 400) / 1440 factor
                # presumably converts 1440-DPI twips to the 400-DPI page
                # space -- confirm against the OCR source resolution.
                l = float('{:.5f}'.format(((int(word.get('l')) * 400) / 1440)/self.page_data.page_dimensions[0]))
                r = float('{:.5f}'.format(((int(word.get('r')) * 400) / 1440)/self.page_data.page_dimensions[0]))
                t = float('{:.5f}'.format(1-((int(word.get('t')) * 400) / 1440)/self.page_data.page_dimensions[1]))
                b = float('{:.5f}'.format(1-((int(word.get('b')) * 400) / 1440)/self.page_data.page_dimensions[1]))
                word_zone = ['wordZone', t, r, b, l, word.text]
                if self.within_new_zone(word_zone):
                    zone_words.append(['wordZone', t, r, b, l, word.text])
        return zone_words

    def remove_zones(self):
        """Remove zones in newly expanded tableZone."""
        # Keep the absorbing zone itself (identity check) while dropping
        # everything it swallowed.
        self.page_zone_data = [zone for zone in self.page_zone_data
                               if not self.within_new_zone(zone) or zone is self.zone_list]

    def manufacture_wordZones(self):
        """Add words in new tables into wordZones."""
        for word in self.zone_words:
            self.page_zone_data.append(word)
from ._coupling import Coupling, coupling_dict2list, coupling_list2dict from ._mapping import swap_mapper, direction_mapper, cx_cancellation, optimize_1q_gates from ._compiling import two_qubit_kak, euler_angles_1q from ._mappererror import MapperError
# -*- coding: utf-8 -*- # @Time : 2018/11/13 14:49 # @Author : Monica # @Email : 498194410@qq.com # @File : run.py.py import unittest import HTMLTestRunnerNew import HTMLTestRunnerP from TestCases import test_0_login from Common.project_path import test_report_path suite = unittest.TestSuite() loader = unittest.TestLoader() suite.addTest(loader.loadTestsFromTestCase(test_0_login.TestLogin)) suite.addTest(loader.loadTestsFromTestCase(test_0_login.TestLogin_ddt)) with open(test_report_path, "wb") as file: runner = HTMLTestRunnerP.HTMLTestRunner(stream=file, verbosity=2, title="UI测试报告", description="cloudhawk") runner.run(suite) # 生成测试报告后发送给指定收件人 # SendEmail().send_email("3097944154@qq.com", test_report_path)
class TriggerPlate:
    """Invisible rectangular trigger area.

    run() reports True while the tracked entity's position lies inside
    the rectangle.  Drawing relies on Processing's global noFill /
    noStroke / rect functions being in scope at runtime.
    """

    def __init__(self, x, y, w, h, action, entity):
        self.x = x
        self.y = y
        self.w = w
        self.h = h
        self.action = action  # payload for the caller; not invoked here
        self.entity = entity  # object exposing attributes["x"] / attributes["y"]

    def over(self):
        """Return True when the entity is strictly inside the plate."""
        ex = self.entity.attributes["x"]
        ey = self.entity.attributes["y"]
        # Chained comparisons replace the original four-clause `and`.
        return self.x < ex < self.x + self.w and self.y < ey < self.y + self.h

    def run(self):
        """Draw the (invisible) plate and report whether it is triggered."""
        noFill()
        noStroke()
        rect(self.x, self.y, self.w, self.h)
        # `if over(): return True else: return False` was redundant --
        # return the boolean test itself.
        return self.over()
#!/usr/bin/env python3
"""Remove redundant brackets from single-letter arithmetic expressions
by converting infix -> postfix -> minimally-bracketed infix."""
from enum import Enum
from string import ascii_lowercase


class Symbol(Enum):
    # Operator characters recognised by the shunting-yard conversion.
    BREAK_SIGN = '|'
    MUL_SIGN = '*'
    DIV_SIGN = '/'
    PLUS_SIGN = '+'
    MINUS_SIGN = '-'
    LEFT_BRACKET = '('
    RIGHT_BRACKET = ')'


class Action(Enum):
    # Shunting-yard action table: Action[top-of-stack].value[incoming]
    # gives the action code (1 push, 2 pop-to-output, 3 drop bracket,
    # 4 finished, 5 error).
    BREAK_SIGN = {'|': 4, '-': 1, '+': 1, '*': 1, '/': 1, '(': 1, ')': 5}
    PLUS_SIGN = {'|': 2, '-': 2, '+': 2, '*': 1, '/': 1, '(': 1, ')': 2}
    MINUS_SIGN = {'|': 2, '-': 2, '+': 2, '*': 1, '/': 1, '(': 1, ')': 2}
    MUL_SIGN = {'|': 2, '-': 2, '+': 2, '*': 2, '/': 2, '(': 1, ')': 2}
    DIV_SIGN = {'|': 2, '-': 2, '+': 2, '*': 2, '/': 2, '(': 1, ')': 2}
    LEFT_BRACKET = {'|': 5, '-': 1, '+': 1, '*': 1, '/': 1, '(': 1, ')': 3}


def brackets_trim(input_data: str) -> str:
    """Removes extra brackets from expression"""

    def to_postfix(input_data: str) -> str:
        """Implementation of E.W. Dejkstra algorithm https://habr.com/post/100869/"""
        # '|' acts as both end-of-input marker and stack sentinel.
        input_data = input_data + '|'
        lst_postfix = []
        stack = ['|']
        pos = 0
        while True:
            sym = input_data[pos]
            if sym in set(ascii_lowercase):
                # Operands go straight to the output.
                lst_postfix.append(sym)
                pos += 1
            else:
                LAST_SIGN = Symbol(stack[-1]).name
                action_choice = Action[LAST_SIGN].value[sym]
                if action_choice == 1:
                    stack.append(sym)
                    pos += 1
                elif action_choice == 2:
                    last = stack.pop(-1)
                    lst_postfix.append(last)
                elif action_choice == 3:
                    # Matching '(' found on stack: discard both brackets.
                    stack.pop(-1)
                    pos += 1
                elif action_choice == 4:
                    break
                else:
                    raise Exception('invalid input string')
        return ''.join(lst_postfix)

    def get_unwrapped(expression: str) -> str:
        # Return the operator characters of `expression` that are not
        # enclosed in any bracket pair.
        wrapped = 0
        unwrapped = ''
        for s in expression:
            if s == '(':
                wrapped += 1
            elif s == ')':
                wrapped -= 1
            elif wrapped == 0 and s not in set(ascii_lowercase):
                unwrapped += str(s)
        return unwrapped

    def to_infix(input_data: str) -> str:
        """Postfix to infix algorithm described on http://scanftree.com/Data_Structure/postfix-to-infix"""
        stack = []
        sign_priority = {'-':1, '+':1, '*':2, '/':2}
        prev_sign_priority = 1
        for sym in input_data:
            if sym in set(ascii_lowercase):
                stack.append(sym)
            else:
                # if operator: pop the two most recent subexpressions
                # (s1 = left operand, s2 = right operand).
                s1, s2 = stack.pop(-2), stack.pop(-1)
                if len(s2) > 1 and sym in '/':
                    # composite right part with 2, 1 priorities
                    s2 = '(' + s2 + ')'
                elif len(s2) > 1 and sym in '-':
                    # composite right part with 1 priority
                    unwrapped = get_unwrapped(s2)
                    priorities = {sign_priority[s] for s in unwrapped}
                    if priorities == {1}:
                        s2 = '(' + s2 + ')'
                if sign_priority[sym] == 2:
                    # left part contains unwrapped sign with low priority
                    unwrapped = get_unwrapped(s1)
                    priorities = {sign_priority[s] for s in unwrapped}
                    if 1 in priorities:
                        s1 = '(' + s1 + ')'
                if prev_sign_priority < sign_priority[sym]:
                    # change of priority
                    # wrap subexpressions with brackets
                    if len(s1) > 1 and '(' not in s1:
                        s1 = '(' + s1 + ')'
                    if len(s2) > 1 and '(' not in s2:
                        s2 = '(' + s2 + ')'
                    stack.append(s1 + sym + s2)
                else:
                    stack.append(s1 + sym + s2)
                prev_sign_priority = sign_priority[sym]
        return stack[0]

    # Remove spaces
    input_data = input_data.replace(' ','')
    return to_infix(to_postfix(input_data))


assert brackets_trim('(x*y)/(j*z)+g') == 'x*y/(j*z)+g'
assert brackets_trim('a-(b+c)') == 'a-(b+c)'
assert brackets_trim('(a)') == 'a'
assert brackets_trim('(a*(b/c)+((d-f)/k))*(h*(g-r))') == '(a*b/c+(d-f)/k)*h*(g-r)'
assert brackets_trim('(x+y)/(j*z)+g') == '(x+y)/(j*z)+g'
assert brackets_trim('x+y/(j*z)+g') == 'x+y/(j*z)+g'
assert brackets_trim('(a*(b/c)+((d-f)/k))') == 'a*b/c+(d-f)/k'
assert brackets_trim('a*(b+c)') == 'a*(b+c)'
assert brackets_trim('a+(b+c)') == 'a+b+c'
assert brackets_trim('(x*y)-(j*z)+g') == 'x*y-j*z+g'
# print(data)
# print(brackets_trim(data))
import functools

import numpy as np
from prettytable import PrettyTable
from time import time as clock


class Call(object):
    """Single timed invocation: stores args, output and wall-clock time."""

    def __init__(self, func, *args, **kwargs):
        self.args = args
        self.kwargs = kwargs
        t = clock()
        self.output = func(*args, **kwargs)
        self.time = clock() - t


class CallTracker(object):
    """Func calls tracking class, goes along trackedfunc decorator"""

    # Registry of all trackers, keyed by the function's __qualname__.
    trackers = {}

    def __init__(self, func):
        """Init a tracker (to use for a given function)

        Tracker can be find at CallTracker.trackers[func.__qualname__]
        """
        # Add func to tracked functions
        funcname = func.__qualname__
        cls = self.__class__

        # # Should be checked but not convenient when using notebook
        # if funcname in cls.trackers:
        #     raise ValueError(
        #         "Can't track a function already tracked: %s" % funcname
        #     )

        cls.trackers[funcname] = self

        # Init instance
        self.func = func
        self._n_calls = 0
        self._exc_time = 0

    @property
    def n_calls(self):
        """Number of calls to tracked functions"""
        return self._n_calls

    @property
    def exc_time(self):
        """Total running time of tracked function"""
        return self._exc_time

    def mean_exc_time(self):
        """Return mean execution time by call"""
        try:
            return self.exc_time / self.n_calls
        except ZeroDivisionError:
            # No calls yet: mean is undefined, report NaN.
            return np.nan

    def call(self, *args, **kwargs):
        """Call func and track execution time"""
        call = Call(self.func, *args, **kwargs)
        self._n_calls += 1
        self._exc_time += call.time
        return call.output

    def reset(self):
        """Reset call-results, n_calls and total exc time"""
        self._n_calls = 0
        self._exc_time = 0

    # *********************************************************************** #
    # Class method

    @classmethod
    def display_stats(cls, w_uncalled=False, sortby=None, reverse=False):
        """Return pretty table of called function stats

        Args:
            w_uncalled (bool): display uncalled tracked functions below table
            sortby (str): sort res table by given header
                should be in "function", "n_calls", "exc_time", "mean_exc_time"
            reverse (bool): reverse sorting
        """
        table = PrettyTable()
        table.field_names = ["function", "n_calls", "exc_time", "mean_exc_time"]

        # Gather called / uncalled
        called = []
        uncalled_funcs = []
        for funcname, tracker in cls.trackers.items():
            if tracker.n_calls > 0:
                called.append((funcname, tracker))
            else:
                uncalled_funcs.append(funcname)

        # Sort
        if sortby is None:
            pass
        elif sortby in ["name", "funcname", "function"]:
            called.sort(key=lambda item: item[0], reverse=reverse)
        elif sortby == "n_calls":
            called.sort(key=lambda item: item[1].n_calls, reverse=reverse)
        elif sortby == "exc_time":
            called.sort(key=lambda item: item[1].exc_time, reverse=reverse)
        elif sortby == "mean_exc_time":
            called.sort(key=lambda item: item[1].mean_exc_time(), reverse=reverse)
        else:
            raise ValueError(
                "sortby unknown value %s, must be picked in %s"
                % (sortby, table.field_names)
            )

        # Fills table
        for funcname, tracker in called:
            row = [
                funcname,
                tracker.n_calls,
                "%.3e" % tracker.exc_time,
                "%.3e" % tracker.mean_exc_time(),
            ]
            table.add_row(row)

        # Display
        print(table)
        if w_uncalled:
            print("*", ", ".join(uncalled_funcs))

    @classmethod
    def reset_all(cls):
        """Reset trackers stats"""
        for tracker in cls.trackers.values():
            tracker.reset()


def trackedfunc(func):
    """Decorator to track calls to func.

    In a class, decorator must be bellow @clsmethod or @staticmethod decorator
    """
    @functools.wraps(func)
    def func_wrapper(*args, **kwargs):
        return func_wrapper.tracker.call(*args, **kwargs)
    # Expose the tracker on the wrapper so callers can inspect stats.
    func_wrapper.tracker = CallTracker(func)
    func_wrapper.__doc__ = func.__doc__ if func.__doc__ else ""
    func_wrapper.__doc__ += "\n@about: tracked function\n"
    return func_wrapper
import defi.defi_tools as dft
import pandas as pd

# import matplotlib.pyplot as plt
#
# df = dft.getProtocols()
# fig, ax = plt.subplots(figsize=(12,6))
# top_20 = df.sort_values('tvl', ascending=False).head(20)
#
# chains = top_20.groupby('chain').size().index.values.tolist()
# for chain in chains:
#     filtro = top_20.loc[top_20.chain==chain]
#     ax.bar(filtro.index, filtro.tvl, label=chain)
#
# ax.set_title('Top 20 dApp TVL, groupBy dApp main Chain', fontsize=14)
# plt.legend()
# plt.xticks(rotation=90)
# plt.show()

# Show all columns when printing wide CoinGecko frames.
pd.options.display.max_columns = 250

# BUG FIX: this assignment had been commented out while the prints below
# remained, so the script raised NameError on `data`.
data = dft.geckoList(page=2, per_page=1000)
print(data)
print(data.columns)

# df = dft.geckoMarkets('ltc')
# print(df)
# print(df.info())

diccionario = {'base': 'ADA', 'quote': 'BTC', 'value_quote': 10, 'value_usdt': 100}
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.shortcuts import render, redirect

from .models import Book


def index(request):
    """Render the stack index page with every stored book."""
    context = {
        'book': Book.objects.all()
    }
    return render(request, 'stack/index.html', context)


def add(request):
    """Create a Book from the submitted POST fields, then redirect home.

    NOTE(review): POST fields are read unvalidated; a missing key raises
    KeyError (HTTP 500) -- confirm the form always supplies all three.
    """
    Book.objects.create(
        title=request.POST['title'],
        category=request.POST['category'],
        author=request.POST['author'])
    return redirect('/')


def remove(request):
    """Delete every Book (view acts as a 'clear all'), then redirect home."""
    Book.objects.all().delete()
    return redirect('/')
import math


def is_square(n):
    """Return True if integer *n* is a perfect square.

    Uses math.isqrt (exact integer square root) instead of math.sqrt:
    float rounding in sqrt misclassifies large integers once n exceeds
    the 2**53 float precision limit.

    Negative numbers are never perfect squares.
    """
    if n < 0:
        return False
    root = math.isqrt(n)
    return root * root == n
#  Copyright (c) 2020. Yul HR Kang. hk2699 at caa dot columbia dot edu.

import numpy as np
from scipy.io import loadmat

from lib.pylabyk import np2
from lib.pylabyk.np2 import filt_dict
from data_2d import consts

#%%
def ____LOAD____():
    pass


# Scale factors once applied to the motion/color energy channels so their
# magnitudes resemble the coherences; kept for reference ('en' handling
# is currently disabled).
scale_en = [1/600, 1/4]

file_matlab_combined = '../../data/orig/data_RT_VD.mat'
dat = loadmat(file_matlab_combined)
print('Loaded %s' % file_matlab_combined)

# MATLAB cell arrays of strings arrive as nested object arrays;
# flatten them into plain lists of str.
for key in ['parads', 'subjs']:
    d = dat[key]
    d2 = [s[0] for s in d.flatten()]
    dat[key] = d2

# MATLAB indices are 1-based; shift every id_* field to Python's 0-based.
for key in dat.keys():
    if key.startswith('id_'):
        dat[key] -= 1  # MATLAB starts with 1; Python starts with 0

# Collapse (N, 1) column vectors into 1-D arrays for convenience.
for key, val in dat.items():
    if (isinstance(val, np.ndarray)
            and val.ndim == 2
            and val.shape[1] == 1):
        dat[key] = val.flatten()

# np.bool was deprecated in NumPy 1.20 and removed in 1.24; the builtin
# bool is the documented drop-in replacement (np.bool was an alias of it).
dat['dim_rel'] = dat['dim_rel'].astype(bool)
dat['to_excl'] = dat['to_excl'].astype(bool)

# NOTE: per-subject exclusions, color-coherence unification and 'en'
# scaling were already applied upstream when data_RT_VD.mat was built,
# so no further filtering happens here.
# NOTE: unibimanual is left unchanged at this point


def load_data_combined():
    """Return the combined dataset preprocessed at import time."""
    return dat


def load_data_parad(parad='VD'):
    # Per-paradigm loading has not been needed so far.
    raise NotImplementedError()


#%%
def ___MOMENTARY_EVIDENCE____():
    pass


file_impulse = '../Data_2D/sTr/impulse_MotionEnergy.csv'


def load_impulse():
    """Load the motion-energy impulse response (plain text of floats)."""
    impulse = np.loadtxt(file_impulse)
    return impulse
# written and debugged by all
import numpy as np
import os
from .data_manager import DataManager


def calculate_rsi(prices, n=14):
    """Compute the Relative Strength Index over a price series.

    The first *n* entries are seeded with the simple average gain/loss;
    subsequent entries use Wilder's exponential smoothing.

    Args:
        prices: sequence of closing prices.
        n: RSI look-back period (default 14).

    Returns:
        np.ndarray of RSI values, same length as *prices*.
    """
    deltas = np.diff(prices)
    seed = deltas[:n+1]
    up = seed[seed >= 0].sum() / n
    down = -seed[seed < 0].sum() / n
    rs = up / down
    rsi = np.zeros_like(prices)
    rsi[:n] = 100. - 100. / (1. + rs)

    for i in range(n, len(prices)):
        delta = deltas[i - 1]  # cause the diff is 1 shorter

        if delta > 0:
            upval = delta
            downval = 0.
        else:
            upval = 0.
            downval = -delta

        # Wilder's smoothing of the running average gain / loss.
        up = (up * (n - 1) + upval) / n
        down = (down * (n - 1) + downval) / n
        rs = up / down
        rsi[i] = 100. - 100. / (1. + rs)

    return rsi


def _read_prices(stockname, suffix, from_time, to_time):
    """Shared loader: close prices from csv/<stockname>_<suffix>.csv
    restricted to [from_time, to_time] (inclusive of the upper bound).

    The original read_historical/read_realtime bodies were identical
    except for the file suffix; factored out here.
    """
    dirname = os.path.dirname(__file__)
    path = '/csv/' + stockname + '_' + suffix + '.csv'
    dm = DataManager(dirname + path)
    dm._data = dm._data.loc[:, ['date', '4. close']]
    dm._back_up = dm._data.copy()
    dm.filter_by_range('date', from_time, to_time, include_max=True)
    return dm.data['4. close'].tolist()


def read_historical(stockname, from_time, to_time):
    """Close prices from the historical CSV within the time range."""
    return _read_prices(stockname, 'historical', from_time, to_time)


def read_realtime(stockname, from_time, to_time):
    """Close prices from the realtime CSV within the time range."""
    return _read_prices(stockname, 'realtime', from_time, to_time)


def get_RSI(stockname, time_type, from_time, to_time):
    """RSI series for *stockname*; time_type 'historical' selects the
    historical file, anything else the realtime file."""
    if time_type == 'historical':
        data = read_historical(stockname, from_time, to_time)
    else:
        data = read_realtime(stockname, from_time, to_time)
    return calculate_rsi(data).tolist()
# Cookiecutter template for the project's root URLconf.  Jinja tags
# ({% ... %} / {{ ... }}) are rendered at project generation time.
from django.conf import settings
from django.conf.urls.i18n import i18n_patterns
from django.contrib import admin
from django.urls import include, path, re_path
from django.views.generic.base import TemplateView
{% if cookiecutter.api == "y" or cookiecutter.api == "Y" %}
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi

# drf-yasg schema view for the auto-generated API documentation.
schema_view = get_schema_view(
    openapi.Info(
        title="{{ cookiecutter.project_name }} API",
        default_version='v1',
        description="{{ cookiecutter.description }}",
        contact=openapi.Contact(email="{{ cookiecutter.email }}"),
        license=openapi.License(name="BSD License"),
    ),
    public=False,
    permission_classes=(permissions.AllowAny,),
)
{% endif %}

admin.site.site_title = "{{ cookiecutter.project_name|title }} Administration"
admin.site.site_header = "{{ cookiecutter.project_name|title }} Administration"

# Admin is served under language-prefixed URLs (default language unprefixed).
urlpatterns = i18n_patterns(
    path('admin/', admin.site.urls),
    prefix_default_language = False
)

handler404 = '{{ cookiecutter.project_name }}.views.handler404'
handler500 = '{{ cookiecutter.project_name }}.views.handler500'

# Development-only routes: API docs, error-page previews, media, debug toolbar.
if settings.DEBUG:
    {% if cookiecutter.api == "y" or cookiecutter.api == "Y" %}
    urlpatterns += [
        re_path(r'^document(?P<format>\.json|\.yaml)$', schema_view.without_ui(cache_timeout=0), name='schema-json'),
        path('document/', schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'),
        path('redoc/', schema_view.with_ui('redoc', cache_timeout=0), name='schema-redoc'),
        path('api-auth/', include('rest_framework.urls')),
    ]
    {% endif %}
    urlpatterns += [
        # Testing 404 and 500 error pages
        path('404/', TemplateView.as_view(template_name='404.html'), name='404'),
        path('500/', TemplateView.as_view(template_name='500.html'), name='500'),
    ]

    from django.conf.urls.static import static
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)

    import debug_toolbar
    urlpatterns += [path('__debug__/', include(debug_toolbar.urls))]
# Savings simulation: each month 25% of the trip cost is put aside;
# every 4th month earns 25% interest, and every odd month after the
# first loses 16% of the balance (spent).
cost_journey = float(input())
number_months = int(input())

collected_money = 0
for month in range(1, number_months + 1):
    # Interest is applied before this month's deposit.
    if month % 4 == 0:
        collected_money *= 125/100
    # Odd months after the first: 16% of the balance is spent.
    if month % 2 != 0 and month > 1:
        collected_money *= 84/100
    # Monthly deposit: a quarter of the journey's cost.
    collected_money += (cost_journey * 25/100)

# Surplus (if saved enough) or shortfall (if not).
needed_left_money = abs(collected_money - cost_journey)

if collected_money >= cost_journey:
    print(f"Bravo! You can go to Disneyland and you will have {needed_left_money:.2f}lv. for souvenirs.")
else:
    print(f"Sorry. You need {needed_left_money:.2f}lv. more.")
from __future__ import print_function

from flask import Flask, jsonify


class GeneDB(object):
    """In-memory gene variant store loaded from a TSV file."""

    def __init__(self):
        self.labels = []          # original column headers
        self.fields = []          # lower-cased, underscore-joined header keys
        self.genes = {}           # gene symbol -> record dict
        self.friendly_names = {}  # field key -> original header label
        self.keys = []            # sorted, upper-cased gene symbols

    def read_data(self):
        """Load variant_results.tsv into self.genes / self.keys."""
        with open('variant_results.tsv', 'r') as f:
            headers = f.readline().rstrip()
            self.labels = headers.split('\t')
            self.fields = headers.lower().replace(' ', '_').split('\t')
            self.friendly_names = dict(zip(self.fields, self.labels))
            for line in f:
                try:
                    values = line.rstrip().split('\t')
                except UnicodeDecodeError as e:
                    # BUG FIX: the original fell through after the error and
                    # built a record from the *previous* line's values (or an
                    # undefined name on the first line).  Skip the bad line.
                    print('Error decoding line: {}'.format(str(e)))
                    continue
                record = dict(zip(self.fields, values))
                self.genes[record['gene']] = record
        self.keys = sorted([w.upper() for w in self.genes.keys()])

    def get_matches(self, query):
        """Return stored keys starting with *query* (case-insensitive)."""
        return [m for m in self.keys if m.startswith(query.upper())]


# initialize gene database, and load data from TSV
genedb = GeneDB()
genedb.read_data()

# initialize API
app = Flask(__name__)


@app.route('/api/v1/suggest/<query>/', methods=['GET'])
def suggest(query):
    """Autocomplete endpoint: gene symbols matching the query prefix."""
    matches = genedb.get_matches(query)
    response = jsonify(matches)
    response.headers.add('Access-Control-Allow-Origin', '*')
    return response


@app.route('/api/v1/retrieve/<query>/', methods=['GET'])
def retrieve(query):
    """Full records for every gene whose symbol matches the query prefix."""
    matches = genedb.get_matches(query)
    records = [genedb.genes[m] for m in matches]
    return jsonify(records)


@app.route('/api/v1/genes/', methods=['GET'])
def all_genes():
    """Dump the entire gene table."""
    return jsonify(genedb.genes)


@app.route('/api/v1/keys/', methods=['GET'])
def all_keys():
    """List every known gene symbol (sorted, upper-cased)."""
    response = jsonify(genedb.keys)
    response.headers.add('Access-Control-Allow-Origin', '*')
    return response
import os
from pathlib import Path
import random
import pickle


# Fixed paths of the split files consumed by train_fetal.py.
def pickle_dump(item, out_file):
    """Serialize *item* to the path *out_file* with pickle."""
    with open(out_file, "wb") as opened_file:
        pickle.dump(item, opened_file)


def delete_existing_files():
    """Remove any previous train/validation/test split files."""
    os.remove(r"/home/Shai/PycharmProjects/UNET_3D/debug_split/validation_ids.pkl")
    os.remove(r"/home/Shai/PycharmProjects/UNET_3D/debug_split/training_ids.pkl")
    os.remove(r"/home/Shai/PycharmProjects/UNET_3D/debug_split/test_ids.pkl")


def create_files(training_list, validation_list, test_list):
    """Write the three split files read by the training script."""
    pickle_dump(training_list, r"/home/Shai/PycharmProjects/UNET_3D/debug_split/training_ids.pkl")
    pickle_dump(validation_list, r"/home/Shai/PycharmProjects/UNET_3D/debug_split/validation_ids.pkl")
    pickle_dump(test_list, r"/home/Shai/PycharmProjects/UNET_3D/debug_split/test_ids.pkl")


def run_training_size_exp(subject_ids, exp_names_prefix, conf_to_imitate=None):
    """Run a training-set-size sweep.

    For 3 random validation/test splits, repeatedly samples training subsets
    of increasing size, launches training and test prediction as external
    commands, and returns the list of experiment names that were run.

    NOTE(review): *exp_names_prefix* is currently unused — confirm intent.
    """
    all_list = subject_ids
    # [0, 10, 20, 6, 12, 16, 1, 3, 5, 9, 13, 14, 15, 17, 18, 21, 22, 23, 24, 25, 26, 19, 11, 8, 7, 4, 2]
    trainset_sizes = [4, 8, 12, 16, 20]
    n_iters = [5, 4, 3, 2, 1]  # fewer repeats for the larger subset sizes
    all_experiment_names = []
    # checks for 3 random sets of validation+test set
    for iValMode in range(3):
        print(f'Started iteration {iValMode+1} - fixing validation and test set')
        random.shuffle(all_list)
        all_list_temp = all_list
        test_list = all_list_temp[:3]
        validation_list = all_list_temp[3:6]
        training_list = all_list_temp[6:]
        print(f"Test set: {test_list}")
        print(f"Val set: {validation_list}")
        for i in range(len(trainset_sizes)):
            print(f"In round {i+1} out of {len(trainset_sizes)}")
            for j in range(n_iters[i]):
                print(f"In sample {j+1} out of {n_iters[i]}")
                try:
                    delete_existing_files()
                except OSError:  # fixed: narrowed from a bare except
                    print('Problem deleting files')
                try:
                    cur_training_list = random.sample(training_list, trainset_sizes[i])
                    create_files(cur_training_list, validation_list, test_list)
                except (ValueError, OSError):
                    # Fixed: message previously said "deleting" for the
                    # sampling/creation step; also narrowed the except.
                    print('Problem creating files')
                print('Created files, now training')
                cur_exp_name = f"trainset_experiment_{iValMode}_val_{trainset_sizes[i]}_train_{j}_iter"
                if conf_to_imitate is None:
                    cmd = f"python train_fetal.py --experiment_name='{cur_exp_name}'"
                else:
                    cmd = f"python train_fetal.py --experiment_name='{cur_exp_name}'" \
                          f" --imitate_experiment='{conf_to_imitate}'"
                print(cmd)
                os.system(cmd)
                print("Finished training, now running on test")
                conf_dir = '../../../../../datadrive/configs/' + f'{cur_exp_name}'
                cmd = f"python predict.py --split='test' --config='{conf_dir}'"
                print(cmd)
                os.system(cmd)
                print('Finished forward')
                all_experiment_names = all_experiment_names + [cur_exp_name]
    return all_experiment_names


if __name__ == "__main__":
    subject_ids = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
                   17, 18, 19, 20, 21, 22, 23, 24, 25]
    exp_names_prefix = ''
    to_imit = 'experiment_no_augs'
    run_training_size_exp(subject_ids, exp_names_prefix, to_imit)
from rest_framework.views import APIView
from core.models import Device
from rest_framework import status
from django.shortcuts import get_object_or_404
from rest_framework.decorators import api_view
from rest_framework.response import Response


@api_view(['GET'])
def activate_device(request):
    """Activate a device identified by its API key and record its position.

    Requires API_KEY, longitude and latitude query parameters; responds 404
    when the key matches no device and 400 when a parameter is missing.
    """
    params = request.query_params
    api_key = params.get('API_KEY')
    longitude = params.get('longitude')
    latitude = params.get('latitude')

    # Guard clause: all three parameters are mandatory.
    if not (api_key and longitude and latitude):
        return Response({'detail': 'Invalid request'},
                        status=status.HTTP_400_BAD_REQUEST)

    device = get_object_or_404(Device, key=api_key)
    device.longitude = longitude
    device.latitude = latitude
    device.is_active = True
    device.save()
    return Response({'result': 'activated'})
# Reads a whitespace-separated list of integers and delegates to
# data.functional.triple, which checks whether any three of them sum to zero.
from data import functional

try:
    numbers = [int(token) for token in input("enter the elements: ").split()]
    functional.triple(numbers)  # delegate the three-sum check
except ValueError:
    print("Input only accepts decimal numbers")
import os
import re
import shutil
from PIL import Image
from PIL import ImageOps
from tqdm import tqdm
import time
from mtcnn import MTCNN
import cv2


def ResizeToSquare(path):
    """Scale the image at *path* to fit in 256x256, padding to a square
    with light gray (225, 225, 225)."""
    desired_size = 256
    im = Image.open(path)
    old_size = im.size  # old_size[0] is in (width, height) format
    ratio = float(desired_size) / max(old_size)
    new_size = tuple([int(x * ratio) for x in old_size])
    # NOTE(review): Image.ANTIALIAS is deprecated in newer Pillow releases
    # (use Image.LANCZOS); kept for behavioral parity with the installed lib.
    im = im.resize(new_size, Image.ANTIALIAS)
    # Pad symmetrically so the scaled image is centered on the square canvas.
    delta_w = desired_size - new_size[0]
    delta_h = desired_size - new_size[1]
    padding = (delta_w // 2, delta_h // 2, delta_w - (delta_w // 2), delta_h - (delta_h // 2))
    new_im = ImageOps.expand(im, padding, fill=(225, 225, 225))
    return new_im


def CropImageTop(topCoordinate, imCrop):
    """Return *imCrop* with everything above *topCoordinate* removed.
    (Does not modify the original image.)"""
    width, height = imCrop.size
    im1 = imCrop.crop((0, topCoordinate, width, height))
    return im1


def CutFromBackgroundRatio(path):
    """Heuristic fallback: find the neck line by scanning rows top-down.

    A pixel whose channel sum >= BACKGROUNDVALUE counts as background; the
    neck is the first row whose foreground share drops NECKPERCENTDECREASE
    points below the maximum seen so far. Returns the cropped image.
    """
    BACKGROUNDVALUE = 630
    NECKPERCENTDECREASE = 5
    im = Image.open(path)
    width, height = im.size
    imagePixels = im.getdata()
    bCounter, mostB, neckLine = 0, 0, 0
    for i, pix in enumerate(imagePixels):
        if pix[0] + pix[1] + pix[2] >= BACKGROUNDVALUE:
            bCounter += 1
        if i % width == 0:  # row boundary: evaluate the finished row
            personPercent = round(100 - (bCounter / width) * 100)
            # 99% is treated as noise (border artifacts), not a real maximum.
            if mostB < personPercent and personPercent != 99:
                mostB = personPercent
            if personPercent < mostB - NECKPERCENTDECREASE:
                neckLine = i // width
                break
            bCounter = 0
    croppedImage = CropImageTop(neckLine, im)
    return croppedImage


def CutFromDetectFace(path, detector):
    """Crop the image just above the detected face using MTCNN.

    Returns None when no face is found so the caller can fall back to the
    background-ratio heuristic.
    """
    img = cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2RGB)
    face = detector.detect_faces(img)
    if len(face) == 0:
        print("Failed facial recognition, trying manual algorithm...")
        return None
    # NOTE(review): box[0] is the x coordinate in MTCNN's (x, y, w, h) box;
    # using it as a vertical crop looks suspicious — confirm box[1] was not
    # intended. Behavior kept as-is.
    neckLine = face[0]['box'][0] - 15 if face[0]['box'][0] - 15 > 0 else 0
    croppedImage = CropImageTop(neckLine, Image.open(path))
    return croppedImage


counter = 0
# Fixed: construct the (expensive) MTCNN detector once, not per image.
detector = MTCNN()
for (root, dirs, files) in tqdm(os.walk("D:\PythonProjectsDDrive\ClothesTryOnStage2\\traindata\\train", topdown=False)):
    for i, file in enumerate(files):
        if bool(re.search(r"\d\.jpg", file)):  # raw string for the regex
            counter += 1
            outImage = CutFromDetectFace(root + "\\" + file, detector)
            if outImage is None:  # fixed: identity check instead of ==
                outImage = CutFromBackgroundRatio(root + "\\" + file)
            outImage.save(f"facelessDataset\\{counter}.jpg", "JPEG")
            outImage.close()
class ListaEncadeada():
    """Doubly linked list built from `No` nodes (attributes dado/prox/ant).

    The original file was syntactically invalid (empty method stubs,
    references to undefined names); the methods below complete it with the
    evident intended behavior while keeping every original name.
    """

    def __init__(self):
        self._inicio = None  # head node
        self._fim = None     # tail node
        self.prox = None
        self.ant = None

    def isVazia(self):
        """Return True when the list holds no nodes."""
        return self._inicio is None and self._fim is None

    def inserirNoInicio(self, dado):
        """Insert *dado* at the head of the list."""
        novo_no = No(dado)
        if self.isVazia():
            self._inicio = novo_no
            self._fim = novo_no
        else:
            novo_no.prox = self._inicio
            self._inicio.ant = novo_no
            self._inicio = novo_no

    def inserirNoFim(self, dado):
        """Insert *dado* at the tail (fixed: was an empty, invalid stub)."""
        novo_no = No(dado)
        if self.isVazia():
            self._inicio = novo_no
            self._fim = novo_no
        else:
            novo_no.ant = self._fim
            self._fim.prox = novo_no
            self._fim = novo_no

    def removerDoInicio(self):
        """Remove and return the head node, or None when the list is empty
        (fixed: was an empty, invalid stub)."""
        primeiro = self._inicio
        if primeiro is not None:
            if primeiro.prox is not None:
                self._inicio = primeiro.prox
                self._inicio.ant = None
            else:
                self._inicio = self._fim = None
        return primeiro

    def removerDoFim(self):
        """Remove and return the tail node, or None when the list is empty.

        Fixed: `ultimo` was returned without ever being assigned.
        """
        ultimo = self._fim
        if not self.isVazia():
            if self._fim.ant is not None:
                penultimo = self._fim.ant
                penultimo.prox = None
                self._fim = penultimo
            else:
                self._inicio = self._fim = None
        return ultimo

    def buscar(self, dado):
        """Return the first node whose payload equals *dado*, else None.

        Fixed: the comparison used an undefined name `x`.
        """
        i = self._inicio
        while i is not None:
            if i.dado == dado:
                return i
            i = i.prox
        return i


class pilha(ListaEncadeada):
    """Stack built on the linked list (original lowercase name preserved)."""

    def __init__(self):
        # Fixed: `super(Pilha).__init__():` was both a NameError and a
        # syntax error (trailing colon on a call).
        super().__init__()
        print('blablabla')
from enum import Enum


class Type(Enum):
    """Pokemon elemental types; the numeric value indexes into TypeChart."""
    none = 0
    normal = 1
    fire = 2
    water = 3
    electric = 4
    grass = 5
    ice = 6
    fighting = 7
    poison = 8
    ground = 9
    flying = 10
    psychic = 11
    bug = 12
    rock = 13
    ghost = 14
    dragon = 15
    dark = 16
    steel = 17
    fairy = 18


# TypeChart[attacker][defender] -> damage multiplier, 1.0 unless listed below.
TypeChart = [[1.0] * 19 for _ in range(19)]

# [attacker, defender] pairs per effectiveness class.
superEffective = [
    [Type.fire, Type.grass],
    [Type.water, Type.fire],
    [Type.grass, Type.water],
    [Type.electric, Type.water],
]
notVeryEffective = [
    [Type.fire, Type.water],
    [Type.grass, Type.fire],
    [Type.water, Type.grass],
    [Type.electric, Type.grass],
    [Type.electric, Type.electric],
    [Type.grass, Type.grass],
    [Type.water, Type.water],
    [Type.fire, Type.fire],
]
immuneEffective = []

for attacker, defender in superEffective:
    TypeChart[attacker.value][defender.value] = 2.0
for attacker, defender in notVeryEffective:
    TypeChart[attacker.value][defender.value] = 0.5
for attacker, defender in immuneEffective:
    TypeChart[attacker.value][defender.value] = 0.0

if __name__ == "__main__":
    print("Water->Electric: " + str(TypeChart[Type.water.value][Type.electric.value]))
    print("Water->Fire: " + str(TypeChart[Type.water.value][Type.fire.value]))
    print("Water->Water: " + str(TypeChart[Type.water.value][Type.water.value]))
#! /usr/bin/env python
#
# Copyright (c) Members of the EGEE Collaboration. 2009-2010.
# See http://www.eu-egee.org/partners/ for details on the copyright
# holders.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os

# Clean a "live" LDIF dump: write a fixed header of base entries, then copy
# the dump while unwrapping continuation lines (in LDIF, a line beginning
# with a space continues the previous attribute value).

# Fixed: files are opened via context managers so handles are closed even
# when an exception occurs mid-copy.
with open("default.ldif.live") as f:
    lines = f.readlines()

with open("default.ldif.clean", "w") as f:
    # NOTE(review): the last entry's attribute is `Mds-Vo-name: resource`
    # although its dn names IN2P3-CC — looks inconsistent; kept as found.
    f.write("""
dn: o=grid
objectClass: organization
o: grid

dn: Mds-Vo-name=local,o=grid
objectClass: GlueTop
objectClass: Mds
Mds-Vo-name: local

dn: Mds-Vo-name=resource,o=grid
objectClass: GlueTop
objectClass: Mds
Mds-Vo-name: resource

dn: Mds-Vo-name=IN2P3-CC,Mds-Vo-name=local,o=grid
objectClass: GlueTop
Mds-Vo-name: resource

""")
    outputline = ""
    for line in lines[2:]:  # skip the dump's first two lines
        if line[0] == " ":
            # Continuation line: append to the attribute being accumulated.
            outputline = outputline + line.strip()
        else:
            # NOTE(review): the very first write emits an empty line because
            # outputline starts as ""; behavior kept.
            f.write(outputline + "\n")
            outputline = line.strip()
    f.write(outputline + "\n")  # flush the final accumulated attribute
# Generated by Django 2.2.6 on 2019-11-07 05:36 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('work', '0030_logs'), ] operations = [ migrations.RenameModel( old_name='Logs', new_name='Log', ), ]
import os
from plugins import BasePlugin
from typing import Union, List
import pyUbiForge
import logging


class Plugin(BasePlugin):
    """Export the raw bytes of a forge-archive file to the dump folder."""

    plugin_name = 'Export Binary'
    plugin_level = 4
    file_type = '*'  # applies to every file type

    def run(self, file_id: Union[str, int], forge_file_name: str, datafile_id: int, options: Union[List[dict], None] = None):
        """Locate *file_id* in the given forge/datafile and dump it as .bin.

        Logs a warning and returns silently when the file cannot be found.
        """
        data = pyUbiForge.temp_files(file_id, forge_file_name, datafile_id)
        if data is None:
            logging.warning(f"Failed to find file {file_id:016X}")
            return
        out_path = os.path.join(
            pyUbiForge.CONFIG.get('dumpFolder', 'output'),
            f'{pyUbiForge.game_identifier()}_{data.file_name}_{file_id:016X}.bin'
        )
        # Fixed: the output handle was never closed (resource leak); a
        # context manager guarantees it is flushed and closed.
        with open(out_path, 'wb') as out_file:
            out_file.write(data.file.read_rest())
# Raspberry Pi controller setup for a two-dam water-management demo rig:
# counts flow-sensor pulses, reads three ultrasonic level sensors, drives
# rainfall servos, and logs readings to Google Sheets / Firestore.
import gspread
from oauth2client.service_account import ServiceAccountCredentials
import os
from google.oauth2 import service_account
from google.cloud import firestore
import RPi.GPIO as GPIO
import time, sys
from gpiozero import Servo
from time import sleep

# BCM pin assignments.
IN_FLOW_SENSOR = 23   # pulse-counting inflow sensor
OUT_FLOW_SENSOR = 24  # pulse-counting outflow sensor
TRIG1 = 17 #level in storage
ECHO1 = 27 #level in storage
TRIG2 = 22 #level in canal
ECHO2 = 25 #level in canal
TRIG3 = 0 #level in dam2
ECHO3 = 1 #level in dam2
servo1 = Servo(6) #rainfall for dam1
servo2 = Servo(5) #rainfall for dam2
in1 = 10  # motor-driver pins for the dam2 gate
in2 = 9
GPIO.setmode(GPIO.BCM)
GPIO.setup(IN_FLOW_SENSOR, GPIO.IN, pull_up_down = GPIO.PUD_DOWN)
GPIO.setup(OUT_FLOW_SENSOR, GPIO.IN, pull_up_down = GPIO.PUD_DOWN)
GPIO.setup(TRIG1,GPIO.OUT)
GPIO.setup(ECHO1,GPIO.IN)
GPIO.setup(TRIG2,GPIO.OUT)
GPIO.setup(ECHO2,GPIO.IN)
GPIO.setup(TRIG3,GPIO.OUT)
GPIO.setup(ECHO3,GPIO.IN)
GPIO.setup(in1,GPIO.OUT)
GPIO.setup(in2,GPIO.OUT)

# Google Sheets + Firestore credentials (machine-specific paths).
scope = ["https://spreadsheets.google.com/feeds",'https://www.googleapis.com/auth/spreadsheets',"https://www.googleapis.com/auth/drive.file","https://www.googleapis.com/auth/drive"]
creds = ServiceAccountCredentials.from_json_keyfile_name(r"/home/pi/Downloads/credential.json", scope)
client = gspread.authorize(creds)
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = r"/home/pi/Downloads/MECCLOUDTEST-468f2ea6f2c4.json"
sheet = client.open("MEC ").sheet1

# NOTE(review): `global` at module level is a no-op; statements kept as-is.
global count1
global count_a
count1 = 0      # inflow pulse counter
global count2
count2=0        # outflow pulse counter
count_a=count1  # last reported inflow count
# Paired current/previous ultrasonic readings: distance_N is the current
# value, distance_NN the previous one used for change detection.
# NOTE(review): the global list says `distance2` (no underscore) — likely a
# typo for `distance_2`; confirm before relying on it.
global distance_11,distance_1,distance_22,distance2,distance_3,distance_33
distance_11 = 0
distance_1=0
distance_2=0
distance_22=0
distance_33=0
distance_3=0
distance_11 = distance_1

def countPulse1(channel):
    """Edge callback for the inflow sensor: count pulses; log every time 5
    new pulses have accumulated since the last report."""
    global count1,count_a
    count1 = count1+1
    if count_a==count1-5:
        print("greater than 5")
        count_a=count1
    print("count1 = ",count1,"count_a = ",count_a)

def countPulse2(channel):
    """Edge callback for the outflow sensor: remember the previous count and
    increment."""
    global count2,count_b
    count_b = count2
    count2 = count2+1
    print("count2 = ",count2)

# Invoke the callbacks on both rising and falling edges.
GPIO.add_event_detect(IN_FLOW_SENSOR, GPIO.BOTH, callback=countPulse1)
GPIO.add_event_detect(OUT_FLOW_SENSOR, GPIO.BOTH, callback=countPulse2)


def upload_to_firestore_inflow(inflow):
    """Push the latest inflow reading to the shared Firestore document."""
    db = firestore.Client()
    doc_ref = db.collection(u'users').document(u'B2hzw68AZfJ0Hv0h4ORa')
    doc_ref.update({
        u'inflow': inflow,
    })


def upload_to_firestore_outflow(outflow):
    """Push the latest outflow reading to Firestore."""
    db = firestore.Client()
    doc_ref = db.collection(u'users').document(u'B2hzw68AZfJ0Hv0h4ORa')
    doc_ref.update({
        u'outflow': outflow,
    })


def upload_to_firestore_level(level):
    """Push the storage water level to Firestore."""
    db = firestore.Client()
    doc_ref = db.collection(u'users').document(u'B2hzw68AZfJ0Hv0h4ORa')
    doc_ref.update({
        u'level': level,
    })


def upload_to_firestore_level_in_canal(level_in_canal):
    """Push the canal water level to Firestore."""
    db = firestore.Client()
    doc_ref = db.collection(u'users').document(u'B2hzw68AZfJ0Hv0h4ORa')
    doc_ref.update({
        u'level_in_canal': level_in_canal,
    })


def upload_to_firestore_trigger(trigger):
    """Set the dam-1 trigger flag in Firestore."""
    db = firestore.Client()
    doc_ref = db.collection(u'users').document(u'B2hzw68AZfJ0Hv0h4ORa')
    doc_ref.update({
        u'trigger': trigger,
    })


def upload_to_firestore_trigger2(trigger):
    """Set the dam-2 trigger flag in Firestore."""
    db = firestore.Client()
    doc_ref = db.collection(u'users').document(u'B2hzw68AZfJ0Hv0h4ORa')
    doc_ref.update({
        u'trigger2': trigger,
    })


def level1():
    """Measure the storage water level with the TRIG1/ECHO1 ultrasonic
    sensor and report when it changed by more than 1 cm."""
    global distance_11, distance_1
    # Fixed: level2/level3 save the previous reading first; level1 did not,
    # so its change check always compared against the initial 0.
    distance_11 = distance_1
    GPIO.output(TRIG1, False)
    time.sleep(2)
    GPIO.output(TRIG1, True)
    time.sleep(0.00001)
    GPIO.output(TRIG1, False)
    # NOTE(review): if the echo line is stuck, pulse_start/pulse_end may be
    # unbound — consider adding a timeout.
    while GPIO.input(ECHO1) == 0:
        pulse_start = time.time()
    while GPIO.input(ECHO1) == 1:
        pulse_end = time.time()
    pulse_duration = pulse_end - pulse_start
    distance_1 = pulse_duration * 17150  # half the speed of sound, in cm/s
    distance_1 = round(distance_1, 2)
    print("Distance:", distance_1, "cm")
    if distance_11 >= distance_1 + 1 or distance_11 <= distance_1 - 1:  # check if 1 precision is enough
        print("upload to firestore", distance_1)
        # upload data to firestore


def level2():
    """Measure the canal water level (TRIG2/ECHO2), reporting >1 cm changes."""
    global distance_22, distance_2
    distance_22 = distance_2
    GPIO.output(TRIG2, False)
    time.sleep(2)
    GPIO.output(TRIG2, True)
    time.sleep(0.00001)
    GPIO.output(TRIG2, False)
    while GPIO.input(ECHO2) == 0:
        pulse_start = time.time()
    while GPIO.input(ECHO2) == 1:
        pulse_end = time.time()
    pulse_duration = pulse_end - pulse_start
    distance_2 = pulse_duration * 17150
    distance_2 = round(distance_2, 2)
    print("Distance:", distance_2, "cm")
    if distance_22 >= distance_2 + 1 or distance_22 <= distance_2 - 1:  # check if 1 precision is enough
        print("upload to firestore", distance_2)
        # upload data to firestore


def level3():
    """Measure the dam-2 water level (TRIG3/ECHO3), reporting >1 cm changes."""
    global distance_33, distance_3
    distance_33 = distance_3
    GPIO.output(TRIG3, False)
    time.sleep(2)
    GPIO.output(TRIG3, True)
    time.sleep(0.00001)
    GPIO.output(TRIG3, False)
    while GPIO.input(ECHO3) == 0:
        pulse_start = time.time()
    while GPIO.input(ECHO3) == 1:
        pulse_end = time.time()
    pulse_duration = pulse_end - pulse_start
    distance_3 = pulse_duration * 17150
    distance_3 = round(distance_3, 2)
    print("Distance:", distance_3, "cm")
    if distance_33 >= distance_3 + 1 or distance_33 <= distance_3 - 1:  # check if 1 precision is enough
        print("upload to firestore", distance_3)
        # upload data to firestore


def rainfall_motor1_500ml():
    """Dispense ~500 ml of 'rain' over dam 1 by sweeping servo 1."""
    servo1.min()
    sleep(4)  # change the value according to the experiment
    servo1.max()
    sleep(4)  # change the value according to the experiment


def rainfall_motor1_1l():
    """Dispense ~1 l of 'rain' over dam 1."""
    servo1.min()
    sleep(4)  # change the value according to the experiment
    servo1.max()
    sleep(4)  # change the value according to the experiment


def rainfall_motor2_500ml():
    """Dispense ~500 ml of 'rain' over dam 2 by sweeping servo 2."""
    servo2.min()
    sleep(5)  # change the value according to the experiment
    servo2.max()
    sleep(5)  # change the value according to the experiment


def rainfall_motor2_1l():
    """Dispense ~1 l of 'rain' over dam 2."""
    servo2.min()
    sleep(5)  # change the value according to the experiment
    servo2.max()
    sleep(5)  # change the value according to the experiment


def read_from_excel():
    """Return the PLC command value from cell (1, 2) of the shared sheet."""
    data = sheet.get_all_records()  # Get a list of all records
    row = sheet.row_values(1)       # Get a specific row
    col = sheet.col_values(1)       # Get a specific column
    cell = sheet.cell(1, 2).value   # Get the value of a specific cell
    return cell


def write_to_excel():
    """Write a trigger value into cell (2, 1) of the shared sheet."""
    sheet.update_cell(2, 1, "4")  # Update one cell


def motor_dam2(x):
    """Open and close the dam-2 gate; *x* seconds each way sets the released
    volume (e.g. 1 l or 2 l). No holding time between open and close."""
    GPIO.output(in1, True)
    GPIO.output(in2, False)
    sleep(x)  # time to open
    GPIO.output(in1, False)
    GPIO.output(in2, True)
    sleep(x)  # time to close
    GPIO.output(in2, False)
    GPIO.output(in1, False)


# Main control loop: poll levels, wait for the PLC's '4' command from the
# spreadsheet, then run the scripted rainfall/release scenario cases.
while True:
    level3()
    sleep(2)
    level2()
    sleep(0.5)
    start = read_from_excel()
    if start == '4':  # change according to the value given by PLC
        print("in if")
        sleep(1)
        print("start of rainfall")
        rainfall_motor1_500ml()  # We send rain after PLC release water for case 1
        print("motor1 rainfall case1 over")
        sleep(3)
        print("start second dam rainfall")
        rainfall_motor2_500ml()
        print("dam2 rainfall completed")
        sleep(3)  # to ensure steady value in level sensor after rainfall
        level3()
        upload_to_firestore_trigger2(1)  # precaution that dam gate 1 will open
        print("dam1 released water")
        sleep(1)
        print("rainfall for dam1 started")
        rainfall_motor1_1l()
        print("rainfall for dam1 completed")
        sleep(3)
        print("start rainfall for dam2")
        rainfall_motor2_500ml()
        print("dam 2 rain completed")
        sleep(3)
        level3()  # fixed: was `level(3)` — NameError, no function `level`
        sleep(0.5)
        level2()  # fixed: was `level(2)`
        sleep(3)  # gap between cases; cannot release from dam1 before dam2
        # Fixed: `upload_to_firestore_trigger1` does not exist; presumably
        # the dam-1 trigger was meant — TODO confirm with the Firestore schema.
        upload_to_firestore_trigger(1)
        sleep(2)  # as a precaution before opening dam
        sleep(2)
        print("rainfall for dam1 started")
        sleep(4)
        print("rainfall for dam2 completed")
        sleep(3)
        print("rainfall for dam1 started")
        sleep(3)
        rainfall_motor2_500ml()
        print("rainfalll for dam2 completed")
        sleep(3)
        level3()
        sleep(8)  # delay sufficient till they release water for case 4
        upload_to_firestore_trigger(1)
        sleep(3)
        print(8)  # 20 is the delay to release 1 liter of water
        print("rainfall for dam2 started")
        print("rainfall for dam2 finished")
        sleep(3)
        print("rainfall for dam1 started")
        rainfall_motor1_1l()
        print("rainfall for dam2 completed")
        sleep(3)
        print("rainfall for dam1 started")
        sleep(3)
        rainfall_motor1_500ml()
        print("rainfall for dam1 completed")
        sleep(3)
        print("release water from dam2")
        sleep(4)
        print("dam2 rainfall started")
        sleep(3)
        rainfall_motor2_500ml()
        print("dam2 rainfall completed")
        sleep(5)
        level3()  # fixed: was `level(3)`
        level2()  # fixed: was `level(2)`
        sleep(10)  # arbitary delay as safety
# coding=gbk from pyspark import SparkConf, SparkContext # def main(args: Array[String]): Unit = { # System.setProperty("hadoop.home.dir","D:\\hadoop\\hadoop-2.5.2"); # System.setProperty("spark.sql.warehouse.dir","F:\\spark培训\\spark-2.0.0-bin-hadoop2.6"); # Logger.getLogger("org.apache.spark").setLevel(Level.WARN) # Logger.getLogger("org.eclipse.jetty.server").setLevel(Level.OFF) # # val conf = new SparkConf().setAppName("NaiveBayesExample").setMaster("local[2]") # val sc = new SparkContext(conf) # local 时URL,本地计算机(从族的概念) # My App 应用程序名字 from pyspark.sql import SQLContext conf = SparkConf().setMaster("local").setAppName("My App") sc = SparkContext(conf = conf) # 此时的sc就是一个SparkContext,了SparkContext的实例化对象,即刻就可以创建RDD了。 sqlContext = SQLContext(sc) df = sqlContext.read.format('com.databricks.spark.csv').options(header='true', inferschema='true').load('/Users/shiqingwang/PycharmProjects/dm/data/20150401_train.csv') # Displays the content of the DataFrame to stdout df.show() from pyspark.ml.feature import StringIndexer indexer = StringIndexer(inputCol="Species", outputCol="labelindex") indexed = indexer.fit(df).transform(df) indexed.show() from pyspark.sql import Row from pyspark.mllib.linalg import Vectors from pyspark.ml.classification import NaiveBayes # Load and parse the data def parseRow(row): return Row(label=row["labelindex"], features=Vectors.dense([row["Sepal.Length"], row["Sepal.Width"], row["Petal.Length"], row["Petal.Width"]])) ## Must convert to dataframe after mapping parsedData = indexed.map(parseRow).toDF() labeled = StringIndexer(inputCol="label", outputCol="labelpoint") data = labeled.fit(parsedData).transform(parsedData) data.show() ## 训练模型 #Naive Bayes nb = NaiveBayes(smoothing=1.0, modelType="multinomial") model_NB = nb.fit(data) predict_data= model_NB.transform(data) traing_err = predict_data.filter(predict_data['label'] != predict_data['prediction']).count() total = predict_data.count() nb_scores = float(traing_err)/total print traing_err, total, 
nb_scores #7 150 0.0466666666667 #Logistic Regression########################################################### # Logistic regression. Currently, this class only supports binary classification. from pyspark.ml.classification import LogisticRegression lr = LogisticRegression(maxIter=5, regParam=0.01) model_lr = lr.fit(data) predict_data= model_lr.transform(data) traing_err = predict_data.filter(predict_data['label'] != predict_data['prediction']).count() total = predict_data.count() lr_scores = float(traing_err)/total print traing_err, total, float(traing_err)/total #Decision Tree from pyspark.ml.classification import DecisionTreeClassifier dt = DecisionTreeClassifier(maxDepth=2,labelCol = 'labelpoint') model_DT= dt.fit(data) predict_data= model_DT.transform(data) traing_err = predict_data.filter(predict_data['label'] != predict_data['prediction']).count() total = predict_data.count() dt_scores = float(traing_err)/total print traing_err, total, float(traing_err)/total #GBT########################################################### ## GBT. Currently, this class only supports binary classification. 
# GBT, random-forest and multilayer-perceptron classifiers evaluated on the
# `data` / `parsedData` DataFrames prepared earlier in this script.
# NOTE(review): Python 2 print statements, as in the rest of the file.
from pyspark.ml.classification import GBTClassifier
gbt = GBTClassifier(maxIter=5, maxDepth=2,labelCol="labelpoint")
model_gbt = gbt.fit(data)
predict_data= model_gbt.transform(data)
# Training error = misclassified rows / total rows.
traing_err = predict_data.filter(predict_data['label'] != predict_data['prediction']).count()
total = predict_data.count()
dt_scores = float(traing_err)/total
print traing_err, total, float(traing_err)/total

#Random Forest
from pyspark.ml.classification import RandomForestClassifier
rf = RandomForestClassifier(numTrees=3, maxDepth=2, labelCol="labelpoint", seed=42)
model_rf= rf.fit(data)
predict_data= model_rf.transform(data)
traing_err = predict_data.filter(predict_data['label'] != predict_data['prediction']).count()
total = predict_data.count()
dt_scores = float(traing_err)/total
print traing_err, total, float(traing_err)/total

#MultilayerPerceptronClassifier###########################################################
# Classifier trainer based on the Multilayer Perceptron. Each layer has sigmoid activation function, output layer has softmax.
# Number of inputs has to be equal to the size of feature vectors. Number of outputs has to be equal to the total number of labels.
# NOTE(review): layers=[150, 5, 150] looks suspect — per the rule above the
# first layer should match the feature count (4) and the last the number of
# classes; confirm against the dataset.
from pyspark.ml.classification import MultilayerPerceptronClassifier
mlp = MultilayerPerceptronClassifier(maxIter=100, layers=[150, 5, 150], blockSize=1, seed=11)
model_mlp= mlp.fit(parsedData)
predict_data= model_mlp.transform(parsedData)
traing_err = predict_data.filter(predict_data['label'] != predict_data['prediction']).count()
total = predict_data.count()
dt_scores = float(traing_err)/total
print traing_err, total, float(traing_err)/total
from battle.battlemenu.BattleOption import BattleOptions
from battle.round.RoundAction import RoundAction
from battle.battleeffect.RegularAttack import RegularAttack
from ui.UI import UI
from ui.UserInput import UserInput


# Represents a menu of magical attacks:
# viable targets are represented as a dictionary of arrays
class MagicOption(BattleOptions):
    def __init__(self, fighter, targets):
        self.targets = targets
        super().__init__("Magic", fighter, targets)

    def generate_round_actions(self):
        """Let the player pick a spell; the spell's own selection strategy
        then chooses the target. Returns a RoundAction, or None when the
        fighter knows no spells (which should never happen)."""
        spells = self.fighter.spells
        if not spells:
            print("no spells.")  # this should never appear.
            return None
        UI().show_options(spells)
        chosen_index = UserInput().select_index_from_options(spells)
        spell = spells[chosen_index]
        # get selection strategy from action!
        victim = spell.selection_strategy.select_target(self.targets)
        return RoundAction(spell, victim)
# -*- coding: utf-8 -*-


class Solution:
    """LeetCode 73: zero out, in place, the full row and column of every
    cell that is originally zero."""

    def setZeroes(self, matrix):
        rows = len(matrix)
        cols = len(matrix[0])
        # Pass 1: for every original 0, mark the non-zero cells of its row
        # and column with a sentinel (None) so they are distinguishable from
        # the original zeros that still drive the scan.
        for r in range(rows):
            for c in range(cols):
                if matrix[r][c] != 0:
                    continue
                for rr in range(rows):
                    if matrix[rr][c] != 0:
                        matrix[rr][c] = None
                for cc in range(cols):
                    if matrix[r][cc] != 0:
                        matrix[r][cc] = None
        # Pass 2: turn every sentinel into a real zero.
        for r in range(rows):
            for c in range(cols):
                if matrix[r][c] is None:
                    matrix[r][c] = 0


if __name__ == "__main__":
    solution = Solution()

    matrix = [
        [1, 1, 1],
        [1, 0, 1],
        [1, 1, 1],
    ]
    assert solution.setZeroes(matrix) is None  # mutates in place
    assert [
        [1, 0, 1],
        [0, 0, 0],
        [1, 0, 1],
    ] == matrix

    matrix = [
        [0, 1, 2, 0],
        [3, 4, 5, 2],
        [1, 3, 1, 5],
    ]
    assert solution.setZeroes(matrix) is None
    assert [
        [0, 0, 0, 0],
        [0, 4, 5, 0],
        [0, 3, 1, 0],
    ] == matrix
# Advent of Code 2019 day 12: simulate moons ("planets") under pairwise
# gravity. Part 1 prints total energy per step; part 2 finds the period of
# each axis independently and combines them with an LCM.
import math

with open("input1.txt","r") as f:
    data = f.readlines()

class Planet:
    """A body with integer position (x, y, z) and velocity, initially at rest."""
    def __init__(self,x,y,z):
        self.x = x
        self.y = y
        self.z = z
        self.velX = 0
        self.velY = 0
        self.velZ = 0
    def doTimeStep(self):
        """Advance position by the current velocity."""
        self.x+=self.velX
        self.y+=self.velY
        self.z+=self.velZ
    def getKE(self):
        # Kinetic energy: sum of |velocity| components.
        return abs(self.velX)+abs(self.velY)+abs(self.velZ)
    def getPE(self):
        # Potential energy: sum of |position| components.
        return abs(self.x)+abs(self.y)+abs(self.z)
    def getX(self):
        return self.x
    def getY(self):
        return self.y
    def getZ(self):
        return self.z
    def getVelX(self):
        return self.velX
    def getVelY(self):
        return self.velY
    def getVelZ(self):
        return self.velZ
    def applyGravity(self, planets):
        """Pull velocity one unit toward each other planet on every axis.
        (Including self is harmless: equal coordinates change nothing.)"""
        for planet in planets:
            if planet.getX()>self.x:
                self.velX+=1
            elif planet.getX()<self.x:
                self.velX-=1
            if planet.getY()>self.y:
                self.velY+=1
            elif planet.getY()<self.y:
                self.velY-=1
            if planet.getZ()>self.z:
                self.velZ+=1
            elif planet.getZ()<self.z:
                self.velZ-=1
    def getData(self):
        """Full state tuple: (x, y, z, velX, velY, velZ)."""
        return (self.x, self.y, self.z, self.velX, self.velY, self.velZ)

# Parse lines like "<x=-1, y=0, z=2>" into Planet instances.
planets = []
for line in data:
    line = line.replace("<","").replace(">","").replace("x=","").replace("y=","").replace("z=","").split(",")
    planets.append(Planet(int(line[0]), int(line[1]), int(line[2]) ))

# Part 1: step the simulation, printing total energy (KE * PE per planet).
# NOTE(review): `while step < 1001` runs 1001 steps; AoC part 1 asks for the
# energy after exactly 1000 — confirm the intended count.
step = 0
while step < 1001:
    energyCount = 0
    step+=1
    for planet in planets:
        planet.applyGravity(planets)
    for planet in planets:
        planet.doTimeStep()
    for planet in planets:
        energyCount+=planet.getKE()*planet.getPE()
    print("Step {}, ENERGY: {}".format(step, energyCount))

# Part 2
def findPosition(planets, index):
    """Return the number of steps until the state of one axis repeats.

    *index* selects the axis (0=x, 1=y, 2=z). The per-axis state is the
    (position, velocity) pair of every planet; returns step-1, the index of
    the first repeated state.
    """
    step = 0
    positionsPast = set()
    while True:
        step+=1
        for planet in planets:
            planet.applyGravity(planets)
        for planet in planets:
            planet.doTimeStep()
        positions = []
        for planet in planets:
            if index == 0:
                positions.append(planet.getX())
                positions.append(planet.getVelX())
            if index == 1:
                positions.append(planet.getY())
                positions.append(planet.getVelY())
            if index == 2:
                positions.append(planet.getZ())
                positions.append(planet.getVelZ())
        # NOTE(review): tuple assumes exactly 3 planets (6 entries); the
        # puzzle input has 4 — confirm against input1.txt.
        value = (positions[0], positions[1], positions[2], positions[3], positions[4], positions[5])
        if value in positionsPast:
            return step-1
        else:
            positionsPast.add(value)

def lcm(a, b):
    """Least common multiple via gcd."""
    return abs(a*b) // math.gcd(a, b)

# Re-parse so part 2 starts from the initial state (part 1 mutated planets).
planets = []
step = 0
for line in data:
    line = line.replace("<","").replace(">","").replace("x=","").replace("y=","").replace("z=","").split(",")
    planets.append(Planet(int(line[0]), int(line[1]), int(line[2]) ))
step = 0
planetDict = {}
end = False
# The three axes are independent, so the full period is the LCM of the
# per-axis periods.
xRep = findPosition(planets, 0)
yRep = findPosition(planets, 1)
zRep = findPosition(planets, 2)
print("XReps at {}, YReps at {}, ZReps at {}".format(xRep, yRep, zRep))
print(lcm(lcm(xRep,yRep),zRep))
import cv2 as cv


def _show(path):
    """Load the image at *path* and display it until any key is pressed."""
    img = cv.imread(path)
    cv.imshow("Image", img)
    cv.waitKey(0)
    cv.destroyAllWindows()


def show_img1():
    """Display Q1.1.jpg."""
    _show("Q1.1.jpg")


def show_img2():
    """Display Q1Part2.jpg."""
    _show("Q1Part2.jpg")
"""modoboa-stats tests.""" import datetime import os import shutil import tempfile from django.conf import settings from django.core.management import call_command from django.urls import reverse from django.test import override_settings from modoboa.admin import factories as admin_factories from modoboa.core import models as core_models from modoboa.lib.tests import ModoTestCase class RunCommandsMixin(object): """Mixin to run management commands.""" def setUp(self): super(RunCommandsMixin, self).setUp() self.workdir = tempfile.mkdtemp() self.set_global_parameter("rrd_rootdir", self.workdir) def tearDown(self): shutil.rmtree(self.workdir) pid_file = f"{settings.PID_FILE_STORAGE_PATH}/modoboa_logparser.pid" if os.path.exists(pid_file): os.remove(pid_file) def run_logparser(self): """Run logparser command.""" path = os.path.join( os.path.dirname(__file__), "mail.log") with open(path) as fp: content = fp.read() % { "day": datetime.date.today().strftime("%b %d")} path = os.path.join(self.workdir, "mail.log") with open(path, "w") as fp: fp.write(content) self.set_global_parameter("logfile", path) call_command("logparser") def run_update_statistics(self, rebuild=False): """Run update_statistics command.""" args = [] if rebuild: args.append("--rebuild") call_command("update_statistics", *args) @override_settings(RRDTOOL_TEST_MODE=True) class ViewsTestCase(RunCommandsMixin, ModoTestCase): """Views test cases.""" @classmethod def setUpTestData(cls): # noqa super(ViewsTestCase, cls).setUpTestData() admin_factories.populate_database() cls.da = core_models.User.objects.get(username="admin@test.com") def tearDown(self): super(ViewsTestCase, self).tearDown() self.set_global_parameter("greylist", False) def test_index(self): """Test index view.""" url = reverse("maillog:fullindex") response = self.client.get(url) self.assertContains(response, 'id="graphs_accountgraphicset"') self.assertContains(response, 'id="graphs_mailtraffic"') self.client.force_login(self.da) response = 
self.client.get(url) self.assertContains(response, 'id="graphs_mailtraffic"') def test_graphs(self): """Test graphs views.""" self.run_logparser() url = reverse("maillog:graph_list") self.ajax_get(url, status=404) response = self.ajax_get("{}?gset=mailtraffic".format(url)) self.assertIn("averagetraffic", response["graphs"]) for period in ["week", "month", "year"]: response = self.ajax_get( "{}?gset=mailtraffic&period={}".format(url, period)) self.assertIn("averagetraffic", response["graphs"]) self.assertEqual(response["period_name"], period) # custom period today = datetime.date.today() start = "{} 11:00:00".format(today) end = "{} 11:40:00".format(today) response = self.ajax_get( "{}?gset=mailtraffic&period=custom&start={}&end={}".format( url, start, end) ) self.assertIn("averagetraffic", response["graphs"]) # unknown domain response = self.ajax_get( "{}?gset=mailtraffic&searchquery=unknown.com".format(url), status=400) # check with greylist enabled self.set_global_parameter("greylist", True) response = self.ajax_get("{}?gset=mailtraffic".format(url)) self.assertIn("averagetraffic", response["graphs"]) def test_account_created_graph(self): """Check data.""" self.run_update_statistics(rebuild=True) url = reverse("maillog:graph_list") response = self.ajax_get("{}?gset=accountgraphicset".format(url)) data = ( response["graphs"]["accountcreationgraphic"]["series"][0]["data"]) self.assertEqual(data[-1]["y"], 5.0) def test_graphs_as_domainadmin(self): """Test graph views as domain admin.""" self.run_logparser() self.client.force_login(self.da) url = "{}?gset=mailtraffic".format(reverse("maillog:graph_list")) response = self.ajax_get(url) self.assertIn("averagetraffic", response["graphs"]) response = self.ajax_get("{}&searchquery=test.com".format(url)) self.assertIn("averagetraffic", response["graphs"]) response = self.ajax_get( "{}&searchquery=test2.com".format(url), status=403) def test_get_domain_list(self): """Test get_domain_list view.""" url = 
reverse("maillog:domain_list") response = self.ajax_get(url) self.assertIn("test.com", response) self.assertIn("test2.com", response) @override_settings(RRDTOOL_TEST_MODE=True) class ManagementCommandsTestCase(RunCommandsMixin, ModoTestCase): """Management command test cases.""" @classmethod def setUpTestData(cls): # noqa super(ManagementCommandsTestCase, cls).setUpTestData() admin_factories.populate_database() def test_logparser(self): """Test logparser command.""" self.run_logparser() for d in ["global", "test.com"]: path = os.path.join(self.workdir, "{}.rrd".format(d)) self.assertTrue(os.path.exists(path)) def test_logparser_with_greylist(self): """Test logparser when greylist activated.""" self.set_global_parameter("greylist", True) self.run_logparser() for d in ["global", "test.com"]: path = os.path.join(self.workdir, "{}.rrd".format(d)) self.assertTrue(os.path.exists(path)) def test_update_statistics(self): """Test update_statistics command.""" self.run_update_statistics() path = os.path.join(self.workdir, "new_accounts.rrd") self.assertTrue(os.path.exists(path)) self.run_update_statistics(rebuild=True) self.assertTrue(os.path.exists(path)) def test_locking(self): with open(f"{settings.PID_FILE_STORAGE_PATH}/modoboa_logparser.pid", "w") as fp: fp.write(f"{os.getpid()}\n") with self.assertRaises(SystemExit) as inst: self.run_logparser() self.assertEqual(inst.exception.code, 2)
from dfs_topsort import dfs_topsort


def walk(G, s, S=None):
    """Traverse the component of `s` in `G` (mapping of node -> neighbor set),
    never entering nodes listed in `S`.

    Returns a predecessor map P ({node: parent}, with P[s] = None) covering
    every node reached from `s`.
    """
    # BUG FIX: the original used a mutable default argument (S=set()), which
    # is shared between calls; use the None sentinel instead.
    if S is None:
        S = set()
    P, Q = dict(), set()
    P[s] = None
    Q.add(s)
    while Q:
        u = Q.pop()
        # Neighbours not yet reached (keys of P) and not excluded (S).
        for v in G[u].difference(P, S):
            Q.add(v)
            P[v] = u
    return P


def tr(G):
    """Return the transpose of `G`: every edge u->v becomes v->u.

    Nodes must be the integers 0..len(G)-1; the result maps each node to a
    set of predecessors (so it is safe to feed to walk()).
    """
    GT = {}
    for u in range(len(G)):
        GT[u] = set()
    for u in range(len(G)):
        for v in G[u]:
            GT[v].add(u)
    return GT


def scc(G):
    """Kosaraju's algorithm: return the strongly connected components of `G`.

    Each component is returned as the predecessor map produced by walk() on
    the transposed graph, visiting roots in DFS-finish (topological) order.
    """
    GT = tr(G)
    sccs, seen = [], set()
    for u in dfs_topsort(G):
        if u in seen:
            continue
        C = walk(GT, u, seen)
        seen.update(C)
        sccs.append(C)
    return sccs


def main():
    a, b, c, d, e, f, g, h, i = range(9)
    # NOTE: `g` (node 6) is rebound to the adjacency list; the inner `[g]`
    # still denotes node 6 because the list is evaluated before rebinding.
    g = [[b, c], [d, e, i], [d], [a, h], [f], [g], [e, h], [i], [h]]
    print(scc(g))


if __name__ == '__main__':
    main()
# Generated by Django 3.0.3 on 2020-03-30 06:06 from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('shorten', '0010_auto_20200330_0554'), ] operations = [ migrations.AlterField( model_name='urltable', name='user', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), ]
# -*- coding: utf-8 -*-
import sys
from PyQt4 import QtGui
from PyQt4 import QtCore
from datetime import *


class Ventana(QtGui.QWidget):
    """Window showing Mexican independence facts, with two buttons that
    compute (a) days until the next September 15th and (b) its weekday."""

    def __init__(self, parent=None):
        QtGui.QWidget.__init__(self, parent)
        self.setGeometry(500, 500, 350, 250)
        self.setWindowTitle("!Viva Mexico!")
        self.setWindowIcon(QtGui.QIcon('icono.png'))
        texto = "\n Personajes importantes de la independecia de mexico:\n\n * Miguel Hidalgo y Costilla (1753 a 1811)\n * Ignacio Allende (1769 a 1811)\n *Josefa Ortiz de Domínguez (1768 a 1829)"
        self.label = QtGui.QLabel(texto, self)
        self.boton = QtGui.QPushButton('Aprietame', self)
        self.botonDia = QtGui.QPushButton('Aprietame', self)
        self.boton.setGeometry(125, 100, 100, 50)
        self.botonDia.setGeometry(10, 150, 100, 50)
        self.boton.clicked.connect(self.calcula_proximo_15)
        self.botonDia.clicked.connect(self.calcula_dia)

    def calcula_proximo_15(self):
        """Show on `boton` how many days remain until the next Sept 15th."""
        fecha_actual = datetime.now()  # current date and time
        # BUG FIX: the original compared the whole datetime against the ints
        # 9 and 15 (`fecha_actual == 9 and fecha_actual < 15`), which is
        # always False; compare .month and .day instead.
        if fecha_actual.month < 9 or (fecha_actual.month == 9 and fecha_actual.day < 15):
            # September 15th has not yet passed this year.
            proximo_15 = datetime(fecha_actual.year, 9, 15, 0, 0, 0)
        else:
            # Already past this year's September 15th.
            proximo_15 = datetime(fecha_actual.year + 1, 9, 15, 0, 0, 0)
        diferencia = proximo_15 - fecha_actual
        self.boton.setText("Faltan " + str(diferencia.days) + " dias, para el proximo 15 de septiembre.")
        self.boton.resize(self.boton.sizeHint())
        self.boton.move(15, 100)

    def calcula_dia(self):
        """Show on `botonDia` the Spanish weekday of the next Sept 15th."""
        fecha_actual = datetime.now()  # current date and time
        dias = {'MONDAY':'Lunes','TUESDAY':'Martes','WEDNESDAY':'Miercoles','THURSDAY':'Jueves', \
                'FRIDAY':'Viernes','SATURDAY':'Sabado','SUNDAY':'Domingo'}
        # BUG FIX: same month/day comparison fix as calcula_proximo_15.
        if fecha_actual.month < 9 or (fecha_actual.month == 9 and fecha_actual.day < 15):
            year = fecha_actual.year
        else:
            year = fecha_actual.year + 1
        fecha = date(year, 9, 15)
        # NOTE(review): strftime('%A') is locale dependent; the mapping
        # assumes English day names -- confirm the runtime locale.
        dia = str(dias[fecha.strftime('%A').upper()])
        self.botonDia.setText("Calculando...\nSegun el calendario cae en dia " + dia)
        self.botonDia.resize(self.botonDia.sizeHint())


if __name__ == "__main__":
    app = QtGui.QApplication(sys.argv)
    empieza = Ventana()
    empieza.show()
    # BUG FIX: without entering the Qt event loop the program exits
    # immediately and the window never stays on screen.
    sys.exit(app.exec_())
## Draw a Gameboard
# Prints an ASCII grid of the requested size — usable as the board for
# tic tac toe, checkers, and similar games.


def drawBoard(height, width):
    """Print a gameboard with `height` rows and `width` columns.

    Horizontal-edge lines and cell lines alternate, so 2*height + 1 lines
    are printed in total.
    """
    edge_line = " " + "--- " * width      # e.g. " --- --- "
    cell_line = "| " * (width + 1)        # e.g. "| | | "
    for line_no in range(2 * height + 1):
        # Odd line numbers are the cell rows, even ones the edges.
        print(cell_line if line_no % 2 else edge_line)


if __name__ == "__main__":
    height = int(input("How tall do you want your gameboard?: "))
    width = int(input("How wide do you want your gameboard?: "))
    drawBoard(height, width)
# -- coding: utf-8 -- from app import db tags_relationship = db.Table('tags_relationship', db.Column('tag_id', db.Integer, db.ForeignKey('tags.id')), db.Column('image_id', db.Integer, db.ForeignKey('images.id')) ) class Image(db.Model): __tablename__ = 'images' id = db.Column(db.Integer, primary_key=True) name = db.Column(db.String(100)) tags = db.relationship("Tag", secondary=tags_relationship, backref='images') def __init__(self, name): self.name = name class Tag(db.Model): __tablename__ = 'tags' id = db.Column(db.Integer, primary_key=True) tag_name = db.Column(db.String(64), unique=True) def __init__(self, tag_name): self.tag_name = tag_name
import os, sys, time, threading, datetime sys.path.append('.') sys.path.append('..') from markets.okcoin import * from markets.bitfinex import * from util.util import * from common.common import * from secret import * class MarketWatcher(threading.Thread): def __init__(self, market): threading.Thread.__init__(self) self.market = market def run(self): while True: try: self.market.update_status() except Exception as e: print e print "Market update failed" # print 'Market %s updated' %self.market.name time.sleep(self.market.interval) class EaglesEye(threading.Thread): def __init__(self): self.markets = {} self.markets['okcoin'] = OKCoin(okcoin_partner, okcoin_secret) self.markets['bitfinex'] = Bitfinex(bitfinex_key, bitfinex_secret) self.market_watchers = {} # world best bid and offer self.WBBO = {kCNY: {'ask': -1, 'iask': -1, 'bid': -1, 'ibid': -1, 'ask_market': None, 'bid_market': None}, kUSD: {'ask': -1, 'iask': -1, 'bid': -1, 'ibid': -1, 'ask_market': None, 'bid_market': None}} for key in self.markets: self.market_watchers[key] = MarketWatcher(self.markets[key]) self.market_watchers[key].setDaemon(True) self.market_watchers[key].start() self.env_worker = EnvWorker(env) self.env_worker.setDaemon(True) self.env_worker.start() def timestamp(self): return datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S') def update_WBBO(self, fiat): best_bid = -1 best_ask = 1e9 has_best_bid = False has_best_ask = False best_bid_market = None best_ask_market = None for key in self.markets: market = self.markets[key] if market.fiat != fiat or (not market.success) or market.lagged(): continue bid = market.best_bid ask = market.best_ask if bid > best_bid: best_bid = bid best_bid_market = market has_best_bid = True if ask < best_ask: best_ask = ask best_ask_market = market has_best_ask = True if has_best_bid and has_best_ask: self.WBBO[fiat]['ask'] = best_ask self.WBBO[fiat]['bid'] = best_bid self.WBBO[fiat]['iask'] = best_ask * env.fiat_rate[best_ask_market.fiat] 
/ (1 - best_ask_market.trade_fee) / (1 - best_ask_market.transaction_fee) self.WBBO[fiat]['ibid'] = best_bid * env.fiat_rate[best_bid_market.fiat] * (1 - best_bid_market.trade_fee) * (1 - best_bid_market.transaction_fee) self.WBBO[fiat]['ask_market'] = best_ask_market self.WBBO[fiat]['bid_market'] = best_bid_market return True else: self.WBBO[fiat]['ask'] = -1 self.WBBO[fiat]['bid'] = -1 self.WBBO[fiat]['iask'] = -1 self.WBBO[fiat]['ibid'] = -1 self.WBBO[fiat]['ask_market'] = None self.WBBO[fiat]['bid_market'] = None return False def calc_profit(self, fiat1, fiat2): if fiat1 == fiat2: return -1 profit = (self.WBBO[fiat2]['ibid'] - self.WBBO[fiat1]['iask']) / self.WBBO[fiat1]['iask'] print "[%s] Arbitrage: %s => %s, %.02f => %.02f, profit: %.02f%%" %(self.timestamp(), self.WBBO[fiat1]['ask_market'].name, self.WBBO[fiat2]['bid_market'].name, self.WBBO[fiat1]['iask'], self.WBBO[fiat2]['ibid'], profit * 100) return profit def watch(self): f = open('data.txt', 'w') while True: if not self.update_WBBO(kCNY) or not self.update_WBBO(kUSD): time.sleep(1) continue profit_cny2usd = self.calc_profit(kCNY, kUSD) profit_usd2cny = self.calc_profit(kUSD, kCNY) f.write("%f\t%f\t%f\t%f\t%f\t%f\t%f\n" %(time.time(), self.WBBO[kCNY]['iask'], self.WBBO[kUSD]['ibid'], profit_cny2usd, self.WBBO[kUSD]['iask'], self.WBBO[kCNY]['ibid'], profit_usd2cny)) f.flush() if False: profit = (best_bid - best_ask) / best_ask if ask_market.can_buy(0.01) and bid_market.can_sell(0.01): ask_market.buy(0.01) bid_market.sell(0.01) portfolio = {kBTC: 0, kCNY: 0, kUSD: 0} for key in self.markets: for equity in portfolio: portfolio[equity] += self.markets[key].balance[equity] print "[%s] Portfolio: %.02f CNY, %.02f USD, %02f BTC" %(self.timestamp(), portfolio[kCNY], portfolio[kUSD], portfolio[kBTC]) time.sleep(1) f.close() if __name__ == '__main__': eagle = EaglesEye() eagle.watch()
from rest_framework import permissions
from rest_framework.authentication import SessionAuthentication, BasicAuthentication
from .models import User
from rest_framework.response import Response
from rest_framework.views import APIView
from django.db.utils import IntegrityError
from django.contrib.auth import authenticate, login, logout


class CsrfExemptSessionAuthentication(SessionAuthentication):
    """Session authentication with the CSRF check disabled (clients call
    these endpoints without a CSRF token)."""

    def enforce_csrf(self, request):
        # Returning None skips DRF's CSRF validation entirely.
        return None


class UserReg(APIView):
    """Registration endpoint (open to anonymous users)."""
    permission_classes = (permissions.AllowAny,)
    authentication_classes = (CsrfExemptSessionAuthentication, BasicAuthentication)

    def post(self, request):
        """
        Register a new user
        ---
        Parameters:
        * username - must be unique
        * email - must be unique
        * first_name
        * last_name
        * password
        ---
        Returns: the username if the user was created, otherwise an error
        """
        try:
            user = User.objects.create_user(
                username=request.POST["username"],
                email=request.POST["email"],
                first_name=request.POST["first_name"],
                last_name=request.POST["last_name"],
                password=request.POST["password"]
            )
        except KeyError as e:
            # A required form field is missing.
            return Response("No " + e.args[0] + " field", status=400)
        except IntegrityError as e:
            # Username or email already taken.
            return Response(e.args[0], status=400)
        return Response(user.username, status=201)


class UserAuth(APIView):
    """Login endpoint (open to anonymous users)."""
    permission_classes = (permissions.AllowAny,)
    authentication_classes = (CsrfExemptSessionAuthentication, BasicAuthentication)

    def post(self, request):
        """
        Log a user in
        ---
        Parameters:
        * username
        * password
        ---
        Returns: an error with status 400, or status 200 on success
        """
        try:
            username = request.POST['username']
            password = request.POST['password']
        except KeyError as e:
            return Response("No " + e.args[0] + " field", status=400)
        user = authenticate(request, username=username, password=password)
        if user is not None:
            login(request, user)
            return Response(status=200)
        else:
            return Response("User not found", status=400)


class UserlLogOut(APIView):
    """Logout endpoint. (Class name keeps its historical typo: it may be
    referenced by URLconf modules elsewhere.)"""
    # BUG FIX: DRF requires ALL listed permission classes to pass, so the
    # original (AllowAny, IsAuthenticated) reduced to IsAuthenticated with a
    # contradictory AllowAny alongside it; keep only the effective class.
    permission_classes = (permissions.IsAuthenticated,)
    authentication_classes = (CsrfExemptSessionAuthentication, BasicAuthentication)

    def post(self, request):
        """
        Log the user out.
        ---
        Permissions: the user must be authenticated
        ---
        No parameters
        ---
        Status 200 on success
        """
        logout(request)
        return Response(status=200)
def magic_number_recursive(items, start, end):
    """Binary search for a "magic" index i with items[i] == i inside
    items[start:end+1]; `items` must be sorted ascending (duplicates allowed).

    Returns the magic index, or None if the range holds none.
    """
    if start > end:
        return None
    middle = (start + end) // 2
    item = items[middle]
    if item == middle:
        return item
    # With duplicates, a magic index left of `middle` can only lie at
    # positions <= items[middle]; symmetrically on the right.
    left = magic_number_recursive(items, start, min(item, middle - 1))
    # BUG FIX: the original `return left or right` treated a found index of 0
    # as "not found" (0 is falsy), so magic_number([0, 5, 6]) returned None.
    if left is not None:
        return left
    return magic_number_recursive(items, max(item, middle + 1), end)


def magic_number(items):
    """Return an index i with items[i] == i in the sorted list `items`,
    or None when no such index exists (including for an empty list)."""
    return magic_number_recursive(items, 0, len(items) - 1)


assert magic_number([-10, -3, 2, 4, 5, 6, 16]) == 2
assert magic_number([-10, -3, -3, 7, 7, 7, 7, 7, 15]) == 7
assert magic_number([0, 5, 6]) == 0  # regression: magic index at position 0
#coding: utf-8 from __future__ import print_function, absolute_import import logging import re import json import requests import uuid import time import os import argparse import uuid import datetime import socket import apache_beam as beam from apache_beam.io import ReadFromText from apache_beam.io import WriteToText from apache_beam.io.filesystems import FileSystems from apache_beam.metrics import Metrics from apache_beam.metrics.metric import MetricsFilter from apache_beam import pvalue from apache_beam.options.pipeline_options import PipelineOptions from apache_beam.options.pipeline_options import SetupOptions TABLE_SCHEMA = ( 'IDKEY:STRING, ' 'FECHA:STRING, ' 'NOMBRE:STRING, ' 'APELLIDO:STRING, ' 'TIPOID:STRING, ' 'ID:STRING, ' 'EDAD:STRING, ' 'SEXO:STRING, ' 'PAIS:STRING, ' 'DEPARTAMENTO:STRING, ' 'CIUDAD:STRING, ' 'ZONA:STRING, ' 'DIRECCION:STRING, ' 'OPT1:STRING, ' 'OPT2:STRING, ' 'OPT3:STRING, ' 'OPT4:STRING, ' 'OPT5:STRING, ' 'OPT6:STRING, ' 'OPT7:STRING, ' 'OPT8:STRING, ' 'OPT9:STRING, ' 'OPT10:STRING, ' 'OPT11:STRING, ' 'OPT12:STRING, ' 'TEL1:STRING, ' 'TEL2:STRING, ' 'TEL3:STRING, ' 'TEL4:STRING, ' 'TEL5:STRING, ' 'TEL6:STRING, ' 'TEL7:STRING, ' 'TEL8:STRING, ' 'TEL9:STRING, ' 'TEL10:STRING, ' 'OTROSTEL:STRING, ' 'EMAIL:STRING, ' 'RECALL_INFO:STRING, ' 'AGENTE:STRING, ' 'RESULTADOREG:STRING, ' 'FECHAFINREG:STRING, ' 'LLAMADAS:STRING, ' 'IDCALL:STRING, ' 'COD01:STRING, ' 'COD02:STRING, ' 'COMENTARIOSACUMULADOS:STRING, ' 'DATE_RECALL:STRING, ' 'COUNT_RECALL:STRING, ' 'TEL_RECALL:STRING, ' 'LAST_DIAL_TEL:STRING, ' 'HISTORY_TEL:STRING ' ) # ? 
class formatearData(beam.DoFn):
    """Beam DoFn: split one ';'-delimited CSV line into a BigQuery row dict.

    `mifecha` is the load date stamped into every row's FECHA column.
    """

    def __init__(self, mifecha):
        super(formatearData, self).__init__()
        self.mifecha = mifecha

    def process(self, element):
        # print(element)
        # NOTE(review): assumes every line carries at least 51 ';'-separated
        # fields; shorter lines raise IndexError. Source columns 42 and 44
        # are skipped (COD02 <- 43, COMENTARIOSACUMULADOS <- 45) -- confirm
        # against the export layout.
        arrayCSV = element.split(';')
        tupla= {'IDKEY' : str(uuid.uuid4()),  # synthetic unique row key
                # 'fecha' : datetime.datetime.today().strftime('%Y-%m-%d'),
                'FECHA': self.mifecha,
                'NOMBRE' : arrayCSV[0],
                'APELLIDO' : arrayCSV[1],
                'TIPOID' : arrayCSV[2],
                'ID' : arrayCSV[3],
                'EDAD' : arrayCSV[4],
                'SEXO' : arrayCSV[5],
                'PAIS' : arrayCSV[6],
                'DEPARTAMENTO' : arrayCSV[7],
                'CIUDAD' : arrayCSV[8],
                'ZONA' : arrayCSV[9],
                'DIRECCION' : arrayCSV[10],
                'OPT1' : arrayCSV[11],
                'OPT2' : arrayCSV[12],
                'OPT3' : arrayCSV[13],
                'OPT4' : arrayCSV[14],
                'OPT5' : arrayCSV[15],
                'OPT6' : arrayCSV[16],
                'OPT7' : arrayCSV[17],
                'OPT8' : arrayCSV[18],
                'OPT9' : arrayCSV[19],
                'OPT10' : arrayCSV[20],
                'OPT11' : arrayCSV[21],
                'OPT12' : arrayCSV[22],
                'TEL1' : arrayCSV[23],
                'TEL2' : arrayCSV[24],
                'TEL3' : arrayCSV[25],
                'TEL4' : arrayCSV[26],
                'TEL5' : arrayCSV[27],
                'TEL6' : arrayCSV[28],
                'TEL7' : arrayCSV[29],
                'TEL8' : arrayCSV[30],
                'TEL9' : arrayCSV[31],
                'TEL10' : arrayCSV[32],
                'OTROSTEL' : arrayCSV[33],
                'EMAIL' : arrayCSV[34],
                'RECALL_INFO' : arrayCSV[35],
                'AGENTE' : arrayCSV[36],
                'RESULTADOREG' : arrayCSV[37],
                'FECHAFINREG' : arrayCSV[38],
                'LLAMADAS' : arrayCSV[39],
                'IDCALL' : arrayCSV[40],
                'COD01' : arrayCSV[41],
                'COD02' : arrayCSV[43],
                'COMENTARIOSACUMULADOS' : arrayCSV[45],
                'DATE_RECALL' : arrayCSV[46],
                'COUNT_RECALL' : arrayCSV[47],
                'TEL_RECALL' : arrayCSV[48],
                'LAST_DIAL_TEL' : arrayCSV[49],
                'HISTORY_TEL' : arrayCSV[50]
        }
        return [tupla]


def run(archivo, mifecha):
    """Build and run the Beam pipeline: read `archivo` (a ';'-delimited CSV
    in GCS, header skipped), format each line with formatearData and append
    the rows into BigQuery table bancolombia_castigada.predictivo.

    Returns a fixed status string on completion.
    """
    gcs_path = "gs://ct-bancolombia_castigada"  # root of the GCS bucket
    gcs_project = "contento-bi"
    # Run locally unless we are on the production host "contentobi".
    mi_runer = ("DirectRunner", "DataflowRunner")[socket.gethostname()=="contentobi"]
    pipeline = beam.Pipeline(runner=mi_runer, argv=[
        "--project", gcs_project,
        "--staging_location", ("%s/dataflow_files/staging_location" % gcs_path),
        "--temp_location", ("%s/dataflow_files/temp" % gcs_path),
        "--output", ("%s/dataflow_files/output" % gcs_path),
        "--setup_file", "./setup.py",
        "--max_num_workers", "5",
        "--subnetwork", "https://www.googleapis.com/compute/v1/projects/contento-bi/regions/us-central1/subnetworks/contento-subnet1"
        # "--num_workers", "30",
        # "--autoscaling_algorithm", "NONE"
        ])

    # lines = pipeline | 'Lectura de Archivo' >> ReadFromText("gs://ct-bancolombia/info-segumiento/BANCOLOMBIA_INF_SEG_20181206 1100.csv", skip_header_lines=1)
    #lines = pipeline | 'Lectura de Archivo' >> ReadFromText("gs://ct-bancolombia/info-segumiento/BANCOLOMBIA_INF_SEG_20181129 0800.csv", skip_header_lines=1)
    lines = pipeline | 'Lectura de Archivo' >> ReadFromText(archivo, skip_header_lines=1)
    transformed = (lines | 'Formatear Data' >> beam.ParDo(formatearData(mifecha)))

    # lines | 'Escribir en Archivo' >> WriteToText("archivos/Info_carga_banco_prej_small", file_name_suffix='.csv',shard_name_template='')
    # transformed | 'Escribir en Archivo' >> WriteToText("archivos/Info_carga_banco_seg", file_name_suffix='.csv',shard_name_template='')
    #transformed | 'Escribir en Archivo' >> WriteToText("gs://ct-bancolombia/info-segumiento/info_carga_banco_seg",file_name_suffix='.csv',shard_name_template='')
    transformed | 'Escritura a BigQuery Bancolombia' >> beam.io.WriteToBigQuery(
        gcs_project + ":bancolombia_castigada.predictivo",
        schema=TABLE_SCHEMA,
        create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED,
        write_disposition=beam.io.BigQueryDisposition.WRITE_APPEND
        )

    jobObject = pipeline.run()
    jobObject.wait_until_finish()

    return ("Corrio Full HD")
# mathematical_operation


def mathematical_operation(a, b):
    """Print, one per line: a+b, a-b, a*b, a//b and a/b.

    Raises ZeroDivisionError when b == 0.
    """
    print(a+b)
    print(a-b)
    print(a*b)
    print(a//b)  # integer division
    print(a/b)   # float division


if __name__ == "__main__":
    # BUG FIX: the prompts and input() calls ran at module import time,
    # so importing this module blocked on stdin; guard them instead.
    print('Enter two numbers')
    a = int(input())
    b = int(input())
    mathematical_operation(a, b)
import sys, os, shutil, json
import numpy as np
import dask.array as da

from repartition_experiments.file_formats.hdf5 import HDF5_manager
from repartition_experiments.algorithms.utils import get_blocks_shape


def load_json(filepath):
    """Load and return the JSON document stored at `filepath`."""
    with open(filepath) as f:
        return json.load(f)


def create_input_chunks(cs, partition, data_dir, file_format):
    """Write one random float16 HDF5 chunk per block of `partition` into `data_dir`.

    cs: chunk shape
    partition: number of chunks along each of the 3 axes
    data_dir: directory to store the files (recreated empty)
    file_format: only "HDF5" is supported
    """
    if file_format == "HDF5":
        file_manager = HDF5_manager()
    else:
        print("File format not supported yet. Aborting...")
        sys.exit(1)

    print(f"Creating input chunks at {data_dir}")
    create_empty_dir(data_dir)
    _slices = ((0,cs[0]), (0,cs[1]), (0,cs[2]))
    for i in range(partition[0]):
        for j in range(partition[1]):
            for k in range(partition[2]):
                print(f"Creating random array... shape: {cs}")
                arr = da.random.uniform(size=cs)
                print(f"Done, converting to float16...")
                arr = arr.astype(np.float16)
                out_filename = f'{i}_{j}_{k}.hdf5'
                print(f"Building {out_filename} with shape {cs}")
                outfilepath = os.path.join(data_dir, out_filename)
                print(f"Storing...")
                da.to_hdf5(outfilepath, '/data', arr, chunks=None, compression=None)
                # data = np.random.uniform(size=cs)
                # file_manager.write_data(i, j, k, data_dir, data, _slices, cs)


def create_input_chunks_distributed(cs, partition, data_dir, file_format):
    """Like create_input_chunks, but spread the chunk files round-robin over
    /disk0..5/gtimothee and record the chunk->path map in repartition_dict.json.

    for HDF5 only for now.
    NOTE(review): `data_dir` is accepted but unused here -- the hard-coded
    /diskN/gtimothee directories are used instead.
    """
    if not file_format == "HDF5":
        print("File format not supported yet. Aborting...")
        sys.exit(1)

    # Clear any previous run's output from the six disks.
    for i in range(6):
        for filename in os.listdir('/disk' + str(i) + '/gtimothee'):
            if filename.endswith(".json") or filename.endswith(".hdf5"):
                os.remove(os.path.join('/disk' + str(i) + '/gtimothee', filename))

    print(f"Creating input chunks...")
    disk_index = 0
    repartition_dict = dict()
    for i in range(partition[0]):
        for j in range(partition[1]):
            for k in range(partition[2]):
                print(f"Creating random array... shape: {cs}")
                arr = da.random.uniform(size=cs)
                print(f"Done, converting to float16...")
                arr = arr.astype(np.float16)
                out_filename = f'{i}_{j}_{k}.hdf5'
                print(f"Building {out_filename} with shape {cs}")
                data_dirpath = os.path.join('/disk' + str(disk_index), 'gtimothee')
                outfilepath = os.path.join(data_dirpath, out_filename)
                print(f"Storing on {data_dirpath}...")
                da.to_hdf5(outfilepath, '/data', arr, chunks=None, compression=None)
                repartition_dict[str((i,j,k))] = outfilepath
                # Round-robin over the 6 disks.
                disk_index += 1
                if disk_index == 6:
                    disk_index = 0

    print(f"Writing repartition file...")
    json_file = os.path.join('/disk0', 'gtimothee', 'repartition_dict.json')
    if os.path.isfile(json_file):
        os.remove(json_file)
    with open(json_file, 'w+') as outfile:
        json.dump(repartition_dict, outfile)


def create_empty_dir(dir_path):
    """
    dir exists => erase content
    dir does not exist => creates dir
    """
    if os.path.isdir(dir_path):
        shutil.rmtree(dir_path)
    os.mkdir(dir_path)
    if not os.path.isdir(dir_path):
        raise OSError()


def verify_results(outdir_path, original_array_path, R, O, file_format, addition, split_merge=False):
    """ Compare content of each output file against expected subarrays from original array.

    R: shape of the reconstructed array; O: output chunk shape.
    `addition` accounts for pipelines that add 1 to every value.
    WARNING: this function opens all output files + the original array.
    """
    if file_format == "HDF5":
        file_manager = HDF5_manager()
    else:
        print("File format not supported yet. Aborting...")
        sys.exit(1)

    partition = get_blocks_shape(R, O)
    orig_arr_data = file_manager.read_all(original_array_path)
    all_true = True

    if split_merge:
        # Split+merge produces a single output file equal to the input.
        result_arrpath = os.path.join(outdir_path, "0_0_0.hdf5")
        return file_manager.check_split_merge(original_array_path, result_arrpath)

    for i in range(partition[0]):
        for j in range(partition[1]):
            for k in range(partition[2]):
                outfilepath = os.path.join(outdir_path, str(i) + "_" + str(j) + "_" + str(k) + ".hdf5")
                data_stored = file_manager.read_all(outfilepath)
                ground_truth = orig_arr_data[i*O[0]:(i+1)*O[0],j*O[1]:(j+1)*O[1],k*O[2]:(k+1)*O[2]]
                if addition:
                    ground_truth = ground_truth +1
                try:
                    # float16 data: compare with a loose relative tolerance.
                    assert np.allclose(data_stored, ground_truth, rtol=1e-02)
                    # print(f"Good output file {outfilepath}")
                except:
                    print(f"Error: bad rechunking {outfilepath}")
                    print(f"Slices from ground truth {i*O[0]}:{(i+1)*O[0]}, {j*O[1]}:{(j+1)*O[1]}, {k*O[2]}:{(k+1)*O[2]}")
                    print("data_stored", data_stored)
                    print("ground_truth", ground_truth)
                    all_true = False  # do not return here to see all failures

    file_manager.close_infiles()  # close all files
    return all_true


def get_case_arguments():
    """Derive (R, O, I, B, volumestokeep) for the selected experiment case.

    NOTE(review): `case`, `run` and `get_input_aggregate` are not defined in
    this module -- calling this function as-is raises NameError; they are
    presumably injected/global in the calling script. Verify before use.
    """
    if case == 1:
        R, O, I = tuple(run["R"]), tuple(run["O"]), tuple(run["I"])
        lambd = get_input_aggregate(O, I)
        B, volumestokeep = (lambd[0], lambd[1], lambd[2]), list(range(1, 8))
    elif case == 2:
        R, O, I, B, volumestokeep = tuple(run["R"]), tuple(run["O"]), tuple(run["I"]), tuple(run["B"]), run["volumestokeep"]
    else:
        raise ValueError("Case index does not exist")
    return R, O, I, B, volumestokeep
from django.urls import path
from . import views

# URL routes for the shortener app.
urlpatterns = [
    # Landing page with the shortening form.
    path('', views.ShorteningView.as_view(), name='homepage_view'),
    # Result page displaying the generated short code.
    path('shorted/<shortcode>', views.result, name='shortening_result'),
    # Catch-all: redirect a short code to its original URL.
    path('<str:shortcode>', views.url_redirect_view, name='check')
]
import nltk
import time
from sklearn import metrics
import string
import os
from sklearn.cluster.dbscan_ import DBSCAN
from sklearn.cluster.dbscan_ import dbscan
from sklearn.feature_extraction.text import TfidfVectorizer, HashingVectorizer
from sklearn.manifold import MDS
from nltk.stem.porter import PorterStemmer
from scipy.spatial import distance
import collections
import pickle
from pprint import pprint
import numpy as np
from featureUtil import *
from sklearn.feature_extraction import DictVectorizer
from sklearn.neighbors import NearestNeighbors
from sklearn.cluster import KMeans, MiniBatchKMeans
import reviewGoldStandard

# Module-level cache of gameKey -> preprocessed review text; most functions
# below read/write this global rather than their `docs` parameter.
token_dict = {}
stemmer = PorterStemmer()


def process_text(text, stem=True):
    """Tokenize `text` (optionally Porter-stemmed) and return the token list.

    NOTE(review): str.translate(string.punctuation) does NOT strip
    punctuation (translate expects a mapping table); `word_tokenize` is
    presumably provided by `from featureUtil import *` -- verify both.
    """
    text = text.translate(string.punctuation)
    tokens = word_tokenize(text)
    if stem:
        stemmer = PorterStemmer()
        tokens = [stemmer.stem(t) for t in tokens]
    return tokens


def calculate_purity(clusters, gs):
    """Purity of `clusters` against gold standard `gs`: for each cluster take
    the count of its most frequent gold class, sum these, and divide by the
    number of gold-standard samples."""
    #for each cluster, get the count of the most frequent class, keep a running sum of this, then at the end
    #divide by the number of samples in the gold standard
    #clusters will be a dictionary with index 0: cluster1, index 1: cluster2 etc
    #goldStandard is an array with index 0: labeled category cluster1, ... etc.
    numAgreed = 0
    for clusterIndex, cluster in clusters.items():
        agreeMap = {}
        #create a map with categories and their counts. then sort the map and grab the category with the highest count
        for sample in cluster:
            sampleClass = gs.goldStandardMap[sample]
            if sampleClass in agreeMap:
                agreeMap[sampleClass] += 1
            else:
                agreeMap[sampleClass] = 1
        #now we can choose the cluster with the highest class
        # NOTE(review): `itemgetter` is not imported here by name -- presumably
        # supplied by `from featureUtil import *`.
        highestList = sorted([(cluster, agreed) for cluster,agreed in agreeMap.items()], key=itemgetter(1), reverse=True)
        highest = highestList[0][1]
        numAgreed += highest
    return numAgreed * 1.0 / float(len(gs.goldStandardList))


#fit game into a cluster, then figure out what game is in that cluster by looking up all the values
def getGameCluster(reviews, gameName, gameText):
    """Cluster `gameName` (with `gameText`) together with up to 800 reviews
    and return the cluster containing it.

    NOTE(review): listn_reviews() returns the global token_dict, so `revDict`
    IS token_dict; the clustering call below reads the global.
    """
    numReviews = 800
    revDict = listn_reviews(reviews, numReviews)
    revDict[gameName] = gameText.lower().translate(string.punctuation)
    cluster = mkm_tfidf_cluster_text(token_dict, 90, gameName=gameName)
    return cluster


def listn_reviews(reviews, n=100):
    """Fill token_dict with up to ~n reviews (ordered by system then game key)
    that have both a review body and a gamespot score; returns token_dict."""
    count = 0
    orderedSystems = sorted([gameSystemKey for gameSystemKey in reviews.keys()])
    for gameSystemKey in orderedSystems:
        systemDict = reviews[gameSystemKey]
        if count > n:
            break
        orderedKeys = sorted([gameKey for gameKey in systemDict.keys()])
        for gameKey in orderedKeys:
            gameReview = systemDict[gameKey]
            if count > n:#just to reduce sample size
                break
            if "review" in gameReview and "scores" in gameReview and "gamespot score" in gameReview["scores"]:
                lowers = gameReview["review"].lower()
                no_punctuation = lowers.translate(string.punctuation)
                token_dict[gameKey] = no_punctuation
                count += 1
    return token_dict


def cluster_games(reviews):
    """Cluster up to 50 reviews per system using mkm_cluster_text.

    NOTE(review): shadowed by the identical redefinition below; also
    `mkm_cluster_text` is not defined in this module.
    """
    for gameSystemKey, systemDict in reviews.items():
        count = 0
        for gameKey, gameReview in systemDict.items():
            count+=1
            if count > 50:#just to reduce sample size
                break
            if "review" in gameReview and "scores" in gameReview and "gamespot score" in gameReview["scores"]:
                lowers = gameReview["review"].lower()
                no_punctuation = lowers.translate(string.punctuation)
                token_dict[gameKey] = no_punctuation
                #print(getEntities(gameReview["review"]))
    cluster = mkm_cluster_text(token_dict)
    return cluster


def cluster_games(reviews):
    """Duplicate of the previous cluster_games definition (this one wins)."""
    for gameSystemKey, systemDict in reviews.items():
        count = 0
        for gameKey, gameReview in systemDict.items():
            count+=1
            if count > 50:#just to reduce sample size
                break
            if "review" in gameReview and "scores" in gameReview and "gamespot score" in gameReview["scores"]:
                lowers = gameReview["review"].lower()
                no_punctuation = lowers.translate(string.punctuation)
                token_dict[gameKey] = no_punctuation
                #print(getEntities(gameReview["review"]))
    cluster = mkm_cluster_text(token_dict)
    return cluster


def cluster_gamesGSkm_ent(reviewsList, gs):
    """MiniBatchKMeans clustering of gold-standard reviews using entity
    features; k = number of gold-standard clusters. Prints the elapsed time.

    NOTE(review): the local tokenDict is unused -- text goes into the global
    token_dict (same pattern in all cluster_gamesGS* functions below).
    """
    tokenDict = {}
    for review in reviewsList:
        #reviewsList.append((gameKey, gameSystemKey, gameReview))
        gameKey = review[0]
        lowers = review[2]["review"].lower()
        no_punctuation = lowers.translate(string.punctuation)
        token_dict[gameKey] = no_punctuation
        #print(getEntities(gameReview["review"]))
    print(len(reviewsList))
    print(len(gs.goldStandard))
    start = time.time()
    cluster = mkm_ent_cluster_text(token_dict, len(gs.goldStandard))
    print("%f" % (time.time() - start))
    return cluster


def cluster_gamesGSkm_tfidf(reviewsList, gs):
    """MiniBatchKMeans clustering of gold-standard reviews (hashed text
    features); k = number of gold-standard clusters."""
    tokenDict = {}
    for review in reviewsList:
        #reviewsList.append((gameKey, gameSystemKey, gameReview))
        gameKey = review[0]
        lowers = review[2]["review"].lower()
        no_punctuation = lowers.translate(string.punctuation)
        token_dict[gameKey] = no_punctuation
        #print(getEntities(gameReview["review"]))
    start = time.time()
    cluster = mkm_tfidf_cluster_text(token_dict, len(gs.goldStandard))
    print("%f" % (time.time() - start))
    return cluster


def cluster_gamesGSkm_wc(reviewsList, gs):
    """MiniBatchKMeans clustering of gold-standard reviews (word-count
    variant); k = number of gold-standard clusters."""
    tokenDict = {}
    for review in reviewsList:
        #reviewsList.append((gameKey, gameSystemKey, gameReview))
        gameKey = review[0]
        lowers = review[2]["review"].lower()
        no_punctuation = lowers.translate(string.punctuation)
        token_dict[gameKey] = no_punctuation
        #print(getEntities(gameReview["review"]))
    start = time.time()
    cluster = mkm_wc_cluster_text(token_dict, len(gs.goldStandard))
    print("%f" % (time.time() - start))
    return cluster


def cluster_gamesGSdb_tfidf(reviewsList, gs):
    """DBSCAN clustering of gold-standard reviews with TF-IDF features."""
    tokenDict = {}
    for review in reviewsList:
        #reviewsList.append((gameKey, gameSystemKey, gameReview))
        gameKey = review[0]
        lowers = review[2]["review"].lower()
        no_punctuation = lowers.translate(string.punctuation)
        token_dict[gameKey] = no_punctuation
        #print(getEntities(gameReview["review"]))
    start = time.time()
    cluster = cluster_textDB_tfidf(token_dict)
    print("%f" % (time.time() - start))
    return cluster


def cluster_gamesGSdb_ent(reviewsList, gs):
    """DBSCAN clustering of gold-standard reviews with entity features."""
    tokenDict = {}
    for review in reviewsList:
        #reviewsList.append((gameKey, gameSystemKey, gameReview))
        gameKey = review[0]
        lowers = review[2]["review"].lower()
        no_punctuation = lowers.translate(string.punctuation)
        token_dict[gameKey] = no_punctuation
        #print(getEntities(gameReview["review"]))
    start = time.time()
    cluster = cluster_textDB_ent(token_dict)
    print("%f" % (time.time() - start))
    return cluster


def cluster_gamesGSdb_wc(reviewsList, gs):
    """DBSCAN clustering of gold-standard reviews with hashed text features."""
    tokenDict = {}
    for review in reviewsList:
        #reviewsList.append((gameKey, gameSystemKey, gameReview))
        gameKey = review[0]
        lowers = review[2]["review"].lower()
        no_punctuation = lowers.translate(string.punctuation)
        token_dict[gameKey] = no_punctuation
        #print(getEntities(gameReview["review"]))
    start = time.time()
    cluster = cluster_textDB_wc(token_dict)
    print("%f" % (time.time() - start))
    return cluster


def mkm_tfidf_cluster_text(docs, numClusters, gameName=None):
    """MiniBatchKMeans over HashingVectorizer features of the global
    token_dict (the `docs` parameter is ignored).

    Returns {cluster_label: [game keys]}, or just the cluster containing
    `gameName` when it is given and found.
    """
    tfidf = HashingVectorizer(tokenizer=tokenize, stop_words='english')
    sortedValues = [token_dict[key] for key in sorted(token_dict.keys())]
    sortedLabels = [key for key in sorted(token_dict.keys())]
    model = tfidf.fit_transform(sortedValues)
    km = MiniBatchKMeans(n_clusters=numClusters, init='k-means++', n_init=1, init_size=1000, batch_size=1000).fit(model)
    tfidf_cluster = collections.defaultdict(list)
    inpCluster = -1
    for idx, label in enumerate(km.labels_):
        tfidf_cluster[label].append(sortedLabels[idx])
        if gameName != None and sortedLabels[idx] == gameName:
            inpCluster = label
    print(tfidf_cluster)
    if gameName != None and inpCluster != -1:
        return tfidf_cluster[inpCluster]
    else:
        return tfidf_cluster


def mkm_wc_cluster_text(docs, numClusters, gameName=None):
    """Word-count variant of mkm_tfidf_cluster_text.

    NOTE(review): currently byte-for-byte identical to
    mkm_tfidf_cluster_text (same HashingVectorizer) -- presumably intended
    to use CountVectorizer; verify before relying on the distinction.
    """
    tfidf = HashingVectorizer(tokenizer=tokenize, stop_words='english')
    sortedValues = [token_dict[key] for key in sorted(token_dict.keys())]
    sortedLabels = [key for key in sorted(token_dict.keys())]
    model = tfidf.fit_transform(sortedValues)
    km = MiniBatchKMeans(n_clusters=numClusters, init='k-means++', n_init=1, init_size=1000, batch_size=1000).fit(model)
    tfidf_cluster = collections.defaultdict(list)
    inpCluster = -1
    for idx, label in enumerate(km.labels_):
        tfidf_cluster[label].append(sortedLabels[idx])
        if gameName != None and sortedLabels[idx] == gameName:
            inpCluster = label
    print(tfidf_cluster)
    if gameName != None and inpCluster != -1:
        return tfidf_cluster[inpCluster]
    else:
        return tfidf_cluster


def mkm_ent_cluster_text(docs, numClusters, gameName=None):
    """MiniBatchKMeans over DictVectorizer-encoded keyword/entity features of
    `docs` (this one DOES use its parameter). `gameName` is accepted but
    unused. Returns {cluster_label: [doc names]}."""
    vec = DictVectorizer()
    #tfidf = HashingVectorizer(tokenizer=tokenize, stop_words='english')
    docFeaturesLabeled = [(docName, getKwEntityFeatures(doc)) for docName, doc in docs.items()]
    docFeatures = [item[1] for item in docFeaturesLabeled]
    labels = [item[0] for item in docFeaturesLabeled]
    model = vec.fit_transform(docFeatures)
    km = MiniBatchKMeans(n_clusters=numClusters, init='k-means++', n_init=1, init_size=1000, batch_size=1000).fit(model)
    tfidf_cluster = collections.defaultdict(list)
    for idx, label in enumerate(km.labels_):
        tfidf_cluster[label].append(labels[idx])
    return tfidf_cluster


def cluster_textDB_tfidf(docs):
    """DBSCAN (cosine distance) over TF-IDF features of the global token_dict
    (the `docs` parameter is ignored). Returns {cluster_label: [game keys]};
    label -1 collects noise points."""
    tfidf = TfidfVectorizer(tokenizer=tokenize, stop_words='english')
    sortedValues = [token_dict[key] for key in sorted(token_dict.keys())]
    sortedLabels = [key for key in sorted(token_dict.keys())]
    tfidf_model = tfidf.fit_transform(sortedValues).todense()
    #eps = .37 #radius
    eps = .37
    min_samples = 2 #number of samples in a cluster
    metric = distance.cosine
    dbscan_model = DBSCAN(eps=eps, min_samples=min_samples, metric = metric).fit(tfidf_model)
    tfidf_cluster = collections.defaultdict(list)
    for idx, label in enumerate(dbscan_model.labels_):
        tfidf_cluster[label].append(sortedLabels[idx])
    print(tfidf_cluster)
    #plot(tfidf_model, dbscan_model, sortedLabels)
    return tfidf_cluster


def cluster_textDB_wc(docs):
    """DBSCAN (cosine distance) over HashingVectorizer features of the global
    token_dict (the `docs` parameter is ignored)."""
    tfidf = HashingVectorizer(tokenizer=tokenize, stop_words='english')
    sortedValues = [token_dict[key] for key in sorted(token_dict.keys())]
    sortedLabels = [key for key in sorted(token_dict.keys())]
    tfidf_model = tfidf.fit_transform(sortedValues).todense()
    eps = .37 #radius
    min_samples = 2 #number of samples in a cluster
    metric = distance.cosine
    dbscan_model = DBSCAN(eps=eps, min_samples=min_samples, metric = metric).fit(tfidf_model)
    tfidf_cluster = collections.defaultdict(list)
    for idx, label in enumerate(dbscan_model.labels_):
        tfidf_cluster[label].append(sortedLabels[idx])
    print(tfidf_cluster)
    #plot(tfidf_model, dbscan_model, sortedLabels)
    return tfidf_cluster


def cluster_textDB_ent(docs):
    """DBSCAN (cosine distance) over DictVectorizer-encoded keyword/entity
    features of `docs`."""
    vec = DictVectorizer()
    #tfidf = HashingVectorizer(tokenizer=tokenize, stop_words='english')
    docFeaturesLabeled = [(docName, getKwEntityFeatures(doc)) for docName, doc in docs.items()]
    docFeatures = [item[1] for item in docFeaturesLabeled]
    labels = [item[0] for item in docFeaturesLabeled]
    model = vec.fit_transform(docFeatures).todense()
    eps = .6 #radius
    min_samples = 2 #number of samples in a cluster
    metric = distance.cosine
    dbscan_model = DBSCAN(eps=eps, min_samples=min_samples, metric = metric).fit(model)
    tfidf_cluster = collections.defaultdict(list)
    for idx, label in enumerate(dbscan_model.labels_):
        tfidf_cluster[label].append(labels[idx])
    #plot(tfidf_model, dbscan_model, sortedLabels)
    return tfidf_cluster


def knncluster_text(docs):
    """Nearest-neighbour "clustering" over entity features of `docs`.

    NOTE(review): NearestNeighbors has no `labels_` attribute -- this
    function raises AttributeError if called; `metric` is also unused.
    """
    vec = DictVectorizer()
    #tfidf = HashingVectorizer(tokenizer=tokenize, stop_words='english')
    docFeaturesLabeled = [(docName, getKwEntityFeatures(doc)) for docName, doc in docs.items()]
    docFeatures = [item[1] for item in docFeaturesLabeled]
    labels = [item[0] for item in docFeaturesLabeled]
    model = vec.fit_transform(docFeatures)
    metric = distance.cosine
    knn_model = NearestNeighbors(n_neighbors=2, algorithm='ball_tree').fit(model)
    tfidf_cluster = collections.defaultdict(list)
    for idx, label in enumerate(knn_model.labels_):
        tfidf_cluster[label].append(labels[idx])
    #plot(tfidf_model, dbscan_model, sortedLabels)
    return tfidf_cluster


def cluster_textDB(token_dict):
    """ Transform texts to Tf-Idf coordinates and cluster texts using DBSCAN
    (eps=.1 variant; here the parameter shadows — and is — the global dict).
    """
    #entities = [getEntities(doc) for docName, doc in
    tfidf = TfidfVectorizer(tokenizer=tokenize, stop_words='english')
    sortedValues = [token_dict[key] for key in sorted(token_dict.keys())]
    sortedLabels = [key for key in sorted(token_dict.keys())]
    tfidf_model = tfidf.fit_transform(sortedValues).todense()
    #eps = .37 #radius
    eps = .1
    min_samples = 2 #number of samples in a cluster
    metric = distance.cosine
    dbscan_model = DBSCAN(eps=eps, min_samples=min_samples, metric = metric).fit(tfidf_model)
    tfidf_cluster = collections.defaultdict(list)
    for idx, label in enumerate(dbscan_model.labels_):
        tfidf_cluster[label].append(sortedLabels[idx])
    print(tfidf_cluster)
    #plot(tfidf_model, dbscan_model, sortedLabels)
    return tfidf_cluster


def stem_tokens(tokens, stemmer):
    """Return `tokens` with `stemmer` applied to each token."""
    stemmed = []
    for item in tokens:
        stemmed.append(stemmer.stem(item))
    return stemmed


def tokenize(text):
    """NLTK word-tokenize `text` and Porter-stem every token (used as the
    `tokenizer` callback of the vectorizers above)."""
    tokens = nltk.word_tokenize(text)
    stems = stem_tokens(tokens, stemmer)
    return stems


def plot(X, db, labels_true):
    # Print the standard sklearn clustering metrics, then scatter-plot the
    # DBSCAN result. (Definition continues beyond this chunk.)
    core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
    core_samples_mask[db.core_sample_indices_] = True
    labels = db.labels_

    # Number of clusters in labels, ignoring noise if present.
    n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)

    print('Estimated number of clusters: %d' % n_clusters_)
    print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
    print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
    print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
    print("Adjusted Rand Index: %0.3f"
          % metrics.adjusted_rand_score(labels_true, labels))
    print("Adjusted Mutual Information: %0.3f"
          % metrics.adjusted_mutual_info_score(labels_true, labels))
    print("Silhouette Coefficient: %0.3f"
          % metrics.silhouette_score(X, labels))

    ##############################################################################
    # Plot result
    import matplotlib.pyplot as plt

    # Black removed and is used for noise instead.
    unique_labels = set(labels)
    colors = plt.cm.Spectral(np.linspace(0, 1, len(unique_labels)))
    for k, col in zip(unique_labels, colors):
        if k == -1:
            # Black used for noise.
col = 'k' class_member_mask = (labels == k) xy = X[class_member_mask & core_samples_mask] plt.plot(xy[:, 0], xy[:, .2], 'o', markerfacecolor=col, markeredgecolor='k', markersize=14) xy = X[class_member_mask & ~core_samples_mask] plt.plot(xy[:, 0], xy[:, .2], 'o', markerfacecolor=col, markeredgecolor='k', markersize=6) plt.title('Estimated number of clusters: %d' % n_clusters_) plt.show() #for k, v in reviews.items(): # for subKey, gameReview in v.items(): # # if "review" in gameReview: # lowers = gameReview["review"].lower() # no_punctuation = lowers.translate(string.punctuation) # token_dict[k] = no_punctuation """for subdir, dirs, files in os.walk(path): for file in files: file_path = subdir + os.path.sep + file shakes = open(file_path, 'r') text = shakes.read() lowers = text.lower() no_punctuation = lowers.translate(None, string.punctuation) token_dict[file] = no_punctuation""" #this can take some time #print(cluster_games(reviews))
from __future__ import annotations

import textwrap
from typing import (
    Sequence,
)

from ai.backend.client.output.types import (
    FieldSpec,
    PaginatedResult,
)
from ai.backend.client.output.fields import agent_fields
from ai.backend.client.request import Request
from ai.backend.client.session import api_session
from ai.backend.client.pagination import generate_paginated_results
from .base import api_function, BaseFunction

__all__ = (
    'Agent',
    'AgentWatcher',
)

# Default GraphQL field sets used when the caller does not pass `fields`.
_default_list_fields = (
    agent_fields['id'],
    agent_fields['status'],
    agent_fields['scaling_group'],
    agent_fields['available_slots'],
    agent_fields['occupied_slots'],
)

_default_detail_fields = (
    agent_fields['id'],
    agent_fields['status'],
    agent_fields['scaling_group'],
    agent_fields['addr'],
    agent_fields['region'],
    agent_fields['first_contact'],
    agent_fields['cpu_cur_pct'],
    agent_fields['mem_cur_bytes'],
    agent_fields['available_slots'],
    agent_fields['occupied_slots'],
)


class Agent(BaseFunction):
    """
    Provides a shortcut of :func:`Admin.query()
    <ai.backend.client.admin.Admin.query>`
    that fetches various agent information.

    .. note::

      All methods in this function class require your API access key to
      have the *admin* privilege.
    """

    @api_function
    @classmethod
    async def paginated_list(
        cls,
        status: str = 'ALIVE',
        scaling_group: str | None = None,
        *,
        fields: Sequence[FieldSpec] = _default_list_fields,
        page_offset: int = 0,
        page_size: int = 20,
        filter: str | None = None,
        order: str | None = None,
    ) -> PaginatedResult:
        """
        Lists the agents matching the given status/scaling-group filters,
        one page at a time. You need an admin privilege for this operation.
        """
        return await generate_paginated_results(
            'agent_list',
            {
                'status': (status, 'String'),
                'scaling_group': (scaling_group, 'String'),
                'filter': (filter, 'String'),
                'order': (order, 'String'),
            },
            fields,
            page_size=page_size,
            page_offset=page_offset,
        )

    @api_function
    @classmethod
    async def detail(
        cls,
        agent_id: str,
        fields: Sequence[FieldSpec] = _default_detail_fields,
    ) -> Sequence[dict]:
        """
        Fetch the detail record of a single agent via the admin GraphQL API.
        """
        query = textwrap.dedent("""\
            query($agent_id: String!) {
                agent(agent_id: $agent_id) {$fields}
            }
        """)
        # `$fields` is a plain textual placeholder (not a GraphQL variable);
        # it is substituted with the space-joined field references here.
        query = query.replace('$fields', ' '.join(f.field_ref for f in fields))
        variables = {'agent_id': agent_id}
        data = await api_session.get().Admin._query(query, variables)
        return data['agent']


class AgentWatcher(BaseFunction):
    """
    Provides a shortcut of :func:`Admin.query()
    <ai.backend.client.admin.Admin.query>`
    that manipulate agent status.

    .. note::

      All methods in this function class require you to
      have the *superadmin* privilege.
    """

    @api_function
    @classmethod
    async def get_status(cls, agent_id: str) -> dict:
        """
        Get agent and watcher status.
        """
        rqst = Request('GET', '/resource/watcher')
        rqst.set_json({'agent_id': agent_id})
        async with rqst.fetch() as resp:
            data = await resp.json()
            # The watcher wraps its payload in a 'message' envelope when present.
            if 'message' in data:
                return data['message']
            else:
                return data

    @api_function
    @classmethod
    async def agent_start(cls, agent_id: str) -> dict:
        """
        Start agent.
        """
        rqst = Request('POST', '/resource/watcher/agent/start')
        rqst.set_json({'agent_id': agent_id})
        async with rqst.fetch() as resp:
            data = await resp.json()
            if 'message' in data:
                return data['message']
            else:
                return data

    @api_function
    @classmethod
    async def agent_stop(cls, agent_id: str) -> dict:
        """
        Stop agent.
        """
        rqst = Request('POST', '/resource/watcher/agent/stop')
        rqst.set_json({'agent_id': agent_id})
        async with rqst.fetch() as resp:
            data = await resp.json()
            if 'message' in data:
                return data['message']
            else:
                return data

    @api_function
    @classmethod
    async def agent_restart(cls, agent_id: str) -> dict:
        """
        Restart agent.
        """
        rqst = Request('POST', '/resource/watcher/agent/restart')
        rqst.set_json({'agent_id': agent_id})
        async with rqst.fetch() as resp:
            data = await resp.json()
            if 'message' in data:
                return data['message']
            else:
                return data
def järjestä_luvut_aakkosjärjestykseen(lukuja):
    """Return the numbers of *lukuja* sorted alphabetically by their decimal
    string form (so e.g. 100000 sorts before 11).

    The input list is left unmodified; a new sorted list is returned.
    """
    # str() is the lexicographic sort key. (Fixes: dropped the dead
    # `espanja_suomi = {}` dict and the lambda that shadowed its own
    # parameter name in the original.)
    return sorted(lukuja, key=str)


def main():
    # Demo with mixed magnitudes so alphabetical (not numeric) order shows.
    lukuja = [10, 1, 101, 2, 111, 212, 100000, 22, 222, 112, 10101, 1100, 11, 0]
    print(järjestä_luvut_aakkosjärjestykseen(lukuja))


main()
import pandas as pd
import numpy as np
from scipy.stats import ttest_ind

pd.options.mode.chained_assignment = None # turn off warning messages

# Dictionary to map state names to two letter acronyms
states = {'OH': 'Ohio', 'KY': 'Kentucky', 'AS': 'American Samoa', 'NV': 'Nevada', 'WY': 'Wyoming', 'NA': 'National', 'AL': 'Alabama', 'MD': 'Maryland', 'AK': 'Alaska', 'UT': 'Utah', 'OR': 'Oregon', 'MT': 'Montana', 'IL': 'Illinois', 'TN': 'Tennessee', 'DC': 'District of Columbia', 'VT': 'Vermont', 'ID': 'Idaho', 'AR': 'Arkansas', 'ME': 'Maine', 'WA': 'Washington', 'HI': 'Hawaii', 'WI': 'Wisconsin', 'MI': 'Michigan', 'IN': 'Indiana', 'NJ': 'New Jersey', 'AZ': 'Arizona', 'GU': 'Guam', 'MS': 'Mississippi', 'PR': 'Puerto Rico', 'NC': 'North Carolina', 'TX': 'Texas', 'SD': 'South Dakota', 'MP': 'Northern Mariana Islands', 'IA': 'Iowa', 'MO': 'Missouri', 'CT': 'Connecticut', 'WV': 'West Virginia', 'SC': 'South Carolina', 'LA': 'Louisiana', 'KS': 'Kansas', 'NY': 'New York', 'NE': 'Nebraska', 'OK': 'Oklahoma', 'FL': 'Florida', 'CA': 'California', 'CO': 'Colorado', 'PA': 'Pennsylvania', 'DE': 'Delaware', 'NM': 'New Mexico', 'RI': 'Rhode Island', 'MN': 'Minnesota', 'VI': 'Virgin Islands', 'NH': 'New Hampshire', 'MA': 'Massachusetts', 'GA': 'Georgia', 'ND': 'North Dakota', 'VA': 'Virginia'}

# Mid-file import kept in place to avoid reordering the script.
import re


def get_list_of_university_towns():
    '''Returns a DataFrame of towns and the states they are in from the
    university_towns.txt list. The format of the DataFrame should be:
    DataFrame( [ ["Michigan", "Ann Arbor"], ["Michigan", "Yipsilanti"] ],
    columns=["State", "RegionName"]  )

    The following cleaning needs to be done:
    1. For "State", removing characters from "[" to the end.
    2. For "RegionName", when applicable, removing every character from " (" to the end.
    3. Depending on how you read the data, you may need to remove newline character '\n'. '''
    with open('university_towns.txt', 'r') as file:
        raw_data = file.read()
    raw_data = raw_data.split('\n')
    raw_data.pop()  # drop the trailing empty entry after the final newline
    uni_towns = pd.DataFrame(columns = ['State', 'RegionName'])
    for i in raw_data:
        # State header lines are marked with "[edit]"; remember the state and skip.
        if '[edit]' in i:
            current_state_name = i.replace('[edit]', '')
            continue
        # NOTE(review): DataFrame.append is removed in pandas 2.0 — use pd.concat.
        uni_towns = uni_towns.append({'State': current_state_name, 'RegionName': i}, ignore_index = True)
    # Strip " (...)" annotations from town names.
    uni_towns['RegionName'] = uni_towns.apply(lambda x: re.sub(' \(.*', '', x['RegionName']), axis=1)
    return uni_towns
#print(get_list_of_university_towns())


def get_recession_start():
    '''Returns the year and quarter of the recession start time as a
    string value in a format such as 2005q3'''
    # NOTE(review): read_excel's `parse_cols` was removed in pandas 0.24+ (use `usecols`).
    gdp = pd.read_excel('gdplev.xls', skiprows = 219, names = ['quarters', 'billions'], parse_cols = [4,6])
    gdp['billions'].astype(np.float64, copy = True)
    gdp['differences'] = gdp['billions'].diff()
    recession_start = None
    # A recession starts with two consecutive quarters of GDP decline.
    for i in np.arange(1, len(gdp) - 1):
        if (gdp.loc[i, 'differences'] < 0) and (gdp.loc[i + 1, 'differences'] < 0):
            recession_start = gdp.loc[i, 'quarters']
            break
    return recession_start
#print(get_recession_start())


def get_recession_end():
    '''Returns the year and quarter of the recession end time as a
    string value in a format such as 2005q3'''
    gdp = pd.read_excel('gdplev.xls', skiprows = 219, names = ['quarters', 'billions'], parse_cols = [4,6])
    gdp['billions'].astype(np.float64, copy = True)
    gdp['differences'] = gdp['billions'].diff()
    recession_start = None
    for i in np.arange(1, len(gdp) - 1):
        if (gdp.loc[i, 'differences'] < 0) and (gdp.loc[i + 1, 'differences'] < 0):
            recession_start = i  # positional index here (not the quarter label)
            break
    recession_end = None
    # The recession ends after two consecutive quarters of GDP growth.
    for i in np.arange(recession_start, len(gdp) - 1):
        if (gdp.loc[i, 'differences'] > 0) and (gdp.loc[i + 1, 'differences'] > 0):
            recession_end = gdp.loc[i + 1, 'quarters']
            break
    return recession_end
#print(get_recession_end())


def get_recession_bottom():
    '''Returns the year and quarter of the recession bottom time as a
    string value in a format such as 2005q3'''
    gdp = pd.read_excel('gdplev.xls', skiprows = 219, names = ['quarters', 'billions'], parse_cols = [4,6])
    gdp['billions'].astype(np.float64, copy = True)
    gdp['differences'] = gdp['billions'].diff()
    recession_start = None
    for i in np.arange(1, len(gdp) - 1):
        if (gdp.loc[i, 'differences'] < 0) and (gdp.loc[i + 1, 'differences'] < 0):
            recession_start = i
            break
    recession_end = None
    for i in np.arange(recession_start, len(gdp) - 1):
        if (gdp.loc[i, 'differences'] > 0) and (gdp.loc[i + 1, 'differences'] > 0):
            recession_end = i
            break
    # NOTE(review): relies on argmin returning a position that equals the row
    # label (true here because the index is the default RangeIndex) — verify
    # under newer pandas where Series.argmin semantics changed.
    recession_bottom = np.argmin(gdp.loc[recession_start:recession_end, 'billions'])
    return gdp.loc[recession_bottom, 'quarters']
#print(get_recession_bottom())


def convert_housing_data_to_quarters():
    '''Converts the housing data to quarters and returns it as mean
    values in a dataframe. This dataframe should be a dataframe with
    columns for 2000q1 through 2016q3, and should have a multi-index
    in the shape of ["State","RegionName"].

    Note: Quarters are defined in the assignment description, they are
    not arbitrary three month periods.

    The resulting dataframe should have 67 columns, and 10,730 rows.
    '''
    housing = pd.read_csv('City_Zhvi_AllHomes.csv')#read csv
    # Lexicographic comparison of column names drops the pre-2000 date columns.
    housing.drop(housing.columns[housing.columns < '2000'], axis = 1, inplace = True)#drop early dates
    housing.drop(['RegionID', 'Metro', 'CountyName', 'SizeRank'], axis = 1, inplace = True)#drop some columns
    housing['State'] = housing.apply(lambda x: x['State'].replace(x['State'], states[x['State']]), axis=1)#replace acronyms
    housing.set_index(['State', 'RegionName'], inplace = True)#set new index
    housing.columns = pd.PeriodIndex(housing.columns, freq = 'q')#convert strings of dates to quarterly time periods
    housing = housing.groupby(housing.columns, axis = 1).mean()#group same quarters together and calculate mean for group
    return housing
#print(convert_housing_data_to_quarters())


def run_ttest():
    '''First creates new data showing the decline or growth of housing prices
    between the recession start and the recession bottom. Then runs a ttest
    comparing the university town values to the non-university towns values,
    return whether the alternative hypothesis (that the two groups are the
    same) is true or not as well as the p-value of the confidence.

    Return the tuple (different, p, better) where different=True if the t-test is
    True at a p<0.01 (we reject the null hypothesis), or different=False if
    otherwise (we cannot reject the null hypothesis). The variable p should
    be equal to the exact p value returned from scipy.stats.ttest_ind(). The
    value for better should be either "university town" or "non-university town"
    depending on which has a lower mean price ratio (which is equivilent to a
    reduced market loss).'''
    housing_prices = convert_housing_data_to_quarters()
    uni_towns = get_list_of_university_towns()
    recession_start = get_recession_start()
    recession_bottom = get_recession_bottom()
    # Price growth (usually negative) over the recession, per (State, RegionName).
    price_growth = pd.DataFrame(housing_prices[recession_bottom] - housing_prices[recession_start]).copy()
    price_growth.columns = ['growth']
    # Split rows into university towns (inner merge) and the remainder (drop).
    uni_towns_price_growth = price_growth.merge(uni_towns.set_index(['State', 'RegionName']), how = 'inner', left_index = True, right_index = True)
    uni_towns_price_growth.dropna(inplace = True)
    non_uni_towns_price_growth = price_growth.drop(uni_towns.set_index(['State', 'RegionName']).index)
    non_uni_towns_price_growth.dropna(inplace = True)
    statistic, pvalue = ttest_ind(uni_towns_price_growth['growth'], non_uni_towns_price_growth['growth'])
    # Positive statistic => university towns declined less (grew more).
    if pvalue < 0.01:
        if statistic > 0:
            return (True, pvalue, 'university town')
        else:
            return (True, pvalue, 'non-university town')
    else:
        if statistic > 0:
            return (False, pvalue, 'university town')
        else:
            return (False, pvalue, 'non-university town')
#print(run_ttest())
import numpy as np
import pickle
import pandas as pd


def get_format(curves_or, labels_, train_whole_id, ids_train, y_pred_train):
    """Re-shape raw multiband light curves into per-band plotting data.

    For every curve whose id appears in `ids_train`, produce a list of six
    per-band entries [times, mags, mag+err, mag-err] (passbands 0..5) and
    look up the host-galaxy photo-z (and its error) from
    ./training_set_metadata.csv.

    Returns (curves, ids, predicted_labels, true_labels, photoz) as parallel
    lists. `labels_` holds raw PLAsTiCC class codes; they are remapped to
    contiguous indices 0..13.
    """
    save_curves = []
    save_ids = []
    save_pred_labels = []
    save_true_labels = []
    save_photoz = []
    df = pd.read_csv('./training_set_metadata.csv')
    # PLAsTiCC class code -> contiguous class index.
    names = {6: 0, 15: 1, 16: 2, 42: 3, 52: 4, 53: 5, 62: 6,
             64: 7, 65: 8, 67: 9, 88: 10, 90: 11, 92: 12, 95: 13}
    labels = [names[l] for l in labels_]
    for curve_original, true_label, curve_id in zip(curves_or, labels, train_whole_id):
        for split_id, pred_label in zip(ids_train, y_pred_train):
            if split_id != curve_id:
                continue
            # Hoisted: filter the metadata once per matched curve.
            meta = df[df['object_id'] == curve_id]
            photoz = meta['hostgal_photoz'].values[0]
            err_photoz = meta['hostgal_photoz_err'].values[0]
            # One (times, mags, err_hi, err_lo) quadruple per passband 0..5
            # (replaces the original 24-variable if-chain; same output).
            bands = {b: ([], [], [], []) for b in range(6)}
            for x in curve_original:
                # x = (time, magnitude, magnitude error, passband id);
                # observations in unknown passbands are skipped, as before.
                if x[3] in bands:
                    times, mags, err_hi, err_lo = bands[x[3]]
                    times.append(x[0])
                    mags.append(x[1])
                    err_hi.append(x[1] + x[2])
                    err_lo.append(x[1] - x[2])
            save_curves.append([list(bands[b]) for b in range(6)])
            save_ids.append(split_id)
            save_pred_labels.append(pred_label)
            save_true_labels.append(true_label)
            save_photoz.append([photoz, err_photoz])
    return save_curves, save_ids, save_pred_labels, save_true_labels, save_photoz


def get_curves(dir_test='./result_lstm/split_1_test.pkl',
               dir_train='./result_lstm/split_1_test.pkl',
               curves_dir='./plasticc_multiband.pkl'):
    """Load pickled LSTM split results plus raw curves and format both splits.

    Returns {'train': {...}, 'test': {...}} where each side carries the
    formatted curves, predicted/true labels, object ids and photo-z data.

    BUGFIX: the original reused one `save_photoz` variable for both
    `get_format` calls, so the 'train' dict silently carried the *test*
    photo-z list; each split now keeps its own.
    """
    # NOTE(review): `dir_train` defaults to the *test* split file — confirm
    # this default is intended.
    with open(curves_dir, 'rb') as handle:
        curves = pickle.load(handle)
    with open(dir_test, 'rb') as f:
        split_test = pickle.load(f)
    with open(dir_train, 'rb') as f:
        split_train = pickle.load(f)
    ids_test = split_test['object_ids']
    ids_train = split_train['object_ids']
    annotations_test = split_test['annotations']
    annotations_train = split_train['annotations']
    y_pred_test = np.argmax(annotations_test, axis=1)
    # The training annotations carry an extra trailing column that is not a
    # class score, hence the [:, 0:-1] slice.
    y_pred_train = np.argmax(annotations_train[:, 0:-1], axis=1)
    train_whole_id = np.array(curves['ids'], dtype=np.int32)

    (save_curve_train, save_ids_train, save_label_train,
     save_true_label_train, save_photoz_train) = get_format(
        curves['lcs'], curves['labels'], train_whole_id, ids_train, y_pred_train)

    (save_curve_test, save_ids_test, save_label_test,
     save_true_label_test, save_photoz_test) = get_format(
        curves['lcs'], curves['labels'], train_whole_id, ids_test, y_pred_test)

    print(np.unique(save_true_label_train))
    return {'train': {
                'curves': save_curve_train,
                'y_pred': save_label_train,
                'y_true': save_true_label_train,
                'ids': save_ids_train,
                'photoz': save_photoz_train,
            },
            'test': {
                'curves': save_curve_test,
                'y_pred': save_label_test,
                'y_true': save_true_label_test,
                'ids': save_ids_test,
                'photoz': save_photoz_test,
            }}


if __name__ == '__main__':
    res = get_curves(curves_dir='../../Tesis/phasedClassification-e/data/plasticc_multiband.pkl')
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from positioning_coordination_v2 import Position
from positioning_coordination_v2 import group_2_color

#Importing data
data_set_knee = pd.read_csv('merge_bottom_view.csv')
animal_key = pd.read_csv('animal_key_kinematics.csv')

#Creating variables for data adjustment
keep_columns_knee = ['Dist [cm].1', 'RH.index', 'day']
new_column_names_knee = {'Dist [cm].1': 'inter_knee_distance'}

#Creating instance and adjusting data
instance_knee = Position(data_set_knee)
instance_knee.column_adjuster(keep_columns_knee, new_column_names_knee)
instance_knee.data_frame = instance_knee.key_data_adder(animal_key, instance_knee.data_frame)
del instance_knee.data_frame['force']

#Adjusting for displacement: normalize by the smallest displacement, and
#force day-3 rows to a factor of 1 (baseline day — TODO confirm).
displacement_min = min(instance_knee.data_frame['displacement'])
instance_knee.data_frame['displacement'] /=displacement_min
instance_knee.data_frame.loc[instance_knee.data_frame['day']==3,['displacement']] = 1
instance_knee.data_frame['inter_knee_distance_adjust'] = instance_knee.data_frame['inter_knee_distance']*instance_knee.data_frame['displacement']

#Aggregating data: per-animal means, then per (day, group) means.
data_knee_aggregate = instance_knee.data_frame.groupby(['RH.index', 'day', 'group'], as_index=False).agg({'inter_knee_distance_adjust':['mean']})
data_knee_aggregate.columns = ['RH.index', 'day', 'group', 'inter_knee_distance_adjust']
data_knee_summary = data_knee_aggregate.groupby(['day', 'group'], as_index=False).agg({'inter_knee_distance_adjust':['mean']})
data_knee_summary.columns = ['day', 'group', 'inter_knee_distance_adjust']

#Plot preparations
palette_BrBG = pd.DataFrame(list(sns.color_palette("BrBG", 7)))
palette_RdBu_r = pd.DataFrame(list(sns.color_palette("RdBu_r", 7)))
palette_custom_1 = [tuple(palette_BrBG.iloc[0,:]), tuple(palette_RdBu_r.iloc[0,:]), tuple(palette_RdBu_r.iloc[6,:])]

#List of groups
study_groups = ['sci', 'sci_medium', 'sci_msc']

#Creating x-variable (for origo) and y-variable (for vertical separation)
instance_knee.data_frame['x'], data_knee_aggregate['x'], data_knee_summary['x'] = [0, 0, 0]
instance_knee.data_frame['y'], data_knee_aggregate['y'], data_knee_summary['y'] = [1, 1, 1]

#Adjusting y-values so the three groups plot on separate horizontal bands
#(sci at 1.25, sci_medium at 0.75, sci_msc stays at 1). Mutates in place.
def y_adjustor(dataset):
    dataset.loc[dataset['group'] == 'sci', 'y'] = 1.25
    dataset.loc[dataset['group'] == 'sci_medium', 'y'] = 0.75

list(map(lambda data_set: y_adjustor(data_set), [instance_knee.data_frame, data_knee_aggregate, data_knee_summary]))

#Creating an artifical crotch point above the knee midpoint
data_knee_summary['x_artificial'] = data_knee_summary['inter_knee_distance_adjust']/2
data_knee_summary['y_artificial'] = data_knee_summary['y']+2

#Optimize code!!!!
#Reshape the summary so that each group gets real + "_artificial" coordinate
#rows, then fold the artificial rows back onto the plain group names.
temp = data_knee_summary.melt(id_vars=['day', 'group', 'inter_knee_distance_adjust'])
temp['group'] = temp['group']+'_'+temp['variable'].map(lambda name: name[2:])
temp['variable'] = temp['variable'].map(lambda name: name[0])
temp = temp.pivot_table(values='value', columns='variable', index=['day', 'group', 'inter_knee_distance_adjust']).reset_index()
temp.loc[temp['group']=='sci_artificial', 'group'] = 'sci_'
temp.loc[temp['group']=='sci_medium_artificial', 'group'] = 'sci_medium_'
temp.loc[temp['group']=='sci_msc_artificial', 'group'] = 'sci_msc_'
temp.loc[temp['group']=='sci_', 'group'] = 'sci'
temp.loc[temp['group']=='sci_medium_', 'group'] = 'sci_medium'
temp.loc[temp['group']=='sci_msc_', 'group'] = 'sci_msc'
#temp2 mirrors temp but moves the origo points out to the knee distance,
#giving the second leg of the triangle.
temp2 = temp.copy()
temp2.loc[temp2['x']==0, 'x'] = temp2.loc[temp2['x']==0, 'inter_knee_distance_adjust']
#!!!!

#Plotting
def inter_knee_distance_plot(data_technical, data_biological, data_summary, study_group, plot_day):
    """Scatter the raw (technical), per-animal (biological) and summary knee
    distances for one group on one day, plus the artificial crotch point."""
    #Creating plot data
    plot_data_technical = data_technical[(data_technical['group']==study_group)&(data_technical['day']==plot_day)]
    plot_data_biological = data_biological[(data_biological['group']==study_group)&(data_biological['day']==plot_day)]
    plot_data_summary = data_summary[(data_summary['group']==study_group)&(data_summary['day']==plot_day)]

    #Creating plots
    #A. Points
    plt.scatter('inter_knee_distance_adjust', 'y', data = plot_data_technical, color=group_2_color(study_group), alpha=0.2, s=50)
    plt.scatter('inter_knee_distance_adjust', 'y', data=plot_data_biological, color=group_2_color(study_group), alpha=0.5, marker="^", s=200)
    plt.scatter('inter_knee_distance_adjust', 'y', data=plot_data_summary, color=group_2_color(study_group), alpha=0.5, s=1000, marker="p")
    plt.scatter('x', 'y', data=plot_data_summary, color=group_2_color(study_group), alpha=0.5, s=1000, marker="p")
    #B. Crotch point
    plt.scatter('x_artificial', 'y_artificial', data=plot_data_summary, color=group_2_color(study_group), alpha=0.5, s=1000, marker="p")

    #Plot adjust
    sns.despine(left=True)
    plt.xlabel('Distance (x)', size=15, fontweight='bold')
    plt.xticks(list(np.arange(0, 4.5, 0.5)))
    plt.yticks(list(np.arange(0, 3.5, 0.5)))


def line_plotter(data_set, study_group, plot_day):
    """Draw the knee-to-crotch line for one group on one day."""
    plot_data_set = data_set[(data_set['group']==study_group)&(data_set['day']==plot_day)]
    plt.plot('x', 'y', data = plot_data_set, lw=5, alpha=0.7, color=group_2_color(study_group))


def plot_caller(plotDay):
    """Render the full figure (points + both triangle legs) for one day."""
    list(map(lambda group: inter_knee_distance_plot(instance_knee.data_frame, data_knee_aggregate, data_knee_summary, group, plotDay), study_groups))
    list(map(lambda group: line_plotter(temp, group, plotDay), ['sci', 'sci_medium', 'sci_msc']))
    list(map(lambda group: line_plotter(temp2, group, plotDay), ['sci', 'sci_medium', 'sci_msc']))

plot_caller(3)
from protorpc import messages
from google.appengine.ext import ndb
from google.appengine.ext.ndb import msgprop #alpha feature dangerous

# NOTE(review): the misspellings "Specilization" and "lattitude" are part of
# the public message schema; renaming them would break clients, so they are
# documented but left as-is.


class Name(messages.Message):
    """A single plain-text name (used for disease names)."""
    name=messages.StringField(1)

class Specilization(messages.Message):
    """A medical specialization and the diseases it covers."""
    name=messages.StringField(1,required=True)
    disease_name=messages.MessageField(Name,2,repeated=True)

class Address(messages.Message):
    """A postal address with optional geo-coordinates."""
    pincode=messages.StringField(1)
    lattitude=messages.FloatField(2)
    longitude=messages.FloatField(3)
    city_name=messages.StringField(4,required=True)
    state_name=messages.StringField(5)
    country_name=messages.StringField(6,required=True)
    address_line=messages.StringField(7)

class Doctor(messages.Message):
    """A doctor with specializations, a home address and work addresses."""
    name=messages.StringField(1,required=True)
    last_name=messages.StringField(2)
    designation=messages.MessageField(Specilization,3,repeated=True)
    home_address=messages.MessageField(Address,4,required=True)
    work_addresses=messages.MessageField(Address,5,repeated=True)

#class for message presentation:
class DoctorNameMessage(messages.Message):
    """Query message: search doctors by name/location/designation, paginated."""
    limit= messages.IntegerField(1,default=10,required=False)
    offset=messages.IntegerField(2,default=0,required=False)
    name=messages.StringField(3)
    last_name=messages.StringField(4)
    city_name=messages.StringField(5)
    country_name=messages.StringField(6)
    designation_name=messages.StringField(7)

class DoctorDiseaseMessage(messages.Message):
    """Query message: search doctors by one or more disease names, paginated."""
    limit= messages.IntegerField(1,default=10,required=False)
    offset=messages.IntegerField(2,default=0,required=False)
    name=messages.StringField(3,repeated=True)

class SpecilizationListMessage(messages.Message):
    """Response wrapper: a list of specializations."""
    specilization_list=messages.MessageField(Specilization,1,repeated=True)

class DoctorListMessage(messages.Message):
    """Response wrapper: a list of doctors."""
    doctor_list=messages.MessageField(Doctor,1,repeated=True)

#classes for handling Database interaction
class DoctorStore(ndb.Model):
    """Datastore entity wrapping a Doctor message; the listed sub-fields are
    indexed so queries can filter on them."""
    doctor=msgprop.MessageProperty(Doctor,indexed_fields=['name',
                                                          'designation.name','home_address.city_name',
                                                          'work_addresses.city_name',
                                                          'home_address.country_name',
                                                          'work_addresses.country_name',
                                                          'last_name' ]) #warning this feature is in alpha

#Symptom class to fetch the specilization names from disease names
class SpecilizationStore(ndb.Model):
    """Datastore entity wrapping a Specilization message, indexed by the
    specialization name and its disease names."""
    specilization=msgprop.MessageProperty(Specilization,indexed_fields=['name',
                                                                        'disease_name.name', ]) #warning this feature is in alpha
#read file containing inputs - temperatures in Fahrenheit with open('C:\\Users\\HoratiuC\\Documents\\inputs.txt','r') as f: l = f.read().split() #how many temperatures will be converted print("Will convert {} temperatures".format(l[0])) #eliminate first element that doesn't need to be converted l=l[1:] print ("Fahrenheit:\n",l) #define epmty list for results celsius = [] #calculate Fahrenheit to Celsius conversion and round to the nearest integer for i in l: i = (int(i)-32)*5/9 if i<0: i = int(i-0.5) else: i = int(i+0.5) celsius.append(i) print ("Celsius:\n", celsius) #write the results to a different file with open('C:\\Users\\HoratiuC\\Documents\\test.txt','a') as res: for j in celsius: res.write('{} '.format(str(j)))
#sudo pip install ExifRead
import exifread
from datetime import datetime
from pyproj import Proj

# Projection to EPSG:3857 (WGS84 / Pseudo-Mercator).
# NOTE(review): the name says 4326 but the CRS is 3857; kept so existing
# callers that reference `proj_4326` keep working.
proj_4326 = Proj(init="epsg:3857", preserve_units=False)


def myEXIFdata(src_filename):
    """Read EXIF tags from an image file and return a dict with:

    - "gps": [x, y, altitude] — lon/lat converted from the EXIF
      degree/minute/second rationals and projected through `proj_4326`
      (altitude is passed through unprojected);
    - "timestamp": the "Image DateTime" tag parsed as a datetime.

    Raises KeyError if the GPS or DateTime tags are missing.
    """
    exif = {}
    # Binary mode is required by exifread; `with` guarantees the handle is
    # closed even if tag parsing raises (the original leaked it on error).
    with open(src_filename, 'rb') as f:
        # Return Exif tags
        tags = exifread.process_file(f)

    # Convert an EXIF rational triplet tag to a decimal value.
    def getValueOfTag(gpsKeys):
        coor = []
        for i in range(3):
            v = tags[gpsKeys[i]].values
            if len(v) > 1:
                # degrees + minutes/60 + seconds/3600 (seconds as a rational)
                c = v[0].num + v[1].num/60. + v[2].num*1./v[2].den/3600.
            else:
                # single rational (e.g. altitude)
                c = v[0].num*1./v[0].den
            coor.append(c)
        coor[0], coor[1] = proj_4326(coor[0], coor[1])
        return coor

    #keys = tags.keys()
    gpsKeys = ['GPS GPSLongitude', 'GPS GPSLatitude', 'GPS GPSAltitude']
    exif["gps"] = getValueOfTag(gpsKeys)
    v = tags["Image DateTime"].values
    exif["timestamp"] = datetime.strptime(v, "%Y:%m:%d %H:%M:%S")
    #exif["size"] = [tags['Image ImageWidth'].values[0],tags['Image ImageLength'].values[0]]
    return exif
# Count 1 ten times, printing the running total; renamed the accumulator
# from `sum`, which shadowed the built-in of the same name.
total = 0
i = 0
while i < 10:
    total += 1
    i += 1
    # NOTE(review): in the flattened source it is ambiguous whether this
    # print ran inside or after the loop; kept inside (running total).
    print(total)
print('total', total)
import numpy as np

from . import generator_base
from plotter.utils.gcode import *
from plotter.utils.helper import overrides


class SVGGenerator(generator_base.GeneratorBase):
    """Generator plugin that converts parsed SVG path data into G-code moves."""

    def __init__(self, args):
        generator_base.GeneratorBase.__init__(self, args)

    @classmethod
    def getName(cls):
        # Display name used to select this generator.
        return "SVG"

    @classmethod
    def getHelp(cls):
        return """Generates images by processing svg data. Keep in mind that paths are rendered as individual lines only. Strokes, filled areas and non-path objects can not be represented. And the result might drastically deviate from your expectation!"""

    @classmethod
    def getInputType(cls):
        return "svg"

    @classmethod
    def setupCustomParams(cls, subparser):
        subparser.add_argument('--path-sampling', default=30, type=int, help="Number of samples taken from each path object. High number increases precision but also print time.")

    @overrides(generator_base.GeneratorBase)
    def convert(self, svg):
        """Convert `svg` (a (paths, attributes) pair, as produced by
        svgpathtools-style parsing — TODO confirm) into a list of G-code
        commands: pen up, home, then each continuous subpath traced with
        `path_sampling` evenly spaced samples between pen-down/pen-up."""
        gcode = [GCode_up(), GCode_home()]
        paths, attr = svg
        # Parameter values in [0, 1] at which each path is sampled.
        samplePoints = np.linspace(0.0, 1.0, self.params["path_sampling"])
        for p in paths:
            # A discontinuous path is traced as separate continuous subpaths,
            # lifting the pen between them.
            if p.iscontinuous():
                subpaths = [p]
            else:
                subpaths = p.continuous_subpaths()
            for s in subpaths:
                # Points are represented as complex values
                c = s.point(samplePoints[0])
                x, y = np.real(c), np.imag(c)
                c = self.px2Scr(np.array([x, y]))
                # Travel move to the subpath start, then lower the pen.
                gcode.append(GCode_goTo(c, self.params["speed_nodraw"]))
                gcode.append(GCode_down())
                for i in samplePoints[1:]:
                    c = s.point(i)
                    x, y = np.real(c), np.imag(c)
                    c = self.px2Scr(np.array([x, y]))
                    gcode.append(GCode_goTo(c, self.params["speed_draw"]))
                gcode.append(GCode_up())
        return gcode
""" # 基于链表实现栈 """ import os import logging logger = logging.getLogger(__name__) class Node(object): """节点定义""" def __init__(self, data: int, next=None): self._data = data self._next = next class LinkedStack(object): """A stack based upon singly-linked list""" def __init__(self): self._top: Node = None def push(self, value: int): """栈顶插入节点""" new_top = Node(value) new_top._next = self._top self._top = new_top def pop(self): """出栈""" if self._top: value = self._top._data self._top = self._top._next return value def __repr__(self): current = self._top nums = [] while current: nums.append(current._data) current = current._next return "->".join(f"{num}" for num in nums) if __name__ == '__main__': logging.basicConfig(format="[%(asctime)s %(filename)s:%(lineno)s] %(message)s", level=logging.INFO, filename=None, filemode='a') stack = LinkedStack() for i in range(9): stack.push(i) logger.info(stack) for _ in range(3): stack.pop() logger.info(stack)
from rubicon_ml.repository.utils import slugify


def test_can_slugify():
    """slugify lower-cases, trims whitespace, and collapses punctuation to dashes."""
    raw = " TESTing... SlUgIfY? "
    expected = "testing-slugify"
    assert slugify(raw) == expected
#!/usr/bin/env python
"""
Typical Usage:

coords2dihedrals.py 180 < 12column_text_file.dat

This script reads a 12-column numeric text file and computes dihedral
angles (and other inclusive angles and distances).

Example input file:
30.13 11.46 15.12   35.28 -2.32 12.61   30.68 -4.45 16.30   28.68 8.46 10.24
28.68  8.42 10.24   30.68 -4.45 16.30   25.22 -4.75 19.42   27.57 3.37  8.06
27.57  3.37  8.06   25.22 -4.75 19.42   20.45 -1.27 20.09   25.80 -2.26 6.61
25.80 -2.23  6.61   20.45 -1.27 20.09   15.47  1.36 18.12   21.67 -5.88 7.17
  :

The 12 numbers on each line represent the x,y,z coordinates of 4 atoms.
(This is the format of the files generated by the "pdb2coords.py" script.)
For each line, the program calculates the (4-body) dihedral angle for these
4 atoms, followed by the (3-body) bond angle between atoms 1,2,3 and 2,3,4
followed by distances between atoms 1,2 and 2,3 and 3,4, and writes them
(in that order) to the standard out.  (6 numbers total, followed by a newline)
(When a line contains the wrong number of numbers, the script prints out a
list of 6 impossible negative values: "-720 -360 -360 -1 -1 -1" to let the
caller know that this particular angle could not be computed.)

Note: The "IUPAC/IUB" dihedral-angle convention is used: 4 atoms in the
"trans" conformation have a dihedral angle of 180 degrees.

By default, dihedral angles are calculated in the range from 0 to 360.0
degrees.  (This means there is discontinuity in the angle at 0 degrees.)
However this may be a bad choice for polymers which can alternate between
helical conformations which are left and right handed.  In some cases, you
may want to have the discontinuity appear at 180 degrees (or some other
angle which is sparsely populated).  So you can supply an optional argument
(the "branch_of_log") which indicates where the discontinuity in the
dihedral angle will appear.  Dihedral angles returned by this program will
lie in the range: [branch_of_log-360, branch_of_log)
"""

import sys
from math import sqrt, cos, sin, tan, acos, asin, atan, pi, floor

# Sometimes this program pipes its output to other programs which halt early.
# Below we silently suppress the ugly "Broken pipe" message this generates:
import signal
signal.signal(signal.SIGPIPE, signal.SIG_DFL)


def length_v(r):
    """Return the Euclidean length of vector r."""
    lsqd = 0.0
    for d in range(0,len(r)):
        lsqd += r[d]*r[d]
    return sqrt(lsqd)


def inner_prod_v(r1,r2):
    """Return the dot product of vectors r1 and r2."""
    result = 0.0
    for d in range(0,len(r1)):
        result += r1[d]*r2[d]
    return result


def cross_prod_v3(a,b):
    """Return the 3-D cross product a x b as a new list."""
    c = [0.0,0.0,0.0]
    c[0] = a[1]*b[2] - a[2]*b[1]
    c[1] = a[2]*b[0] - a[0]*b[2]
    c[2] = a[0]*b[1] - a[1]*b[0]
    return c


def Coords2DihedralsAnglesLengths(r0, r1, r2, r3, branch_of_log=pi):
    """Return (phi, theta0, theta1, l10, l21, l32) for 4 atom positions.

    phi      -- dihedral angle (radians) in [branch_of_log-2*pi, branch_of_log)
    theta0   -- bond angle between atoms 0,1,2 (radians)
    theta1   -- bond angle between atoms 1,2,3 (radians)
    l10,l21,l32 -- bond lengths |r1-r0|, |r2-r1|, |r3-r2|
    """
    # Bond vectors along the 4-atom chain.
    r10 = [0.0, 0.0, 0.0]
    r21 = [0.0, 0.0, 0.0]
    r32 = [0.0, 0.0, 0.0]
    for d in range(0,3):
        r10[d] = r1[d] - r0[d]
        r21[d] = r2[d] - r1[d]
        r32[d] = r3[d] - r2[d]
    l10 = length_v(r10)
    l21 = length_v(r21)
    l32 = length_v(r32)
    # Normals to the planes spanned by (r10,r21) and (r21,r32).
    n012 = cross_prod_v3(r10, r21)
    n123 = cross_prod_v3(r21, r32)

    # The dihedral-angle or 4-body angle is named "angle0124"
    cos_phi = inner_prod_v(n012, n123) /(length_v(n012)*length_v(n123))

    # There is a problem whenever 4 consecutive atoms are coplanar:
    #
    #            *---*
    #            |        (all 4 atoms are coplanar, and phi = 0)
    #            *---*
    #
    # In this example, the dihedral angle phi is well defined and = 0.
    # The problem is that, due to floating point roundoff
    # "cos_phi" can sometimes slightly exceed 1.
    # This causes a NAN when you calculate acos(cos_phi).
    if (cos_phi > 1.0):
        cos_phi = 1.0
    elif (cos_phi < -1.0):
        cos_phi = -1.0

    phi = acos(cos_phi)

    # This formula does not distinguish positive and negative phi.
    #
    # Negative dihedral angles:
    #
    # Check if the position of atom i+3 is above the phi=0 plane
    # (in the region of positive phi), or below the phi=0 plane.
    # It is above the phi=0 plane if the bond from atom i+2 to i+3
    # points in the same direction as the negative-phi-tangent-vector
    # for atom i (not i+3)  (...which points in the n012 direction)
    if inner_prod_v(n012, r32) < 0.0:
        phi = -phi

    # Shift phi into the branch [branch_of_log - 2*pi, branch_of_log).
    phi_range_a = branch_of_log - (2.0*pi)
    phi_range_b = branch_of_log
    nphi = floor((phi - phi_range_a) / (2.0*pi))
    phi = phi - (nphi*2.0*pi)

    # 3-body bond angles via |n| = |a||b| sin(theta); the dot-product test
    # picks the obtuse branch when the bonds point the same way.
    sin_theta0 = length_v(n012) / (l10*l21)
    if sin_theta0 > 1.0:
        sin_theta0 = 1.0
    theta0 = asin(sin_theta0)
    if r21[0]*r10[0]+r21[1]*r10[1]+r21[2]*r10[2] > 0.0:
        theta0 = pi - theta0

    sin_theta1 = length_v(n123) / (l21*l32)
    if sin_theta1 > 1.0:
        sin_theta1 = 1.0
    theta1 = asin(sin_theta1)
    if r32[0]*r21[0]+r32[1]*r21[1]+r32[2]*r21[2] > 0.0:
        theta1 = pi - theta1

    return (phi, theta0, theta1, l10, l21, l32)


def Coords2Dihedrals(r0, r1, r2, r3, branch_of_log=pi):
    """Return only the dihedral angle for 4 atom positions."""
    phi,th1,th2,l01,l21,l32 = Coords2DihedralsAnglesLengths(r0, r1, r2, r3,
                                                            branch_of_log)
    return phi


def main():
    branch_of_log = pi  # by default, dihedral angles lie in range: [-180,180.0)
    truncate_a = 0
    truncate_b = 0
    if (len(sys.argv) > 4):
        sys.stderr.write('Error (coords2dihedrals): number of arguments should not exceed 3.\n'\
                         '    If an odd-number of arguments are passed (1 or 3), then\n'
                         '    the first argument is assumed to be the branch-of-log, a number which is\n'
                         '    0 and 360.0.  This argument indicates where the discontinuity in the\n'
                         '    dihedral angle is located.  By default it is 0 degrees, which\n'
                         '    corresponds to 4 atoms in the "cis" conformation.)\n'
                         '    (The two arguments correspond to the number of lines of\n'\
                         '     text to omit from the beginning and end of the file, respectively.)\n'\
                         '    If one argument is passed, then both are assumed to be the same.\n'\
                         '    If no argument is passed, then by default, no data is ignored.\nExiting...\n\n')
        sys.exit(-1)
    # NOTE: The "truncate" arguments are not really supported any more.  Instead
    # use other scripts to post-process the results printed by this program.
    elif (len(sys.argv) == 4):
        branch_of_log = float(sys.argv[1])
        truncate_a = int(sys.argv[2])
        truncate_b = int(sys.argv[3])
    elif (len(sys.argv) == 3):
        truncate_a = int(sys.argv[1])
        truncate_b = int(sys.argv[2])
    elif (len(sys.argv) == 2):
        branch_of_log = float(sys.argv[1])
        # NOTE(review): the degrees->radians conversion below appears to run
        # only in this one-argument branch, so a branch_of_log passed together
        # with truncate arguments (3-argument form) seems to stay in degrees.
        # TODO confirm whether that is intended.
        branch_of_log *= pi/180.0

    coords_list = []

    # Read the file.  Each non-blank line must hold 12 numbers (4 atoms x,y,z);
    # blank lines are kept as empty lists so the output stays line-aligned.
    for line in sys.stdin:
        line = line.strip()
        if line == '':
            coords = []
        else:
            coords = list(map(float, line.split()))
            if len(coords) != 4*3:
                sys.stderr.write('Error(coords2dihedrals):\n'+'Each line should either contain 12 numbers or be blank.\n')
                sys.exit(-1)
        coords_list.append(coords)

    # Truncate the data we don't want.
    # (Why? The residues at the beginning and ending of helices
    #  are less trustworthy then the residues in the middle.)
    coords_list = coords_list[truncate_a:len(coords_list)-truncate_b]

    N = len(coords_list)
    for i in range(0,N):
        if len(coords_list[i]) == 3*4:
            # Unpack the 4 atom positions from the flat 12-number row.
            r0 = [coords_list[i][3*0+0], coords_list[i][3*0+1], coords_list[i][3*0+2]]
            r1 = [coords_list[i][3*1+0], coords_list[i][3*1+1], coords_list[i][3*1+2]]
            r2 = [coords_list[i][3*2+0], coords_list[i][3*2+1], coords_list[i][3*2+2]]
            r3 = [coords_list[i][3*3+0], coords_list[i][3*3+1], coords_list[i][3*3+2]]
            phi,theta0,theta1,l10,l21,l32 = Coords2DihedralsAnglesLengths(r0, r1, r2, r3,
                                                                          branch_of_log)
            # Angles are printed in degrees, lengths in input units.
            sys.stdout.write(str(phi*180.0/pi) + ' ' +
                             str(theta0*180.0/pi) + ' ' +
                             str(theta1*180.0/pi) + ' ' +
                             str(l10) + ' ' + str(l21) + ' ' + str(l32) + '\n')
        else:
            # Otherwise, we write out an impossible values to let the caller
            # know that this particular dihedral angle could not be computed
            sys.stdout.write('-720 -360 -360 -1 -1 -1\n')


if __name__ == "__main__":
    main()
# Day 12: Rain Risk
# <ryc> 2021

def inputdata():
    """Read (action, value) pairs from the puzzle input file."""
    with open('day_12_2020.input') as stream:
        return [(line[0], int(line[1:])) for line in stream]


def navigation(data, position, vector, relative=False):
    """Run the ship simulation on the complex plane.

    position -- starting ship position (complex)
    vector   -- heading (part 1) or waypoint offset (part 2, relative=True)
    Returns the final position.
    """
    cardinal = {'E': 1 + 0j, 'S': -1j, 'W': -1 + 0j, 'N': 1j}
    sign = {'R': 1, 'L': -1}
    traslate = ['E', 'S', 'W', 'N']
    for action, value in data:
        if action in cardinal:
            step = cardinal[action] * value
            if relative:
                # Part 2: N/S/E/W move the waypoint, not the ship.
                vector += step
            else:
                position += step
        elif action in sign:
            # Rotate by multiplying with the unit vector of the turned heading;
            # R90 -> -1j, L90 -> 1j, and so on in quarter turns.
            quarter_turns = (sign[action] * int(value / 90)) % 4
            vector *= cardinal[traslate[quarter_turns]]
        else:
            # 'F': advance along the heading / towards the waypoint.
            position += vector * value
    return position


if __name__ == '__main__':
    print('\n12: Rain Risk')
    data = inputdata()

    position = navigation(data, 0j, 1 + 0j)
    print('\nManhattan distance =', int(abs(position.real) + abs(position.imag)))
    print(position)

    position = navigation(data, 0j, 10 + 1j, relative=True)
    print('\nManhattan distance, relative navigation =', int(abs(position.real) + abs(position.imag)))
    print(position)
# Generated by Django 2.1.7 on 2019-06-15 08:58 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('whiskydatabase', '0009_whiskyinfo_bottling'), ] operations = [ migrations.RemoveField( model_name='whiskyinfo', name='distillery', ), migrations.AddField( model_name='whiskyinfo', name='distillery', field=models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, related_name='whisky_distillery', to='whiskydatabase.Distillery'), preserve_default=False, ), ]
"""Post the XML payload in serve.xml to the serve endpoint."""
import requests

serve_url = 'http://124.70.178.153:8080/g1/serve'

# Context manager guarantees the file handle is closed (the original
# opened it and never closed it).
with open('serve.xml', 'r') as xml_file:
    xml_str = xml_file.read()

header = {'content-type': 'application/xml'}
response = requests.post(serve_url, xml_str, headers=header)
# Fail loudly on HTTP 4xx/5xx instead of silently discarding the response.
response.raise_for_status()
import numpy as np
import statistics

# Milliseconds per second, used to convert ms-based draws back to seconds.
ms_per_sec = 1000
# need to create additional spikes, beyond max_time, to ensure spikes continue to max_time
extra_time = 1.1


def summary(spikes, max_time, method=None):
    """Print and return ISI statistics for a set of spike trains.

    spikes   -- list of 1-D arrays of spike times (one array per train)
    max_time -- duration of the simulation (used for the rate estimate)
    method   -- label for the log output; also enables the detailed printout

    Returns (ISI, CV, info) where ISI is the list of inter-spike-interval
    arrays, CV the per-train coefficient of variation, and info a dict with
    min/max/mean/median ISI and the mean CV.
    """
    ISI = [np.diff(x) for x in spikes if len(x)]  # replace np.diff with elephant function isi()
    lengths = [len(x) for x in ISI]
    # for i, x in enumerate(ISI):
    #     print(method, 'has', len(x), 'ISIs for train', i)
    # Two firing-rate estimates: mean of 1/ISI, and total ISIs per train per second.
    freq = np.mean([np.mean(1/x) for x in ISI if len(x)])
    freq2 = np.sum(lengths)/np.shape(lengths)[0]/max_time
    print('###########', method, 'mean number of spikes', np.round(np.mean(lengths)+1), 'ff', np.round(freq, 3), np.round(freq2, 3))
    if np.shape(ISI)[0] <= 10:
        print('num ISIs per train:', lengths)
    CV = [np.std(x)/np.mean(x) for x in ISI if len(x)]  # replace with elephant function cv()
    isi_min = np.min([np.min(x) for x in ISI if len(x)])
    isi_max = np.max([np.max(x) for x in ISI if len(x)])
    isi_mean = np.mean([np.mean(x) for x in ISI if len(x)])
    # Median is taken over all ISIs pooled across trains.
    flat_isi = [a for isi in ISI for a in isi]
    isi_med = statistics.median(flat_isi)
    if np.min(lengths) == 0:
        print(' ****', method, 'has ', lengths.count(0), 'single spike trains, excluded from summary stats')
    if method:
        print(' CV', np.round(np.mean(CV), 4), np.round(np.std(CV), 4), "ISI min,mean,median,max", np.round(isi_min, 5), np.round(isi_mean, 5), np.round(isi_med, 5), np.round(isi_max, 5))
    return ISI, CV, {'min': isi_min, 'max': isi_max, 'mean': isi_mean, 'median': isi_med, 'CV': np.mean(CV)}


def flatten(isiarray):
    """Flatten a list of lists into a single flat list."""
    return [item for sublist in isiarray for item in sublist]


def exp_events(mean_isi, Tmax, num_spikes, min_isi):
    """Cumulative sum of exponential ISIs (keeping only ISIs > min_isi),
    truncated at Tmax.  This is a homogeneous Poisson event generator."""
    isi = np.random.exponential(mean_isi, num_spikes)
    times = np.cumsum(isi[isi > min_isi])
    return times[times < Tmax]

########################################################
# None of these 1st four methods allow for time dependent variation in firing rate
# All of these methods generate independent trains - see Corr_trains.py on nebish
######################################################
# method 1 from Lytton et al: generate intervals with mean=mean_isi
# Poisson:
# prob(k; lambda) = {lambda^k e^{-lambda}} / {k!}
# lambda = mean and std, k is integer, isi is integer value
# spike time = cumulative sum of intervals
# meanISI is correct, CV too low - very regular spikes - not similar to data?
def spikes_poisson(num_cells, mean_isi, min_isi, max_time):
    """Spike trains whose ISIs are Poisson-distributed (drawn in ms).

    Returns (list of spike-time arrays, summary info dict, list of ISI arrays).
    """
    num_spikes_per_cell = int(max_time/mean_isi)
    spikesPoisson = []
    for i in range(num_cells):
        # FIX: np.float was deprecated and removed in NumPy 1.24; the builtin
        # float is the documented equivalent.
        isi = np.random.poisson(mean_isi*ms_per_sec, num_spikes_per_cell)/float(ms_per_sec)
        spikes = np.cumsum(isi[isi > min_isi])
        spikesPoisson.append(spikes[spikes < max_time])
    ISI, CV_poisson, info = summary(spikesPoisson, max_time, method='poisson')
    return spikesPoisson, info, ISI


# method 2 from Zbyszek: determine how many spike for the cell, then calculate
# that many spike times between 0 and max_time
# in the loop: eliminate spikes yielding interval too small.  Strange method.
# Normally distributed with minimum ISI - not similar to data
# meanISI = max_time/num_spikes_per_cell
def spikes_normal(num_cells, mean_isi, min_isi, max_time):
    """Spike trains made of uniformly placed spikes (Poisson count per cell),
    with intervals below min_isi removed afterwards."""
    num_spikes_per_cell = int(max_time/mean_isi)
    spikesNormal = [sorted(np.random.rand(np.random.poisson(num_spikes_per_cell)) * max_time)
                    for _ in range(num_cells)]
    for i in range(len(spikesNormal)):
        tt = np.array(spikesNormal[i])
        # FIX: guard the empty draw; np.random.poisson can return 0 spikes,
        # in which case tt[0] raised IndexError in the original.
        if len(tt):
            spikesNormal[i] = [tt[0]] + list(tt[1:][np.diff(tt) > min_isi])
        else:
            spikesNormal[i] = []
    ISI, CV_norm, info = summary(spikesNormal, max_time, method='norm')
    return spikesNormal, info, ISI

# method 3: exp, also known as homogeneous Poisson process.
# Could be replaced by elephant function: homogeneous_poisson_process
# spikesExp = [homogeneous_poisson_process(rate=10.0*Hz, t_start=0.0*s,
#              t_stop=100.0*s) for i in range(num_cells)]
def spikes_exp(num_cells, mean_isi, min_isi, max_time):
    """Homogeneous Poisson spike trains (exponential ISIs) for num_cells cells."""
    # Draw extra spikes (extra_time factor) so trains reach max_time.
    draws_per_cell = int(extra_time * max_time / mean_isi)
    trains = [exp_events(mean_isi, max_time, draws_per_cell, min_isi)
              for _ in range(num_cells)]
    ISI, _cv, info = summary(trains, max_time, method='exp')
    return trains, info, ISI


# method 4: distribution with mode=intraburst, using mean_isi as mean
# CV, min and max ISI similar to Exp, but slightly longer tail and less
# truncation at min_isi.  For low firing rates, e.g. SPN, the number of
# spikes is too low compared to exp.
def spikes_lognorm(num_cells, mean_isi, min_isi, max_time, intraburst):
    """Spike trains with log-normal ISIs whose mode is ~intraburst and whose
    mean is ~mean_isi."""
    draws_per_cell = int(extra_time * max_time / mean_isi)
    # Log-normal parameters derived from the requested mean and mode.
    sigma = np.sqrt((2/3.) * (np.log(mean_isi) - np.log(intraburst)))
    mu = (2*np.log(mean_isi) + np.log(intraburst)) / 3.
    # sigma = 1.5*sigma  # This makes number of spikes too low if mean firing rate is quite low
    trains = []
    for _ in range(num_cells):
        log_isi = np.random.randn(draws_per_cell) * sigma + mu
        isi = np.exp(log_isi)
        times = np.cumsum(isi[isi > min_isi])
        trains.append(times[times < max_time])
    ISI, _cv, info = summary(trains, max_time, method='lognorm')
    return trains, info, ISI

# method5 from Corr_plot_v2_1.py: use exponential distribution twice,
# generates a single train.
# ISI has high peak and long tailed ISI distribution. These are exponentially
# distributed bursts - good method for bursty data:
# higher intertrain isi produces lower CV.
# itisi=0.1 --> 1.0; iti=0.3 --> 0.7; itisi=0.03 --> 1.4-1.6
# lower noise = higher CV
def train(cellnum, mean_isi, burstinterval, burstisi, min_isi, max_time, noise):
    """Generator yielding one array of spike times per burst for one cell.

    Bursts start at exponentially distributed intervals (mean burstinterval);
    within a burst, ISIs are exponential with per-spike means drawn as
    burstisi + gaussian noise.  Yields arrays truncated to [0, max_time).
    """
    num_spikes_per_cell = int(max_time/mean_isi)
    time = 0
    while time < max_time:
        # t is exponentially distributed time between bursts
        # why not use intertrain isi
        # f(x; 1/beta) = 1/beta * exp(-x/beta)
        t = max(min_isi, np.random.exponential(burstinterval))
        time += t
        # determine how many ISI to generate within the burst - should be spikes_per_burst
        n = max(2, np.random.geometric(1. / num_spikes_per_cell))
        # gaussian distribution of parameter used in exponential,
        # mean = intertrain_isi, sigma = noise
        # noise on the isi provides double dose of noise compared to the spikesExp
        itisi = burstisi + np.random.randn(n) * noise
        # replace any lower than min_isi with min_isi
        itisi = np.array([x if x > min_isi else min_isi for x in itisi])
        # these isis are exponentially distributed within the bursts
        # Issue if itisi=min_isi, which happens if noise too high
        isilist = np.random.exponential(itisi, size=n)
        isis = np.cumsum(isilist[isilist > min_isi])
        times = time + isis
        # print('burst npre', n, 'npost', len(times[times < max_time]))
        yield times[(times < max_time) & (times >= 0)]
        # Advance the clock to the last spike of this burst (if any).
        time = times[-1] if len(times) else time

# Inhomogeneous Poisson process.
# Could be replaced by elephant function: inhomogeneous_poisson_process
def spikes_inhomPois(numcells, tdep_rate, time_samp, min_isi, IP_type='InhomPoisson'):
    """Inhomogeneous Poisson trains by thinning a maximal-rate Poisson train.

    tdep_rate -- time-dependent firing rate sampled at times time_samp
    Spikes from a homogeneous process at the peak rate are kept with
    probability rate(t)/maxrate (standard thinning).
    """
    max_time = time_samp[-1]
    maxrate = np.max(tdep_rate)
    smallest_isi = 1/maxrate
    spikes = []
    for cellnum in range(numcells):
        spike_superset = exp_events(smallest_isi, max_time,
                                    int(extra_time*max_time/smallest_isi), min_isi)
        if len(spike_superset):
            spike_rn = np.random.rand(len(spike_superset))
            # find firing rate bin corresponding to spike time
            rate_bin = [np.argmin(np.abs(time_samp - spk)) for spk in spike_superset]
            # normalized spike probability
            prob_spike = tdep_rate[rate_bin]/maxrate
            # retain spikes based on spike probability and random number
            spikes.append(spike_superset[spike_rn < prob_spike])
        else:
            print('uh oh, no spikes generated')
            spikes.append([])
    ISI, CV_IP, info = summary(spikes, max_time, IP_type)
    return spikes, info, ISI


def osc(num_cells, mean_isi, min_isi, max_time, intraburst, interburst,
        freq_dependence, theta=None, IP_type='osc'):
    """Oscillatory (sinusoidally modulated) inhomogeneous Poisson trains.

    Returns (spikes, info, ISI, time_samp, tdep_rate).
    """
    samples_per_cycle = 10
    if theta:
        maxfreq = max(interburst, theta)
    else:
        maxfreq = 1.0/interburst
    freq_sampling_duration = (1.0/maxfreq)/samples_per_cycle
    time_samp = np.arange(0, max_time, freq_sampling_duration)
    # sinusoidal modulation in isi, interburst is 1/sin freq:
    # this gives a mean_freq ~2x 1/mean_isi - not good
    # tdep_rate = 1/(mean_isi*(1+freq_dependence*np.sin((2*np.pi/interburst)*time_samp)))
    # sinusoidal modulation in firing rate gives mean number of spikes more
    # similar to exp.  Also, fft is more unimodal.
    tdep_rate = (1./mean_isi)*(1 + freq_dependence*np.sin(2*np.pi*time_samp/interburst))
    if theta:
        # This doubles the envelope frequency!
        thetaosc = np.sin(2*np.pi*theta*time_samp)
        # 4.5 multiplier increases number of spikes to that for exp and log norm
        # 0.2 subtraction produces silent periods between "up" states,
        # probably want these less silent for in vivo
        tdep_rate = (4.5/mean_isi)*((1 + freq_dependence*np.sin(2*np.pi*time_samp/interburst))*thetaosc - 0.2)
        print('theta', theta, tdep_rate[0:20])
    #
    spikes, info, ISI = spikes_inhomPois(num_cells, tdep_rate, time_samp, min_isi)
    return spikes, info, ISI, time_samp, tdep_rate


def spikes_ramp(num_cells, min_isi, max_time, min_freq, max_freq, start_time, ramp_duration):
    """Trains whose rate ramps linearly from min_freq starting at start_time
    for ramp_duration seconds."""
    samples_per_cycle = 10
    freq_sampling_duration = (1.0/max_freq)/samples_per_cycle
    time_samp = np.arange(0, max_time, freq_sampling_duration)
    tdep_rate = min_freq + max_freq*(time_samp - start_time)*(time_samp > start_time)*(time_samp < (start_time + ramp_duration))
    #
    spikes, info, ISI = spikes_inhomPois(num_cells, tdep_rate, time_samp, min_isi, IP_type='ramp')
    return spikes, info, ISI, time_samp, tdep_rate


def spikes_pulse(num_cells, min_isi, max_time, min_freq, max_freq, start_list, duration):
    """Trains with a baseline min_freq and rectangular rate pulses of height
    max_freq at each start time in start_list."""
    samples_per_cycle = 10
    freq_sampling_duration = (1.0/max_freq)/samples_per_cycle
    time_samp = np.arange(0, max_time, freq_sampling_duration)
    tdep_rate = min_freq*np.ones(len(time_samp))
    for startt in start_list:
        tdep_rate = tdep_rate + max_freq*(time_samp > startt)*(time_samp < (startt + duration))
    #
    spikes, info, ISI = spikes_inhomPois(num_cells, tdep_rate, time_samp, min_isi, IP_type='pulses')
    return spikes, info, ISI, time_samp, tdep_rate


_FUNCTIONS = {  # This is not used, because poisson and normal not so good
    'exp': spikes_exp,
    'poisson': spikes_poisson,
    'normal': spikes_normal
}


def make_trains(num_trains, isi, samples, maxTime, train_type):
    """Dispatch to a train generator.  train_type is either 'lognorm <mode>'
    (space-separated) or anything else, which falls through to spikes_exp."""
    print('make trains', train_type)
    if train_type.startswith('lognorm'):
        distr_info = train_type.split()
        print('distribution', distr_info)
        intraburst = float(distr_info[1])
        train_type = distr_info[0]
        spikes, info, ISI = spikes_lognorm(num_trains, isi, samples, maxTime, intraburst)
    else:
        # func = _FUNCTIONS[train_type]
        # spikes, info, ISI = func(num_trains, isi, samples, maxTime)
        spikes, info, ISI = spikes_exp(num_trains, isi, samples, maxTime)
    return spikes, info, ISI


# Reference R implementation from the Ujfalussy github repo; kept verbatim
# because exp_events / spikes_inhomPois above were translated from it.
'''
Inhomogeneous Poisson Process from Ujfalussy github R code:
gen.Poisson.events <- function(Tmax, rate){
    ## homogeneous Poisson process
    ## Tmax: time in ms
    ## rate: event frequency in 1/ms
    t0 <- rexp(1, rate)
    if (t0 > Tmax) {
        sp <- NULL
    } else {
        sp <- t0
        tmax <- t0
        while(tmax < Tmax){
            t.next <- tmax + rexp(1, rate)
            if (t.next < Tmax) sp <- c(sp, t.next)
            tmax <- t.next
        }
    }
    return(sp)
}
NOTE by AB: gen.Poisson.events translated into exp_events, but added min_isi

gen.Poisson.train <- function(rates, N){
    ## inhomogeneous Poisson process
    Tmax <- length(rates) # Tmax in ms
    max.rate <- max(rates) # rates in Hz
    t.sp.kept <- c(0, 0) # cell, time (ms)
    for (cell in 1:N){
        t.sp <- gen.Poisson.events(Tmax, max.rate/1000)
        if (length(t.sp > 0)){
            for (tt in t.sp){
                ## we keep the transition with some probability
                rr <- rates[ceiling(tt)]
                p.keep <- rr/max.rate # both in Hz!
                if (runif(1) < p.keep){
                    t.sp.kept <- rbind(t.sp.kept, c(cell-1, tt))
                }
            }
        }
    }
    t.sp.kept <- t.sp.kept[-1,]
    ii <- sort(t.sp.kept[,2], index.return=T)$ix
    t.sp <- t.sp.kept[ii,]
    t.sp
}
NOTE by AB: gen.Poisson.train translated into python above
'''
import dash_bootstrap_components as dbc
from dash import Input, Output, State, html
from dash_bootstrap_components._components.Container import Container

PLOTLY_LOGO = "https://images.plot.ly/logo/new-branding/plotly-logomark.png"

# Search input plus button, right-aligned on medium+ screens.
search_bar = dbc.Row(
    [
        dbc.Col(dbc.Input(type="search", placeholder="Search")),
        dbc.Col(
            dbc.Button(
                "Search", color="primary", className="ms-2", n_clicks=0
            ),
            width="auto",
        ),
    ],
    className="g-0 ms-auto flex-nowrap mt-3 mt-md-0",
    align="center",
)

navbar = dbc.Navbar(
    dbc.Container(
        [
            html.A(
                # Use row and col to control vertical alignment of logo / brand
                dbc.Row(
                    [
                        dbc.Col(html.Img(src=PLOTLY_LOGO, height="30px")),
                        dbc.Col(dbc.NavbarBrand("Navbar", className="ms-2")),
                    ],
                    align="center",
                    className="g-0",
                ),
                href="https://plotly.com",
                style={"textDecoration": "none"},
            ),
            dbc.NavbarToggler(id="navbar-toggler", n_clicks=0),
            # Collapsible section: hidden behind the toggler on small screens.
            dbc.Collapse(
                search_bar,
                id="navbar-collapse",
                is_open=False,
                navbar=True,
            ),
        ]
    ),
    color="dark",
    dark=True,
)


# add callback for toggling the collapse on small screens
# NOTE(review): `app` is never defined or imported in this file, so the
# decorator below raises NameError at import time.  This module appears to
# assume a Dash `app` object exists in scope (e.g. `from app import app`)
# — confirm where `app` is meant to come from.
@app.callback(
    Output("navbar-collapse", "is_open"),
    [Input("navbar-toggler", "n_clicks")],
    [State("navbar-collapse", "is_open")],
)
def toggle_navbar_collapse(n, is_open):
    """Flip the collapse open/closed each time the toggler is clicked."""
    if n:
        return not is_open
    return is_open
import unittest

from core.player import Player
from core.turn_selecting_strategies.random_strategy import RandomStrategy


class PlayerTest(unittest.TestCase):
    """Tests for Player construction and score bookkeeping."""

    def test_init_assigns_strategy_as_a_field(self):
        # Any object works here; Player should store it untouched.
        sentinel = 42
        assert Player(sentinel).turn_selecting_strategy is sentinel

    def test_win_increments_score(self):
        subject = self.instantiate_player()
        assert subject.score == 0
        subject.win()
        assert subject.score == 1

    def test_lose_decrements_score(self):
        subject = self.instantiate_player()
        subject.score = 1
        subject.lose()
        assert subject.score == 0

    def test_lose_dont_decrement_score_when_it_is_zero(self):
        # Score must never drop below zero.
        subject = self.instantiate_player()
        assert subject.score == 0
        subject.lose()
        assert subject.score == 0

    @staticmethod
    def instantiate_player():
        """Build a Player wired to a real RandomStrategy."""
        return Player(RandomStrategy())
def first(items, condition):
    """Return the first element of *items* for which *condition* is truthy.

    Returns None when no element matches (or *items* is empty).
    """
    for item in items:
        if condition(item):
            return item
    return None
import unittest

from katas.kyu_7.convert_improper_fraction_to_mixed_numeral import \
    convert_to_mixed_numeral


class ConvertToMixedNumeralTestCase(unittest.TestCase):
    """convert_to_mixed_numeral turns 'a/b' into 'whole rem/b'.

    Cases cover: improper fractions (tests 1-5), exact whole numbers
    (6-10), proper fractions returned unchanged (11-15), and negative
    inputs (16-21).  Remainders are NOT reduced (e.g. '9999/24' -> '416 15/24').
    """

    def test_equal_1(self):
        self.assertEqual(convert_to_mixed_numeral('74/3'), '24 2/3')

    def test_equal_2(self):
        self.assertEqual(convert_to_mixed_numeral('9999/24'), '416 15/24')

    def test_equal_3(self):
        self.assertEqual(convert_to_mixed_numeral('74/30'), '2 14/30')

    def test_equal_4(self):
        self.assertEqual(convert_to_mixed_numeral('13/5'), '2 3/5')

    def test_equal_5(self):
        self.assertEqual(convert_to_mixed_numeral('5/3'), '1 2/3')

    def test_equal_6(self):
        self.assertEqual(convert_to_mixed_numeral('1/1'), '1')

    def test_equal_7(self):
        self.assertEqual(convert_to_mixed_numeral('10/10'), '1')

    def test_equal_8(self):
        self.assertEqual(convert_to_mixed_numeral('900/10'), '90')

    def test_equal_9(self):
        self.assertEqual(convert_to_mixed_numeral('9920/124'), '80')

    def test_equal_10(self):
        self.assertEqual(convert_to_mixed_numeral('6/2'), '3')

    def test_equal_11(self):
        self.assertEqual(convert_to_mixed_numeral('9/77'), '9/77')

    def test_equal_12(self):
        self.assertEqual(convert_to_mixed_numeral('96/100'), '96/100')

    def test_equal_13(self):
        self.assertEqual(convert_to_mixed_numeral('12/18'), '12/18')

    def test_equal_14(self):
        self.assertEqual(convert_to_mixed_numeral('6/36'), '6/36')

    def test_equal_15(self):
        self.assertEqual(convert_to_mixed_numeral('1/18'), '1/18')

    def test_equal_16(self):
        self.assertEqual(convert_to_mixed_numeral('-64/8'), '-8')

    def test_equal_17(self):
        self.assertEqual(convert_to_mixed_numeral('-6/8'), '-6/8')

    def test_equal_18(self):
        self.assertEqual(convert_to_mixed_numeral('-9/78'), '-9/78')

    def test_equal_19(self):
        self.assertEqual(convert_to_mixed_numeral('-504/26'), '-19 10/26')

    def test_equal_20(self):
        self.assertEqual(convert_to_mixed_numeral('-47/2'), '-23 1/2')

    def test_equal_21(self):
        self.assertEqual(convert_to_mixed_numeral('-21511/21'), '-1024 7/21')
# coding:utf8
# Data tools 1 -- basic plotting: least-squares polynomial fit of sin(x)+x/2
import numpy as np
import matplotlib.pyplot as plt


def f(x):
    """Target curve: sin(x) + 0.5*x."""
    return np.sin(x) + 0.5 * x


# 50 evenly spaced sample points over two full periods.
x = np.linspace(-2 * np.pi, 2 * np.pi, 50)

# --------- linear regression: fit a degree-5 polynomial and evaluate it.
coeffs = np.polyfit(x, f(x), deg=5)
fitted = np.polyval(coeffs, x)

plt.plot(x, fitted, 'b', label='reg')
plt.plot(x, f(x), 'r.', label='fx')
plt.grid(True)
plt.show()
import random

import numpy as np
import networkx as nx
import scipy.sparse as sp
from numba import njit

__all__ = ["RandomWalker", "BiasedRandomWalker", "BiasedRandomWalkerAlias"]


@njit
def random_choice(arr, p):
    """Similar to `numpy.random.choice` and it suppors p=option in numba.
    refer to <https://github.com/numba/numba/issues/2539#issuecomment-507306369>

    Parameters
    ----------
    arr : 1-D array-like
    p : 1-D array-like
        The probabilities associated with each entry in arr

    Returns
    -------
    samples : ndarray
        The generated random samples
    """
    return arr[np.searchsorted(np.cumsum(p), np.random.random(), side="right")]


class RandomWalker:
    """Uniform (first-order) random walks over a CSR adjacency matrix."""

    def __init__(self, walk_length: int = 80, walk_number: int = 10):
        # walk_length: nodes per walk; walk_number: walks started per node.
        self.walk_length = walk_length
        self.walk_number = walk_number

    def walk(self, graph: sp.csr_matrix):
        """Yield walks (lists of node ids) for every node in the graph."""
        walks = self.random_walk(graph.indices,
                                 graph.indptr,
                                 walk_length=self.walk_length,
                                 walk_number=self.walk_number)
        return walks

    @staticmethod
    @njit
    def random_walk(indices, indptr, walk_length, walk_number):
        """Generator of uniform walks; neighbors are read directly from the
        CSR arrays (indices/indptr) so the whole loop is njit-compatible."""
        N = len(indptr) - 1
        for _ in range(walk_number):
            for n in range(N):
                walk = [n]
                current_node = n
                for _ in range(walk_length - 1):
                    neighbors = indices[
                        indptr[current_node]:indptr[current_node + 1]]
                    if neighbors.size == 0:
                        # Dead end: stop this walk early.
                        break
                    current_node = np.random.choice(neighbors)
                    walk.append(current_node)

                yield walk


class BiasedRandomWalker:
    """node2vec-style second-order walks with return (p) and in-out (q) bias."""

    def __init__(self, walk_length: int = 80, walk_number: int = 10,
                 p: float = 0.5, q: float = 0.5):

        self.walk_length = walk_length
        self.walk_number = walk_number

        # p and q are only ever used as 1/p and 1/q; reject values that
        # would divide by zero.
        try:
            _ = 1 / p
        except ZeroDivisionError:
            raise ValueError("The value of p is too small or zero to be used in 1/p.")
        self.p = p

        try:
            _ = 1 / q
        except ZeroDivisionError:
            raise ValueError("The value of q is too small or zero to be used in 1/q.")
        self.q = q

    def walk(self, graph: sp.csr_matrix):
        """Yield biased walks for every node in the graph."""
        walks = self.random_walk(graph.indices,
                                 graph.indptr,
                                 walk_length=self.walk_length,
                                 walk_number=self.walk_number,
                                 p=self.p,
                                 q=self.q)
        return walks

    @staticmethod
    @njit
    def random_walk(indices, indptr,
                    walk_length, walk_number, p=0.5, q=0.5):
        """Second-order walks computed on the fly (no alias tables):
        per step, weight 1/p for returning to the previous node, 1 for
        neighbors of the previous node, 1/q otherwise."""
        N = len(indptr) - 1
        for _ in range(walk_number):
            for n in range(N):
                walk = [n]
                current_node = n
                # N is used as an impossible node id for "no previous node".
                previous_node = N
                previous_node_neighbors = np.empty(0, dtype=np.int32)
                for _ in range(walk_length - 1):
                    neighbors = indices[indptr[current_node]:indptr[current_node + 1]]
                    if neighbors.size == 0:
                        break

                    probability = np.array([1 / q] * neighbors.size)
                    probability[previous_node == neighbors] = 1 / p
                    for i, nbr in enumerate(neighbors):
                        if np.any(nbr == previous_node_neighbors):
                            probability[i] = 1.

                    norm_probability = probability / np.sum(probability)
                    current_node = random_choice(neighbors, norm_probability)
                    walk.append(current_node)
                    previous_node_neighbors = neighbors
                    previous_node = current_node

                yield walk


class BiasedRandomWalkerAlias:
    """node2vec walks using precomputed alias tables (O(1) sampling per step)."""

    def __init__(self, walk_length: int = 80, walk_number: int = 10,
                 p: float = 0.5, q: float = 0.5):

        self.walk_length = walk_length
        self.walk_number = walk_number

        try:
            _ = 1 / p
        except ZeroDivisionError:
            raise ValueError("The value of p is too small or zero to be used in 1/p.")
        self.p = p

        try:
            _ = 1 / q
        except ZeroDivisionError:
            raise ValueError("The value of q is too small or zero to be used in 1/q.")
        self.q = q

    def walk(self, graph: sp.csr_matrix):
        """Build alias tables for the graph, then yield biased walks.

        NOTE(review): nx.from_scipy_sparse_matrix was removed in NetworkX 3.0
        (replaced by from_scipy_sparse_array) — this call requires
        NetworkX < 3; confirm the pinned version.
        """
        graph = nx.from_scipy_sparse_matrix(graph, create_using=nx.DiGraph)
        self.preprocess_transition_probs(graph)
        walks = self.random_walk(graph,
                                 self.alias_nodes,
                                 self.alias_edges,
                                 walk_length=self.walk_length,
                                 walk_number=self.walk_number)
        return walks

    @staticmethod
    def random_walk(graph, alias_nodes, alias_edges, walk_length=80, walk_number=10):
        """Generator of walks; the first step uses the node table, later
        steps use the (prev, current) edge table."""
        for _ in range(walk_number):
            for n in graph.nodes():
                walk = [n]
                current_node = n
                for _ in range(walk_length - 1):
                    neighbors = list(graph.neighbors(current_node))
                    if len(neighbors) > 0:
                        if len(walk) == 1:
                            current_node = neighbors[alias_sample(
                                alias_nodes[current_node][0],
                                alias_nodes[current_node][1])]
                        else:
                            prev = walk[-2]
                            edge = (prev, current_node)
                            current_node = neighbors[alias_sample(
                                alias_edges[edge][0],
                                alias_edges[edge][1])]
                    else:
                        break
                    walk.append(current_node)

                yield walk

    def get_alias_edge(self, graph, t, v):
        """Alias table for the step current=v given previous=t (node2vec
        transition weights: 1/p return, 1 triangle, 1/q otherwise)."""
        p = self.p
        q = self.q
        unnormalized_probs = []
        for x in graph.neighbors(v):
            weight = graph[v][x].get('weight', 1.0)  # w_vx
            if x == t:  # d_tx == 0
                unnormalized_probs.append(weight / p)
            elif graph.has_edge(x, t):  # d_tx == 1
                unnormalized_probs.append(weight)
            else:  # d_tx > 1
                unnormalized_probs.append(weight / q)
        norm_const = sum(unnormalized_probs)
        normalized_probs = [float(u_prob) / norm_const for u_prob in unnormalized_probs]

        return create_alias_table(normalized_probs)

    def preprocess_transition_probs(self, graph):
        """Precompute alias tables for every node (first step) and every
        directed edge (subsequent steps); stored on self."""
        alias_nodes = {}
        for node in graph.nodes():
            unnormalized_probs = [graph[node][nbr].get('weight', 1.0)
                                  for nbr in graph.neighbors(node)]
            norm_const = sum(unnormalized_probs)
            normalized_probs = [float(u_prob) / norm_const
                                for u_prob in unnormalized_probs]
            alias_nodes[node] = create_alias_table(normalized_probs)

        alias_edges = {}
        for edge in graph.edges():
            alias_edges[edge] = self.get_alias_edge(graph, edge[0], edge[1])

        self.alias_nodes = alias_nodes
        self.alias_edges = alias_edges


def create_alias_table(area_ratio):
    """Build (accept, alias) arrays for Vose's alias method from a discrete
    probability vector `area_ratio` (must sum to 1)."""
    l = len(area_ratio)
    accept, alias = [0] * l, [0] * l
    small, large = [], []
    # Scale so the average bucket height is exactly 1.
    area_ratio_ = np.array(area_ratio) * l
    for i, prob in enumerate(area_ratio_):
        if prob < 1.0:
            small.append(i)
        else:
            large.append(i)

    # Pair each under-full bucket with an over-full one.
    while small and large:
        small_idx, large_idx = small.pop(), large.pop()
        accept[small_idx] = area_ratio_[small_idx]
        alias[small_idx] = large_idx
        area_ratio_[large_idx] = area_ratio_[large_idx] - (1 - area_ratio_[small_idx])
        if area_ratio_[large_idx] < 1.0:
            small.append(large_idx)
        else:
            large.append(large_idx)

    while large:
        large_idx = large.pop()
        accept[large_idx] = 1
    while small:
        small_idx = small.pop()
        accept[small_idx] = 1

    return accept, alias


def alias_sample(accept, alias):
    """Draw one index from the distribution encoded by (accept, alias)."""
    N = len(accept)
    i = int(random.random() * N)
    r = random.random()
    if r < accept[i]:
        return i
    else:
        return alias[i]
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf


def main():
    """Build a minimal softmax-regression graph for MNIST (TF1 API).

    NOTE(review): the graph is only constructed — cross_entropy is defined
    but no optimizer/training step or evaluation ever runs; the function
    returns immediately after building it.
    """
    mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
    sess = tf.InteractiveSession()
    # Placeholders: flattened 28x28 images and one-hot digit labels.
    x = tf.placeholder(tf.float32, shape=[None, 784])
    y_ = tf.placeholder(tf.float32, shape=[None, 10])
    # Model parameters (weights and bias), zero-initialized.
    W = tf.Variable(tf.zeros([784, 10]))
    b = tf.Variable(tf.zeros([10]))
    sess.run(tf.global_variables_initializer())
    # Linear logits; softmax is applied inside the loss op below.
    y = tf.matmul(x, W) + b
    cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))
    return


if __name__ == '__main__':
    main()
"""Streamlit front-end: upload an image and run SSD inference on it."""
import streamlit as st
import sys, os
from pathlib import Path
from PIL import Image
import warnings
warnings.filterwarnings("ignore")
from omegaconf import DictConfig, OmegaConf

from app_utils import parse_yaml, inference

ROOT = Path(os.getcwd()).parent  # ~/src

### app contents ###
st.title('Welcome to my app!')
# FIX: corrected user-facing typo/grammar ("uplaod image, ssd inferenced
# result are shown").
st.markdown("### when you upload an image, the SSD inference result is shown")

### sidebar settings
# Experiment directories, minus non-experiment entries.
exps = os.listdir("../experiments/")
exps = [exp for exp in exps if exp not in ["make_ex.sh", "_template"]]

st.sidebar.markdown("# settings")

# 1. image upload ######
uploaded_image = st.sidebar.file_uploader("1. upload your image")
if uploaded_image is not None:
    image = Image.open(uploaded_image)
    st.image(image, use_column_width=True)

# 2. experiment selection ######
selected_exp = st.sidebar.selectbox("2. select experiment you try", exps)
if selected_exp is not None:
    infer_params = parse_yaml(ROOT / "experiments" / selected_exp / "config.yml")
    st.sidebar.write(infer_params["exp_param"]["description"])
    # Warn when the experiment has not produced a trained weight yet.
    if not os.path.exists(ROOT / "experiments" / selected_exp / "weight"):
        st.sidebar.markdown("`whoops!` :disappointed_relieved:")
        st.sidebar.markdown("`model weight doesn't exist.`")
        st.sidebar.markdown("`you may not have experiment yet.`")

# 3. input resolution ######
image_size = st.sidebar.radio("3. decide image size (300 is better result)", [300, 512])

# 4. detection threshold ######
data_confidence_level = st.sidebar.slider('4. set confidence level (default is 0.9)',
                                          min_value=0.0, max_value=1.0, step=0.01, value=0.9)

st.sidebar.write("Are you ready?")
do_inference = st.sidebar.button('do inference')

# Infer: run the model and display the rendered result written to img.png.
if do_inference and uploaded_image is not None:
    inferenced_image = inference(
        cfg=OmegaConf.create(infer_params),
        ROOT=ROOT,
        image=image,
        selected_exp=selected_exp,
        data_confidence_level=data_confidence_level,
        image_size=image_size,
    )
    inferenced_image = Image.open("img.png")
    st.image(inferenced_image, use_column_width=True)
from .utils import get_value, missing_


class LogicParseException(Exception):
    pass


class TLogic:
    """Base class for template logic nodes."""

    def parse(self, render_, **kwargs) -> list:
        """Resolve this node against *kwargs* using the renderer callable."""
        raise NotImplementedError()

    @property
    def __dict__(self):
        return '<{0}>'.format(self.__class__.__name__)


class THolder(TLogic):
    """Placeholder node: resolves a dotted key from the render context.

    usage:
        template = {'user': {'id': THolder('user.id')}}
        render(template, user=dict(id=1))  # {'user': {'id': 1}}
    """

    def __init__(self, key, default=missing_):
        assert isinstance(key, str) and len(key), 'key must be string'
        self.key = key
        self.default = default

    def parse(self, render_, **kwargs):
        return get_value(self.key, kwargs, self.default)

    @property
    def __dict__(self):
        return '<{0}>'.format(self.key)


class TFor(TLogic):
    """Loop node: renders a sub-template once per element of an iterable.

    usage:
        template = {'events': TFor({'id': THolder('event.id')},
                                   'events', prefix='event')}
        render(template, events=[dict(id=1), dict(id=2)])
        # {'events': [{'id': 1}, {'id': 2}]}
    """

    def __init__(self, template, key: str, prefix='for', getter=None):
        """Create a loop template.

        :param template: output template applied to every element
        :param key: context key holding the iterable
        :param prefix: name under which each element is exposed to *template*
        :param getter: optional callable (or attribute extractor) applied to
                       each element first; None means use the element itself
        """
        self.template = template
        self.key = key
        self.prefix = prefix
        self.getter = getter

    def parse(self, render_, **kwargs) -> list:
        items = get_value(self.key, kwargs)
        assert isinstance(items, (list, tuple)), 'TFor required a iterable data'
        rendered = []
        for item in items:
            if self.getter:
                item = self.getter(item)
            rendered.append(render_(self.template, **{self.prefix: item}, **kwargs))
        return rendered

    @property
    def __dict__(self):
        return [self.template]


class TEnum(TLogic):
    """Enumeration node: the looked-up value must be a member of *enum*."""

    def __init__(self, key, enum: tuple, default=missing_):
        assert isinstance(key, str) and len(key), 'key must be string'
        assert isinstance(enum, (tuple, list)), 'enum must be iterable'
        self.enum = enum
        self.key = key
        self.default = default

    def parse(self, render_, **kwargs):
        value = get_value(self.key, source=kwargs)
        if value not in self.enum:
            raise LogicParseException(
                'value "{0}" is not in "{1}"'.format(value, self.enum))
        return value

    @property
    def __dict__(self):
        return '|'.join(map(str, self.enum))


class TDict(TLogic):
    """Mapping node: renders a sub-template for every value of a dict."""

    def __init__(self, template, show_key, key, prefix='dict', default=missing_):
        self.show_key = show_key
        self.template = template
        self.key = key
        self.prefix = prefix

    def parse(self, render_, **kwargs):
        mapping = get_value(self.key, source=kwargs)
        assert isinstance(mapping, dict), 'TDict required a dict type value'
        rendered = {}
        for name, item in mapping.items():
            rendered[name] = render_(self.template, **{self.prefix: item}, **kwargs)
        return rendered

    @property
    def __dict__(self):
        return {self.show_key: self.template}
# -*- coding: utf-8 -*-
"""Global pytest fixtures."""
import pytest
from fastapi.testclient import TestClient

from plsqldecoder import create_app


@pytest.fixture
def app():
    """Application instance configured for testing."""
    return create_app("testing")


@pytest.fixture
def client(app):
    """HTTP test client bound to the *app* fixture."""
    return TestClient(app)
import numpy as np


class PriorBoxManager(object):
    """Matches SSD prior (anchor) boxes against ground-truth boxes.

    prior_boxes is expected to be a (num_priors, 4+) array whose first four
    columns are [x_min, y_min, x_max, y_max] (established by the slicing in
    _calculate_intersection_over_unions).
    """

    def __init__(self, prior_boxes, num_classes,
                 background_id=0, overlap_threshold=.5):
        self.prior_boxes = prior_boxes
        self.num_priors = self.prior_boxes.shape[0]
        self.num_classes = num_classes
        # Minimum IoU for a prior to be assigned to an object.
        self.overlap_threshold = overlap_threshold
        # Index of the background class inside the one-hot class block.
        self.background_id = background_id

    def _calculate_intersection_over_unions(self, ground_truth_data):
        """Return the IoU of one ground-truth box against every prior box.

        ground_truth_data: 1-D array-like [x_min, y_min, x_max, y_max].
        Returns a (num_priors,) array of IoU values.
        """
        ground_truth_x_min = ground_truth_data[0]
        ground_truth_y_min = ground_truth_data[1]
        ground_truth_x_max = ground_truth_data[2]
        ground_truth_y_max = ground_truth_data[3]
        prior_boxes_x_min = self.prior_boxes[:, 0]
        prior_boxes_y_min = self.prior_boxes[:, 1]
        prior_boxes_x_max = self.prior_boxes[:, 2]
        prior_boxes_y_max = self.prior_boxes[:, 3]
        # calculating the intersection
        intersections_x_min = np.maximum(prior_boxes_x_min, ground_truth_x_min)
        intersections_y_min = np.maximum(prior_boxes_y_min, ground_truth_y_min)
        intersections_x_max = np.minimum(prior_boxes_x_max, ground_truth_x_max)
        intersections_y_max = np.minimum(prior_boxes_y_max, ground_truth_y_max)
        intersected_widths = intersections_x_max - intersections_x_min
        intersected_heights = intersections_y_max - intersections_y_min
        # Clamp to zero so disjoint boxes contribute no area.
        intersected_widths = np.maximum(intersected_widths, 0)
        intersected_heights = np.maximum(intersected_heights, 0)
        intersections = intersected_widths * intersected_heights
        # calculating the union
        prior_box_widths = prior_boxes_x_max - prior_boxes_x_min
        prior_box_heights = prior_boxes_y_max - prior_boxes_y_min
        prior_box_areas = prior_box_widths * prior_box_heights
        ground_truth_width = ground_truth_x_max - ground_truth_x_min
        ground_truth_height = ground_truth_y_max - ground_truth_y_min
        ground_truth_area = ground_truth_width * ground_truth_height
        unions = prior_box_areas + ground_truth_area - intersections
        intersection_over_unions = intersections / unions
        return intersection_over_unions

    def _assign_boxes_to_object(self, ground_truth_box, return_iou=True):
        """Assign priors to a single ground-truth box.

        Returns a flattened (num_priors * (4 + return_iou)) array: prior
        coordinates where the IoU exceeds the threshold (zeros elsewhere),
        with the IoU appended as a fifth column when return_iou is True.
        Flattening is required because this is used via np.apply_along_axis.
        """
        ious = self._calculate_intersection_over_unions(ground_truth_box)
        # `4 + return_iou` relies on bool->int coercion (True == 1).
        assigned_boxes = np.zeros((self.num_priors, 4 + return_iou))
        assign_mask = ious > self.overlap_threshold
        # Guarantee at least one prior per object: fall back to the best IoU.
        if not assign_mask.any():
            assign_mask[ious.argmax()] = True
        if return_iou:
            assigned_boxes[:, -1][assign_mask] = ious[assign_mask]
        assigned_to_object_boxes = self.prior_boxes[assign_mask]
        assigned_boxes[assign_mask, 0:4] = assigned_to_object_boxes
        return assigned_boxes.ravel()

    def assign_boxes(self, ground_truth_data):
        """Build the (num_priors, 4 + num_classes) training target matrix.

        ground_truth_data: (num_objects, 4+) array; columns 0:4 are box
        coordinates.  Rows start as background (one-hot at column
        4 + background_id); priors that best match an object get that
        object's coordinates and class columns instead.
        """
        assignments = np.zeros((self.num_priors, 4 + self.num_classes))
        assignments[:, 4 + self.background_id] = 1.0
        num_objects_in_image = len(ground_truth_data)
        if num_objects_in_image == 0:
            return assignments
        # One row of per-prior assignments per ground-truth object.
        assigned_boxes = np.apply_along_axis(self._assign_boxes_to_object,
                                             1, ground_truth_data[:, :4])
        assigned_boxes = assigned_boxes.reshape(-1, self.num_priors, 5)
        # For every prior, keep only the object with the highest IoU.
        best_iou = assigned_boxes[:, :, -1].max(axis=0)
        best_iou_indices = assigned_boxes[:, :, -1].argmax(axis=0)
        best_iou_mask = best_iou > 0
        best_iou_indices = best_iou_indices[best_iou_mask]
        num_assigned_boxes = len(best_iou_indices)
        assigned_boxes = assigned_boxes[:, best_iou_mask, :]
        assignments[best_iou_mask, :4] = assigned_boxes[best_iou_indices,
                                                        np.arange(num_assigned_boxes),
                                                        :4]
        # Assigned priors are no longer background.
        assignments[:, 4][best_iou_mask] = 0
        # check the following two lines
        # NOTE(review): best_iou is float, so logical_not(best_iou) marks
        # exactly-zero IoU priors as background — confirm this is intended.
        background_mask = np.logical_not(best_iou)
        assignments[:, :4][background_mask] = self.prior_boxes[background_mask, :4]
        # NOTE(review): `5:` assumes ground_truth_data columns 5+ hold the
        # class one-hot while the assignment block starts at column 5 too;
        # verify against the parser's row layout (possible off-by-one vs 4:).
        assignments[:, 5:][best_iou_mask] = ground_truth_data[best_iou_indices, 5:]
        return assignments


if __name__ == "__main__":
    # Smoke test: assign priors to one random annotated image.
    import random
    from prior_box_creator import PriorBoxCreator
    from XML_parser import XMLParser
    data_path = '../datasets/german_open_dataset/annotations/'
    data_manager = XMLParser(data_path)
    class_names = data_manager.class_names
    num_classes = len(class_names)
    print('Found classes: \n', class_names)
    ground_truth_data = data_manager.get_data()
    sampled_key = random.choice(list(ground_truth_data.keys()))
    sampled_data = ground_truth_data[sampled_key]
    prior_box_creator = PriorBoxCreator(image_shape=(300, 300))
    prior_boxes = prior_box_creator.create_boxes()
    prior_box_manager = PriorBoxManager(prior_boxes, num_classes)
    assigned_boxes = prior_box_manager.assign_boxes(sampled_data)
    print('assigned_boxes: \n', assigned_boxes)
    # Rows whose background column is not 1 were matched to an object.
    object_mask = assigned_boxes[:, 4] != 1
    object_data = assigned_boxes[object_mask]
    print('object_data: \n', object_data)
    print('Number of box assigned to different objects:', len(object_data))
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column,Integer,String,DATE,Enum,ForeignKey
from sqlalchemy.orm import sessionmaker,relationship

# NOTE(review): credentials and host are hard-coded here — should come from
# configuration / environment in anything beyond a demo.
engine = create_engine("mysql+pymysql://root:123456@192.168.235.131/oldmandb",
                       encoding='utf-8',echo = False)

Base = declarative_base()


class Customer(Base):
    """Customer with separate billing and shipping addresses."""
    __tablename__ = 'customer'
    id = Column(Integer,primary_key=True)
    name = Column(String(64))

    # Two foreign keys into the same table, so each relationship below must
    # disambiguate via foreign_keys=.
    billing_address_id = Column(Integer,ForeignKey("address.id"))
    shipping_address_id = Column(Integer,ForeignKey("address.id"))

    billing_address = relationship('Address',foreign_keys=[billing_address_id])
    shipping_address = relationship('Address',foreign_keys=[shipping_address_id])

    def __repr__(self):
        return self.name
    pass


class Address(Base):
    """Street address referenced twice by Customer (billing/shipping)."""
    __tablename__ = 'address'
    id = Column(Integer,primary_key=True)
    street = Column(String(64))
    city = Column(String(64))
    state = Column(String(64))

    def __repr__(self):
        return self.street
    pass


# Creates all tables at import time (connects to the database).
Base.metadata.create_all(engine)
from rtypes.types.pcc_set import pcc_set from rtypes.attributes import dimension, primarykey, merge
'''
Dictionaries have curly braces { & }
Lists have square brackets [ & ]
Tuples have parentheses ( & )
'''
myList = [1, 1.1, "String"]
print(myList)
print(type(myList))

'''
A list can even contain other lists inside, and tuples also.
'''
modifiedList = [myList, "Basic", (0, 2, 4)]
print(modifiedList)
# Indexing: element 0 is the nested list, so [0][2] digs into it.
print(modifiedList[0])
print(modifiedList[0][2])
print(modifiedList[1])
print(modifiedList[2][2])

# append adds at the end; insert places at a given index.
modifiedList.append("Added New")
print(modifiedList)

modifiedList.insert(1, [1, 0, -1])
print(modifiedList)
numbers = [1948, 957, 423, 65, 350]

# Keep only the even values.
even_numbers = [n for n in numbers if not n % 2]
print("Even numbers {}".format(even_numbers))

# Scale every value by ten.
multiplied_by_10 = [n * 10 for n in numbers]
print("Multiplied by 10 {}".format(multiplied_by_10))

# Map each value to its square.
squares = {n: n ** 2 for n in numbers}
print("Squares {}".format(squares))
from Task904 import docprob


def classify(bayes, item):
    """Return the three most probable categories for *item*.

    Scores every category in bayes.class_count with docprob() and returns
    the category names in descending probability order, truncated to three.
    """
    # The original's unused max_prob/max_cat locals were removed and the
    # accumulation loop collapsed into a dict comprehension.
    cat_prob = {category: docprob(bayes, item, category)
                for category in bayes.class_count}
    return sorted(cat_prob, key=cat_prob.get, reverse=True)[:3]
from django.core.exceptions import ValidationError
from django.db import models

from pycont.utils import PycontModel
from pycont.apps.accounts.models import Account

# Currency code -> human-readable label, used for the `choices` option below.
CURRENCIES = {
    'USD': 'United States Dollars',
    'EUR': 'Euros',
}


class Transaction(PycontModel):
    """A money transfer between accounts; either end may be absent/deleted."""

    amount = models.DecimalField(
        decimal_places=2,
        blank=False,
        max_digits=30  # Should work for Venezuela's national debt in Bolivar
    )
    currency = models.CharField(
        choices=CURRENCIES.items(),
        default='EUR',
        blank=False,
        max_length=3,  # ISO-4217 style 3-letter code
    )
    # Nullable + SET_NULL so transactions survive account deletion.
    emitter = models.ForeignKey(
        Account,
        blank=True,
        null=True,
        on_delete=models.SET_NULL,
        related_name='emitted_transactions'
    )
    receiver = models.ForeignKey(
        Account,
        blank=True,
        null=True,
        on_delete=models.SET_NULL,
        related_name='received_transactions'
    )

    def clean(self):
        # A transaction with neither side set would be meaningless; reject it
        # with a field-level error on both fields.
        if self.emitter is None and self.receiver is None:
            error = 'At least one of emitter/receiver has to be provided'
            raise ValidationError({
                'emitter': error,
                'receiver': error
            })

    def save(self, *args, **kwargs):
        # Force full validation (including clean()) on every save, not only
        # when going through a ModelForm.
        self.full_clean()
        return super().save(*args, **kwargs)
from battle.round.Round import Round
from battle.battlemenu.BattleMenu import BattleMenu
from operator import attrgetter
from ui.UI import UI
from ui.PartyRender import PartyRender
import random


# represents a battle
class Battle:
    def __init__(self, party, monsters):
        self.party = party
        self.monsters = monsters

    def print_battlefield(self):
        """Render enemy party, the field, then the player party."""
        PartyRender(self.monsters).display_party()
        self.print_field()
        PartyRender(self.party).display_party()

    # TODO: Print ascii art dependent on where the battle happens
    def print_field(self):
        UI().show_text("\n\n\n")

    # the main loop for fights
    def fight(self):
        """Run rounds until one side has no actionable members left."""
        win = False
        lose = False
        # Fixed: the original used bitwise `|` on booleans; logical `or`
        # states the intent (it happened to work only because `|` binds
        # tighter than `not`).
        while not (win or lose):
            UI().show_text("New Round!")
            self.print_battlefield()
            targets = {'ally': self.party.get_actionable_members(),
                       'enemy': self.monsters.get_actionable_members()}
            actions = BattleMenu(targets).begin_menu_selection()
            actions.extend(self.get_random_monster_actions())
            actions = self.sort_actions_by_priority(actions)
            new_round = Round(actions)
            new_round.process_round_actions()
            if not self.party.get_actionable_members():
                lose = True
            elif not self.monsters.get_actionable_members():
                win = True
        UI().show_text("\n\n")
        if win:
            UI().show_text("You've won the battle!!")
        elif lose:
            UI().show_text("You've lost the battle...")

    def sort_actions_by_priority(self, actions):
        """Return actions ordered highest priority first."""
        return sorted(actions, key=attrgetter('priority'), reverse=True)

    def get_random_monster_actions(self):
        """Each actionable monster attacks one random party member."""
        # TODO: write monster AI.
        actions = []
        for monster in self.monsters.get_actionable_members():
            random_target = random.choice(self.party.party)
            actions.append(monster.create_round_action([random_target]))
        return actions
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('trade', '0002_auto_20170502_1720'), ] operations = [ migrations.AlterField( model_name='industry', name='reportfile', field=models.FileField(upload_to=b'./upload'), ), ]
import os
from setuptools import find_packages
from setuptools import setup

version = '0.0.1'

here = os.path.abspath(os.path.dirname(__file__))

setup(
    name="Braindecode",
    version=version,
    description="A library to decode brain signals, for now from EEG.",
    classifiers=[
        # Fixed: "1 - Alpha" is not a registered trove classifier; the
        # alpha stage is "3 - Alpha" (1 is "Planning").
        "Development Status :: 3 - Alpha",
        "Intended Audience :: Developers",
        "Intended Audience :: Science/Research",
        "Programming Language :: Python :: 2.7",
        # Fixed: the former ":: Brain State Decoding" suffix is not a
        # registered classifier and would make PyPI reject the upload.
        "Topic :: Scientific/Engineering :: Artificial Intelligence",
    ],
    keywords="",
    author="Robin Tibor Schirrmeister",
    author_email="robintibor@googlegroups.com",
    url="https://github.com/robintibor/braindecode",
    packages=find_packages(),
    include_package_data=False,
    zip_safe=False,
)
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.test import TestCase
from django.test.client import RequestFactory
from django.views.generic import View

from guardian.view_mixins import PermissionRequiredMixin


class DatabaseRemovedError(Exception):
    # Sentinel exception: seeing it means the protected view body ran.
    pass


class RemoveDatabaseView(View):
    # A deliberately destructive-looking view; its body must never execute
    # for a caller without the required permission.
    def get(self, request, *args, **kwargs):
        raise DatabaseRemovedError("You've just allowed db to be removed!")


class TestView(PermissionRequiredMixin, RemoveDatabaseView):
    # Mixin first so its dispatch() wraps the destructive view.
    permission_required = 'contenttypes.change_contenttype'
    object = None  # should be set at each tests explicitly


class TestViewMixins(TestCase):

    def setUp(self):
        # A throwaway ContentType instance to act as the permission object.
        self.ctype = ContentType.objects.create(name='foo', model='bar',
            app_label='fake-for-guardian-tests')
        self.factory = RequestFactory()
        self.user = User.objects.create_user('joe', 'joe@doe.com', 'doe')
        self.client.login(username='joe', password='doe')

    def test_permission_is_checked_before_view_is_computed(self):
        """
        This test would fail if permission is checked **after** view is
        actually resolved.
        """
        request = self.factory.get('/')
        request.user = self.user
        view = TestView.as_view(object=self.ctype)
        response = view(request)
        # A 403 (rather than DatabaseRemovedError propagating) proves the
        # mixin short-circuited before the view body ran.
        self.assertEqual(response.status_code, 403)
# Importing Unit Testing Module
import unittest

# Importing functions which need to be tested
from function_test import check_input_blackjack, deck, listed, draw


class TestMethods(unittest.TestCase):
    """Unit tests for the blackjack helper functions."""

    def test_input_players(self):
        # A negative number of players must be rejected.
        with self.assertRaises(ValueError):
            check_input_blackjack(-1, 1)

    def test_input_decks(self):
        # Zero decks must be rejected.
        with self.assertRaises(ValueError):
            check_input_blackjack(1, 0)

    def test_deck(self):
        # A fresh deck starts with card index 0.
        self.assertEqual(deck()[0], 0)

    def test_listed(self):
        # The first listed card is the ace ('1') of Hearts.
        self.assertEqual(listed()[0, 0], '1')
        self.assertEqual(listed()[0, 1], 'Hearts')

    def test_draw(self):
        # checking that the length of the deck is reduced by one
        self.assertEqual(len(draw()[1]), 51)
        # checking that the first element of the deck is removed
        self.assertNotEqual(draw()[1][0], 0)
        # checking the new first element of the deck
        self.assertEqual(draw()[1][0], 1)


if __name__ == '__main__':
    unittest.main()
#!/usr/bin/env python import sys import numpy as np import pandas as pd import codecs from scipy.stats import spearmanr from nicemodel import load_labels def row_norm(a): row_sums = a.sum(axis=1) return a / row_sums[:, np.newaxis] def col_norm(a): col_sums = a.sum(axis=0) return a / col_sums model_file = sys.argv[1] comp_file = '/home/01813/roller/tmp/imsgrounded/data/comp/comp-values_all_sorted.tsv' target_labels_file = '/scratch/01813/roller/corpora/webko/TermDoc/target-labels.txt' vocab_labels = load_labels(target_labels_file) vocab_labels = {w : i for i, w in vocab_labels.iteritems()} from onlineldavb import dirichlet_expectation_2 phi = np.ascontiguousarray(np.load(model_file)['phi']) #phi = np.exp(dirichlet_expectation_2(phi)) topic_normed = col_norm(phi) word_normed = row_norm(phi) comp_tab = pd.read_table(comp_file, encoding='utf-8') comp_tab = comp_tab[comp_tab['const'] != comp_tab['compound']] compound = [] const = [] ratings = [] w2givenw1 = [] w1givenw2 = [] for i, row in comp_tab.iterrows(): try: cmpd_id = vocab_labels[row['compound'] + '/NN'] const_id = vocab_labels[row['const'] + '/NN'] except KeyError: pass compound.append(row['compound']) const.append(row['const']) top_given_w1 = word_normed[:,cmpd_id] w2_given_top = topic_normed[:,const_id] top_given_w2 = word_normed[:,const_id] w1_given_top = topic_normed[:,cmpd_id] ratings.append(row['mean']) w2givenw1.append(np.dot(top_given_w1, w2_given_top)) w1givenw2.append(np.dot(top_given_w2, w1_given_top)) disp_tab = pd.DataFrame(dict(compound=compound, const=const, ratings=ratings, w2givenw1=w2givenw1, w1givenw2=w1givenw2)) #print disp_tab.sort('ratings').to_string() #disp_tab.to_csv(sys.argv[2], index=False, encoding='utf-8') print print "shape =", disp_tab.shape print "cpmd|const =", spearmanr(ratings, w1givenw2) print "const|cmpd =", spearmanr(ratings, w2givenw1)
from django.db import models
from django.contrib.auth.models import User


class MyUser(models.Model):
    """Profile data attached one-to-one to Django's built-in User."""

    user = models.OneToOneField(User, on_delete=models.CASCADE)
    # NOTE(review): blank=True without null=True on an IntegerField lets
    # forms leave the field empty while the DB insert will still fail —
    # confirm whether null=True was intended.
    national_id = models.IntegerField(unique=True, blank=True)
    # address
    country = models.CharField(max_length=20)
    city = models.CharField(max_length=20)
    address = models.CharField(max_length=200)
    # NOTE(review): integer postal codes / phone numbers drop leading
    # zeros and cannot hold "+" prefixes; CharField is the usual choice.
    postal_code = models.IntegerField()
    phone_number = models.IntegerField()

    def __str__(self):
        # Display name comes from the linked auth User, not this model.
        return self.user.first_name + " " + self.user.last_name
class Queue():
    """Minimal FIFO queue used by the breadth-first search below."""

    def __init__(self):
        self.queue = []

    def enqueue(self, value):
        self.queue.append(value)

    def dequeue(self):
        # Returns None instead of raising when empty.
        if self.size() > 0:
            return self.queue.pop(0)
        else:
            return None

    def size(self):
        return len(self.queue)


class Graph:
    """Represent a graph as a dictionary of vertices mapping labels to edges."""

    def __init__(self):
        self.vertices = {}

    def add_vertex(self, vertex_id):
        # Guard so re-adding a vertex never wipes its existing edge set.
        if vertex_id not in self.vertices:
            self.vertices[vertex_id] = set()

    def add_edge(self, v1, v2):
        if v1 in self.vertices and v2 in self.vertices:
            self.vertices[v1].add(v2)
        else:
            raise IndexError("That vertex does not exist!")


def earliest_ancestor(ancestors, starting_node):
    """Return the most distant ancestor of *starting_node*, or -1 if none.

    *ancestors* is a list of (parent, child) pairs.  Ties in distance are
    broken by the lowest numeric vertex id.

    (The large block of commented-out alternative implementations that used
    to follow this function was removed as dead code.)
    """
    # Build the graph with child -> parent edges so BFS walks "up" the tree.
    graph = Graph()
    for parent, child in ancestors:
        graph.add_vertex(parent)
        graph.add_vertex(child)
        graph.add_edge(child, parent)

    # BFS keeping whole paths, so len(path) is the generation distance.
    q = Queue()
    q.enqueue([starting_node])
    max_path_len = 1
    # Renamed from the original's `earliest_ancestor`, which shadowed this
    # function's own name.
    earliest = -1
    while q.size() > 0:
        path = q.dequeue()
        v = path[-1]
        # A longer path always wins; an equal-length path wins only when it
        # ends in a smaller vertex id.
        if (len(path) >= max_path_len and v < earliest) or (len(path) > max_path_len):
            earliest = v
            max_path_len = len(path)
        for neighbor in graph.vertices[v]:
            q.enqueue(path + [neighbor])
    return earliest
#!/usr/bin/env python3.7
"""Coordination HTTP server for swarmrunner.

Workers register under a name, long-poll /listen/:name for payloads, and
other parties push payloads via /send/:name.  Additional endpoints expose
the client list, shared counters, and AWS EC2 instance creation/teardown.
All shared state lives in the _g_* module globals, guarded by one lock.
"""
from __future__ import annotations

from http.server import ThreadingHTTPServer, SimpleHTTPRequestHandler
from dataclasses import dataclass, field
from typing import List, Dict, Tuple, Union
from pathlib import Path
from datetime import datetime, timedelta
from threading import Event, Timer, Lock
from contextlib import contextmanager
from collections import defaultdict
import subprocess
import pkgutil
import json

from .util import continue_task_from_header

from eliot import (
    to_file,
    start_action,
    log_message,
    preserve_context,
    add_destinations,
)
try:
    from eliot.journald import JournaldDestination
    has_journald = True
except ImportError:
    # journald output is optional; main() reports if it was requested anyway.
    has_journald = False

from jinja2 import Template

# Shared server state, initialised in main().
# NOTE(review): the Dict[Client.name, Client] annotation is unusual but
# harmless under `from __future__ import annotations` (never evaluated).
_g_clients: Dict[Client.name, Client] = None
_g_lock: Lock = None
_g_counts: Dict[str, int] = None


@dataclass
class Client:
    """A registered worker: its expiry timer, wake-up event and payload."""
    name: str
    env: Dict[str, str]   # stored as the raw JSON string posted at register
    timer: Timer          # expiry timer; re-armed on every /listen poll
    event: Event          # set when a payload arrives via /send
    body: bytes           # last payload delivered to this client


@contextmanager
def lock():
    """Acquire the single global lock (with an eliot log line)."""
    log_message('Acquire lock')
    with _g_lock:
        yield


def remove_client(client):
    """Timer callback: drop a client that stopped polling within its interval."""
    with start_action(action_type='Remove client', name=client.name) as action:
        name = client.name
        with lock():
            del _g_clients[name]


def start_timer(client, interval=120.0):
    """Arm (or re-arm) the expiry timer for *client*."""
    with start_action(action_type='Start timer', name=client.name, interval=interval):
        timer = Timer(interval, preserve_context(remove_client), args=(client,))
        client.timer = timer
        timer.start()


def cancel_timer(client):
    """Cancel the client's pending expiry timer."""
    with start_action(action_type='Cancel timer', name=client.name) as action:
        timer = client.timer
        timer.cancel()


class RequestHandler(SimpleHTTPRequestHandler):
    """Routes GET/POST paths to the do_*_<endpoint> methods below."""

    protocol_version = 'HTTP/1.1'

    def do_GET(self):
        # Static assets are served from package data; everything else is
        # dispatched by path prefix.
        if self.path == '/':
            content = pkgutil.get_data('swarmrunner', 'static/index.html')
            self.send('text/html', content)
        elif self.path == '/static/main.js':
            content = pkgutil.get_data('swarmrunner', 'static/main.js')
            self.send('application/javascript', content)
        elif self.path == '/favicon.ico':
            content = pkgutil.get_data('swarmrunner', 'static/favicon.ico')
            self.send('image/x-icon', content)
        elif self.path.startswith('/listen/'):
            self.do_GET_listen()
        elif self.path == '/clients/':
            self.do_GET_clients()
        else:
            print('GET', self.path)
            raise NotImplementedError

    def do_GET_clients(self):
        "GET /clients/"
        _, _clients, _2 = self.path.split('/')
        assert _ == ''
        assert _clients == 'clients'
        assert _2 == ''
        clients = _g_clients
        with start_action(action_type='GET /clients/') as context:
            with lock():
                # env was stored as a JSON string; decode it for the response.
                data = {
                    'clients': [
                        {
                            'name': c.name,
                            'env': json.loads(c.env),
                        }
                        for c in clients.values()
                    ],
                }
            content = json.dumps(data).encode('utf-8')
            self.send('application/json', content)

    @continue_task_from_header(action_type='Server')
    def do_GET_listen(self):
        "GET /listen/:name"
        _, _listen, name = self.path.split('/')
        assert _ == ''
        assert _listen == 'listen'
        with start_action(action_type='GET /listen/:name', name=name) as context:
            with lock():
                client = _g_clients[name]
                # Each poll resets the expiry countdown.
                cancel_timer(client)
                start_timer(client)
            timeout = 60  # seconds
            context.log('Waiting for event', timeout=timeout)
            # Long-poll: block until /send sets the event or we time out.
            event = client.event
            was_set = event.wait(timeout=timeout)
            if not was_set:
                context.log('Event not set during timeout')
                self.send('text/plain', b'timeout\r\n', response=408)
                return
            event.clear()
            body = client.body
            self.send('text/plain', body)

    def do_POST(self):
        # Read exactly Content-Length bytes, stash for the handlers.
        length = self.headers['content-length']
        nbytes = int(length)
        data = self.rfile.read(nbytes)
        # throw away extra data? see Lib/http/server.py:1203-1205
        self.data = data
        if self.path.startswith('/register/'):
            self.do_POST_register()
        elif self.path.startswith('/send/'):
            self.do_POST_send()
        elif self.path.startswith('/count/'):
            self.do_POST_count()
        elif self.path.startswith('/create/'):
            self.do_POST_create()
        elif self.path == '/killall/':
            self.do_POST_killall()
        else:
            print('POST', self.path)
            raise NotImplementedError

    @continue_task_from_header(action_type='Server')
    def do_POST_register(self):
        "POST /register/:name"
        _, _register, name = self.path.split('/')
        assert _ == ''
        assert _register == 'register'
        assert name != ''
        with start_action(action_type='POST /register/:name', name=name) as context:
            env = self.data.decode('utf-8')
            timer = None
            event = Event()
            body = None
            with lock():
                # Names are unique; a duplicate registration is a conflict.
                if name in _g_clients:
                    context.log('client already exists with name', name=name)
                    self.send('text/plain', b'client already exists with name', response=409)
                    return
                client = Client(name, env, timer, event, body)
                _g_clients[name] = client
                start_timer(client)
            self.send('text/plain', b'ok\r\n')

    @continue_task_from_header(action_type='Server')
    def do_POST_send(self):
        "POST /send/:name"
        _, _send, name = self.path.split('/')
        assert _ == ''
        assert _send == 'send'
        assert name != ''
        with start_action(action_type='POST /send/:name', name=name) as context:
            body = self.data
            with lock():
                # Hand the payload to the client and wake its /listen poll.
                client = _g_clients[name]
                event = client.event
                client.body = body
                event.set()
            self.send('text/plain', b'ok\r\n')

    @continue_task_from_header(action_type='Server')
    def do_POST_count(self):
        "POST /count/:id"
        _, _count, id = self.path.split('/')
        assert _ == ''
        assert _count == 'count'
        assert id != ''
        counts = _g_counts
        with start_action(action_type='POST /count/:id', id=id) as context:
            with lock():
                # Post-increment: respond with the pre-increment value.
                count = counts[id]
                counts[id] = count + 1
            self.send('text/plain', b'%d' % (count,))

    @continue_task_from_header(action_type='Server')
    def do_POST_create(self):
        # Docstring fixed: the original said "POST /count/:type".
        "POST /create/:type"
        _, _create, type = self.path.split('/')
        assert _ == ''
        assert _create == 'create'
        assert type != ''
        count = int(self.data)
        # Sanity cap on how many instances one request may launch.
        assert 0 < count < 16
        with start_action(action_type='POST /create/:type', type=type) as context:
            with lock():
                user_data = pkgutil.get_data('swarmrunner', 'scripts/aws-user-data.sh')
                args = [
                    'aws', 'ec2', 'run-instances',
                    '--image-id', 'ami-09aeadf521cc24feb',
                    '--count', f'{count}',
                    '--instance-type', f'{type}',
                    '--key-name', 'Accona',
                    '--subnet-id', 'subnet-250d584d',
                    '--security-group-ids', 'sg-0a0349d5d30aff8a7',
                    '--user-data', user_data,
                ]
                kwargs = {
                    'args': args,
                    'cwd': '/tmp',
                    'capture_output': True,
                    'check': True,
                }
                with start_action(action_type='subprocess.run', **kwargs) as action:
                    process = subprocess.run(**kwargs)
                    action.add_success_fields(stdout=process.stdout, stderr=process.stderr)
            self.send('text/plain', b'ok\r\n')

    @continue_task_from_header(action_type='Server')
    def do_POST_killall(self):
        "POST /killall/"
        _, _killall, _2 = self.path.split('/')
        assert _ == ''
        assert _killall == 'killall'
        assert _2 == ''
        with start_action(action_type='POST /killall/') as context:
            with lock():
                script = pkgutil.get_data('swarmrunner', 'scripts/aws-kill-all-instances.sh')
                args = [
                    'bash', '-c', script, 'aws-kill-all-instance.sh',
                ]
                kwargs = {
                    'args': args,
                    'cwd': '/tmp',
                    'capture_output': True,
                    'check': True,
                }
                with start_action(action_type='subprocess.run', **kwargs) as action:
                    process = subprocess.run(**kwargs)
                    action.add_success_fields(stdout=process.stdout, stderr=process.stderr)
            self.send('text/plain', b'ok\r\n')

    def send(self, content_type, content, *, response=200):
        """Write a complete response, honouring keep-alive and gzip."""
        use_keep_alive = self._should_use_keep_alive()
        use_gzip = self._should_use_gzip()
        if use_gzip:
            import gzip
            content = gzip.compress(content)
        self.send_response(response)
        self.send_header('Content-Type', content_type)
        # Content-Length must reflect the (possibly compressed) body.
        self.send_header('Content-Length', str(len(content)))
        if use_keep_alive:
            self.send_header('Connection', 'keep-alive')
        if use_gzip:
            self.send_header('Content-Encoding', 'gzip')
        self.end_headers()
        self.wfile.write(content)

    def _should_use_keep_alive(self):
        # Only honour an explicit "Connection: keep-alive" request header.
        connection = self.headers['connection']
        if connection is None:
            return False
        if connection != 'keep-alive':
            return False
        return True

    def _should_use_gzip(self):
        # Compress only when the client advertises gzip support.
        accept_encoding = self.headers['accept-encoding']
        if accept_encoding is None:
            return False
        if 'gzip' not in accept_encoding:
            return False
        return True


def main(bind, port, logfile, journald):
    """Initialise logging and global state, then serve forever."""
    to_file(open(logfile, 'ab'))
    # NOTE(review): looks like a leftover debug print — confirm and remove.
    print((datetime.utcnow() - timedelta(seconds=5)).isoformat())
    if journald:
        if has_journald:
            dest = JournaldDestination()
            dest._identifier = b'swarmrunner'
            add_destinations(dest)
        else:
            log_message("Requested journald support, but it's not available")
    clients = {}
    # Local name shadows the lock() contextmanager; only used to seed _g_lock.
    lock = Lock()
    counts = defaultdict(int)
    global _g_clients
    _g_clients = clients
    global _g_lock
    _g_lock = lock
    global _g_counts
    _g_counts = counts
    address = (bind, port)
    print(f'Listening on {address}')
    server = ThreadingHTTPServer(address, RequestHandler)
    server.serve_forever()


def cli(args=None):
    """Parse command-line options and run main()."""
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--bind', default='')
    parser.add_argument('--port', type=int, default=8800)
    parser.add_argument('--logfile', type=Path, default=Path.cwd() / 'log-server.txt')
    parser.add_argument('--journald', action='store_true')
    args = vars(parser.parse_args(args))
    main(**args)


if __name__ == '__main__':
    cli()
# -*- coding:UTF-8 -*- ''' ''' #-- URL_BASE = 'http://www.coay.com' URL_LOGIN = 'http://www.coay.com/login.php' URL_LOGOUT = "http://www.coay.com/logout.php"
# coding=utf-8
import sys


# Static lookup data for weather display.
class Data:
    """Lookup tables and helpers mapping raw weather values to display text."""

    def __init__(self):
        pass

    # Condition code -> display name (and icon asset where one exists).
    weather_dict = {'CLEAR_DAY': {'name': '晴天', 'icon': 'assets/sun-3.png'},
                    'CLEAR_NIGHT': {'name': '晴夜', 'icon': 'assets/moon-1.png'},
                    'PARTLY_CLOUDY_DAY': {'name': '多云', 'icon': 'assets/cloudy.png'},
                    'PARTLY_CLOUDY_NIGHT': {'name': '多云', 'icon': 'assets/cloudy-night.png'},
                    'CLOUDY': {'name': '阴', 'icon': 'assets/cloud.png'},
                    'RAIN': {'name': '雨', 'icon': 'assets/rain-1.png'},
                    'SNOW': {'name': '雪', 'icon': 'assets/snow.png'},
                    'WIND': {'name': '风', 'icon': 'assets/windy.png'},
                    'FOG': {'name': '雾', 'icon': 'assets/fogg.png'},
                    'HAZE': {'name': '霾'},
                    'SLEET': {'name': '冻雨'}}

    @staticmethod
    def get_wind_direction(wd):
        """Translate a wind bearing in degrees into a compass-point name."""
        # North wraps around 0/360, so it is handled first.
        if wd <= 22.5 or wd > 337.5:
            return '北风'
        # Remaining sectors are 45-degree slices, matched by upper bound.
        for upper, label in ((67.5, '东北风'), (112.5, '东风'), (157.5, '东南风'),
                             (202.5, '南风'), (247.5, '西南风'), (292.5, '西风'),
                             (337.5, '西北风')):
            if wd <= upper:
                return label

    @staticmethod
    def get_wind_speed(ws):
        """Translate a wind speed value into its scale name."""
        # Matched by ascending upper bound; anything beyond the table is the
        # strongest category.
        for upper, label in ((2, '无风'), (6, '软风'), (12, '轻风'), (19, '缓风'),
                             (30, '和风'), (40, '清风'), (51, '强风'), (62, '疾风'),
                             (75, '烈风'), (87, '增强烈风'), (103, '暴风'),
                             (149, '台风'), (183, '强台飓风'), (220, '超强台飓风')):
            if ws <= upper:
                return label
        return '极强台飓风'