text
stringlengths
8
6.05M
import sys
import os

# Make the project root importable and initialise Django so the ORM models
# can be used from this standalone seeding script.
dir_path = str(os.path.dirname(os.path.realpath(__file__)))
dir_path = dir_path[:-7]  # strip this script's sub-directory to reach the project root
print(dir_path)
sys.path.insert(0, dir_path)
os.environ['DJANGO_SETTINGS_MODULE'] = 'FAM.settings'

from django.conf import settings
import django
django.setup()

from sources.models import StockExchange, CryptoExchange


def add_stockExchange(name, code):
    """Create and persist a StockExchange row with the given name and code."""
    exchange = StockExchange()
    exchange.name = name
    exchange.code = code
    exchange.save()


def add_cryptoExchange(name):
    """Create and persist a CryptoExchange row with the given name."""
    exchange = CryptoExchange()
    exchange.name = name
    exchange.save()


# (display name, single-letter market code) for every supported stock exchange.
# BUGFIX: the original membership check used "OTC other" but the insert used a
# differently-spelled name, so that row was re-inserted on every run; the table
# below uses one consistent spelling for both the check and the insert.
STOCK_EXCHANGE_CODES = [
    ("NYSE MKT", "A"),
    ("NASDAQ OMX BX", "B"),
    ("National Stock Exchange", "C"),
    ("FINRA ADF", "D"),
    ("Market Independent (Generated by Nasdaq SIP)", "E"),
    ("Mutual Funds/Money Markets (NASDAQ)", "F"),
    ("International Securities Exchange", "I"),
    ("Direct Edge A", "J"),
    ("Direct Edge X", "K"),
    ("Chicago Stock Exchange", "M"),
    ("NYSE", "N"),
    ("NYSE Arca", "P"),
    ("NASDAQ OMX", "Q"),
    ("NASDAQ Small Cap", "S"),
    ("NASDAQ Int", "T"),
    ("OTCBB", "U"),
    ("OTC other", "V"),
    ("CBOE", "W"),
    ("NASDAQ OMX PSX", "X"),
    ("GLOBEX", "G"),
    ("BATS Y-Exchange", "Y"),
    ("BATS", "Z"),
]

# Add every stock exchange that is not already present in the database.
existing_stock = {item.name for item in StockExchange.objects.all()}
for name, code in STOCK_EXCHANGE_CODES:
    if name not in existing_stock:
        add_stockExchange(name, code)

# Cryptocurrency exchanges to seed.
CRYPTO_EXCHANGE_NAMES = ["HitBTC", "Coinbase", "Gemini", "Poloniex"]

# Add every crypto exchange that is not already present in the database.
existing_crypto = {item.name for item in CryptoExchange.objects.all()}
for name in CRYPTO_EXCHANGE_NAMES:
    if name not in existing_crypto:
        add_cryptoExchange(name)
import cv2
from cnn.cnn_mult import *
import pickle
import os
from torch.utils.data import Dataset
import torchvision.transforms as transforms
import numpy.ma as ma
import numpy as np
from image_utils import (EnhancedCompose, Merge, RandomCropNumpy, Split, to_tensor,
                         BilinearResize, CenterCropNumpy, RandomRotate, AddGaussianNoise,
                         RandomFlipHorizontal, RandomColor, RandomAffineZoom)
from torchvision.transforms import Lambda, Normalize, ToTensor
import csv


def get_time(name):
    """Extract the integer timestamp embedded in an NYU raw frame file name.

    Accelerometer files (names starting with 'a') carry one more trailing
    character than depth/rgb files, hence the different slice end.
    """
    if name[0] == 'a':
        return int(name[name[2:].find('-') + 3:len(name) - 6])
    else:
        return int(name[name[2:].find('-') + 3:len(name) - 5])


# Per-channel statistics of the NYU Depth v2 RGB images.
NYUD_MEAN = [0.48056951, 0.41091299, 0.39225179]
NYUD_STD = [0.28918225, 0.29590312, 0.3093034]

width = 400
out_size = (width, int(width * 0.75))  # (width, height) as cv2.resize expects

BASE_PATH = "/home/gustavo/pytorch/NYU V2/nyu_data/"
PATH = '/home/gustavo/pytorch/NYU V2/nyu_data/data/nyu2_train'
CSV_TRAIN = "/home/gustavo/pytorch/NYU V2/nyu_data/data/nyu2_train.csv"
CSV_TEST = "/home/gustavo/pytorch/NYU V2/nyu_data/data/nyu2_test.csv"


class NYUDepthV2(Dataset):
    """NYU Depth v2 dataset yielding (rgb, depth, validity-mask) triples."""

    def __init__(self, test=False, shrink_factor=1):
        """
        :param test: serve the test split instead of the train split
        :param shrink_factor: keep only every shrink_factor-th sample
        """
        self.shrink_factor = shrink_factor
        self.train_list = pickle.load(open(PATH + '_list', 'rb'))
        self.test_list = [row for row in csv.reader(open(CSV_TEST))]
        self.trans = transforms.Compose([
            transforms.ToPILImage(),
            transforms.CenterCrop((out_size[1] - 20, out_size[0] - 20)),
            transforms.ToTensor(),
            transforms.Normalize(NYUD_MEAN, NYUD_STD)
        ])
        self.trans_depth = transforms.Compose([
            transforms.ToPILImage(),
            transforms.ToTensor(),
        ])
        self.trans_mask = transforms.Compose([
            transforms.ToPILImage(),
            transforms.ToTensor()
        ])
        self.transform_test = transforms.Compose([
            transforms.ToPILImage(),
            transforms.CenterCrop((out_size[1] - 20, out_size[0] - 20)),
            transforms.ToTensor(),
        ])
        self.normalize = transforms.Compose([
            transforms.Normalize(NYUD_MEAN, NYUD_STD)
        ])
        self.test = test

    def __getitem__(self, index):
        # BUGFIX: the original tested `if ~self.test:` — bitwise NOT of a bool
        # gives -1 or -2, both truthy, so the train branch always ran even in
        # test mode. `not` is the boolean negation intended here.
        if not self.test:
            index = index * self.shrink_factor
            img_rgb = cv2.imread(self.train_list[index][0])
            img_rgb = cv2.resize(img_rgb, out_size, interpolation=cv2.INTER_AREA)
            img_dep = cv2.imread(self.train_list[index][1], cv2.IMREAD_GRAYSCALE)
            img_dep = cv2.resize(img_dep, out_size, interpolation=cv2.INTER_NEAREST)
            image = self.trans(img_rgb)
            depth = self.trans_depth(img_dep)
            mask = self.trans_mask(self.get_mask(img_dep))
            return image, depth, mask
        else:
            # NOTE(review): test_list rows hold file *paths*; as in the original,
            # the transforms are applied to the path strings directly — confirm
            # the test split is meant to be loaded (cv2.imread) before use.
            image = self.normalize(self.transform_test(self.test_list[index][0]))
            depth = self.transform_test(self.test_list[index][1])
            return image, depth, 1

    def __len__(self):
        if self.test:
            return int(len(self.test_list) / self.shrink_factor)
        else:
            return int(len(self.train_list) / self.shrink_factor)

    def get_mask(self, img_dep):
        """Return a uint8 mask: 255 where depth is valid (neither 0 nor 255), 0 elsewhere."""
        # masked_not_equal(...).mask is True where the value differs from the
        # sentinel, so the inverted masks flag the invalid 0/255 readings.
        # NOTE(review): if no element matches, numpy.ma may return `nomask`
        # (a scalar) instead of an array — behavior kept as original.
        img_mask_1 = ma.masked_not_equal(img_dep, 0)
        img_mask_2 = ma.masked_not_equal(img_dep, 255)
        img_mask = ~img_mask_1.mask + ~img_mask_2.mask
        img_mask = np.array(img_mask, dtype=np.uint8)
        return 255 - img_mask * 255

    def get_transform(self, training=True, size=(256, 192), normalize=True):
        """Build the legacy joint rgb/depth augmentation pipeline (unused above)."""
        if training:
            transforms = [
                Merge(),
                RandomFlipHorizontal(),
                RandomRotate(angle_range=(-5, 5), mode='constant'),
                RandomCropNumpy(size=size),
                RandomAffineZoom(scale_range=(1.0, 1.5)),
                Split([0, 3], [3, 5]),
                [RandomColor(multiplier_range=(0.8, 1.2)), None],
            ]
        else:
            transforms = [
                [BilinearResize(0.5), None],
            ]
        transforms.extend([
            # Note: ToTensor maps from [0, 255] to [0, 1] while to_tensor does not
            [ToTensor(), Lambda(to_tensor)],
            [Normalize(mean=NYUD_MEAN, std=NYUD_STD), None] if normalize else None
        ])
        return EnhancedCompose(transforms)


def extract_number(string):
    """Return the integer prefix of a file name, up to the first '.'.

    BUGFIX: the original sliced the builtin `str` instead of the `string`
    argument, which raised TypeError on every call.
    """
    return int(string[0:string.find('.')])


if __name__ == "__main__":
    # Build the train pair list ([i.jpg, i.png] for each folder) and pickle it.
    dataset_dir = PATH
    folders = [os.path.join(dataset_dir, o) for o in os.listdir(dataset_dir)
               if os.path.isdir(os.path.join(dataset_dir, o))]
    files_list = []
    for paths in folders:
        files = os.listdir(paths)
        files_num = len(files) / 2
        if files_num - int(files_num) != 0:
            raise Exception('Odd number of files (no pairing for depth-rgb)')
        files_num = int(files_num)
        files_list = files_list + [[paths + '/' + str(i) + '.jpg',
                                    paths + '/' + str(i) + '.png']
                                   for i in range(files_num)]
    with open(PATH + '_list', 'wb') as file:
        pickle.dump(files_list, file)


def nyu_depth_old():
    """Legacy pairing of raw NYU rgb/depth frames via per-folder INDEX files."""
    dataset_dir = '/home/gustavo/pytorch/NYU V2/nyu_data/data/nyu2_train'
    folders = [os.path.join(dataset_dir, o) for o in os.listdir(dataset_dir)
               if os.path.isdir(os.path.join(dataset_dir, o))]
    subfolders = []
    for paths in folders:
        sub_subfolders = [os.path.join(paths, o) for o in os.listdir(paths)
                          if os.path.isdir(os.path.join(paths, o))]
        subfolders = subfolders + sub_subfolders
    # Hard-coded override kept from the original (debugging leftover).
    subfolders = ['/home/gustavo/pytorch/NYU Depth V2/living_rooms_part2/living_room_0037',
                  '/home/gustavo/pytorch/NYU Depth V2/living_rooms_part2/living_room_0035']
    dataset = []
    for folder in subfolders:
        if os.path.exists(folder + '/index.txt'):
            index_file = open(folder + '/index.txt', 'r')
        elif os.path.exists(folder + '/INDEX.txt'):
            index_file = open(folder + '/INDEX.txt', 'r')
        else:
            continue
        # Sort index entries into accelerometer / depth / rgb file lists.
        line = index_file.readline()
        accel_files = []
        depth_files = []
        rgb_files = []
        while line:
            if line[0] == 'a':
                accel_files.append(line[:len(line) - 1])
            elif line[0] == 'd':
                depth_files.append(line[:len(line) - 1])
            elif line[0] == 'r':
                rgb_files.append(line[:len(line) - 1])
            line = index_file.readline()
        accel_index = 0
        rgb_index = 0
        old_index = 0
        debug = False
        # Pair each depth map with the first rgb frame at/after its timestamp.
        for i in range(len(depth_files)):
            try:
                depth_time = get_time(depth_files[i])
            except (ValueError, IndexError):  # malformed index entry
                print("fault INDEX")
                continue
            if rgb_index == len(rgb_files) - 1:
                break
            found = True
            # set index to the rgb frame right after the current depth map
            while get_time(rgb_files[rgb_index]) < depth_time:
                rgb_index += 1
                if rgb_index >= len(rgb_files) - 1:
                    found = False
                    break
            if found:
                if rgb_index != old_index:
                    dataset.append([folder, rgb_files[rgb_index], depth_files[i]])
                    if debug:
                        print(rgb_files[rgb_index], depth_files[i])
                    old_index = rgb_index
    # Optional visual sanity check of every 10th pair.
    show = True
    if show:
        for i in range(0, len(dataset), 10):
            cv2.imshow('rgb', cv2.imread(dataset[i][0] + '/' + dataset[i][1]))
            cv2.imshow('depth', cv2.imread(dataset[i][0] + '/' + dataset[i][2]))
            if cv2.waitKey(25) & 0xFF == ord('q'):
                break
    print(str(len(dataset)) + " images")
    with open('dataset_list.pickle', 'wb') as file:
        pickle.dump(dataset, file)
number = 23 flag = 1 while flag == 1: guess = int(raw_input('Enter a number:')) if guess == number: print 'Congrats on your success!' flag = 0 elif guess < number: print 'Ops, it is a little bit higher.' else: print 'Ops, it is a little bit smaller.' print 'Done.'
from selenium import webdriver
from time import sleep
# import xlrd
import random
import os
import time
import sys
sys.path.append("..")
# import email_imap as imap
# import json
import re
# from urllib import request, parse
from selenium.webdriver.support.ui import Select
# import base64
import Chrome_driver
import email_imap as imap
import name_get
import db
import selenium_funcs
import Submit_handle
# NOTE(review): duplicate import — `random` is already imported above.
import random


def web_submit(submit,chrome_driver,debug=0):
    """Open the offer page and submit the registration form for one plan.

    :param submit: plan dict; this function reads submit['Site'],
        submit['fr_soi']['email'] and submit['ID']
    :param chrome_driver: an already-started selenium Chrome driver
    :param debug: when 1, overwrite submit['Site'] with a fixed test offer URL
    """
    # test
    if debug == 1:
        site = 'http://da.off3riz.com/aff_c?offer_id=666&aff_id=1230'
        submit['Site'] = site
    chrome_driver.get(submit['Site'])
    # chrome_driver.maximize_window()
    # chrome_driver.refresh()
    print('Loading finished')
    sleep(3)
    # 18
    # Click the first anchor inside each element with id 'buttons'
    # (consent / "yes" style pop-ups).
    buttons = chrome_driver.find_elements_by_id('buttons')
    for button in buttons:
        button_yes = button.find_elements_by_tag_name('a')[0]
        print(button_yes.get_attribute('innerHTML'))
        try:
            button_yes.click()
            sleep(2)
        except Exception as e:
            # Element may be stale or not clickable; log and continue.
            print(e)
    # name
    sleep(3)
    # Generate a random username and fill in the registration form fields.
    name = name_get.gen_one_word_digit(lowercase=False,digitmax=100000)
    chrome_driver.find_element_by_xpath('//*[@id="registration"]/div[2]/input[1]').send_keys(name)
    sleep(2)
    chrome_driver.find_element_by_xpath('//*[@id="emailPG"]').send_keys(submit['fr_soi']['email'])
    sleep(2)
    chrome_driver.find_element_by_xpath('//*[@id="pg_submit"]').click()
    # Wait a random 1-3 minutes before marking the plan done (human-like pacing).
    sleep_rand = random.randint(60,180)
    sleep(sleep_rand)
    # Status 2 = submitted, per db.update_plan_status usage.
    db.update_plan_status(2,submit['ID'])


def test():
    """Manual test: run web_submit against the debug offer URL."""
    # db.email_test()
    # date_of_birth = Submit_handle.get_auto_birthday('')
    # Mission_list = ['10044']
    # excel = 'fr_soi'
    # Excel_name = [excel,'']
    # Email_list = ['hotmail.com','outlook.com','yahoo.com','aol.com','gmail.com']
    # submit = db.read_one_excel(Mission_list,Excel_name,Email_list)
    # [print(item,':',submit[excel][item]) for item in submit[excel] if submit[excel][item]!=None]
    # [print(item,':',submit[excel][item]) for item in submit[excel] if item == 'homephone']
    submit = {}
    submit['Mission_Id'] = '10047'
    submit['Country'] = 'FR'
    chrome_driver = Chrome_driver.get_chrome(submit)
    web_submit(submit,chrome_driver,1)


def test1():
    """Manual test: print a random 0/1."""
    num = random.randint(0,1)
    print(num)


if __name__=='__main__':
    test()
#!/usr/bin/env python2
# vim: set fileencoding=utf8
# Command-line downloader/player for the flvxz.com video-resolving API
# (Python 2 script: uses print statements, HTMLParser and urllib2).
import base64
import requests
import time
import os
import sys
import argparse
import random
from HTMLParser import HTMLParser
import select
from urllib2 import *

s = '\x1b[%d;%dm%s\x1b[0m'  # terminual color template
parser = HTMLParser()

############################################################
# wget exit status
wget_es = {
    0: "No problems occurred.",
    2: "User interference.",
    1<<8: "Generic error code.",
    2<<8: "Parse error - for instance, when parsing command-line optio.wgetrc or .netrc...",
    3<<8: "File I/O error.",
    4<<8: "Network failure.",
    5<<8: "SSL verification failure.",
    6<<8: "Username/password authentication failure.",
    7<<8: "Protocol errors.",
    8<<8: "Server issued an error response."
}
############################################################

headers = {
    "Accept":"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
    "Accept-Encoding":"text/html",
    "Accept-Language":"en-US,en;q=0.8,zh-CN;q=0.6,zh;q=0.4,zh-TW;q=0.2",
    "Content-Type":"application/x-www-form-urlencoded",
    "User-Agent":"Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1700.77 Safari/537.36"
}

wget_template = 'wget -q -c -nv -O "%s" "%s"'
api = 'http://api.flvxz.com/jsonp/purejson/url/%s'


def download(infos):
    """Download one video segment by shelling out to wget.

    infos keys used: 'filename', 'durl', 'n', 'amount'.
    Exits the process when wget reports a non-zero status.
    """
    #if not os.path.exists(infos['dir_']):
        # os.mkdir(infos['dir_'])
    #else:
        #if os.path.exists(infos['filename']):
            #return 0
    # Pick a random bright terminal colour for the file name.
    num = random.randint(0, 7) % 7
    col = s % (2, num + 90, os.path.basename(infos['filename']))
    # "正在下载" = "downloading" (runtime string kept as-is).
    print '\n ++ 正在下载:', '#', s % (1, 97, infos['n']), '/', s % (1, 97, infos['amount']), '#', col
    cmd = wget_template % (infos['filename'], parser.unescape(infos['durl']))
    status = os.system(cmd)
    if status != 0:  # other http-errors, such as 302.
        wget_exit_status_info = wget_es[status]
        print('\n\n ----### \x1b[1;91mERROR\x1b[0m ==> \x1b[1;91m%d (%s)\x1b[0m ###--- \n\n' % (status, wget_exit_status_info))
        print s % (1, 91, ' ===> '), cmd
        sys.exit(1)


def downloads(infos):
    """Download one video segment with urllib2 in 2 MiB chunks, printing progress."""
    #if not os.path.exists(infos['dir_']):
        # os.mkdir(infos['dir_'])
    #else:
        #if os.path.exists(infos['filename']):
            #return 0
    num = random.randint(0, 7) % 7
    col = s % (2, num + 90, os.path.basename(infos['filename']))
    #print '\n ++ 正在下载:', '#', s % (1, 97, infos['n']), '/', s % (1, 97, infos['amount']), '#', col
    flvurl=str(parser.unescape(infos['durl']))
    flvname=str(infos['filename'])
    # 10-second timeout on the HTTP connection.
    req=urlopen(flvurl,None,10)
    size=int(req.info()["Content-Length"])
    CHUNK=1024*1024*2  # 2 MiB read size
    ALL=size/CHUNK     # total number of chunks (integer division)
    now=0
    # "下载中" = "downloading" (runtime string kept as-is).
    print "下载中 "+flvname+"["+str(size/1000/1000)+"MB]"
    with open(flvname,"wb") as fp:
        while True:
            # Progress as a percentage of chunks read, clamped to 100.
            condition=float(now)*100/ALL
            condition=float('%4.1f'% condition)
            if condition >100:
                condition=100
                now=size/1000/1000
            print "\r"+" "*20,
            print "\033[91m\r[%"+str(condition)+"]"+str(now)+"MB\033[0m/"+"["+str(size/1000/1000)+"MB]",
            sys.stdout.flush()
            chunk=req.read(CHUNK)
            if not chunk:
                break
            fp.write(chunk)
            now+=1
    print "\n"


def play(infos):
    """Stream one video URL with mpv; quit if the user presses a key within 1s."""
    num = random.randint(0, 7) % 7
    col = s % (2, num + 90, os.path.basename(infos['filename']))
    print '\n ++ play:', '#', s % (1, 97, infos['n']), '/', s % (1, 97, infos['amount']), '#', col
    cmd = 'mpv --really-quiet --cache 8140 --cache-default 8140 ' \
        '"%s"' % parser.unescape(infos['durl'])
    #'--http-header-fields "User-Agent:%s" ' \
    #'"%s"' % (headers['User-Agent'], infos['durl'])
    status = os.system(cmd)
    # Give the user a 1-second window on stdin to abort the playlist.
    timeout = 1
    ii, _, _ = select.select([sys.stdin], [], [], timeout)
    if ii:
        sys.exit(0)
    else:
        pass


def pickup(j):
    """Pick a quality entry from the API result list.

    NOTE(review): the interactive prompt is commented out and the choice is
    hard-coded to "3" — confirm this is intentional.
    """
    print s % (1, 97, ' ++ pick a quality:')
    for i in xrange(len(j)):
        print s % (1, 91, ' %s' % (i+1)), j[i]['quality']
    #p = raw_input(s % (1, 92, ' Enter: '))
    p="3"
    if p.isdigit():
        p = int(p)
        if p <= len(j):
            return j[p-1]
        else:
            print s % (1, 91, ' !! enter error')
            sys.exit()
    else:
        print s % (1, 91, ' !! enter error')
        sys.exit()


def main(url):
    """Resolve `url` through the flvxz API and download (or play) each file."""
    # The API expects the URL base64-encoded with '://' replaced by ':##'.
    encode_url = base64.b64encode(url.replace('://', ':##'))
    url = api % encode_url
    r = requests.get(url)
    j = r.json()
    if j:
        j = pickup(j)
    else:
        print s % (1, 91, ' !! Can\'t get videos')
        sys.exit()
    print s % (2, 92, ' -- %s' % j['quality'].encode('utf8'))
    # Multi-part videos would go into a directory named after the title.
    yes = True if len(j['files']) > 1 else False
    dir_ = os.path.join(os.getcwd(), j['title'].encode('utf8')) if yes else os.getcwd()
    n = 1
    amount = len(j['files'])
    j['title']=j['title'].replace(" ","")
    for i in j['files']:
        infos = {
            'filename': os.path.join("", '%s_%s.%s' % (j['title'].encode('utf8'), n, i['ftype'].encode('utf8'))),
            'durl': i['furl'].encode('utf8'),
            'dir_': dir_,
            'amount': amount,
            'n': n
        }
        if args.play:
            play(infos)
        else:
            downloads(infos)
        n += 1


if __name__ == '__main__':
    p = argparse.ArgumentParser(description='flvxz')
    p.add_argument('url', help='site url')
    p.add_argument('-p', '--play', action='store_true', \
        help='play with mpv')
    args = p.parse_args()
    main(args.url)
# Copyright 2020 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys

import rclpy
from rclpy.event_handler import SubscriptionEventCallbacks
from rclpy.executors import ExternalShutdownException
from rclpy.executors import SingleThreadedExecutor
from rclpy.node import Node
from rclpy.time import Time

from sensor_msgs.msg import Image


class MessageLostListener(Node):
    """Listener node to demonstrate how to get a notification on lost messages."""

    def __init__(self):
        """Create a MessageLostListener."""
        super().__init__('message_lost_listener')
        # Describe the QoS event handlers to register on the subscription;
        # here only the message-lost event gets a handler.
        callbacks = SubscriptionEventCallbacks(
            message_lost=self._message_lost_event_callback)
        # Subscribe with the event handlers attached.
        self.subscription = self.create_subscription(
            Image,
            'message_lost_chatter',
            self._message_callback,
            1,
            event_callbacks=callbacks)

    def _message_callback(self, message):
        """Log when a message is received."""
        latency = self.get_clock().now() - Time.from_msg(message.header.stamp)
        self.get_logger().info(
            f'I heard an Image. Message single trip latency: [{latency.nanoseconds}]\n---')

    def _message_lost_event_callback(self, message_lost_status):
        """Log the number of lost messages when the event is triggered."""
        self.get_logger().info(
            'Some messages were lost:\n>\tNumber of new lost messages: '
            f'{message_lost_status.total_count_change}'
            f' \n>\tTotal number of messages lost: {message_lost_status.total_count}',
        )


def main(args=None):
    """Spin the listener on a single-threaded executor until shutdown."""
    rclpy.init(args=args)
    node = MessageLostListener()
    executor = SingleThreadedExecutor()
    executor.add_node(node)
    try:
        executor.spin()
    except (KeyboardInterrupt, ExternalShutdownException):
        pass
    finally:
        rclpy.try_shutdown()
    return 0


if __name__ == '__main__':
    sys.exit(main())
#!/usr/bin/env python3 import os exe_set = set() for pid in os.listdir('/proc'): if pid[0].isdecimal() is False: continue try: rp = os.path.realpath('/proc/{}/exe'.format(pid)) except Exception as e: pass exe_set.add(rp) list_d = list(exe_set) list_d.sort() for rp in list_d: print('+' + rp)
class Process(object): last_scheduled_time = 0 def __init__(self, id, arrive_time, burst_time): self.id = id self.arrive_time = arrive_time self.burst_time = burst_time def __repr__(self): return ('[id %d : arrive_time %d, burst_time %d]' % (self.id, self.arrive_time, self.burst_time))
def get_smiles(path_to_file: str) -> set(): """ Функция построчно считывает файл и записывает из него все смайлики. Возвращает набор уникальных элементов (множество) :param path_to_file: str :return: set() """ smiles_set = set() with open(path_to_file, encoding="cp1251") as file: for row in file: smile = row.split('"|"')[0] smiles_set.add(smile) return smiles_set smiles_negative = get_smiles("files/smiles_negative.txt") smiles_positive = get_smiles("files/smiles_positive.txt")
from .bot_review import * # noqa from .human_review import * # noqa
''' Created on Dec 8, 2015 @author: Xu Xu ''' import matplotlib.pyplot as plt import pandas as pd from matplotlib import patches #generate the grade of restaurant in nyc def grades_of_nyc_by_year(data): totaldata=data.groupby(['year','grade']).size().unstack() pd.DataFrame(totaldata).plot(kind='bar') plt.savefig('grade_improvement_NYC.pdf',format = 'pdf') plt.close() #generate the grade of restaurant in every borough def grades_of_Borough_by_year(data,boro): data=data[data['boro']==boro] summary = data.groupby(['YEAR','GRADE']).size().unstack() pd.DataFrame(summary).plot(kind='bar') plt.savefig('grade_improvement_' + boro + '.pdf',format = 'pdf') plt.close()
""" Tools for satellite related calculations """ import math import ephem from catalog.models import TLE class SatelliteComputation(object): """ Tools for satellite related computation """ G = 6.67408e-11 EARTH_MASS = 5.98e24 def __init__(self, **kwargs): self.observer = ephem.Observer() self._satellite = None if 'tle' not in kwargs: raise TypeError("tle parameter is missing") tle = kwargs['tle'] if type(tle) is not TLE: raise TypeError("tle must be of type TLE") try: self._satellite = ephem.readtle( tle.first_line, tle.second_line, tle.third_line ) except TypeError: raise ValueError("invalid TLE") def _calc_orbital_velocity(self, altitude): try: float(altitude) except: raise TypeError("altitude must be a number") if altitude < 0: raise ValueError("altitude must be positive") r = altitude + ephem.earth_radius return math.sqrt((SatelliteComputation.G * SatelliteComputation.EARTH_MASS) / r) def compute(self): self._satellite.compute(self.observer) return { 'longitude' : math.degrees(self._satellite.sublong), 'latitude' : math.degrees(self._satellite.sublat), 'elevation' : self._satellite.elevation, 'velocity' : self._calc_orbital_velocity(self._satellite.elevation), }
# Goal: Make an implementation of the sed command # v1: Just work with the input stream: # ls -l | sed "s/jamesbryant/cynicalneon/" # - find the specified string and replace with static string # v2: Use sed formatting to replace the string # v3: Use Sed formatting. import argparse import glob import os import sys import re class Sed(object): def __init__(self): # Setup args # Parse pattern string # Return components of pattern string # replacement_string # search_pattern print(sys.argv) parser = argparse.ArgumentParser() parser.add_argument("function", help="""The sed function used. Contains the regex search pattern and substitution string.""", type=str) self.__args = parser.parse_args() self.__search_pattern = None self.__sub_pattern = None print(parser.parse_known_args()) self.__search_pattern, self.__sub_pattern = self.__parse_function() def __parse_function(self): pattern = self.__args.function.split('/') print(pattern) return (pattern[1], pattern[2]) def sed(self): # Read string from standard input for line in sys.stdin: # check if searched_string in line # v1, v3 completed with the 2 lines below #if self.__search_pattern in line: # line = line.replace(self.__search_pattern, self.__sub_pattern) """ These 3 lines below do not work.""" #if re.match(self.__search_pattern, line) is not None: # print("something") # line = line.replace(self.__search_pattern, self.__sub_pattern) line = re.sub(self.__search_pattern, self.__sub_pattern, line) print(line, end="") if __name__ == '__main__': Sed().sed()
""" 将 VOC XML 转换为 COCO JSON @Author: patrickcty @filename: det_xml2json.py """ import os import json from xml.etree.ElementTree import parse def convert_binary_to_coco(xml_file_dir, out_file, all_xmls=None, cls_name='zawu'): """ 将给定文件夹下的 xml 文件转换为一个 json 文件,类别标签视为二分类 Parameters ---------- xml_file_dir: xml 文件所在文件夹,文件夹下包含一个或多个 xml 文件 out_file: 输出 json 文件绝对路径 all_xmls: 可选, 从给定的列表中生成 json 文件 cls_name: 类名 Returns ------- """ if all_xmls is None: all_xmls = os.listdir(xml_file_dir) train_annotations = [] train_images = [] object_idx = 0 for idx, xml_anno in enumerate(sorted(all_xmls)): if xml_anno.endswith('.xml'): info, anno, object_idx = parse_xml(os.path.join(xml_file_dir, xml_anno), idx, object_idx) train_annotations.extend(anno) train_images.append(info) coco_format_json_train = dict( images=train_images, annotations=train_annotations, categories=[{'id': 0, 'name': cls_name}]) with open(out_file, 'w') as f: json.dump(coco_format_json_train, f, indent=4) print('Save coco json successfully in {}.'.format(out_file)) def convert_multi_to_coco(xml_file_dir, out_file, all_xmls=None): """ 将给定文件夹下的 xml 文件转换为一个 json 文件,类别按照原始的标签类别 Parameters ---------- xml_file_dir: xml 文件所在文件夹,文件夹下包含一个或多个 xml 文件 out_file: 输出 json 文件绝对路径 all_xmls: 可选, 从给定的列表中生成 json 文件 Returns ------- """ if all_xmls is None: all_xmls = os.listdir(xml_file_dir) train_annotations = [] train_images = [] object_idx = 0 all_categories = [] for idx, xml_anno in enumerate(sorted(all_xmls)): if xml_anno.endswith('.xml'): info, anno, object_idx = parse_xml(os.path.join(xml_file_dir, xml_anno), idx, object_idx) # generate category id dynamically for a in anno: class_name = a['class_name'] if class_name in all_categories: a['category_id'] = all_categories.index(class_name) else: a['category_id'] = len(all_categories) all_categories.append(class_name) del a['class_name'] train_annotations.extend(anno) train_images.append(info) coco_format_json_train = dict( images=train_images, annotations=train_annotations, 
categories=[{'id': i, 'name': n} for i, n in enumerate(sorted(all_categories))]) with open(out_file, 'w') as f: json.dump(coco_format_json_train, f, indent=4) print('Save coco json successfully in {}.'.format(out_file)) def parse_xml(xml_file, idx, object_idx): """ 解析 xml 标注文件 Parameters ---------- xml_file: xml 文件所在绝对路径 idx: index object_idx: bbox index Returns a tuple including: info dict a list that contains annotation dicts object index ------- """ doc = parse(xml_file) info = { 'id': idx, 'file_name': doc.findtext('filename'), 'height': int(doc.findtext('size/height')), 'width': int(doc.findtext('size/width')) } all_annos = [] for item in doc.iterfind('object'): # can deal with multiply bbox class_name = item.findtext('name') xmin = int(item.findtext('bndbox/xmin')) ymin = int(item.findtext('bndbox/ymin')) xmax = int(item.findtext('bndbox/xmax')) ymax = int(item.findtext('bndbox/ymax')) anno = { 'image_id': idx, 'id': object_idx, 'category_id': 0, 'bbox': [min(xmin, xmax), min(ymin, ymax), abs(xmax - xmin), abs(ymax - ymin)], 'area': abs(xmax - xmin) * abs(ymax - ymin), 'iscrowd': 0, 'class_name': class_name } all_annos.append(anno) object_idx += 1 return info, all_annos, object_idx
#!/usr/bin/env python3 import time from http.server import HTTPServer, BaseHTTPRequestHandler class RequestHandler(BaseHTTPRequestHandler): def do_GET(self): # Javascript client side code opens EventSource on the browser body = """<html><body><h1>Streaming baby!</h1> <ul id="events"> </ul> <script> const eventSource = new EventSource("/sse") eventSource.onmessage = function(event) { const list = document.querySelector("ul#events") const elmnt = document.createElement("li") elmnt.innerText = "data: " + event.data list.appendChild(elmnt) } </script> </body></html>""".encode('utf-8') if self.path == '/': self.send_response(200, 'OK') self.send_header('Content-Type', 'text/html') self.end_headers() self.wfile.write(body) self.wfile.flush() elif self.path == '/sse': # TODO: Implement SSE while True: # TODO: send frames every 3 seconds, don't forget to flush! time.sleep(3) else: self.send_response(404, 'Not found') # after sending the status line, we send headers self.send_header('Server', 'demo') self.end_headers() server = HTTPServer(('', 8080), RequestHandler) if __name__ == "__main__": print('Listening on port 8080') server.serve_forever()
import threading
import numpy
import pyaudio
import struct
import random
from datetime import datetime as dt
import time
import math

# Number of selectable waveform types (see genewave).
MAX_SOUNDTYPE = 3


# Generate a waveform buffer at the specified frequency.
# type 0 = sine, 1 = sawtooth-like ramp, 2 = absolute-value ramp ("tankei").
# NOTE(review): types 1 and 2 grow without bound (not wrapped to a period),
# and any other type value returns None — confirm both are intentional.
def genewave(frequency, length, rate,type):
    length = int(length * rate)
    factor = float(frequency) * (math.pi * 2) / rate
    if type == 0:
        print("sin")
        return numpy.sin(numpy.arange(length) * factor)
    elif type == 1:
        print("saw")
        return numpy.arange(length) * factor
    elif type == 2:
        print("tankei")
        return numpy.abs(numpy.arange(length) * factor)


# Play one note on the given PyAudio stream.
# onkai is a [semitone, waveform-type] pair; the pitch is derived from A440
# using equal temperament (440 * 2**((n-9)/12)).
def play_tone(stream, frequency, length, rate,onkai):
    chunks = []
    pitch = 440*(numpy.power(2,(onkai[0]-9)/12))
    chunks.append(genewave(pitch, length, rate,onkai[1]))
    chunk = numpy.concatenate(chunks) * 0.25  # scale down to avoid clipping
    stream.write(chunk.astype(numpy.float32).tostring())


bufsize = 32
RATE=44100

# Biquad low-pass filter state: lpfbuf holds the last two inputs/outputs.
lpfbuf=numpy.zeros(4)
outwave=numpy.zeros(bufsize)


# Apply a biquad low-pass filter to a bufsize-sample buffer (stateful across
# calls via the module-level lpfbuf/outwave).
def lowpass(wave):
    global lpfbuf,outwave
    # Cutoff derived from a fixed control value; standard RBJ biquad coefficients.
    w0 = 2.0*numpy.pi*(200+(255.0/255.0)**2*20000)/RATE;
    Q = 1.0
    alpha = numpy.sin(w0)/(2.0*Q)
    a0 = (1 + alpha)
    a1 = -2*numpy.cos(w0)/a0
    a2 = (1 - alpha)/a0
    b0 = (1 - numpy.cos(w0))/2/a0
    b1 = (1 - numpy.cos(w0))/a0
    b2 = (1 - numpy.cos(w0))/2/a0
    for i in range(bufsize):
        outwave[i] = b0*wave[i]+b1*lpfbuf[1]+b2*lpfbuf[0]-a1*lpfbuf[3]-a2*lpfbuf[2]
        lpfbuf[0] = lpfbuf[1]
        lpfbuf[1] = wave[i]
        lpfbuf[2] = lpfbuf[3]
        lpfbuf[3] = outwave[i]
    return outwave


# Shared control file exchanged with another process.
path= 'data.txt'


# Write three random control values (one per line) to the control file.
def WriteFile():
    data=[random.randint(-30,30),random.randint(-30,30),random.randint(-30,30)]
    data_str=[str(n) for n in data]
    with open(path, mode='w') as f:
        f.write('\n'.join(data_str))


info =[0,0,0]
sound_type = 0


# Read the three control values back and update the playback state:
# info[1] cycles the waveform type, info[2] pushes/pops notes on sound_list.
def ReadFile():
    global sound_type
    print(sound_type)
    with open(path) as f:
        i=0
        for s_line in f:
            if(s_line != "\n"):
                info[i] = int(s_line)
                i+=1
    # Negative -> previous waveform type, positive -> next (with wrap-around).
    if info[1] < 0:
        if sound_type == 0:
            sound_type = MAX_SOUNDTYPE-1
        else:
            sound_type -= 1
    elif info[1] > 0:
        if sound_type >= MAX_SOUNDTYPE-1:
            sound_type = 0
        else:
            sound_type += 1
    # Negative -> append a [semitone, type] note, positive -> drop the oldest.
    if info[2] < 0:
        sound_list.append([info[0],sound_type])
    elif info[2] >0 and sound_list != []:
        sound_list.pop(0)
    print(info)


prev_read = dt.now()
prev_write = dt.now()
prev_sound = dt.now()
sound_list = []

if __name__ == "__main__":
    # Main loop: play queued notes, rewrite the control file and re-read it
    # every 3 seconds.
    while 1:
        cur = dt.now()
        for i,onkai in enumerate(sound_list):
            # Stagger playback: the i-th note becomes due i/16 s after prev_sound.
            if (cur - prev_sound).total_seconds() >= i/16.0:
                p = pyaudio.PyAudio()
                stream = p.open(format=pyaudio.paFloat32,channels=1, rate=44100, output=1)
                play_tone(stream,440,1,44100,onkai)
                stream.close()
                p.terminate()
        if (cur - prev_write).total_seconds() >= 3:
            prev_write = cur
            WriteFile()
        if (cur - prev_read).total_seconds() >= 3:
            prev_read = cur
            ReadFile()
            print(sound_list)
        time.sleep(1)
class colors: RED = '\x1b[0;31;1m' GREEN = '\x1b[0;32;1m' YELLOW = '\x1b[0;33;1m' BLUE = '\x1b[0;34;1m' PURPLE = '\x1b[0;35;1m' END = '\033[0m'
from os import system system("pip install --upgrade matplotlib")
# endi tuple xaqida tuplening listdan farqi buning elemetnlarini o'zgartirib bo'lmaydi lisdagi kabi numbers=(1,2,3,4) #numbers[0]=12 print(numbers) coordinates=(1,2,3) x,y,z=coordinates # bu degani shu tuple elementlarini shu xarflarga ta'minla degani buni listda xam ishlatsak bir xilda ishlaydi print(x) print(y) print(z)
import socket HOST_IP="127.0.0.1" HOST_PORT = 54831 HOST_PORT_RECEIVE = 58927 DEST_IP_ADDRESS = "127.0.0.1" DEST_PORT_NO = 58913 BUFSIZE = 4096 buffer_to_send='I am client' clientSock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) receiveSock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) clientSock.bind((HOST_IP, HOST_PORT)) receiveSock.bind((HOST_IP, HOST_PORT_RECEIVE)) clientSock.sendto(buffer_to_send, (DEST_IP_ADDRESS, DEST_PORT_NO)) print 'send:',buffer_to_send command_echo = receiveSock.recvfrom(BUFSIZE) print 'receive:',command_echo clientSock.close() receiveSock.close()
#!/usr/bin/env python3 # -*- coding: utf-8 -*- __version__ = '1.0.1' from web_backend.nvlserver.module import nvl_meta from sqlalchemy import BigInteger, String, Column, Boolean, DateTime, Table, ForeignKey, func from sqlalchemy.dialects.postgresql import JSONB location_type = Table( 'location_type', nvl_meta, Column('id', BigInteger, primary_key=True), Column('name', String(length=255), nullable=False), Column('active', Boolean, nullable=False), Column('deleted', Boolean, nullable=False), Column('created_on', DateTime(timezone=True), server_default=func.now(), nullable=False), Column('updated_on', DateTime(timezone=True), server_default=func.now(), onupdate=func.now(), nullable=False), ) location = Table( 'location', nvl_meta, Column('id', BigInteger, primary_key=True), Column('name', String(length=255), nullable=False), Column('location_type_id', BigInteger, ForeignKey('location_type.id'), nullable=True), Column('user_id', BigInteger, ForeignKey('user.id'), nullable=True), Column('meta_information', JSONB, default=lambda: {}, nullable=False), Column('show_on_map', Boolean, nullable=False, default=True), Column('active', Boolean, nullable=False), Column('end_zone', Boolean, nullable=False), Column('deleted', Boolean, nullable=False), Column('created_on', DateTime(timezone=True), server_default=func.now(), nullable=False), Column('updated_on', DateTime(timezone=True), server_default=func.now(), onupdate=func.now(), nullable=False), )
#!/usr/bin/env python import katcp_wrapper import time, struct, socket #bitstream = 'pkt_p2s_2015_May_08_1620.bof.gz' bitstream = 'pkt_32_to_8_2017_Nov_08_1625.bof.gz' roach = 'r1511' katcp_port = 7147 mac_base = (2<<40) + (2<<32) fabric_ip_string = '10.32.127.88' fabric_ip = struct.unpack('!L',socket.inet_aton(fabric_ip_string))[0] # convert ip to long fabric_port = 12345 dest_ip_string = '10.32.127.11' dest_ip = struct.unpack('!L',socket.inet_aton(dest_ip_string))[0] dest_port = 55555 def exit_fail(): # print('FAILURE DETECTED. Log entries:\n',lh.printMessages()) print('FAILURE DETECTED.\n') try: fpga.stop() except: pass raise #exit() def exit_clean(): try: fpga.stop() except: pass #exit() #START OF MAIN: if __name__ == '__main__': try: print('Connecting to server %s on port %i... ' % (roach,katcp_port)), fpga = katcp_wrapper.FpgaClient(roach, katcp_port, timeout=10) time.sleep(1) if fpga.is_connected(): print('ok\n') else: print('ERROR connecting to server %s on port %i.\n' % (roach,katcp_port)) exit_fail() print('------------------------') print('Programming FPGA with %s ... ' %bitstream), fpga.progdev(bitstream) print('done') #print(fpga.listdev()) print('Configuring destination IP and port %s:%i ... '%(socket.inet_ntoa(struct.pack('!L', dest_ip)),dest_port)), fpga.write_int('destip', dest_ip) fpga.write_int('destport', dest_port) print('done') divider = 254 print('Setting divider to %d ...' % divider), fpga.write_int('divider', divider) print('done') print('Initialize 1GbE ... '), fpga.tap_start('gbe', 'one_GbE', mac_base + fabric_ip, fabric_ip, fabric_port) print('done') print('Reset system...'), fpga.write_int('reset', 0) fpga.write_int('reset', 1) print('done') print('status: %d' % fpga.read_int('status')) print('Sleep one second...') time.sleep(1) print('status: %d' % fpga.read_int('status')) #except KeyboardInterrupt: #exit_clean() #except: #exit_fail() finally: exit_clean()
# Compute classification metrics between a predicted liver segmentation and
# the ground-truth segmentation (LiTS 2017 dataset, NIfTI volumes).
import SimpleITK as sitk
import pandas as pd
import quandl, math
import numpy as np
from sklearn import preprocessing, svm
from sklearn.model_selection._validation import cross_validate
from sklearn.linear_model import LinearRegression
from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
# FIX: precision_score, recall_score and f1_score are called below but were
# never imported, so the script crashed with NameError at the first use.
from sklearn.metrics import precision_score, recall_score, f1_score

print("hello")
import cv2
import sys

# Load the predicted and ground-truth segmentation volumes.
y_true_img = sitk.ReadImage("/content/gdrive/My Drive/MICCAI-LITS2017-master/dataset/test/liver_pred/pred1.nii")
y_pred_img = sitk.ReadImage("/content/gdrive/My Drive/MICCAI-LITS2017-master/dataset/test/seg/segmentation1.nii")
print("img true")
print (y_true_img)
print("img predict")
print (y_pred_img)
print("hello2")

# FIX: sklearn metrics require 1-D array-likes of labels, not SimpleITK Image
# objects; convert each volume to a flat numpy vector of voxel labels.
y_true = sitk.GetArrayFromImage(y_true_img).flatten()
y_pred = sitk.GetArrayFromImage(y_pred_img).flatten()

# accuracy: (tp + tn) / (p + n)
accuracy = accuracy_score(y_true, y_pred)
print('Accuracy: %f' % accuracy)
# precision tp / (tp + fp)
# NOTE(review): the default average='binary' assumes labels are {0, 1}; if the
# volumes also contain tumor labels (label 2), pass average='macro' — confirm.
precision = precision_score(y_true, y_pred)
print('Precision: %f' % precision)
# recall: tp / (tp + fn)
recall = recall_score(y_true, y_pred)
print('Recall: %f' % recall)
# f1: 2 tp / (2 tp + fp + fn)
f1 = f1_score(y_true, y_pred)
print('F1 score: %f' % f1)
# confusion matrix
matrix = confusion_matrix(y_true, y_pred)
print(matrix)
from sys import argv #import com-line input from os.path import exists #import exists() method script, copying_file, empty_file = argv #assign vars to argv print "Copying from %s to %s" % (copying_file, empty_file) #print statement indata = open(copying_file).read() #var = open first, then read or write print "The input file is %d bytes long" % len(indata) #length of text in bytes print "Does the output file exist? %r" % exists(empty_file) #exists() boolean print "Ready, hit RETURN to continue, CTRL-C to abort." #print statement raw_input('> ') # prompt asking open(empty_file, 'w').write(indata) #copying file to file with .write() #will create a new .txt file if it DNE already print "Alright, all done." #done
#!/usr/bin/env python # date: 2017/11/24 v1.0 # date: 2018/2/9 v2.0 # author: zss ### Input All transcripts.gtf list file ### Output Five type Count "*_Alternative_summary.txt" import sys,commands,os def CountAstala(GTF_astalavista): Sample = "CK" Dict = {} Type1 = 0 Type2 = 0 Type3 = 0 Type4 = 0 Type5 = 0 with open(GTF_astalavista, 'r') as F: for line in F: info = line.strip().split(';') structure = info[3].split('"')[1] #print structure if "0,1-2^" in structure: #print "0,1-2^: %s" % structure Type1 += 1 elif "1^,2^" in structure: Type2 += 1 elif "1-,2-" in structure: Type3 += 1 elif "0,1^2-" in structure: #print "0,1^2-: %s" % structure Type4 += 1 else: #print "other: %s" % structure Type5 += 1 #print "Type1: %s, Type2: %s, Type3: %s, Type4: %s, Type5: %s" % (Type1, Type2, Type3, Type4, Type5) OutPutInfo1 = "Type1-Cassette exon, Type2-Alternative 5' splice site, Type3-Alternative 3' splice site, Type4-Retained intron, Type5-Mutually exclusive exon" OutPutInfo2 = str(Type1) + ',' + str(Type2) + ',' + str(Type3) + ',' + str(Type4) + ',' + str(Type5) Dict[Sample] = {'Type1': Type1, 'Type2': Type2, 'Type3': Type3, 'Type4':Type4, 'Type5':Type5} return Dict if __name__ == '__main__': if len(sys.argv) != 2: print "\tUsage: python %s Sample_transcript.txt\n" % sys.argv[0] sys.argv[1] File = sys.argv[1] ### Count Astalavista type print "Sample, Type1-Cassette exon, Type2-Alternative 5' splice site, Type3-Alternative 3' splice site, Type4-Retained intron, Type5-Mutually exclusive exon" Dict = CountAstala(File) sample = Dict.keys()[0] print "%s, %d, %d, %d, %d, %d" % (sample, Dict[sample]['Type1'], Dict[sample]['Type2'], Dict[sample]['Type3'], Dict[sample]['Type4'], Dict[sample]['Type5'])
# Self-play driver: run the minmax AI against itself on a 5x5 board until
# one side wins.  All game types come from the local board2/minmax/eval modules.
from board2 import *
from minmax import *
from eval import *

b = Board().withSize(size=5)

# Weighted evaluation pipeline: flat coverage (x1) + Dijkstra distance (x4).
evaluator = Pipeline(
    (MFlatCoverage(), 1.0),
    (MDjikstraDistance(), 4.0)
)

print (evaluator)
ai = Minmax(evaluator)
while True:
    # The AI returns a score and a (move_type, args) pair for the current board.
    # NOTE(review): `type` shadows the builtin; rename if this file is extended.
    score, (type, args) = ai(b)
    b = b.apply_move(type, *args)
    print(b)
    # get_winner() != 0 means the game has been decided.
    if b.get_winner() != 0:
        break
# Interactive rock-paper-scissors ("sasso carta forbici") against the computer.
import random
import time

space = "--------------------------------------------------------\n"
values = ('sasso', 'carta', 'forbici')

while True:
    player = input("\nInserire Sasso Carta o Forbici\n").lower().strip()
    pc = random.choice(values)
    time.sleep(1)
    if player in values:
        # Player wins on the three classic beats: rock>scissors,
        # paper>rock, scissors>paper.  (`and` binds tighter than `or`.)
        if player == "sasso"\
                and pc == "forbici" or player == "carta"\
                and pc == "sasso" or player == "forbici"\
                and pc == "carta":
            print("Hai vinto")
        elif player == pc:
            print("Pareggio")
        else:
            print("Hai perso")
    else:
        # Unrecognized input: show the error and re-prompt immediately.
        print(space, "Errore,", end=' ')
        continue
    time.sleep(1)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 23 16:33:04 2021

@author: Guoxin Sun

Paper: "Strategic mitigation against wireless attacks on autonomous platoons"

Numerical example of the proposed security-game-based mitigation framework.
Builds an extensive-form game in Gambit (attacker vs. defender, with a chance
node modelling the attack detector) and solves it for a Nash equilibrium.
Please refer to the paper for detailed explanation (especially Section 3).
"""
import gambit
import sys
import argparse


class ControllerSwitchingStrategy:
    """Two-player game: attacker chooses attack/no-attack, the detector
    (chance node) reports, and the defender picks the ACC or CACC controller."""

    def __init__(self):
        # Payoff vectors filled lazily by getPayoffs(); order matches the
        # outcome wiring in run().
        self.payoffsDefender = []
        self.payoffsAttacker = []

    def getPayoffs(self, a_acc=7, a_cacc=-10, na_acc=-5, na_cacc=10, s_d=3, ns_d=-3, s_nd=-3, ns_nd=3):
        """
        Compute the eight defender/attacker payoffs for every combination of
        (attack/no-attack) x (switch/no-switch) x (detected/not detected).
        a_*/na_* are controller utilities under attack / no attack;
        s_*/ns_* are switching utilities given detection (d) / no detection (nd).
        """
        self.payoffsDefender.append(a_acc+s_d)
        self.payoffsDefender.append(a_cacc+ns_d)
        self.payoffsDefender.append(a_acc+s_nd)
        self.payoffsDefender.append(a_cacc+ns_nd)
        self.payoffsDefender.append(na_acc+s_d)
        self.payoffsDefender.append(na_cacc+ns_d)
        self.payoffsDefender.append(na_acc+s_nd)
        self.payoffsDefender.append(na_cacc+ns_nd)
        print("Defender's Payoffs are ", self.payoffsDefender)

        # Attacker payoffs are the negated defender payoffs (zero-sum), except
        # the two `-(0+...)` entries below.
        # NOTE(review): the no-attack/acc entries use 0 instead of na_acc,
        # breaking strict zero-sum symmetry — confirm against the paper.
        self.payoffsAttacker.append(-(a_acc+s_d))
        self.payoffsAttacker.append(-(a_cacc+ns_d))
        self.payoffsAttacker.append(-(a_acc+s_nd))
        self.payoffsAttacker.append(-(a_cacc+ns_nd))
        self.payoffsAttacker.append(-(0+s_d))
        self.payoffsAttacker.append(-(na_cacc+ns_d))
        self.payoffsAttacker.append(-(0+s_nd))
        self.payoffsAttacker.append(-(na_cacc+ns_nd))
        print("Attacker's Payoffs are ", self.payoffsAttacker)
        return self.payoffsDefender, self.payoffsAttacker

    def run(self, P_na_nr = (6, 10), P_na_r = (4, 10), P_a_nr = (1, 100), P_a_r = (99, 100),a_acc=7, a_cacc=-10, na_acc=-5, na_cacc=10,s_d=0, ns_d=-6, s_nd=-6, ns_nd=0):
        """Build the game tree and solve it with Gambit's LCP solver.

        The P_* pairs are detector probabilities as (numerator, denominator)
        fractions: na=no attack, a=attack, r=reported, nr=not reported.
        Returns the list of mixed-strategy profiles from lcp_solve.
        """
        g = gambit.Game.new_tree()
        g.title = "Controller Switching Game"
        P1 = g.players.add("Attacker")
        P2 = g.players.add("Defender")

        # Root: attacker chooses "na" (no attack) or "a" (attack).
        move = g.root.append_move(P1, 2)
        move.actions[0].label = "na"
        move.actions[1].label = "a"

        # Chance node after "na": detector reports nothing (nr) or reports (r).
        c1 = g.root.children[0].append_move(g.players.chance, 2)
        c1.actions[0].label = "nr"
        c1.actions[0].prob = gambit.Rational(P_na_nr[0], P_na_nr[1])
        c1.actions[1].label = "r"
        c1.actions[1].prob = gambit.Rational(P_na_r[0], P_na_r[1])

        # Chance node after "a": miss (nr) or correct detection (r).
        c2 = g.root.children[1].append_move(g.players.chance, 2)
        c2.actions[0].label = "nr"
        c2.actions[0].prob = gambit.Rational(P_a_nr[0], P_a_nr[1])
        c2.actions[1].label = "r"
        c2.actions[1].prob = gambit.Rational(P_a_r[0], P_a_r[1])

        # Defender information sets: it only observes the detector report, so
        # the "a" branch re-uses set1/set2 below (imperfect information).
        set1 = g.root.children[0].children[0].append_move(P2, 2)
        set1.actions[0].label = "cacc"
        set1.actions[1].label = "acc"
        set2 = g.root.children[0].children[1].append_move(P2, 2)
        set2.actions[0].label = "cacc"
        set2.actions[1].label = "acc"
        g.root.children[1].children[0].append_move(set1)
        g.root.children[1].children[1].append_move(set2)

        #payoffs
        payoffsDefender, payoffsAttacker = self.getPayoffs(a_acc=a_acc, a_cacc=a_cacc, na_acc=na_acc, na_cacc=na_cacc, s_d=s_d, ns_d=ns_d, s_nd=s_nd, ns_nd=ns_nd)

        # Outcomes o1..o8 map the payoff lists (built attack-first) onto the
        # tree leaves (walked no-attack-first), hence the reversed indexing.
        o1 = g.outcomes.add("")
        o1[0] = payoffsAttacker[7]
        o1[1] = payoffsDefender[7]
        o2 = g.outcomes.add("")
        o2[0] = payoffsAttacker[6]
        o2[1] = payoffsDefender[6]
        o3 = g.outcomes.add("")
        o3[0] = payoffsAttacker[5]
        o3[1] = payoffsDefender[5]
        o4 = g.outcomes.add("")
        o4[0] = payoffsAttacker[4]
        o4[1] = payoffsDefender[4]
        o5 = g.outcomes.add("")
        o5[0] = payoffsAttacker[3]
        o5[1] = payoffsDefender[3]
        o6 = g.outcomes.add("")
        o6[0] = payoffsAttacker[2]
        o6[1] = payoffsDefender[2]
        o7 = g.outcomes.add("")
        o7[0] = payoffsAttacker[1]
        o7[1] = payoffsDefender[1]
        o8 = g.outcomes.add("")
        o8[0] = payoffsAttacker[0]
        o8[1] = payoffsDefender[0]

        g.root.children[0].children[0].children[0].outcome = o1
        g.root.children[0].children[0].children[1].outcome = o2
        g.root.children[0].children[1].children[0].outcome = o3
        g.root.children[0].children[1].children[1].outcome = o4
        g.root.children[1].children[0].children[0].outcome = o5
        g.root.children[1].children[0].children[1].outcome = o6
        g.root.children[1].children[1].children[0].outcome = o7
        g.root.children[1].children[1].children[1].outcome = o8

        # Solve via linear complementarity; floating-point output.
        out = gambit.nash.lcp_solve(g, rational=False)
        return out

    def print_NashEquilibrium(self, out):
        """Pretty-print the first equilibrium profile returned by lcp_solve."""
        print("Prob. of not attacking - P(na) = {:.4f}".format(out[0][0]))
        print("Prob. of initiating boiling frog attack - P(a) = {:.4f}".format(out[0][1]))
        print("If the detector reports NO attack, P(cacc) = {:.4f}, P(acc) = {:.4f}".format(out[0][2], out[0][3]))
        print("If the detector reports an attack, P(cacc) = {:.4f}, P(acc) = {:.4f}".format(out[0][4], out[0][5]))


def readCommand( argv ):
    """
    Processes the command used to run the proposed security game based
    mitigation framework from the command line.

    Sets the module-level globals consumed by __main__ (utility entries and
    detector probabilities); the function itself returns None.
    """
    usageStr = """
    USAGE: python NumericalExample.py <options>
    EXAMPLES: (1) python NumericalExample.py
    - returns the Player' payoffs, prob. of initiating an attack and prob. of defloying acc for defending
    (2) python NumericalExample.py -p
    - starts to type in utility values and detector properties for game tree construction
    """
    parser = argparse.ArgumentParser(usageStr)
    parser.add_argument("-p", "--parameters", help="enter utility values and detector properties", action="store_true", default=False)
    args = parser.parse_args(argv)

    global alpha_d, alpha_f, alpha_m, beta_s, beta_t, P_na_nr, P_na_r, P_a_nr, P_a_r
    if args.parameters:
        # Interactive mode: read every parameter from stdin.
        alpha_d = int(input('Enter alpha_d: '))
        alpha_f = int(input('Enter alpha_f: '))
        alpha_m = int(input('Enter alpha_m: '))
        beta_s = int(input('Enter beta_s: '))
        beta_t = int(input('Enter beta_t: '))
        # format required by the chance node defination in Gambit
        false_alarm = int(100*round(float(input('Enter Prob. of false alarms:')),2))
        misses = int(100*round(float(input('Enter Prob. of misses:')),2))
        P_na_nr = (100-false_alarm,100)
        P_na_r = (false_alarm, 100)
        P_a_nr = (misses, 100)
        P_a_r = (100-misses, 100)
    else:
        # Default vaules (same as the ones presented in the paper)
        # Entries of utility function U1
        alpha_d = 5
        alpha_f = 9
        alpha_m = 12
        # Entries of utility function U2
        beta_s = 10
        beta_t = 15
# =============================================================================
#         # When the defense framework is depolyed on each vehicle, beta_s is caulated as (please refer to eq.(6) in the paper)
#         if eps_radar > eps_max:
#             beta_s = int(6*np.log2(10*eps_radar))
#         else:
#             beta_s = -k_c
# =============================================================================
        # Detector properties
        P_na_nr = (85, 100)
        P_na_r = (15, 100)
        P_a_nr = (35, 100)
        P_a_r = (65, 100)


if __name__ == '__main__':
    # readCommand populates the globals used below; its return value is unused.
    options = readCommand( sys.argv[1:] )
    NumericalExample= ControllerSwitchingStrategy()
    Results = NumericalExample.run(P_na_nr = P_na_nr, P_na_r = P_na_r, P_a_nr = P_a_nr, P_a_r = P_a_r, a_acc=alpha_d, a_cacc=-alpha_m, na_acc=-alpha_f, na_cacc=0, s_d=beta_t, ns_d=beta_s, s_nd=beta_s, ns_nd=beta_t)
    NumericalExample.print_NashEquilibrium(Results)
# Minimal client for the crisis.net REST API.
from doh.private import CRISISNET_APIKEY
import json
import requests


def query_crisis(endpoint, **custom_params):
    """GET the given crisis.net endpoint and return the decoded JSON body.

    The API key is always sent; any keyword arguments are forwarded as
    additional query-string parameters.
    """
    params = {'apikey': CRISISNET_APIKEY}
    params.update(custom_params)
    baseurl = 'http://api.crisis.net/'
    fullurl = baseurl + endpoint
    r = requests.get(fullurl, params=params)
    return r.json()


if __name__ == '__main__':
    # FIX: this previously called `dlurl('sources')`, a name that is not
    # defined anywhere in this module, so running the script raised NameError.
    # `query_crisis` is the function this smoke test was meant to exercise.
    query_crisis('sources')
class adecide:
    """Heuristic attack/continue decisions based on attacker strength (na)
    and defender strength (nd).  Methods return 1 for "yes", 0 for "no",
    and also record the result on the instance (x / victory / t), matching
    the original implementation's side effects."""

    def decide(self, na, nd):
        """Attack (1) only when the defender is weak (nd < 10) and the
        attacker is strong (na > 15); otherwise hold (0)."""
        verdict = 0
        if nd > 15:
            verdict = 0
        elif nd < 10 and na > 15:
            verdict = 1
        elif na > 6 and na > 2 * nd:
            verdict = 0  # same outcome as the fallback; kept for structure
        self.x = verdict
        return self.x

    def win(self, nd):
        """Victory (1) once the defender has no strength left (nd <= 0)."""
        self.victory = 0 if nd > 0 else 1
        return self.victory

    def cont(self, na, nd):
        """Stop fighting (0) when clearly ahead, when mopping up a nearly
        dead defender, or when decide() says to attack; else continue (1)."""
        self.t = adecide()
        clearly_ahead = na - nd > 5
        mop_up = nd < 2 and na > 4
        if clearly_ahead or mop_up or self.t.decide(na, nd):
            self.x = 0
        else:
            self.x = 1
        return self.x
import cv2 as cv


def hi():
    """Build and display a hybrid image: low-pass of hybrid1.jpg combined
    with the high-pass residual of hybrid2.jpg.

    NOTE(review): the images load as uint8, so `hybrid2 - h2_low_pass` and
    the final addition wrap modulo 256 rather than saturating — confirm this
    wrap-around is the intended effect (cv.subtract/cv.add would saturate).
    """
    hybrid1 = cv.imread("hybrid1.jpg", cv.IMREAD_GRAYSCALE)
    hybrid2 = cv.imread("hybrid2.jpg", cv.IMREAD_GRAYSCALE)
    # Low-pass both inputs; the larger kernel removes more detail from hybrid2.
    h1_low_pass = cv.GaussianBlur(hybrid1, (3, 3), 0)
    h2_low_pass = cv.GaussianBlur(hybrid2, (5, 5), 0)
    # High-pass = original minus its low-pass component.
    h2_high_pass = hybrid2 - h2_low_pass
    hybrid = h2_high_pass + h1_low_pass
    # Show the result until a key is pressed.
    cv.imshow("Image", hybrid)
    cv.waitKey(0)
    cv.destroyAllWindows()
# Solve with breadth-first search
#!/usr/bin/env python # -*- coding: utf-8 -*- from head import * from django_frame_solution import DjangoFrameSolution class DjangoFrameMain(): """ 此类负责处理每个连接实例,包括收发包,解析等操作 """ def __init__(self, client): self.client = client self.fileno = client.fileno() def main_receivor(self, ): """ 连接实例主处理入口,收取数据 :rtype: 成功返回SUC, 失败返回 FAI,如果查过判断僵死时间,返回 ERR """ result = FAI servant = DjangoFrameSolution() version, body = servant.receive(self.client.handler) if body != '': self.client.mylock.acquire() self.client.last_time = datetime.now() self.client.mylock.release() # 解析数据 json_inst = servant.parse(body) # 带入计算公式计算 result = servant.dispatch(json_inst, self.client.handler) log_msg = 'dispatch result = %d' %result log_handler.debug(log_msg) if result == SUC: return SUC now_time = datetime.now() gap = (now_time - self.client.last_time).seconds if gap > SOCKET_TIMEOUT: return ERR else: return FAI
#!/usr/bin/env python import psycopg2 conn = psycopg2.connect(database="hpc", user="hpc", password="123456", host="localhost", port="5432") print "Open database successfully" cursor = conn.cursor() cursor.execute("delete from rank_toponehundred;") conn.commit() conn.close()
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). from __future__ import annotations from dataclasses import dataclass from pants.core.util_rules.system_binaries import OpenBinary from pants.engine.environment import EnvironmentName from pants.engine.explorer import RequestState from pants.engine.process import Process, ProcessCacheScope, ProcessResult from pants.engine.rules import QueryRule, collect_rules, rule from pants.util.logging import LogLevel @dataclass(frozen=True) class Browser: open_binary: OpenBinary protocol: str server: str def open(self, request_state: RequestState, uri: str = "/") -> ProcessResult | None: if not self.open_binary: return None url = f"{self.protocol}://{self.server}{uri}" return request_state.product_request( ProcessResult, ( Process( (self.open_binary.path, url), description=f"Open {url} with default web browser.", level=LogLevel.INFO, cache_scope=ProcessCacheScope.PER_SESSION, ), ), ) @dataclass(frozen=True) class BrowserRequest: protocol: str server: str @rule async def get_browser(request: BrowserRequest, open_binary: OpenBinary) -> Browser: return Browser(open_binary, request.protocol, request.server) def rules(): return ( *collect_rules(), QueryRule(ProcessResult, (Process, EnvironmentName)), )
# -*- coding:utf-8 -*-
from fraction import Fraction


def solve_it(a, b):
    """Solve the linear equation a*x + b = 0.

    Returns the root -b/a when a is non-zero, "No solution" when a == 0 but
    b != 0, and "Any number" when both coefficients are zero.
    """
    if a != 0:
        return -b / a
    if b != 0:
        return "No solution"
    return "Any number"


if __name__ == '__main__':
    print(solve_it(Fraction(0, 2), Fraction(0, 4)))
import unittest

from katas.kyu_6.reverse_or_rotate import revrot


class ReverseOrRotateTestCase(unittest.TestCase):
    """Unit tests for revrot(strng, sz): the string is processed in chunks
    of sz digits, each chunk reversed or rotated per the kata's rule, and
    edge cases (empty string, sz == 0, trailing partial chunk) return ''
    or drop the remainder."""

    def test_equals(self):
        self.assertEqual(revrot('123456987654', 6), '234561876549')

    def test_equals_2(self):
        self.assertEqual(revrot('123456987653', 6), '234561356789')

    def test_equals_3(self):
        self.assertEqual(revrot('66443875', 4), '44668753')

    def test_equals_4(self):
        self.assertEqual(revrot('66443875', 8), '64438756')

    def test_equals_5(self):
        # Trailing digit beyond the last full chunk is discarded.
        self.assertEqual(revrot('664438769', 8), '67834466')

    def test_equals_6(self):
        self.assertEqual(revrot('123456779', 8), '23456771')

    def test_equals_7(self):
        # Empty input yields empty output.
        self.assertEqual(revrot('', 8), '')

    def test_equals_8(self):
        # Chunk size 0 yields empty output.
        self.assertEqual(revrot('123456779', 0), '')
# Thin sqlite3 wrapper around a single Movies(assetid, event_count, title) table.
import sqlite3,csv
from sqlite3 import Error
import os

FILE = "movies.db"
MOVIES_TABLE = "Movies"


class Movies:
    """Persistent movie store backed by the local sqlite database FILE."""

    def __init__(self):
        # Open (creating if needed) the database and ensure the table exists.
        self.conn = None
        try:
            self.conn = sqlite3.connect(FILE)
        except Error as e:
            print(e)
        self.cursor = self.conn.cursor()
        self._create_table()

    def _create_table(self):
        query = f"""CREATE TABLE IF NOT EXISTS {MOVIES_TABLE}
        (assetid INTEGER PRIMARY KEY,event_count INTEGER,title TEXT)"""
        self.cursor.execute(query)
        self.conn.commit()

    def add_shit(self,id,event,title):
        """Insert one (assetid, event_count, title) row."""
        query = f"INSERT INTO {MOVIES_TABLE} VALUES (?, ?,?)"
        self.cursor.execute(query, (id,event,title))
        self.conn.commit()

    def get_by_id(self,id):
        """Return the title for the given assetid, or None if absent."""
        # SECURITY FIX: `id` was interpolated directly into the SQL string
        # (f-string), allowing SQL injection and breaking on non-numeric ids;
        # use a bound parameter instead.
        query = f"SELECT * FROM {MOVIES_TABLE} WHERE assetid = ?"
        self.cursor.execute(query, (id,))
        result = self.cursor.fetchone()
        if not result:
            return None
        id,event,title = result
        return title

    def get_first_five(self):
        """Return up to five (assetid, event_count, title) rows."""
        self.cursor.execute(f"SELECT assetid,event_count,title FROM {MOVIES_TABLE} LIMIT 5")
        result = self.cursor.fetchall()
        return result

    def fill_db(self):
        """Bulk-load rows from the hard-coded CSV export.

        NOTE(review): the absolute, machine-specific path should become a
        parameter or config value before this is reused elsewhere.
        """
        with open('/Volumes/MacOS — данные/Users/mac/Pictures/movix_cumansi/movix_cumansi/react-flask-app/api/models/boeviki.csv','r') as fin:
            dr = csv.DictReader(fin)
            to_db = [(i['assetid'], i['event_count'],i['title']) for i in dr]
        query = f"INSERT INTO {MOVIES_TABLE} (assetid, event_count,title) VALUES (?, ?, ?);"
        self.cursor.executemany(query, to_db)
        self.conn.commit()


db = Movies()
print(db.get_first_five())
# Odd numbers in range
# Step through the odd values directly instead of filtering out the evens.
for odd in range(1, 21, 2):
    print("The odd number is {}".format(odd))
import sublime, sublime_plugin


class PrintCodeCommand(sublime_plugin.WindowCommand):
    # Replays the active view's text into a new view one character every
    # 100 ms, as a "typing" animation.  Two characters act as control codes:
    # 'é' deletes the last typed character and 'è' clears the new view.
    def run(self):
        syntax = self.window.active_view().settings().get('syntax')
        allString = self.window.active_view().substr(sublime.Region(0, self.window.active_view().size()))
        # New output view with the same syntax highlighting as the source.
        newFile = self.window.new_file()
        newFile.set_syntax_file(syntax)
        t = 0 # timer
        a = "add" # add, remove, clear
        for c in allString:
            if c == 'é':
                a = "remove"
            elif c == 'è':
                a = "clear"
            else :
                a = "add"
            # Default arguments bind the CURRENT c and a; a plain closure
            # would only see their final values when the timeout fires.
            sublime.set_timeout(lambda char = c, action = a : newFile.run_command("process_char_action",{ "action" : action , "char" : char}) , t*100)
            t += 1


class ProcessCharActionCommand(sublime_plugin.TextCommand):
    # Applies one add/remove/clear step to the target view (invoked via the
    # deferred run_command calls scheduled above).
    def run(self, edit, action, char):
        if action == "add":
            # Append at the end of the view.
            self.view.insert(edit, self.view.size(), char)
        elif action == "remove":
            # Erase the last character.
            region = sublime.Region(self.view.size()-1, self.view.size())
            self.view.erase(edit, region)
        elif action == "clear" :
            # Erase everything.
            region = sublime.Region(0, self.view.size())
            self.view.erase(edit, region)
# Flask-RESTful API (Python 2) exposing aggregate user/"boat" statistics from
# a PostgreSQL `users` table.
from flask import Flask, request
from flask_restful import Resource, Api
from sqlalchemy import create_engine
from json import dumps
import psycopg2
import os

SQLALCHEMY_DATABASE_URI = "postgresql+psycopg2://w205:MIDS@localhost/postgres"
e = create_engine(SQLALCHEMY_DATABASE_URI)

app = Flask(__name__)
api = Api(app)

# NOTE(review): every `else` branch below interpolates the URL path segment
# (`%date` / `%id`) directly into SQL — this is injectable from the request
# URL and should be switched to bound parameters.


class Count(Resource):
    # /boats/count/<date>: row counts per type, grouped/total/for one day.
    def get(self, date):
        conn = e.connect()
        print date
        if date.upper() == "GROUP":
            # Per-day, per-type counts.
            query = conn.execute("select date(time_first),count(*),type from users group by date(time_first),type;")
            result = {"boats": [{"date":i[0], "count":i[1], "type":i[2]} for i in query.cursor.fetchall()]}
        elif date.upper() == "ALL":
            # Totals per type over all time.
            query = conn.execute("select count(*),type from users group by type;")
            result = {"boats": [{"count":i[0], "type":i[1]} for i in query.cursor.fetchall()]}
        else:
            # Counts per type for one specific date.
            query = conn.execute("select count(*),type from users where date(time_first) ='%s' group by type;" %date)
            result = {"boats": [{"count":i[0], "type":i[1]} for i in query.cursor.fetchall()]}
        print result
        return result


class Query(Resource):
    # /boats/queries/<date>: summed call counts per type.
    def get(self, date):
        conn = e.connect()
        print date
        if date.upper() == "ALL":
            query = conn.execute("select sum(calls),type from users group by type;")
            result = {"boats": [{"queries":i[0], "type":i[1]} for i in query.cursor.fetchall()]}
        else:
            query = conn.execute("select sum(calls),type from users where date(time_first) ='%s' group by type;" %date)
            result = {"boats": [{"queries":i[0], "type":i[1]} for i in query.cursor.fetchall()]}
        print result
        return result


class List(Resource):
    # /boats/list/<date>: ids of all users whose type is not 'GOOD'.
    def get(self, date):
        conn = e.connect()
        print date
        if date.upper() == "ALL":
            query = conn.execute("select userid from users where type != 'GOOD';")
            result = {"boats": [{"id":i[0]} for i in query.cursor.fetchall()]}
        else:
            query = conn.execute("select userid from users where type != 'GOOD' and date(time_first) ='%s';" %date)
            result = {"boats": [{"id":i[0]} for i in query.cursor.fetchall()]}
        print result
        return result


class Data(Resource):
    # /boats/data/<id>: full detail rows, for one user or for all.
    def get(self, id):
        conn = e.connect()
        print id
        if id.upper() == "ALL":
            query = conn.execute("select userid,ipaddress,lon,lat,calls,type,mostwatchedcontent,totaltime,mostwatchedmateches from users;")
            result = {"boats": [{"queries":i[0], "ipaddress":i[1], "lon":i[2], "lat":i[3], "calls":i[4], "type":i[5], "watched":i[6], "total":i[7], "matches":i[8]} for i in query.cursor.fetchall()]}
        else:
            query = conn.execute("select userid,ipaddress,lon,lat,calls,type,mostwatchedcontent,totaltime,mostwatchedmateches from users where userid ='%s';" %id)
            result = {"boats": [{"queries":i[0], "ipaddress":i[1], "lon":i[2], "lat":i[3], "calls":i[4], "type":i[5], "watched":i[6], "total":i[7], "matches":i[8]} for i in query.cursor.fetchall()]}
        print result
        return result


api.add_resource(Count, "/boats/count/<string:date>")
api.add_resource(Query, "/boats/queries/<string:date>")
api.add_resource(List, "/boats/list/<string:date>")
api.add_resource(Data, "/boats/data/<string:id>")


if __name__ == '__main__':
    # Smoke-test the database connection before serving.
    test_con = e.connect()
    test_query = "SELECT * FROM users LIMIT 1;"
    test_result = test_con.execute(test_query)
    print test_result.cursor.fetchall()
    port = int(os.environ.get("PORT", 8080))
    app.run(host='0.0.0.0', port=port, debug=True)
#!/usr/bin/env python # -*- coding: utf-8 -*- from setuptools import setup with open('README.rst') as readme_file: readme = readme_file.read() with open('HISTORY.rst') as history_file: history = history_file.read() requirements = [ 'Click>=6.0', 'Cerberus==1.0.1', 'lxml', ] test_requirements = [ 'tox', 'pytest', ] setup( name='pycfdi', version='0.1.0', description="A python module to create, manipulate and validate CFDI documents.", long_description=readme + '\n\n' + history, author="Walter Treviño", author_email='walter.trevino@gmail.com', url='https://github.com/wtrevino/pycfdi', packages=[ 'pycfdi', ], package_dir={'pycfdi': 'pycfdi'}, entry_points={ 'console_scripts': [ 'pycfdi=pycfdi.cli:main' ] }, include_package_data=True, install_requires=requirements, license="MIT license", zip_safe=False, keywords=['pycfdi', 'cfdi', 'cfdi sat', 'cfdi comprobante', 'cfdi nómina', 'facturación electrónica', 'e-invoicing', ], classifiers=[ 'Development Status :: 2 - Pre-Alpha', 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', "Programming Language :: Python :: 2", 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', ], test_suite='tests', tests_require=test_requirements )
import logging

# One-line-per-record format: timestamp, logger, file@line, level, message.
_LOG_FORMAT = '%(asctime)s %(name)s %(filename)s@%(lineno)d %(levelname)s %(message)s'

logging.basicConfig(format=_LOG_FORMAT, level=logging.INFO)

# Module-wide logger for the speed-camera component.
log = logging.getLogger('speed-cam')
# -*- coding: utf-8 -*-
from collections import Counter


class Solution:
    """LeetCode 825 "Friends Of Appropriate Ages"."""

    def numFriendRequests(self, ages):
        """Count directed friend requests.

        A person of age `sender` requests a person of age `receiver` iff
        sender // 2 + 7 < receiver <= sender.  Working over the age
        histogram keeps this O(k^2) in distinct ages, not O(n^2) in people.
        """
        tally = Counter(ages)
        requests = 0
        for sender, n_senders in tally.items():
            for receiver, n_receivers in tally.items():
                if not (sender // 2 + 7 < receiver <= sender):
                    continue
                requests += n_senders * n_receivers
                if sender == receiver:
                    # Nobody sends a request to themselves.
                    requests -= n_senders
        return requests


if __name__ == "__main__":
    solution = Solution()
    assert 2 == solution.numFriendRequests([16, 16])
    assert 2 == solution.numFriendRequests([16, 17, 18])
    assert 3 == solution.numFriendRequests([20, 30, 100, 110, 120])
    assert 3 == solution.numFriendRequests([108, 115, 5, 24, 82])
from django.db import models
from geopy.geocoders import Nominatim


def location():
    # NOTE(review): geocodes a hard-coded address and then discards the
    # result — `loc` is never returned or stored.  Confirm whether this
    # helper is still needed and, if so, make it return `loc`.
    geolocator = Nominatim(user_agent="https://ba531876.ngrok.io")
    loc = geolocator.geocode("5 Shaaban Robert St, Dar es Salaam")


class Organizer(models.Model):
    """A person who organizes groups (see GroupDetails.organizers)."""
    name = models.CharField(max_length=255,null=False,blank=False)
    title = models.CharField(max_length=255,null=False)
    location_address = models.CharField(max_length=255,null=False)
    # Set once when the row is created.
    member_since = models.DateTimeField(verbose_name='Account Since', auto_now_add=True,null=False)
    bio = models.CharField(max_length=255)
    profile_picture = models.ImageField(verbose_name='Profile Image',null=False)

    def __str__(self):
        return self.name


class GroupDetails(models.Model):
    """A group with its location, membership size and organizers."""
    group_name = models.CharField(max_length=255,null=False)
    group_location = models.CharField(max_length=255,null=False, blank=False)
    total_members = models.IntegerField()
    # A group can have several organizers; an organizer can run several groups.
    organizers = models.ManyToManyField(Organizer)
    group_about = models.CharField(max_length=255,null=False)
    thumbnail = models.ImageField(verbose_name='Group Picture', null=False)

    def __str__(self):
        return self.group_name
# Visualize the mean number of attempts per (student, level) as a bubble chart.
import pandas as pd
import csv
import plotly.express as px

df=pd.read_csv("data.csv")
# Average attempts for each student/level pair; as_index=False keeps the
# group keys as ordinary columns for plotting.
mean=df.groupby(["student_id","level"],as_index=False)["attempt"].mean()
# Bubble size and colour both encode the mean attempt count.
fig=px.scatter(mean,x="student_id",y="level",size="attempt",color="attempt")
fig.show()
# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). import errno import logging import os import sys import psutil from fasteners import InterProcessLock from pants.util.dirutil import safe_delete logger = logging.getLogger(__name__) def print_to_stderr(message): print(message, file=sys.stderr) class OwnerPrintingInterProcessFileLock(InterProcessLock): @property def message_path(self): return f"{self.path_str}.lock_message" @property def path_str(self): return self.path.decode() @property def missing_message_output(self): return f"Pid {os.getpid()} waiting for a file lock ({self.path_str}), but there was no message at {self.message_path} indicating who is holding it." def acquire(self, message_fn=print_to_stderr, **kwargs): logger.debug(f"acquiring lock: {self!r}") super().acquire(blocking=False) if not self.acquired: try: with open(self.message_path, "rb") as f: message = f.read().decode("utf-8", "replace") output = f"PID {os.getpid()} waiting for a file lock ({self.path_str}) held by: {message}" except OSError as e: if e.errno == errno.ENOENT: output = self.missing_message_output else: raise message_fn(output) super().acquire(**kwargs) if self.acquired: current_process = psutil.Process() cmd_line = " ".join(current_process.cmdline()) message = f"{current_process.pid} ({cmd_line})" with open(self.message_path, "wb") as f: f.write(message.encode()) return self.acquired def release(self): logger.debug(f"releasing lock: {self!r}") if self.acquired: safe_delete(self.message_path) return super().release()
# Minimum knight moves between two squares on an 8x8 chessboard (BFS).
from collections import deque


def is_valid(x, y):
    """Return True iff (x, y) lies on the 8x8 board."""
    return (0 <= x < 8) and (0 <= y < 8)


def minimum_steps(start_x, start_y, target_x, target_y):
    """Breadth-first search for the minimum number of knight moves from
    (start_x, start_y) to (target_x, target_y).

    Fixes over the original:
    - neighbours were enqueued BEFORE the visited check, so already-visited
      squares were re-enqueued repeatedly (the answer was still correct, but
      the queue grew far beyond the 64 squares);
    - list.pop(0) is O(n); a deque gives O(1) pops from the left.
    """
    # The eight knight offsets.
    moves = ((-2, 1), (-1, 2), (1, 2), (2, 1), (2, -1), (1, -2), (-1, -2), (-2, -1))
    # dist[x][y] = moves from the start; -1 means unvisited.
    dist = [[-1] * 8 for _ in range(8)]
    dist[start_x][start_y] = 0
    if start_x == target_x and start_y == target_y:
        return 0

    q = deque([(start_x, start_y)])
    while q:
        a, b = q.popleft()
        for dx, dy in moves:
            nx, ny = a + dx, b + dy
            # Skip off-board and already-visited squares.
            if not is_valid(nx, ny) or dist[nx][ny] != -1:
                continue
            if nx == target_x and ny == target_y:
                return dist[a][b] + 1
            dist[nx][ny] = dist[a][b] + 1
            q.append((nx, ny))
    # Unreachable on a standard 8x8 board; kept for safety.
    return None


start_x, start_y = 3, 4
target_x, target_y = 3, 3
print(minimum_steps(start_x, start_y, target_x, target_y))
# this script is used to create new dataset with entities and positions:
# align the lifelong-learning FewRel splits (plain text) with the original
# FewRel json to recover head/tail entity surface forms and token positions.
import json
from data_loader import remove_invalid_token, remove_return_sym, lower_case
from tqdm import tqdm

fewrel_train_file = './dataset/fewrel/train_wiki.json'
fewrel_valid_file = './dataset/fewrel/val_wiki.json'
fewrel_relation_file = './dataset/fewrel/all_relations.json'

with open(fewrel_train_file, 'r') as f:
    fewrel_train_data = json.load(f)
with open(fewrel_valid_file, 'r') as f:
    fewrel_valid_data = json.load(f)
with open(fewrel_relation_file, 'r') as f:
    fewrel_relations = json.load(f)

# Bidirectional mapping between the dataset's 1-based relation index and the
# Wikidata relation id (e.g. "P931").
id2rel = {}
rel2id = {}
for wiki_rel_id in fewrel_relations:
    idx = int(fewrel_relations[wiki_rel_id]['index']) + 1
    id2rel[idx] = wiki_rel_id
    rel2id[wiki_rel_id] = idx

lifelong_fewrel_train_file = './dataset/training_data.txt'
lifelong_fewrel_valid_file = './dataset/val_data.txt'
lifelong_fewrel_train_file_with_entity = './dataset/training_data_with_entity.txt'
lifelong_fewrel_valid_file_with_entity = './dataset/test_data_with_entity.txt'

lifelong_fewrel_train_data = []
lifelong_fewrel_valid_data = []
# lifelong_fewrel_train_data_with_entity = []
# lifelong_fewrel_valid_data_with_entity = []

with open(lifelong_fewrel_train_file, 'r') as f:
    for line in f:
        lifelong_fewrel_train_data.append(line)
print('Load %d training instance' % len(lifelong_fewrel_train_data))

with open(lifelong_fewrel_valid_file, 'r') as f:
    for line in f:
        lifelong_fewrel_valid_data.append(line)
print('Load %d validation instance' % len(lifelong_fewrel_valid_data))


def process_lifelong_data(fewrel_data):
    """For each tab-separated line (relation idx, candidate idxs, tokens),
    find the matching instance in the original FewRel json and attach the
    head/tail entity strings and their token positions.

    NOTE(review): when a relation or sentence is NOT found, the code only
    prints a message and falls through — wiki_rel_id / current_instance can
    then be unbound (or stale from a previous iteration), raising at the
    `current_instance['h']` access.  Confirm these lookups never miss, or
    turn the prints into raises.
    """
    data_with_entity = []
    for i in tqdm(range(len(fewrel_data))):
        line = fewrel_data[i]
        items = line.split('\t')
        rel_idx = int(items[0])  # int
        candidate_rel_idx = [int(idx) for idx in items[1].split()]
        tokens = remove_invalid_token(remove_return_sym(items[2]).split())
        if rel_idx in id2rel:
            wiki_rel_id = id2rel[rel_idx]
        else:
            print('relation %s not found' % rel_idx)
        # Instances may live in either the train or validation json.
        if wiki_rel_id in fewrel_train_data:
            instance_pool = fewrel_train_data[wiki_rel_id]
        elif wiki_rel_id in fewrel_valid_data:
            instance_pool = fewrel_valid_data[wiki_rel_id]
        else:
            print('wiki relation %s not found' % wiki_rel_id)
        # Match the sentence by exact (lower-cased, cleaned) token sequence.
        current_instance = None
        for instance_item in instance_pool:
            _tokens = lower_case(remove_invalid_token(instance_item['tokens']))
            if ' '.join(_tokens) == ' '.join(tokens):
                current_instance = instance_item
                break
        if current_instance is None:
            print('instance: [%s] not found' % line)
        # FewRel encodes entities as [surface, wikidata_id, positions].
        h = current_instance['h'][0]
        h_pos = current_instance['h'][-1]
        t = current_instance['t'][0]
        t_pos = current_instance['t'][-1]
        item_with_entity = [rel_idx, candidate_rel_idx, tokens, h, h_pos, t, t_pos]
        data_with_entity.append(item_with_entity)
    return data_with_entity


print('processing training data')
lifelong_fewrel_train_data_with_entity = process_lifelong_data(lifelong_fewrel_train_data)
print('processing validation data')
lifelong_fewrel_valid_data_with_entity = process_lifelong_data(lifelong_fewrel_valid_data)


def dump_new_data(data_with_entity, file_path):
    """Write the augmented instances back out, tab-separated; only the first
    position span (item[4][0] / item[6][0]) of each entity is kept."""
    with open(file_path, 'w') as f:
        for i in tqdm(range(len(data_with_entity))):
            item = data_with_entity[i]
            f.writelines('%d\t%s\t%s\t%s\t%s\t%s\t%s\n' % (
                item[0],
                ' '.join([str(cand_rel) for cand_rel in item[1]]),
                ' '.join(item[2]),
                item[3],
                ' '.join([str(pos) for pos in item[4][0]]),
                item[5],
                ' '.join([str(pos) for pos in item[6][0]]),
            ))


print('dump training data with entity')
dump_new_data(lifelong_fewrel_train_data_with_entity, lifelong_fewrel_train_file_with_entity)
print('dump valid data with entity')
dump_new_data(lifelong_fewrel_valid_data_with_entity, lifelong_fewrel_valid_file_with_entity)
if __name__ == '__main__':
    # Report, one per line, whether the input string contains at least one
    # alphanumeric, alphabetic, digit, lowercase and uppercase character.
    s = input()
    # any() short-circuits; output is identical to or-accumulating per char.
    print(any(ch.isalnum() for ch in s))
    print(any(ch.isalpha() for ch in s))
    print(any(ch.isdigit() for ch in s))
    print(any(ch.islower() for ch in s))
    print(any(ch.isupper() for ch in s))
# Answer range-minimum queries by brute force: first line is "n q", second
# line the n values, then q lines of 1-based inclusive "l r" queries.
_, num_queries = map(int, input().split())
values = list(map(int, input().split()))
for _ in range(num_queries):
    left, right = map(int, input().split())
    # 1-based inclusive [left, right] maps to the 0-based slice below.
    print(min(values[left - 1:right]))
# Cross-check the deterministic sieve against the probabilistic Miller-Rabin
# test: every number the sieve returns should be reported prime.
from Sieve_of_Eratosthenes import *
from Miller_Test import *

# list of primes till 100
primes = sieve_of_eratosthenes(100)

for p in primes:
    # 50 rounds makes a false "prime" verdict astronomically unlikely.
    if miller_test(p, 50):
        print(f'{p} is prime')
    else:
        print(f'{p} is composite')
import aStarHelperFunctions ''' * Function Name: a_star_search() --> Performs A* path finding for the passed nodes * Input: (startNode, goalNode, traversable_nodes, current_dir) --> * Start of the path, Goal of the path, List of passable(traversable) nodes and current facing direction * Output: -1 if no path is found, else: path cost, one node before goal(for pickup), path, final facing direction * Logic: The standard A* algorithm is modified in which g_cost is determined from the node_to_node and turn costs, and * directions are stored along with path for the next loop run * Example Call: a_star_search(33, 17, traversable_nodes, 'N') ''' def a_star_search(startNode, goalNode, traversable_nodes, current_dir): openNodes = [] closedNodes = [] traversable_nodes.append(startNode) traversable_nodes.append(goalNode) pathList = [] # Final output list the path nodeParents = {} # Stores node : parent(node from which it comes) fCost = {} # Stores node : fCost gCost = {} # Stores node : gCost nodeDirections = {} # Direction at every node botDirections = [] # Directional commands for bot # initial fCost = gCost + hCost # fCost[startNode] = 0 + node_distance(startNode, goalNode) * cell_to_cell_cost gCost[startNode] = 0 # Stores turn direction required at each node nodeDirections[startNode] = current_dir openNodes.append(startNode) while 1: fCostCompare = 9999 # large value to find minimum currentNode = 0 if len(openNodes) == 0: # If object not reachable traversable_nodes.remove(startNode) # rectify the modified list traversable_nodes.remove(goalNode) return -1 for node in openNodes: # Set node with least fCost as current if fCost[node] < fCostCompare: currentNode = node fCostCompare = fCost[node] openNodes.remove(currentNode) # Move from open to closed closedNodes.append(currentNode) if currentNode == goalNode: # If reached goal break neighbours = getNeighbouringNodes(currentNode) # For every neighbour get fCost and append to open nodes if valid--- # for j in neighbours: if j 
not in traversable_nodes or j in closedNodes: continue # get the turnRequired, costs and directions to get from currentNode to neighbour turnRequired, turnCost, nextDir = calculateTurn(nodeDirections[currentNode][0], currentNode, j) tentativeGCost = gCost[currentNode] + turnCost + node_distance(currentNode, j) * cell_to_cell_cost if j not in openNodes: openNodes.append(j) elif tentativeGCost >= gCost[j]: # skip this continue # Store costs and update parent, directions gCost[j] = tentativeGCost fCost[j] = gCost[j] + node_distance(j, goalNode) * cell_to_cell_cost nodeParents[j] = currentNode nodeDirections[j] = [nextDir, turnRequired] pathNode = goalNode pathList.append(goalNode) # Form path list by retracing path while pathNode != startNode: pathNode = nodeParents[pathNode] pathList.append(pathNode) pathList.reverse() print 'final path list : ', pathList # create a directions_list for bot commands for j in range(0, len(pathList) - 1): contents = nodeDirections[pathList[j + 1]] botDirections.append(contents[1]) pickup_node_in_astar = pathList[-1] final_facing_dir = nodeDirections[goalNode][0] # bot_facing_direction at the end of path traversable_nodes.remove(startNode) # rectify the modified list traversable_nodes.remove(goalNode) return fCost[goalNode], pickup_node_in_astar, botDirections, final_facing_dir # ------------------------------------------------------------------- #
import os
from fabric.api import sudo
from fabtools import files


def upload_template(p, dest, ctx, mode='644'):
    # Render the local Jinja template at path object `p` and install it at
    # `dest` on the target host (via sudo, no backup copy).
    files.upload_template(
        p.name, dest, ctx, use_jinja=True, template_dir=str(p.parent),
        use_sudo=True, mode=mode, backup=False, chown=True)


def enable(app, d):
    """
    Install systemd units for an app.

    Installs and enables systemd units, e.g. services and/or timers
    required by an app.

    Each unit is expected to be specified by a subdirectory of `d`, with
    files

    - `service`
    - `timer`
    - `script`

    These files are treated as Jinja templates, rendered and put in

    - /etc/systemd/system (for timers and services) and
    - /usr/bin for scripts.

    The template context available within the files is

    - `app`: The App instance, the unit is used for
    - `script_path`: The path of the associated script on the target system.
    """
    if d.exists() and d.name == 'systemd':
        for unit in d.iterdir():
            ctx = dict(app=app, osenv=os.environ)
            script = unit / 'script'
            if script.exists():
                # Scripts are installed to /usr/bin as <app>-<unit>.
                ctx['script_path'] = script_path = '/usr/bin/{0}-{1}'.format(app.name, unit.name)
                upload_template(script, script_path, ctx, mode='755')
            # `enable` names the unit type started/enabled below: the timer
            # when the unit ships one, otherwise the service.
            # NOTE(review): this local shadows the enclosing function name.
            enable = 'service'
            for name in ['service', 'timer']:
                if name == 'timer':
                    enable = name
                p = unit / name
                if p.exists():
                    upload_template(
                        p,
                        '/etc/systemd/system/{0}-{1}.{2}'.format(app.name, unit.name, name),
                        ctx)
            sudo('systemctl start {0}-{1}.{2}'.format(app.name, unit.name, enable))
            sudo('systemctl enable {0}-{1}.{2}'.format(app.name, unit.name, enable))
        sudo('systemctl daemon-reload')


def uninstall(app, d):
    # Reverse of enable(): stop and disable each app unit, then delete the
    # installed script and unit files.
    if d.exists() and d.name == 'systemd':
        for unit in d.iterdir():
            delete = ['/usr/bin/{0}-{1}'.format(app.name, unit.name)]
            enable = 'service'
            for name in ['service', 'timer']:
                if name == 'timer':
                    enable = name
                delete.append('/etc/systemd/system/{0}-{1}.{2}'.format(app.name, unit.name, name))
            sudo('systemctl stop {0}-{1}.{2}'.format(app.name, unit.name, enable))
            sudo('systemctl disable {0}-{1}.{2}'.format(app.name, unit.name, enable))
            for p in delete:
                if files.exists(p):
                    sudo('rm {0}'.format(p))
        sudo('systemctl daemon-reload')
        sudo('systemctl reset-failed')
import numpy
import json
import cv2
import numpy as np
import os
import scipy.misc as misc

# Interactively review ignore masks: pixels valued 3 in Ignore.png can be
# relabelled to 2 after visual confirmation.

#############################################################################################


def show(Im):
    """Debug helper: display an image and block until a key is pressed."""
    cv2.imshow("show", Im.astype(np.uint8))
    cv2.waitKey()
    cv2.destroyAllWindows()

###############################################################################################


def AddIgnore(InDir):
    """For every sample folder under InDir whose ignore mask contains the
    value 3, preview the affected region and let the user press 'c' to
    convert those pixels to 2 (or 'i' to leave the mask untouched)."""
    for index, folder in enumerate(os.listdir(InDir)):
        print(index)
        sample_dir = InDir + "//" + folder
        image = cv2.imread(sample_dir + "/Image.png")
        mask = cv2.imread(sample_dir + "/Ignore.png", 0)
        selected = (mask == 3)
        if selected.sum() == 0:
            continue
        # =========================================================================
        # Zero the blue and green channels over the selected pixels so the
        # region stands out (red remains) in the preview window.
        keep = 1 - selected.astype(np.uint8)
        image[:, :, 0] *= keep
        image[:, :, 1] *= keep
        cv2.imshow(" i (ignore) c(correct)", image)
        while True:
            key = chr(cv2.waitKey())
            if key in ('i', 'c'):
                break
        cv2.destroyAllWindows()
        if key == 'i':
            continue
        mask[selected] = 2
        cv2.imwrite(sample_dir + "/Ignore.png", mask)
        # =========================================================================


InDir = r"C:\Users\Sagi\Desktop\NewChemistryDataSet\NewFormat\Instance\\"
AddIgnore(InDir)
# Modifications Copyright 2016-2017 Reddit, Inc.
#
# Copyright 2013-2016 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from setuptools import setup, find_packages

# Runtime and test-only dependencies for the cqlmapper package.
INSTALL_REQUIRES = [
    "cassandra-driver",
    "six>=1.6",
]
TESTS_REQUIRE = [
    "mock",
    "nose",
    "coverage",
]

setup(
    name="cqlmapper",
    version="0.2.1",
    packages=find_packages(),
    install_requires=INSTALL_REQUIRES,
    include_package_data=True,
    tests_require=TESTS_REQUIRE,
)
__author__ = 'apple' ## Get Shapefile Fields - Get the user defined fields from osgeo import ogr daShapefile = r"ne1.shp" dataSource = ogr.Open(daShapefile) daLayer = dataSource.GetLayer(0) layerDefinition = daLayer.GetLayerDefn() for i in range(layerDefinition.GetFieldCount()): print layerDefinition.GetFieldDefn(i).GetName()
import pandas as pd
from main import get_data, get_commu_info, headers
from lxml import etree
import re
from get_commu_list_lianjia import get_commu_ls_page

# Build a more complete community (小区) list by walking the finer-grained
# sub-districts (板块) of each Shanghai district on sh.ke.com.


def get_bankuai_info(url):
    """Given a district URL, return {sub-district name: absolute URL}."""
    res_elements, _ = get_data(url, headers)
    urls = res_elements.xpath('//div[@class="position"]/dl[last()]/dd/div/div[last()]/a/@href')
    names = res_elements.xpath('//div[@class="position"]/dl[last()]/dd/div/div[last()]/a/text()')
    res = {}
    for name, url in zip(names, urls):
        res[name] = 'https://sh.ke.com' + url
    return res


def get_commu_num(url):
    """Return how many communities are listed under a sub-district URL
    (used to derive the page count, 30 entries per page)."""
    res_elements, res_text = get_data(url, headers)
    ss = res_elements.xpath('//div[@data-component="listOverview"]/h2/span')[0]
    commu_num = etree.tostring(ss, encoding='utf-8').decode()
    commu_num = re.findall('.*<span>(.+)</span>.*', commu_num)[0].strip()
    return int(commu_num)


def get_comm_ls_pg(pg_url):
    """Scrape one listing page: community names, response ids and URLs."""
    res = []
    res_elements, res_text = get_data(pg_url, headers)
    commu_names = res_elements.xpath('//ul[@class="listContent"]/li/a/@title')
    urls = res_elements.xpath('//ul[@class="listContent"]/li/a/@href')
    response_ids = res_elements.xpath('//ul[@class="listContent"]/li/@data-id')
    for commu_info in zip(commu_names, response_ids, urls):
        res.append(list(commu_info))
    df_res = pd.DataFrame(res, columns=['小区名称', 'response_id', 'url'])
    return df_res


def get_commu_ls_region(url, name_qu):
    """Collect every community of one district (`name_qu`), save the result
    to ./data/<district>小区列表.csv and return it as a DataFrame."""
    res_df = pd.DataFrame()
    # Enumerate the district's sub-districts, then page through each one.
    bankuai_dic = get_bankuai_info(url)
    print(bankuai_dic)
    for name, url in bankuai_dic.items():
        commu_num = get_commu_num(url)
        print(f"{name}板块共有{commu_num}个小区".center(80, '-'))
        pg_nums = int(commu_num / 30) + 2
        for pg_num in range(1, pg_nums):
            pg_url = url + f'pg{pg_num}/'
            print(pg_url)
            # Bug fix: the original retry loop broke out of the `while` on
            # the first failure and then referenced the unbound page
            # DataFrame (NameError), so one transient error aborted the
            # whole district.  Retry up to 11 times and skip the page only
            # when every attempt failed.
            commus_pg_df = None
            for _attempt in range(11):
                try:
                    commus_pg_df = get_comm_ls_pg(pg_url)
                    break
                except Exception:
                    continue
            if commus_pg_df is None:
                continue
            commus_pg_df['板块名称'] = name
            # DataFrame.append was removed in pandas 2.x; pd.concat (already
            # used in the __main__ section below) is the supported form.
            res_df = pd.concat([res_df, commus_pg_df])
            print(f"目前已拿到{res_df.shape[0]}个小区")
    res_df['区名'] = name_qu
    res_df.to_csv(f'./data/{name_qu}小区列表.csv', index=False)
    return res_df


def get_region_info():
    """Return {district name: URL} for every district of Shanghai."""
    url = 'https://sh.ke.com/xiaoqu/'
    res = {}
    res_elements, res_text = get_data(url, headers)
    region_names = res_elements.xpath('//div[@data-role="ershoufang"]/div/a/text()')[:-1]
    urls = res_elements.xpath('//div[@data-role="ershoufang"]/div/a/@href')[:-1]
    print(region_names)
    print(urls)
    for name, url in zip(region_names, urls):
        res[name] = 'https://sh.ke.com' + url
    return res


if __name__ == "__main__":
    # Previous entry point (kept for reference): scrape every district.
    # url = 'https://sh.ke.com/xiaoqu/pudong/'
    # get_commu_ls_region(url)
    # res_df = pd.DataFrame()
    # regions = get_region_info()
    # for name_qu, url_qu in regions.items():
    #     print(f'正在获取{name_qu}的小区名称'.center(90, '*'))
    #     df_qu = get_commu_ls_region(url_qu, name_qu)
    #     res_df = res_df.append(df_qu)
    # res_df.to_csv('./data/上海市小区名称列表_链家.csv', index=False)

    # Current entry point: merge the per-district CSVs already on disk.
    import os
    ls = []
    for name in os.listdir('./data'):
        if re.findall('.*小区列表.*', name):
            name = os.path.join('./data', name)
            df_t = pd.read_csv(name)
            ls.append(df_t)
    df = pd.concat(ls, axis=0, ignore_index=True)
    print(df.head())
    df.to_csv('./data/上海市小区名称列表_链家.csv', index=False, encoding='utf8')
    df.to_excel('./data/上海市小区名称列表_链家.xlsx', index=False)
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import discord
import json
from DrinkShop import runLoki

# Bot credentials live outside the source, in account.info (JSON).
with open("account.info", encoding="UTF-8") as f:
    accountDICT = json.loads(f.read())


class BotClient(discord.Client):
    """Discord bot for a drink shop: a few canned replies for fixed
    messages; anything else is parsed as an order by the Loki NLU model."""

    async def on_ready(self):
        print('Logged on as {} with id {}'.format(self.user, self.user.id))

    async def on_message(self, message):
        # Don't respond to bot itself. Or it would create a non-stop loop.
        if message.author == self.user:
            return None
        print("到到來自 {} 的訊息".format(message.author))
        print("訊息內容是 {}。".format(message.content))
        if self.user.mentioned_in(message):
            print("本 bot 被叫到了!")
            # Strip the leading @mention to get the bare command text.
            msg = message.content.replace("<@!{}> ".format(self.user.id), "")
            if msg == 'ping':
                await message.reply('pong')
            elif msg == 'ping ping':
                await message.reply('pong pong')
            elif msg == '想喝奶茶了':
                await message.reply('別喝啦!!')
            elif msg == '今天晚餐你有頭緒嗎?':
                await message.reply('別吃省錢最快')
            elif msg == '想看貓貓':
                file = discord.File(r"./img/cat.png", filename="cat.png")
                await message.channel.send(file=file)
            elif msg == '想不到晚餐吃什麼':
                file = discord.File(r"./img/first.png", filename="first.png")
                await message.channel.send(file=file)
            elif msg == '聽說明天情人節?':
                file = discord.File(r"./img/dog.png", filename="dog.png")
                await message.channel.send(file=file)
            else:
                # Hand anything unrecognized to the NLU model from here on.
                responseSTR = "聽不懂你在說什麼?"
                inputLIST = [msg]
                filterLIST = []
                resultDICT = runLoki(inputLIST, filterLIST)
                print("Result => {}".format(resultDICT))
                if "sweetness" not in resultDICT:
                    # NOTE(review): this default is a plain string, yet it is
                    # later indexed as resultDICT["sweetness"][k], which
                    # yields single characters ("正", "常", ...) rather than a
                    # per-item sweetness -- confirm against runLoki's output
                    # shape; a list default looks intended.
                    resultDICT["sweetness"] = "正常糖"
                if "ice" in resultDICT and "hot" in resultDICT:
                    # Ambiguous order: both iced and hot were mentioned.
                    responseSTR = "請問是要喝熱飲還是冰飲呢?"
                if "ice" not in resultDICT and "hot" not in resultDICT:
                    # Neither specified: default to iced so the next branch
                    # builds the order summary.
                    resultDICT["ice"] = "正常冰"
                if "ice" in resultDICT and "hot" not in resultDICT:
                    # Iced order: list every item with amount/sweetness/ice.
                    responseSTR = "hello~ \n您點的總共是:\n"
                    for k in range(0, len(resultDICT["amount"])):
                        responseSTR = responseSTR + "{} X {} ({}、{})\n".format(resultDICT["item"][k], resultDICT["amount"][k], resultDICT["sweetness"][k], resultDICT["ice"][k])
                    responseSTR = responseSTR + "謝謝您的訂購\n"
                if "hot" in resultDICT and "ice" not in resultDICT:
                    # Hot order: same summary, but a single shared "hot" tag.
                    responseSTR = "hello~~ \n您點的總共是:\n"
                    for k in range(0, len(resultDICT["amount"])):
                        responseSTR = responseSTR + "{} X {} ({}、{})\n".format(resultDICT["item"][k], resultDICT["amount"][k], resultDICT["sweetness"][k], resultDICT["hot"][0])
                    responseSTR = responseSTR + "謝謝您的訂購\n"
                if "Nothing" in resultDICT["item"]:
                    # Unknown product requested.
                    responseSTR = "我們沒有販售該產品,請選擇其他飲品"
                await message.reply(responseSTR)


if __name__ == "__main__":
    client = BotClient()
    client.run(accountDICT["discord_token"])
#!/usr/bin/env python3 import wpilib as w from time import sleep class Sparky(w.IterativeRobot): def robotInit(self): # Motors to PWM channels l_motor = w.Talon(0) r_motor = w.Talon(1) self.lift_motor = w.VictorSP(2) # Drivetrain control self.drivetrain = w.RobotDrive(l_motor, r_motor) # Joystick self.js0 = w.Joystick(0) # Xbox controller self.js1 = w.Joystick(1) # Joystick def disabledInit(self): pass def disabledPeriodic(self): self.drivetrain.drive(0, 0) def autonomousInit(self): self.move = self.drivetrain.drive self.moving = True self.i = 0 self.length = 130 # How long the robot will run at max speed (100 ~= 2 sec) self.max_speed = 5 # Maximum speed (1-10) def autonomousPeriodic(self): while self.moving: for x in range(self.max_speed): self.move(-x/10, 0) sleep(0.1) if x == (self.max_speed - 1): self.moving = False if self.i < self.length: self.move(-self.max_speed/10, 0) self.i += 1 elif self.i == self.length: for x in reversed(range(self.max_speed - 1)): self.move(-x/10, 0) sleep(0.1) if x == 1: self.move(0, 0) self.i += 1 else: self.move(0, 0) def teleopInit(self): pass def teleopPeriodic(self): while self.isOperatorControl() and self.isEnabled(): self.drivetrain.arcadeDrive(self.js0) if self.js1.getRawButton(3) or self.js0.getRawButton(2): self.lift_motor.set(-0.8) elif self.js1.getRawButton(4) or self.js0.getRawButton(1): self.lift_motor.set(0.6) else: self.lift_motor.set(0) if __name__ == "__main__": w.run(Sparky)
from PyQt5.QtWidgets import QWidget
from PyQt5.QtGui import QPainter, QPen, QBrush
from PyQt5.QtCore import Qt
from PyQt5.QtCore import QTimer
import random
import Object
import Physik


class Game(QWidget):
    """Game window: spawns random objects and animates them through the
    physics engine on a 10 ms timer."""

    def __init__(self, UI, number_of_objects):
        super().__init__()
        self.title = "Game"
        self.top = 150
        self.left = 150
        self.width = UI.windowWidth
        self.height = UI.windowHeight
        self.InitWindow()
        self.x = 40
        self.y = 40
        self.timerSetup()
        self.objects = []
        # Spawn the requested number of rectangles with random position/mass.
        for i in range(number_of_objects.value()):
            print(random.randint(0,500))
            obj = Object.Objekt("Rechteck")
            obj.position = (random.randint(0,500),random.randint(0,500))
            obj.mass = random.randint(0,10000)
            self.objects.append(obj)
        # Give the first object an initial horizontal velocity.
        self.objects[0].velocity = (0.5,0)
        self.physik_engine = Physik.Engine()  # creates the physics engine
        """
        self.rectangle = Object.Objekt("Rechteck")
        self.rectangle2 = Object.Objekt("Rechteck")
        self.rectangle3 = Object.Objekt("Rechteck")
        self.rectangle2.position = (300,300)
        self.rectangle3.position = (100,300)
        self.rectangle3.mass = 5000
        self.rectangle2.velocity = 2
        self.objects.append(self.rectangle)
        self.objects.append(self.rectangle2)
        self.objects.append(self.rectangle3)
        """

    def timerSetup(self):
        # Fire self.game() every 10 ms (~100 ticks per second).
        self.timer = QTimer(self)
        self.timer.setInterval(10)
        self.timer.timeout.connect(self.game)
        self.timer.start()

    def InitWindow(self):
        self.setWindowTitle(self.title)
        self.setGeometry(self.top, self.left, self.width, self.height)
        self.show()

    def game(self):
        # One tick: advance every object, schedule a repaint, run physics.
        for obj in self.objects:
            obj.update()
        self.update()
        self.physik_engine.check_physics(self, self.objects)  # physics-engine update
        print(len(self.objects))

    """def changepos(self):
        self.x += 1
        self.y += 1
        self.update()"""

    def paintEvent(self, event):
        # Draw every object as a filled green ellipse inside its rect.
        painter = QPainter(self)
        painter.setPen(QPen(Qt.green, 8, Qt.SolidLine))
        painter.setBrush(QBrush(Qt.green, Qt.SolidPattern))
        #painter.drawRect(self.rectangle.rect)
        #painter.drawEllipse(self.obj.pos[0], self.obj.pos[1], 100, 100)
        for obj in self.objects:
            painter.drawEllipse(obj.rect)
# Generated by Django 3.0.1 on 2019-12-29 07:04
# NOTE: auto-generated migration; prefer adding new migrations over editing
# the operations below by hand.

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):
    # Initial schema for `testapp`: DistributorInfo, UserInfo and
    # InspectionInfo (which references the other two via cascading FKs).

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='DistributorInfo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('distributor_name', models.CharField(max_length=35)),
                ('distributor_type', models.CharField(choices=[('Distributor', 'Distributor'), ('Supermarkets', 'Supermarkets'), ('Food & Restaurent', 'Food & Restaurent')], max_length=20)),
                ('address', models.CharField(max_length=200)),
                ('status', models.BooleanField(default=True)),
            ],
            options={
                'verbose_name': 'Distributor Information',
                'verbose_name_plural': 'Distributor Informations',
            },
        ),
        migrations.CreateModel(
            name='UserInfo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('user_name', models.CharField(max_length=30, unique=True)),
                ('user_type', models.CharField(max_length=10)),
                ('user_full_name', models.CharField(max_length=80)),
                ('user_email', models.CharField(max_length=80, unique=True)),
                ('user_password', models.CharField(max_length=150)),
                ('user_mobile', models.CharField(max_length=15)),
                ('created_date', models.DateTimeField(auto_now_add=True)),
                ('status', models.BooleanField(default=True)),
            ],
            options={
                'verbose_name': 'User List',
                'verbose_name_plural': 'User Lists',
            },
        ),
        migrations.CreateModel(
            name='InspectionInfo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('serial_number', models.BigIntegerField(default=0)),
                ('action_taken', models.CharField(blank=True, max_length=30, null=True)),
                ('option_one', models.BooleanField(default=False)),
                ('option_one_img', models.ImageField(blank=True, upload_to='inspection_img')),
                ('option_two', models.BooleanField(default=False)),
                ('option_two_img', models.ImageField(blank=True, upload_to='inspection_img')),
                ('option_three', models.BooleanField(default=False)),
                ('option_three_img', models.ImageField(blank=True, upload_to='inspection_img')),
                ('option_four', models.BooleanField(default=False)),
                ('option_four_img', models.ImageField(blank=True, upload_to='inspection_img')),
                ('option_five', models.BooleanField(default=False)),
                ('option_five_img', models.ImageField(blank=True, upload_to='inspection_img')),
                ('option_six', models.BooleanField(default=False)),
                ('option_six_img', models.ImageField(blank=True, upload_to='inspection_img')),
                ('fine_amount', models.FloatField(default=0)),
                ('issue_warning', models.CharField(blank=True, max_length=30, null=True)),
                ('inspection_date', models.DateTimeField()),
                ('inserted_date', models.DateTimeField(auto_now_add=True)),
                ('status', models.BooleanField(default=True)),
                ('distributor_name', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='testapp.DistributorInfo')),
                ('user_name', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='testapp.UserInfo')),
            ],
            options={
                'verbose_name': 'Inspection Information',
                'verbose_name_plural': 'Inspection Informations',
            },
        ),
    ]
import psycopg2

from api.constants import POSTGRES_ADAPTER


def connect_to_postgres(host, port, username, password, db_name, **kwargs):
    """Open and return a psycopg2 connection.

    Extra keyword arguments are accepted (and ignored) so every adapter
    entry in DB_MAP_CONNECTOR can be called with a uniform signature.

    Raises:
        The underlying psycopg2 connection error, after printing it.
        (The original swallowed the exception and then crashed with an
        unrelated UnboundLocalError on `return conn`; re-raising surfaces
        the real failure to the caller.)
    """
    try:
        conn = psycopg2.connect(
            host=host,
            user=username,
            password=password,
            dbname=db_name,
            port=port
        )
    except Exception as e:
        print(str(e))
        raise
    return conn


# Maps adapter identifiers to their connector factory.
DB_MAP_CONNECTOR = {
    POSTGRES_ADAPTER: connect_to_postgres
}
''' This function calculates Fibonocci number at nth place using Dynamic Programming ''' fibHash = {} def fibDP(n): if n == 1: return 1 if n == 0: return 0 if n in fibHash: print "resued" return fibHash[n] # store the computed value in Hash table # Memoization fibHash[n-1] = fibDP(n-1) fibHash[n-2] = fibDP(n-2) print fibHash return fibDP(n-1) + fibDP(n-2) print fibDP(10)
import glob
import os
import unittest
import tempfile

import pytouch

# Unit tests for pytouch's BaseDirectoryPath / PythonEnvironment /
# PythonSymlinker.  Several tests touch the real filesystem under /tmp.
# NOTE(review): the TestCase classes live inside the __main__ guard, so
# external runners (pytest, `python -m unittest discover`) cannot see them;
# confirm that is intentional.

if __name__ == '__main__':
    class BaseDirectoryPathTest(unittest.TestCase):
        # Construction, path joining and filesystem helpers of
        # pytouch.BaseDirectoryPath.

        def test_base_path(self):
            name = 'test usr/local'
            base_path = '/usr/local'
            bp = pytouch.BaseDirectoryPath(name=name, base_path=base_path)
            self.assertEqual(base_path, bp.base_path)

        def test_name(self):
            name = 'test usr/local'
            base_path = '/usr/local'
            bp = pytouch.BaseDirectoryPath(name=name, base_path=base_path)
            self.assertEqual(name, bp.name)

        def test_empty_tail(self):
            name = 'test usr/local'
            base_path = '/usr/local'
            bp = pytouch.BaseDirectoryPath(name=name, base_path=base_path)
            # tail defaults to None when not supplied
            self.assertEqual(None, bp.tail)

        def test_non_empty_tail(self):
            name = 'test usr/local'
            base_path = '/usr/local'
            tail = 'bin'
            bp = pytouch.BaseDirectoryPath(name=name, base_path=base_path, tail=tail)
            self.assertEqual(tail, bp.tail)

        def test_get_full_path(self):
            name = 'test usr/local'
            base_path = '/usr/local'
            tail = 'bin'
            bp = pytouch.BaseDirectoryPath(name=name, base_path=base_path, tail=tail)
            self.assertEqual('/usr/local/bin', bp.get_full_path())

        def test_is_symlink(self):
            # Creates a real file and a symlink to it under /tmp, then
            # checks is_symlink() against os.path.islink.
            base_path = '/tmp'
            tail = 'blablah.symlink'
            name = 'blah blah symlink'
            regular_file = '/tmp/blah'
            # clean up from old tests
            if os.path.isfile(regular_file):
                os.remove(regular_file)
            b = pytouch.BaseDirectoryPath(name=name, base_path=base_path, tail=tail)
            # clean up from old tests
            if os.path.islink(b.get_full_path()):
                os.remove(b.get_full_path())
            os.system('touch {}'.format(regular_file))
            self.assertTrue(os.path.isfile(regular_file))
            self.assertTrue(not os.path.islink(b.get_full_path()))
            os.symlink(regular_file, b.get_full_path())
            self.assertTrue(os.path.islink(b.get_full_path()))
            self.assertTrue(b.is_symlink())
            os.remove(regular_file)
            os.remove(b.get_full_path())

        def test_remove(self):
            name = 'blah'
            base_path = '/tmp'
            tail = 'blah'
            regular_file = '/tmp/blah'
            # clean up from old tests
            if os.path.isfile(regular_file):
                os.remove(regular_file)
            os.system('touch {}'.format(regular_file))
            self.assertTrue(os.path.isfile(regular_file))
            b = pytouch.BaseDirectoryPath(name=name, base_path=base_path, tail=tail)
            b.remove()
            self.assertTrue(not os.path.islink(b.get_full_path()))

    # NOTE(review): this class subclasses `object`, not unittest.TestCase,
    # so none of its tests ever run; its bodies also duplicate
    # BaseDirectoryPathTest while still targeting BaseDirectoryPath.
    # Confirm whether it was deliberately disabled.
    class PythonEnvironmentBinaryTest(object):
        def test_base_path(self):
            name = 'test usr/local'
            base_path = '/usr/local'
            bp = pytouch.BaseDirectoryPath(name=name, base_path=base_path)
            self.assertEqual(base_path, bp.base_path)

        def test_name(self):
            name = 'test usr/local'
            base_path = '/usr/local'
            bp = pytouch.BaseDirectoryPath(name=name, base_path=base_path)
            self.assertEqual(name, bp.name)

        def test_empty_tail(self):
            name = 'test usr/local'
            base_path = '/usr/local'
            bp = pytouch.BaseDirectoryPath(name=name, base_path=base_path)
            self.assertEqual(None, bp.tail)

        def test_non_empty_tail(self):
            name = 'test usr/local'
            base_path = '/usr/local'
            tail = 'bin'
            bp = pytouch.BaseDirectoryPath(name=name, base_path=base_path, tail=tail)
            self.assertEqual(tail, bp.tail)

        def test_get_full_path(self):
            name = 'test usr/local'
            base_path = '/usr/local'
            tail = 'bin'
            bp = pytouch.BaseDirectoryPath(name=name, base_path=base_path, tail=tail)
            self.assertEqual('/usr/local/bin', bp.get_full_path())

    class PythonEnvironmentTest(unittest.TestCase):
        # Attribute handling and binary-path compilation of
        # pytouch.PythonEnvironment.

        def test_base_path(self):
            base_path = '/tmp/test.symlink'
            version = '2.7.3'
            p = pytouch.PythonEnvironment(base_path=base_path, version=version)
            self.assertEqual(p.base_path, base_path)

        def test_version(self):
            base_path = '/tmp/test.symlink'
            version = '2.7.3'
            p = pytouch.PythonEnvironment(base_path=base_path, version=version)
            self.assertEqual(p.version, version)

        def test_compile_paths_binaries(self):
            base_path = '/tmp/test.symlink'
            version = '2.7.3'
            p = pytouch.PythonEnvironment(base_path=base_path, version=version)
            self.assertEqual(len(p.binaries), 4)

        # NOTE(review): this method is defined twice with identical bodies;
        # the second definition silently overrides the first.
        def test_compile_paths_binaries_name_equal(self):
            base_path = '/tmp/test.symlink'
            version = '2.7.3'
            p = pytouch.PythonEnvironment(base_path=base_path, version=version)
            for binary in p.binaries:
                self.assertIn(binary.name, [p['name'] for p in pytouch.PythonEnvironment.INIT_BINARIES])

        def test_compile_paths_binaries_name_equal(self):
            base_path = '/tmp/test.symlink'
            version = '2.7.3'
            p = pytouch.PythonEnvironment(base_path=base_path, version=version)
            for binary in p.binaries:
                self.assertIn(binary.name, [p['name'] for p in pytouch.PythonEnvironment.INIT_BINARIES])

        def test_compile_paths_binaries_tail_equal(self):
            base_path = '/tmp/test.symlink'
            version = '2.7.3'
            p = pytouch.PythonEnvironment(base_path=base_path, version=version)
            for binary in p.binaries:
                self.assertIn(binary.tail, [p['tail'] for p in pytouch.PythonEnvironment.INIT_BINARIES])

        def test_iter(self):
            base_path = '/tmp/test.symlink'
            version = '2.7.3'
            p = pytouch.PythonEnvironment(base_path=base_path, version=version)
            self.assertTrue(iter(p))

    class PythonSymlinkerTest(unittest.TestCase):
        # End-to-end symlink creation for a PythonEnvironment.

        def test_base_path(self):
            base_path = '/tmp'
            version = '2.7.1'
            env = pytouch.PythonEnvironment(base_path=base_path, version=version)
            sym = pytouch.PythonSymlinker(base_path=base_path, python_environment=env)
            self.assertEqual(sym.base_path, base_path)

        def test_python_environment(self):
            base_path = '/tmp'
            version = '2.7.1'
            env = pytouch.PythonEnvironment(base_path=base_path, version=version)
            sym = pytouch.PythonSymlinker(base_path=base_path, python_environment=env)
            self.assertEqual(sym.python_environment, env)

        def test_symlink_no_files_in_dir(self):
            # NOTE(review): depends on a pre-built Python install under
            # ~/.local -- this test is environment-specific.
            base_python_path = os.path.expanduser(
                '~/.local/usr/local/python-2.7.5/')
            base_path = '/tmp'
            base_path_prefix = 'test_pytouch'
            base_path_abs = os.path.join(base_path, base_path_prefix)
            if not os.path.isdir(base_path_abs):
                os.mkdir(base_path_abs)
            env = pytouch.PythonEnvironment(base_path=base_python_path,
                                            version='2.7.5')
            sym = pytouch.PythonSymlinker(base_path=base_path_abs,
                                          python_environment=env)
            sym.symlink()
            for binary in sym.python_environment.binaries:
                self.assertTrue(os.path.islink(
                    sym.generate_path(binary.get_file_name())))
            if os.path.isdir(base_path_abs):
                for binary in sym.get_symlink_paths():
                    os.remove(binary.get_full_path())
                os.rmdir(base_path_abs)

        def test_symlink_existing_files_in_dir(self):
            pass

    unittest.main()
'''
Longest common prefix of a list of strings.

Original note (translated): zip(*strs) regroups the words column by column,
so each tuple holds the characters at one position across all words; its
length is bounded by the shortest word.  The first column whose character
set has more than one element marks the end of the common prefix.  This
rewrite scans columns directly over min(strs) instead.
'''


class Solution:
    # @return a string
    def longestCommonPrefix(self, strs):
        """Return the longest string prefixing every entry of *strs*
        (the empty string for an empty list)."""
        if not strs:
            return ""
        # The lexicographic minimum is never longer than the answer and no
        # other word can be a proper prefix of it, so it bounds the scan.
        bound = min(strs)
        for idx, ch in enumerate(bound):
            if any(word[idx] != ch for word in strs):
                return bound[:idx]
        return bound
import csv
from collections import namedtuple
from datetime import datetime
from typing import Tuple


class DataPoint(namedtuple('DataPoint', ['date', 'value'])):
    # NOTE(review): the ordering operators below compare only `value`, while
    # the inherited namedtuple __eq__ compares (date, value), so ordering
    # and equality are inconsistent; __ge__ is also missing.  Confirm the
    # intended comparison semantics.
    __slots__ = ()

    def __le__(self, other):
        return self.value <= other.value

    def __lt__(self, other):
        return self.value < other.value

    def __gt__(self, other):
        return self.value > other.value


def read_prices(csvfile, _strptime=datetime.strptime):
    """Yield a DataPoint(date, adjusted close) per row of a CSV with
    'Date' (YYYY-MM-DD) and 'Adj Close' columns.

    `_strptime` is bound as a default argument so the attribute lookup is
    done once rather than on every row.
    """
    with open(csvfile) as infile:
        reader = csv.DictReader(infile)
        for row in reader:
            yield DataPoint(date=_strptime(row['Date'], '%Y-%m-%d').date(),
                            value=float(row['Adj Close']))


# Module-level: eagerly loads SP500.csv from the current directory
# (raises FileNotFoundError when run elsewhere).
prices = tuple(read_prices('SP500.csv'))
print(prices)
# Validate a user login against hard-coded credentials.

# Credentials as stored in the "database".
uid = "admin123"
password = "123456"

# Prompt for the credentials to check.
username = input("输入用户名:")
passwd = input("输入密码:")

# Reject blank input first, then compare against the stored pair.
if not (username and passwd):
    print("用户名或密码不能为空!")
elif username == uid and passwd == password:
    print("登陆成功!")
else:
    print("用户名或密码错误!")
import pandas as pd
import os
import sys
import csv
from datetime import datetime
import getopt
import argparse


class DataSampler:
    """Draw a random sample of tweets for annotation in brat.

    Tweets whose ids were already handed out are tracked in a
    '<input>-annotated_ids.csv' sidecar file next to the source csv, so a new
    sample never repeats previously taken tweets. The cleaned sample and an
    empty .ann companion file are written into the annotator's folder.
    """

    def __init__(self,
                 tweets_dataset_path: str,
                 sample_n: int,
                 random_state: int,
                 text_col: str,
                 annotator_name: str,
                 save_taken: bool,
                 out_folder: str
                 ):
        """
        Creates a Data Sampler
        :param tweets_dataset_path: path of the tweets source
        :param sample_n: number of samples to extract
        :param random_state: random state
        :param text_col: name of the column with tweets
        :param annotator_name: name of the person that will be making annotations, this will
            also be the name of the folder where the data sample is stored.
        :param save_taken: a boolean that when True will store the ids of sampled tweets so
            future annotators do not extract already annotated ids
        :param out_folder: folder where all samples will be store. Each annotator should have
            a folder in this directory
        :return: None
        """
        self.tweets_dataset_path = os.path.relpath(tweets_dataset_path)
        # parent folder where .csv with tweets lives
        self.file_dir = os.path.dirname(tweets_dataset_path)
        # name of the .csv that contains all the tweets (output of sql in bigQuery)
        self.input_file = os.path.basename(tweets_dataset_path)
        # name of the .csv that contains the ids of annotated tweets
        self.tweets_annotated_ids = self.input_file.split('.')[0] + '-annotated_ids.csv'
        self.taken_ids = self.__setup_file_of_annotated_ids()
        self.save_taken = save_taken
        # self.brat_data_path = brat_data_path
        self.sample_n = sample_n
        self.random_state = random_state
        self.text_col = text_col
        self.annotator_name = annotator_name
        # Filled in by get_sample() once the output file is named.
        self.filename = None
        self.annotator_folder = os.path.dirname(f'{out_folder}{self.annotator_name}/')  # output folder
        # NOTE(review): sampling (with its file I/O and possible interactive
        # prompt) runs as a constructor side effect.
        self.get_sample()

    def __setup_data_folder_for_annotator(self, basedir):
        """
        Creates the folder of the annotator, in case it does not exist.
        """
        if not os.path.exists(basedir):
            os.makedirs(basedir)
            print(f'\n\t* A folder for "{self.annotator_name}" has been created in path: {basedir}/ ')

    def __setup_file_of_annotated_ids(self) -> pd.DataFrame:
        """
        Reads the annotated ids from the csv. In case the file does not exist.
        Returns an empty DataFrame, otherwise returns the contents of the file.
        """
        file_with_annotated_ids = os.path.join(self.file_dir, self.tweets_annotated_ids)
        if not os.path.isfile(file_with_annotated_ids):
            # No sidecar yet: start with an empty id ledger.
            return pd.DataFrame({'id': []})
        return pd.read_csv(file_with_annotated_ids)

    def __save_annotated_ids(self, new_annotated_ids):
        # Persist the ids of the freshly sampled tweets into the sidecar csv.
        if self.taken_ids.shape[0] > 0:
            # if there are ids, append the new to the list.
            # NOTE(review): DataFrame.append was removed in pandas 2.0 —
            # this requires pandas < 2 (pd.concat is the modern equivalent).
            annotated_ids = self.taken_ids.append(new_annotated_ids.to_frame())
        else:
            # if there are no existing ids, store this ones as new ones
            annotated_ids = new_annotated_ids
        annotated_ids.to_csv(f'{self.file_dir}/{self.tweets_annotated_ids}', index=False)
        print(f'File Updated: {self.file_dir}/{self.tweets_annotated_ids}')

    def __process_and_sample(self) -> pd.DataFrame:
        """
        Reads the csv with all the tweets, avoids getting a new sample with tweets that
        have already been annotated by saving a csv with all the tweet IDs that have
        already been tagged. The resulting csv will have for name the same of that
        original file with the suffix "-annotated_ids".
        :return: a dataframe that contains a sample of the tweets to be annotated.
        """
        original_tweets = pd.read_csv(self.tweets_dataset_path)
        # make sure the ids are of type integer and not interpreted as a float/scientific notation
        original_tweets['id'] = original_tweets['id'].astype(int)
        # drop the ids from the original_df, so when we sample we do not repeat an already tagged tweet
        available_tweets = original_tweets[~original_tweets['id'].isin(self.taken_ids['id'].values)]
        sampled_df = available_tweets.sample(random_state=self.random_state,
                                             n=self.sample_n)[['id', self.text_col]]
        # Reports the number of total tweets, the ones taken, the ones available and tha ones sampled
        print(f'Size of original file with tweets: {original_tweets.shape}')
        print(f'Size of the tweet IDs that have been taken: {self.taken_ids.shape}')
        print(f'Size of the available tweets: {available_tweets.shape}')
        print(f'Size of the sampled tweets {sampled_df.shape}')
        # print(f'sampled: {sampled_df}')
        if self.save_taken:
            self.__save_annotated_ids(sampled_df['id'])
        else:
            # prompts the user to mark the tweets as tagged
            user_response = str(input(f'''
            Do you wish to set these sampled data as tagged?
            If you select <n> or any other key, you are at risk of extracting data
            that you have already worked on.
            Answer (y/n): '''))
            if user_response == 'y':
                self.__save_annotated_ids(sampled_df['id'])
                print(f'''
                File has been updated, and next time you require new data, we guarantee
                you will not have repeated data.
                ''')
            else:
                print(f'''
                You are at risk of extracting data that you have already worked on.
                To avoid this, we recommend to tag the sampled data.
                ''')
        return sampled_df

    def cleaner(self, df, is_pandas_series=False):
        """
        Helper function to do basic text cleaning operations.
        These include: Converting text to lower case, removing spanish accents,
        and removing links.
        -------------------------------------------------------------------------------------------
        PARAMS
        df: Dataframe or Pandas.Series object.
        text_col: String. Column to clean.
        is_pandas_series: Boolean, Optional.
            If df is pandas.Series
        """
        # NOTE(review): the punctuation/link patterns rely on str.replace
        # treating the pattern as a regex by default (pandas < 2 behaviour);
        # pandas >= 2 needs regex=True. Confirm the pinned pandas version.
        # to lower
        if not is_pandas_series:
            df[self.text_col] = df[self.text_col].str.lower()
            # Convert common spanish accents
            df[self.text_col] = df[self.text_col].str.replace("ú", "u")
            df[self.text_col] = df[self.text_col].str.replace("ù", "u")
            df[self.text_col] = df[self.text_col].str.replace("ü", "u")
            df[self.text_col] = df[self.text_col].str.replace("ó", "o")
            df[self.text_col] = df[self.text_col].str.replace("ò", "o")
            df[self.text_col] = df[self.text_col].str.replace("í", "i")
            df[self.text_col] = df[self.text_col].str.replace("ì", "i")
            df[self.text_col] = df[self.text_col].str.replace("é", "e")
            df[self.text_col] = df[self.text_col].str.replace("è", "e")
            df[self.text_col] = df[self.text_col].str.replace("á", "a")
            df[self.text_col] = df[self.text_col].str.replace("à", "a")
            df[self.text_col] = df[self.text_col].str.replace("ñ", "gn")
            # Remove Punctuation
            df[self.text_col] = df[self.text_col].str.replace("[\.\-:,\?]", " ")
            # Remove links
            df[self.text_col] = df[self.text_col].str.replace("http.+", " ")
            return df
        else:
            df = df.str.lower()
            # Convert common spanish accents
            df = df.str.replace("ú", "u")
            df = df.str.replace("ù", "u")
            df = df.str.replace("ü", "u")
            df = df.str.replace("ó", "o")
            df = df.str.replace("ò", "o")
            df = df.str.replace("í", "i")
            df = df.str.replace("ì", "i")
            df = df.str.replace("é", "e")
            df = df.str.replace("è", "e")
            df = df.str.replace("á", "a")
            df = df.str.replace("à", "a")
            df = df.str.replace("ñ", "gn")
            # Remove Punctuation
            df = df.str.replace("[\.\-:,\?]", " ")
            # Remove links
            df = df.str.replace("http.+", " ")
            return df

    def get_sample(self):
        """Sample, clean and write the annotation files for this annotator."""
        original_df = self.__process_and_sample()
        original_df = self.cleaner(df=original_df)
        # Keep only the text column (a Series from here on) and strip newlines
        # so each tweet stays on one line in the output file.
        original_df = original_df[self.text_col].str.replace('\n', '')
        # Drop non-ascii characters (emojis etc.) that brat handles poorly.
        original_df = original_df.apply(lambda x: x.encode('ascii', 'ignore').decode('ascii'))
        # Returns csv with annotator's name (e.g. Juanito perez) and with timestamp
        timestamp = datetime.now()
        formatted_timestamp = timestamp.strftime('%Y-%m-%d_%H%M%S')
        self.filename = f'{self.annotator_name}-sample_{self.sample_n}-randstate_{self.random_state}-{formatted_timestamp}.txt'
        try:
            # saves the tweets in txt
            # NOTE(review): line_terminator was renamed lineterminator in
            # pandas 2.0 — this keyword requires pandas < 2.
            original_df.to_csv(f'{self.annotator_folder}/{self.filename}', sep=' ',
                               header=False, index=False,
                               line_terminator='\n\n',
                               quoting=csv.QUOTE_NONE,
                               escapechar=' ')
            # creates an empty .ann to use for annotations in brat
            open(f'{self.annotator_folder}/{self.filename.split(".")[0]}.ann', 'a').close()
            print(f'''
            Success! Data was saved at {self.annotator_folder}/{self.filename}
            '''
                  )
            # !self.brat_data_path/bash ann_creator.sh
        except FileNotFoundError:
            print(f'''
            Error! There was an error with {self.annotator_folder}/{self.filename}.
            Please check that the folder in which we are saving the data does exist.
            Name of the folder: {self.annotator_folder.split('/')[-1]}
            '''
                  )


if __name__ == '__main__':
    '''
    Example:
    (1)
    python data_sampler.py --path ../data_analysis/tagging-set-original_for_jupyter_tagging.csv
        --sample_size 30 --rand_state 19 --text_col full_text --annotator_name diego
    or
    (2)
    python data_sampler.py -p=../data_analysis/tagging-set-original_for_jupyter_tagging.csv
        -s=30 -r=19 -tc=full_text --annotator_name diego

    If case the annotator wishes not to repeat extracting tweets already annotated, a flag:
    -t ot --taken can be added to the command (1) or (2) above.

    For more information python data_sampler.py -h
    '''
    def annotator_folder_exists(arg_value):
        """
        See if the folder with the annotator's name has been created.
        :param arg_value: a string that represents the name of the annotator and at the
            same time the name of the folder in which the samples will be stored.
        :return: the string, if no errors are thrown
        """
        if not os.path.isdir(f'{OUTPUT_FOLDER}/{arg_value}'):
            msg = f'The folder of annotator "{arg_value}" has not been created. Please do.'
            raise argparse.ArgumentTypeError(msg)
        return arg_value

    def source_file_exists(arg_value):
        """
        See if the csv given as path exists.
        :param arg_value: a string that represents the filepath that contains all tweets.
            The source.
        :return: the filepath as string, if no errors are thrown
        """
        if not os.path.isfile(arg_value):
            msg = f'The provided path "{arg_value}" cannot be found. Please make sure it exists.'
            raise argparse.ArgumentTypeError(msg)
        return arg_value

    def check_input():
        # Parse and validate the command line; returns the arguments as a dict.
        parser = argparse.ArgumentParser(description='''
        Sample tweets dataframe for annotations in Brat.
        Example:
        python data_sampler.py --path ../data_analysis/tagging-set-original_for_jupyter_tagging.csv
        --sample_size 30 --rand_state 19 --text_col full_text --annotator_name diego
        ''')
        parser.add_argument('-p', '--path', type=source_file_exists,
                            help='Path to csv file where tweets are located.')
        parser.add_argument('-t', '--taken', action="store_true", default=False,
                            help='Determines whether to mark the sample of tweets obtained as tagged, so future '
                                 'annotators do not repeat annotating tweets that have already been taken.')
        parser.add_argument('-s', '--sample_size', type=int,
                            help='Sample size to extract.')
        parser.add_argument('-r', '--rand_state', type=int,
                            help='Random State to used in the models.')
        # NOTE(review): "rwe" in the help text below looks like a typo for "the".
        parser.add_argument('-tc', '--text_col', type=str,
                            help='Column that contains rwe tweets text.')
        parser.add_argument('--annotator_name', type=annotator_folder_exists,
                            help='Name of the person that is annotating. Also the name of the folder where '
                                 'to save the sampled data.')
        return vars(parser.parse_args())

    # folder where all annotations will lay.
    OUTPUT_FOLDER = '../data/data_to_annotate/'
    args = check_input()
    # print(args)
    sample = DataSampler(tweets_dataset_path=args['path'],
                         sample_n=args['sample_size'],
                         random_state=args['rand_state'],
                         text_col=args['text_col'],
                         annotator_name=args['annotator_name'],
                         save_taken=args['taken'],
                         out_folder=OUTPUT_FOLDER)
import numpy as np
import matplotlib.pyplot as plt

# Plots the number of mistakes as a function of the number of training
# instances for four online learners (perceptron/winnow, with/without margin),
# once for n=1000 attributes and once for n=500.


def readAndPlot():
    """Render the mistakes-vs-instances plot for both attribute counts."""
    getPlotFor1000()
    getPlotFor500()


def _plotForN(n):
    """Plot mistakes vs. instances for all four learners at `n` attributes.

    Shared implementation for getPlotFor1000/getPlotFor500, which were
    copy-paste duplicates differing only in `n`.
    """
    series = [
        ("PerceptronWithMargin", "perceptronWithMargin"),
        ("PerceptronWithoutMargin", "perceptronWithoutMargin"),
        ("WinnowWithMargin", "winnowWithMargin"),
        ("WinnowWithoutMargin", "winnowWithoutMargin"),
    ]
    for file_stem, label in series:
        fname = rf"output\a\{file_stem}10of100of{n}.txt"
        error_counts, instances = getData(fname)
        plt.plot(instances, error_counts, label=label)
    plt.legend()
    plt.ylabel("Mistakes")
    plt.xlabel(f"n={n}")
    plt.show()


def getPlotFor1000():
    """Plot the four learners' mistake curves for n=1000 attributes."""
    _plotForN(1000)


def getPlotFor500():
    """Plot the four learners' mistake curves for n=500 attributes."""
    _plotForN(500)


def getData(fileName):
    """Read 'instances:errorCount' lines from fileName.

    Returns (errorCounts, instances) as numeric numpy arrays.

    Fixes two defects in the original: the file handle was never closed
    (now a context manager), and the fields were kept as strings (with the
    trailing newline), which made matplotlib treat the axes as categorical
    text instead of numbers — now converted to float.
    """
    listErrorCount = []
    listInstances = []
    with open(fileName) as f:
        for line in f:
            fields = line.split(":")
            listInstances.append(float(fields[0]))
            listErrorCount.append(float(fields[1]))
    return np.array(listErrorCount), np.array(listInstances)


readAndPlot()
import signal
from datetime import timedelta, datetime
from time import sleep as time_sleep, time


class Timer(object):
    """A SIGALRM-driven timer registry.

    Every Timer instance registers itself in the class-level `timers` list;
    `set_next_timer` arms a single OS real-time interval timer
    (signal.setitimer) for whichever registered timer is due next, and the
    SIGALRM handler (`action`) fires the due timer's callback.
    All timestamps are naive UTC (datetime.utcnow()).
    """

    # Shared registry of all live timers (class attribute, process-wide).
    timers = []

    def __init__(self, func, duration, repeat=True):
        """Schedule `func` to run every `duration` seconds.

        repeat=True repeats forever; an integer repeats that many extra
        times; a falsy value makes it one-shot.
        """
        self.duration = timedelta(seconds=duration)
        self.func = func
        self.repeat = repeat
        self.last_call = datetime.utcnow()
        self.next_call = None
        self.update_next_call()
        self.add_timer(self)

    def update_next_call(self):
        # Next deadline is measured from the last (attempted) call.
        self.next_call = self.last_call + self.duration

    def run(self, now=None):
        """Invoke the callback and reschedule (or retire) this timer."""
        self.func()
        self.last_call = now or datetime.utcnow()
        if self.repeat is True:
            # Unbounded repetition.
            self.update_next_call()
        elif self.repeat and self.repeat - 1 > 0:
            # Counted repetition: decrement the remaining runs.
            self.repeat -= 1
            self.update_next_call()
        else:
            self.remove()

    def remove(self):
        """Deregister this timer; returns False if it was already gone."""
        try:
            self.remove_timer(self)
        except ValueError:
            return False
        else:
            return True

    @classmethod
    def add_timer(cls, timer):
        cls.timers.append(timer)
        cls.set_next_timer()

    @classmethod
    def remove_timer(cls, timer):
        cls.timers.remove(timer)
        cls.set_next_timer()

    @classmethod
    def get_next_timer(cls):
        """Return the timer with the earliest deadline, or None if none exist."""
        if not cls.timers:
            return None
        # NOTE(review): min() compares (next_call, timer) tuples; if two
        # timers share the exact same next_call, comparing the Timer objects
        # themselves raises TypeError — confirm deadlines cannot collide.
        return min((timer.next_call, timer) for timer in cls.timers)[1]

    @classmethod
    def action(cls, *args, **kwargs):
        """SIGALRM handler: run the due timer (at most one) and re-arm."""
        now = datetime.utcnow()
        timer = cls.get_next_timer()
        if timer and timer.next_call <= now:
            timer.run(now)
        cls.set_next_timer()

    @classmethod
    def set_next_timer(cls):
        """Arm the OS interval timer for the earliest deadline.

        A duration of 0 disarms setitimer (no timers left)."""
        timer = cls.get_next_timer()
        if not timer:
            duration = 0
        else:
            duration = (timer.next_call - datetime.utcnow()).total_seconds()
            if duration <= 0:
                # we are late: fire almost immediately (0 would disarm).
                duration = 0.00001
        signal.setitimer(signal.ITIMER_REAL, duration)


# Install the shared SIGALRM handler at import time.
signal.signal(signal.SIGALRM, Timer.action)


def sleep(duration):
    # allow to sleep for a duration even with os alarms breaking the time.sleep function
    left = duration
    start = time()
    while left > 0:
        time_sleep(left)
        left = duration - (time() - start)
import datetime
import os
import time
import warnings

import presets
import torch
import torch.utils.data
import torchvision
import utils
from coco_utils import get_coco
from torch import nn
from torch.optim.lr_scheduler import PolynomialLR
from torchvision.transforms import functional as F, InterpolationMode


def get_dataset(args, is_train):
    """Return (dataset, num_classes) for the dataset named by args.dataset."""

    # Adapters so every dataset exposes the same
    # (root, image_set=..., transforms=..., use_v2=...) call signature.
    def sbd(*args, **kwargs):
        kwargs.pop("use_v2")  # SBDataset does not take use_v2
        return torchvision.datasets.SBDataset(*args, mode="segmentation", **kwargs)

    def voc(*args, **kwargs):
        kwargs.pop("use_v2")  # VOCSegmentation does not take use_v2
        return torchvision.datasets.VOCSegmentation(*args, **kwargs)

    # name -> (root path, constructor, number of classes); all are 21-class.
    paths = {
        "voc": (args.data_path, voc, 21),
        "voc_aug": (args.data_path, sbd, 21),
        "coco": (args.data_path, get_coco, 21),
    }
    p, ds_fn, num_classes = paths[args.dataset]

    image_set = "train" if is_train else "val"
    ds = ds_fn(p, image_set=image_set, transforms=get_transform(is_train, args), use_v2=args.use_v2)
    return ds, num_classes


def get_transform(is_train, args):
    """Select preprocessing: training preset, weights-provided transforms
    (when evaluating pretrained weights with --test-only), or the eval preset."""
    if is_train:
        return presets.SegmentationPresetTrain(base_size=520, crop_size=480, backend=args.backend, use_v2=args.use_v2)
    elif args.weights and args.test_only:
        weights = torchvision.models.get_weight(args.weights)
        trans = weights.transforms()

        def preprocessing(img, target):
            img = trans(img)
            size = F.get_dimensions(img)[1:]
            # Resize the mask with NEAREST so class ids are never interpolated.
            target = F.resize(target, size, interpolation=InterpolationMode.NEAREST)
            return img, F.pil_to_tensor(target)

        return preprocessing
    else:
        return presets.SegmentationPresetEval(base_size=520, backend=args.backend, use_v2=args.use_v2)


def criterion(inputs, target):
    """Cross-entropy per model head (pixels labelled 255 are ignored);
    the auxiliary head, when present, is weighted 0.5."""
    losses = {}
    for name, x in inputs.items():
        losses[name] = nn.functional.cross_entropy(x, target, ignore_index=255)

    if len(losses) == 1:
        return losses["out"]

    return losses["out"] + 0.5 * losses["aux"]


def evaluate(model, data_loader, device, num_classes):
    """Run inference over data_loader and return the accumulated confusion matrix."""
    model.eval()
    confmat = utils.ConfusionMatrix(num_classes)
    metric_logger = utils.MetricLogger(delimiter="  ")
    header = "Test:"
    num_processed_samples = 0
    with torch.inference_mode():
        for image, target in metric_logger.log_every(data_loader, 100, header):
            image, target = image.to(device), target.to(device)
            output = model(image)
            output = output["out"]

            confmat.update(target.flatten(), output.argmax(1).flatten())
            # FIXME need to take into account that the datasets
            # could have been padded in distributed setup
            num_processed_samples += image.shape[0]

        confmat.reduce_from_all_processes()

    num_processed_samples = utils.reduce_across_processes(num_processed_samples)
    if (
        hasattr(data_loader.dataset, "__len__")
        and len(data_loader.dataset) != num_processed_samples
        and torch.distributed.get_rank() == 0
    ):
        # See FIXME above
        warnings.warn(
            f"It looks like the dataset has {len(data_loader.dataset)} samples, but {num_processed_samples} "
            "samples were used for the validation, which might bias the results. "
            "Try adjusting the batch size and / or the world size. "
            "Setting the world size to 1 is always a safe bet."
        )

    return confmat


def train_one_epoch(model, criterion, optimizer, data_loader, lr_scheduler, device, epoch, print_freq, scaler=None):
    """Train for one epoch; uses AMP when a GradScaler is supplied.
    The LR scheduler is stepped once per iteration (polynomial schedule)."""
    model.train()
    metric_logger = utils.MetricLogger(delimiter="  ")
    metric_logger.add_meter("lr", utils.SmoothedValue(window_size=1, fmt="{value}"))
    header = f"Epoch: [{epoch}]"
    for image, target in metric_logger.log_every(data_loader, print_freq, header):
        image, target = image.to(device), target.to(device)
        with torch.cuda.amp.autocast(enabled=scaler is not None):
            output = model(image)
            loss = criterion(output, target)

        optimizer.zero_grad()
        if scaler is not None:
            scaler.scale(loss).backward()
            scaler.step(optimizer)
            scaler.update()
        else:
            loss.backward()
            optimizer.step()

        lr_scheduler.step()

        metric_logger.update(loss=loss.item(), lr=optimizer.param_groups[0]["lr"])


def main(args):
    """Entry point: build data/model/optimizer/schedulers, optionally resume,
    then either evaluate (--test-only) or run the full training loop."""
    if args.backend.lower() != "pil" and not args.use_v2:
        # TODO: Support tensor backend in V1?
        raise ValueError("Use --use-v2 if you want to use the tv_tensor or tensor backend.")
    if args.use_v2 and args.dataset != "coco":
        raise ValueError("v2 is only support supported for coco dataset for now.")

    if args.output_dir:
        utils.mkdir(args.output_dir)

    utils.init_distributed_mode(args)
    print(args)

    device = torch.device(args.device)

    if args.use_deterministic_algorithms:
        torch.backends.cudnn.benchmark = False
        torch.use_deterministic_algorithms(True)
    else:
        torch.backends.cudnn.benchmark = True

    dataset, num_classes = get_dataset(args, is_train=True)
    dataset_test, _ = get_dataset(args, is_train=False)

    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(dataset)
        test_sampler = torch.utils.data.distributed.DistributedSampler(dataset_test, shuffle=False)
    else:
        train_sampler = torch.utils.data.RandomSampler(dataset)
        test_sampler = torch.utils.data.SequentialSampler(dataset_test)

    data_loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=args.batch_size,
        sampler=train_sampler,
        num_workers=args.workers,
        collate_fn=utils.collate_fn,
        drop_last=True,
    )

    data_loader_test = torch.utils.data.DataLoader(
        dataset_test, batch_size=1, sampler=test_sampler, num_workers=args.workers, collate_fn=utils.collate_fn
    )

    model = torchvision.models.get_model(
        args.model,
        weights=args.weights,
        weights_backbone=args.weights_backbone,
        num_classes=num_classes,
        aux_loss=args.aux_loss,
    )
    model.to(device)
    if args.distributed:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)

    model_without_ddp = model
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        model_without_ddp = model.module

    # Backbone and classifier share the base LR; the aux head (if any) gets 10x.
    params_to_optimize = [
        {"params": [p for p in model_without_ddp.backbone.parameters() if p.requires_grad]},
        {"params": [p for p in model_without_ddp.classifier.parameters() if p.requires_grad]},
    ]
    if args.aux_loss:
        params = [p for p in model_without_ddp.aux_classifier.parameters() if p.requires_grad]
        params_to_optimize.append({"params": params, "lr": args.lr * 10})
    optimizer = torch.optim.SGD(params_to_optimize, lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)

    scaler = torch.cuda.amp.GradScaler() if args.amp else None

    iters_per_epoch = len(data_loader)
    main_lr_scheduler = PolynomialLR(
        optimizer, total_iters=iters_per_epoch * (args.epochs - args.lr_warmup_epochs), power=0.9
    )

    if args.lr_warmup_epochs > 0:
        # Prepend a warmup schedule before the polynomial decay.
        warmup_iters = iters_per_epoch * args.lr_warmup_epochs
        args.lr_warmup_method = args.lr_warmup_method.lower()
        if args.lr_warmup_method == "linear":
            warmup_lr_scheduler = torch.optim.lr_scheduler.LinearLR(
                optimizer, start_factor=args.lr_warmup_decay, total_iters=warmup_iters
            )
        elif args.lr_warmup_method == "constant":
            warmup_lr_scheduler = torch.optim.lr_scheduler.ConstantLR(
                optimizer, factor=args.lr_warmup_decay, total_iters=warmup_iters
            )
        else:
            raise RuntimeError(
                f"Invalid warmup lr method '{args.lr_warmup_method}'. Only linear and constant are supported."
            )
        lr_scheduler = torch.optim.lr_scheduler.SequentialLR(
            optimizer, schedulers=[warmup_lr_scheduler, main_lr_scheduler], milestones=[warmup_iters]
        )
    else:
        lr_scheduler = main_lr_scheduler

    if args.resume:
        checkpoint = torch.load(args.resume, map_location="cpu")
        model_without_ddp.load_state_dict(checkpoint["model"], strict=not args.test_only)
        if not args.test_only:
            optimizer.load_state_dict(checkpoint["optimizer"])
            lr_scheduler.load_state_dict(checkpoint["lr_scheduler"])
            args.start_epoch = checkpoint["epoch"] + 1
            if args.amp:
                scaler.load_state_dict(checkpoint["scaler"])

    if args.test_only:
        # We disable the cudnn benchmarking because it can noticeably affect the accuracy
        torch.backends.cudnn.benchmark = False
        torch.backends.cudnn.deterministic = True
        confmat = evaluate(model, data_loader_test, device=device, num_classes=num_classes)
        print(confmat)
        return

    start_time = time.time()
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            train_sampler.set_epoch(epoch)
        train_one_epoch(model, criterion, optimizer, data_loader, lr_scheduler, device, epoch, args.print_freq, scaler)
        confmat = evaluate(model, data_loader_test, device=device, num_classes=num_classes)
        print(confmat)
        # Per-epoch checkpoint plus a rolling "checkpoint.pth".
        checkpoint = {
            "model": model_without_ddp.state_dict(),
            "optimizer": optimizer.state_dict(),
            "lr_scheduler": lr_scheduler.state_dict(),
            "epoch": epoch,
            "args": args,
        }
        if args.amp:
            checkpoint["scaler"] = scaler.state_dict()
        utils.save_on_master(checkpoint, os.path.join(args.output_dir, f"model_{epoch}.pth"))
        utils.save_on_master(checkpoint, os.path.join(args.output_dir, "checkpoint.pth"))

    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print(f"Training time {total_time_str}")


def get_args_parser(add_help=True):
    """Build the argument parser for segmentation training."""
    import argparse

    parser = argparse.ArgumentParser(description="PyTorch Segmentation Training", add_help=add_help)

    parser.add_argument("--data-path", default="/datasets01/COCO/022719/", type=str, help="dataset path")
    parser.add_argument("--dataset", default="coco", type=str, help="dataset name")
    parser.add_argument("--model", default="fcn_resnet101", type=str, help="model name")
    parser.add_argument("--aux-loss", action="store_true", help="auxiliary loss")
    parser.add_argument("--device", default="cuda", type=str, help="device (Use cuda or cpu Default: cuda)")
    parser.add_argument(
        "-b", "--batch-size", default=8, type=int, help="images per gpu, the total batch size is $NGPU x batch_size"
    )
    parser.add_argument("--epochs", default=30, type=int, metavar="N", help="number of total epochs to run")

    parser.add_argument(
        "-j", "--workers", default=16, type=int, metavar="N", help="number of data loading workers (default: 16)"
    )
    parser.add_argument("--lr", default=0.01, type=float, help="initial learning rate")
    parser.add_argument("--momentum", default=0.9, type=float, metavar="M", help="momentum")
    parser.add_argument(
        "--wd",
        "--weight-decay",
        default=1e-4,
        type=float,
        metavar="W",
        help="weight decay (default: 1e-4)",
        dest="weight_decay",
    )
    parser.add_argument("--lr-warmup-epochs", default=0, type=int, help="the number of epochs to warmup (default: 0)")
    parser.add_argument("--lr-warmup-method", default="linear", type=str, help="the warmup method (default: linear)")
    parser.add_argument("--lr-warmup-decay", default=0.01, type=float, help="the decay for lr")
    parser.add_argument("--print-freq", default=10, type=int, help="print frequency")
    parser.add_argument("--output-dir", default=".", type=str, help="path to save outputs")
    parser.add_argument("--resume", default="", type=str, help="path of checkpoint")
    parser.add_argument("--start-epoch", default=0, type=int, metavar="N", help="start epoch")
    parser.add_argument(
        "--test-only",
        dest="test_only",
        help="Only test the model",
        action="store_true",
    )
    parser.add_argument(
        "--use-deterministic-algorithms", action="store_true", help="Forces the use of deterministic algorithms only."
    )
    # distributed training parameters
    parser.add_argument("--world-size", default=1, type=int, help="number of distributed processes")
    parser.add_argument("--dist-url", default="env://", type=str, help="url used to set up distributed training")

    parser.add_argument("--weights", default=None, type=str, help="the weights enum name to load")
    parser.add_argument("--weights-backbone", default=None, type=str, help="the backbone weights enum name to load")

    # Mixed precision training parameters
    parser.add_argument("--amp", action="store_true", help="Use torch.cuda.amp for mixed precision training")

    parser.add_argument("--backend", default="PIL", type=str.lower, help="PIL or tensor - case insensitive")
    parser.add_argument("--use-v2", action="store_true", help="Use V2 transforms")
    return parser


if __name__ == "__main__":
    args = get_args_parser().parse_args()
    main(args)
# Purpose: Pre-processing daily CMIP5 GCM 500 hPa geopotential height data and classification
#          of Central European circulation types with the cost733class software
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#                                                                                             #
# Project:  Practicum MeteoSwiss/ETH Zurich                                                   #
#           Frequency and Persistence of Central European Circulation Types                   #
# My Name:  Maurice Huguenin-Virchaux                                                         #
# My Email: hmaurice@student.ethz.ch                                                          #
# Date:     11.12.2018, 16:16 CET                                                             #
#                                                                                             #
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

# preamble
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
from netCDF4 import Dataset
import numpy as np
import matplotlib.pyplot as plt
from cdo import *
cdo = Cdo()
import os
from datetime import datetime
import sys
#cdo.debug = True

variable = 'zg'     # 500 hPa geopotential height
method = 'GWT'      # cost733class classification method
classes = '10'      # number of circulation classes

# file paths
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
path_hist='/net/atmos/data/cmip5/historical/day/'   # input file path 1
path_rcp='/net/atmos/data/cmip5/rcp85/day/'         # input file path 2
# file path for processed files
path_processed='/net/h2o/climphys/hmaurice/Practicum_meteoswiss_output/cost/cmip5/'
# file path for cost output files
path_cost='/net/h2o/climphys/hmaurice/Practicum_meteoswiss_output/cost/cmip5/'

# array with model names and realisations <- first for the CH2018 models only
## CH2018 model list
#a = ['EC-EARTH', 'HadGEM2-ES', 'MPI-ESM-LR', 'MIROC5', 'CanESM2', 'IPSL-CM5A-MR',
#     'NorESM1-M', 'GFDL-ESM2M']; b = ['r1i1p1', 'r2i1p1', 'r3i1p1', 'r12i1p1']
## small model list for testing
a = ['GFDL-ESM2M']; b = ['r1i1p1']
## all CMIP5 model list
#a = ['ACCESS1-3', 'CanESM2',
#     'CMCC-CMS', 'GFDL-CM3', 'HadGEM2-AO', 'IPSL-CM5A-MR',
#     'MIROC-ESM-CHEM', 'MRI-CGCM3', 'bcc-csm1-1', 'CCSM4', 'CNRM-CM5',
#     'GFDL-ESM2G', 'HadGEM2-CC', 'IPSL-CM5B-LR', 'MPI-ESM-LR', 'MRI-ESM1',
#     'bcc-csm1-1-m', 'CMCC-CESM', 'EC-EARTH', 'GFDL-ESM2M', 'HadGEM2-ES',
#     'MIROC5', 'MPI-ESM-MR', 'NorESM1-M']
#b = ['r1i1p1', 'r2i1p1', 'r3i1p1', 'r4i1p1', 'r12i1p1']

# example filename: zg_day_GFDL-ESM2M_historical_r1i1p1_20010101-20051231.nc
for model in a:                 # loop over all models
    for realisation in b:       # loop over all realisations
        # combine array elements into the per-run input directories / output name
        s = path_hist + variable + '/' + model + '/' + realisation + '/'
        t = path_rcp + variable + '/' + model + '/' + realisation + '/'
        output_name = 'zg_day_' + model + '_historical_rcp85_' + realisation + '.nc'

        # (1) check if data exists
        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        # print 'No data' for model realisations that do not exist
        # (skip unless BOTH the historical and the rcp85 directory are present)
        if os.path.isdir(s) == False and os.path.isdir(t) == False or \
           os.path.isdir(s) == True and os.path.isdir(t) == False:
            print('No data for: ' + model + '/' + realisation)
            continue

        starttime = datetime.now()      # start stopwatch

        # (2) merge all historical and rcp85 files
        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        # now use cdo merge for that file path
        cdo.mergetime(input = s + 'zg_day_*',
                      output = path_processed + 'zg_day_' + model + '_' + realisation +
                      '_historical.nc', force = False)
        cdo.mergetime(input = t + 'zg_day_*',
                      output = path_processed + 'zg_day_' + model + '_' + realisation +
                      '_rcp85.nc', force = False)

        # (3) merge newly created hist + rcp85 file into one large file
        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        filenames = [i for i in os.listdir(path_processed)
                     if i.startswith('zg_day_' + model + '_' + realisation)]
        print(filenames)        # print filenames in console
        # NOTE(review): `filenames` are bare names relative to the current working
        # directory, not prefixed with path_processed — this presumably relies on
        # the script being run from path_processed; verify.
        cdo.mergetime(input = ' '.join(filenames),
                      output = path_processed + output_name, force = False)
        print('Merging hist + rcp85 data done:')
        print(datetime.now() - starttime)   # print time after one iteration

        # (4) subsetting data to reduce size
        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        # select zg at 500 hPa, years 1960-2099, over the Central European box
        cdo.invertlat(input = '-setlevel,0 -sellevel,50000 -selname,zg -selyear,1960/2099 \
                      -sellonlatbox,2.5,20,40.73,52.10 ' + path_processed + output_name,
                      output = output_name[:-3] + '_process.nc', force = False)
        print('Subsetting data done:')
        print(datetime.now() - starttime)   # print time after one iteration

        # (5) adjusting time dimension
        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        output_name = output_name[:-3] + '_process.nc'
        os.system("ncap2 -O -s " + '"time=time*24+50*365" ' + '-s ' +
                  "'time@units=" + '"hours since 1900-01-01 00:00:00' + '"' + "' " +
                  path_processed + output_name + ' ' +
                  path_processed + output_name.replace('process', 'time'))
        # cannot use force = False here as system() does not take keyword arguments

        # (6) convert to classic format
        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        output_name = output_name.replace('process', 'time')
        os.system('ncks -O --fl_fmt=classic ' + path_processed + output_name + ' ' +
                  path_processed + output_name.replace('time', 'classic'))

        # (7) removing bnds = 2 dimension from the vertical zg dimension
        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        output_name = output_name.replace('time', 'classic')
        os.system('ncwa -a bnds ' + path_processed + output_name + ' ' +
                  path_processed + output_name.replace('classic', 'no_bnds'))

        # (8) running cost software and creating output .dat file
        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        output_name = output_name.replace('classic', 'no_bnds')
        os.system("cost733class -dat pth:" + path_processed + output_name +
                  " var:" + variable + " -met " + method + " -ncl " + classes +
                  " -cla " + path_processed + output_name[:-3].replace('no_bnds', 'cost') +
                  ".dat" + " -dcol 3 -cnt")

        # (9) removing redundant files
        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        os.system('rm -r ' + path_processed + 'zg_day_' + model + '_' + realisation + '_*')
        redundant_names = 'zg_day_' + model + '_historical_rcp85_' + realisation
        os.system('rm -r ' + path_processed + redundant_names + '_classic.nc')
        os.system('rm -r ' + path_processed + redundant_names + '_no_bnds.nc')
        os.system('rm -r ' + path_processed + redundant_names + '_process.nc')
        os.system('rm -r ' + path_processed + redundant_names + '_time.nc')

        print(datetime.now() - starttime)   # print time after one iteration

        # (10) post-processing cost output file
        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#        output_name = output_name[:-3].replace('no_bnds', 'cost') + '.dat'
#        print(output_name)
#        os.system("awk '{print $4}' " + path_processed + output_name + ' >' +
#                  path_processed + output_name.replace('cost', 'small'))
#        os.system('rm -r ' + path_processed + output_name)  # again remove redundant file
        # these 'small' files are then used to adjust leap days and combinedd into one file
        # with date vector (i.e. YYYY MM DD) and all other CMIP5 ensemble member output
        # to combine these files I just use: hmaurice@h2o:~> paste date.dat zg_day_* >data.dat

    # end of loop over realisations
# end of loop over models

sys.exit()          # exit script

# (XY) notes here
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#!/usr/bin/python3
import wx
import gi
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk

# %% provenance of this generated file
_code_git_version="e57a1a0f6fb8b29675f9338afef98a60cd98a0a2"
_code_repository="https://github.com/plops/cl-py-generator/tree/master/example/24_gtk3/source/run_00_show.py"
_code_generation_time="12:24:46 of Friday, 2020-09-11 (GMT+1)"


class TreeViewWindow(Gtk.Window):
    """Window that shows a filterable list of software projects in a tree view."""

    def language_filter_func(self, model, iter, data):
        """Visibility callback for the filter model.

        A row is visible when no language filter is active (None or the
        literal string "None") or when the row's language column matches
        the currently selected filter.
        """
        no_filter = (self.current_filter_language is None
                     or self.current_filter_language == "None")
        if no_filter:
            return True
        return model[iter][2] == self.current_filter_language

    def __init__(self):
        Gtk.Window.__init__(self, title="hello world")
        # Backing model: columns are (software, release_year, language).
        self.store = Gtk.ListStore(str, int, str)
        for row in [("firefox", 2002, "c++",), ("emacs", 1984, "lisp",)]:
            self.store.append(list(row))
        # Filtered view over the store; starts out unfiltered.
        self.filter = self.store.filter_new()
        self.current_filter_language = None
        self.filter.set_visible_func(self.language_filter_func)
        self.treeview = Gtk.TreeView.new_with_model(self.filter)
        for index, title in enumerate(["software", "release_year", "language"]):
            cell = Gtk.CellRendererText()
            self.treeview.append_column(Gtk.TreeViewColumn(title, cell, text=index))
        # Make the tree view scrollable.
        self.scroll = Gtk.ScrolledWindow()
        self.scroll.set_vexpand(True)
        self.add(self.scroll)
        self.scroll.add(self.treeview)
        self.show_all()


win = TreeViewWindow()
win.connect("destroy", Gtk.main_quit)
win.show_all()
Gtk.main()
# generating captcha
from django.conf import settings
import random
import math
from io import BytesIO
from PIL import Image, ImageDraw, ImageFont


class GenCaptcha:
    """Generate captcha images: random alphanumeric text over a noisy background.

    All tunables (image size, text length, font, noise densities) come from
    the ``CAPTCHA`` dict in the Django settings.
    """

    def __init__(self):
        # Read every tunable once from settings.CAPTCHA at construction time.
        CAPTCHA_CONFIG = settings.CAPTCHA
        self.height = CAPTCHA_CONFIG['picture_height']
        self.width = CAPTCHA_CONFIG['picture_width']
        self.length = CAPTCHA_CONFIG['length']          # number of characters drawn
        self.fontsize = CAPTCHA_CONFIG['font_size']
        self.fonttype = CAPTCHA_CONFIG['font_family']   # passed to ImageFont.truetype; presumably a .ttf path — confirm
        self.dot_number = CAPTCHA_CONFIG['dot_number']    # count of interference dots
        self.line_number = CAPTCHA_CONFIG['line_number']  # count of interference lines

    # generate random color
    # @return -> (r, g, b) with each channel in 0..255
    def get_random_color(self):
        r = random.randint(0, 255)
        g = random.randint(0, 255)
        b = random.randint(0, 255)
        return (r, g, b)

    # generate 1 random char (letters & numbers)
    # @return -> one char
    def get_random_char(self):
        random_num = str(random.randint(0, 9))  # numbers
        random_lower = chr(random.randint(97, 122))  # lower case letters
        random_upper = chr(random.randint(65, 90))  # upper case letters
        # Re-roll 'o'/'O' so they cannot be confused with the digit 0.
        while random_lower == 'o':
            random_lower = chr(random.randint(97, 122))
        while random_upper == 'O':
            random_upper = chr(random.randint(65, 90))
        random_char = random.choice([random_num, random_lower, random_upper])
        return random_char

    # draw random lines to interfere
    # @param -> draw: PIL ImageDraw Object
    # @return -> None
    def draw_line(self, draw):
        for i in range(self.line_number):
            x1 = random.randint(0, self.width)
            x2 = random.randint(0, self.width)
            y1 = random.randint(0, self.height)
            y2 = random.randint(0, self.height)
            draw.line((x1, y1, x2, y2), fill=self.get_random_color())

    # draw random dots to interfere
    # @param -> draw: PIL ImageDraw Object
    # @return -> None
    def draw_point(self, draw):
        for i in range(self.dot_number):
            x = random.randint(0, self.width)
            y = random.randint(0, self.height)
            draw.point((x, y), fill=self.get_random_color())

    def check_similarity(self, color1, color2):
        """Return True when the two (r, g, b) colors are too similar to read
        against each other.

        Two measures are combined: a Euclidean RGB distance with each channel
        difference normalised by 256, and a weighted brightness difference
        (299/587/114 channel weights, left un-divided by 1000, hence the
        scaled ``100 * 255`` threshold). Similar if EITHER measure falls
        below its threshold.
        """
        r1 = color1[0]
        g1 = color1[1]
        b1 = color1[2]
        r2 = color2[0]
        g2 = color2[1]
        b2 = color2[2]
        r3 = (r1 - r2) / 256
        g3 = (g1 - g2) / 256
        b3 = (b1 - b2) / 256
        color_diff = math.sqrt(r3 ** 2 + g3 ** 2 + b3 ** 2)
        bright1 = ((r1 * 299) + (g1 * 587) + (b1 * 114))
        bright2 = ((r2 * 299) + (g2 * 587) + (b2 * 114))
        bright_diff = abs(bright1 - bright2)
        if color_diff < 0.7 or bright_diff < 100 * 255:
            return True
        return False

    # create random picture
    # @return -> (answer, buffer): the lower-cased captcha text and a BytesIO
    #            containing the PNG image, rewound to position 0
    def create_img(self):
        ans = ''
        bg_color = self.get_random_color()
        # create new pic with random background
        img = Image.new(mode='RGB', size=(self.width, self.height), color=bg_color)
        # get ImageDraw object
        draw = ImageDraw.Draw(img)
        # set font with .ttf file
        font = ImageFont.truetype(font=self.fonttype, size=self.fontsize)
        for i in range(self.length):
            # pick a character and a text color for this position
            random_txt = self.get_random_char()
            txt_color = self.get_random_color()
            # re-roll the text color until it contrasts with the background
            while self.check_similarity(bg_color, txt_color):
                txt_color = self.get_random_color()
            # draw the character; x advances by one font size per character
            # NOTE(review): the 10px left margin and 3px top offset are magic
            # numbers inherited from the original code.
            # TODO: replace the magic offsets below with named constants
            draw.text((10 + self.fontsize * i, 3), text=random_txt, fill=txt_color, font=font)
            ans += random_txt
        # draw interfere elements
        self.draw_line(draw)
        self.draw_point(draw)
        buffer = BytesIO()
        img.save(buffer, format='png')
        buffer.seek(0)
        # answers are compared case-insensitively, so store lower-case
        ans = ans.lower()
        return ans, buffer
"""Blink an LED on BCM pin 18: nine one-second on/off cycles.

Must run on a Raspberry Pi with the RPi.GPIO package installed and an
LED (with series resistor) wired to GPIO 18.
"""
import RPi.GPIO as GPIO
import time

LED_PIN = 18

GPIO.setmode(GPIO.BCM)
GPIO.setup(LED_PIN, GPIO.OUT)
try:
    # range(1, 10) kept from the original: 9 blink cycles.
    for _ in range(1, 10):
        GPIO.output(LED_PIN, 1)
        time.sleep(1)
        GPIO.output(LED_PIN, 0)
        time.sleep(1)
finally:
    # Always release the GPIO channel, even if the loop is interrupted
    # (e.g. Ctrl+C during sleep); previously cleanup was skipped in that
    # case, leaving the pin configured.
    GPIO.cleanup()
# -*- coding: utf-8 -*-
"""For every person/emotion/take folder, pick the (roughly) middle picture,
duplicate it as <person>_<emotion>_<take>_<original name>, and move the
duplicate into a per-emotion folder under destinationPath."""
import os
import shutil

# Raw strings give the same literal values as before but avoid the invalid
# escape sequences ("\H", "\e", ...) in the original Windows paths.
sourcePath = r"C:\Users\Helga\CloudComputing\Database\Bilder\entpackt"
destinationPath = r"C:\Users\Helga\CloudComputing\Database\Bilder\destination"

personList = os.listdir(sourcePath)
# Get List of emotions from the 1st person without the "mixed"
emotionList = os.listdir(os.path.join(sourcePath, personList[0]))
# Check if element "mixed" exists in List, before removing
if "mixed" in emotionList:
    emotionList.remove("mixed")

for person in personList:
    for emotion in emotionList:
        emotionPath = os.path.join(sourcePath, person, emotion)
        for path, takes, files in os.walk(emotionPath):
            # For each take
            for take in takes:
                sourceTakePath = os.path.join(emotionPath, take)
                # For each picture
                for path, dirs, files in os.walk(sourceTakePath):
                    file_count = len(files)
                    if file_count == 0:
                        # Empty take folder: nothing to copy. Previously
                        # this crashed with an IndexError.
                        continue
                    getMiddle = int(file_count / 2)
                    # Clamp the index: middle+1 was out of range for takes
                    # with 1 or 2 files (e.g. count 2 -> index 2 -> crash).
                    selectedIndex = min(getMiddle + 1, file_count - 1)
                    fileToCopy = files[selectedIndex]
                    print("-- Person " + person + "-- File to copy: " + fileToCopy)
                    # Duplicate picture and rename it
                    base, extension = os.path.splitext(fileToCopy)
                    newFileToCopyPath = os.path.join(
                        sourceTakePath,
                        person + "_" + emotion + "_" + take + "_" + base + extension)
                    shutil.copy(os.path.join(sourceTakePath, fileToCopy), newFileToCopyPath)
                    # Move duplicate into destination
                    destination = os.path.join(destinationPath, emotion)
                    shutil.move(newFileToCopyPath, destination)
                    print("Picture moved into:" + emotion)
from rest_framework import serializers
from datetime import datetime, timedelta
from movielist.models import Movie
from showtimes.models import Cinema, Screening


class CinemaSerializer(serializers.HyperlinkedModelSerializer):
    """Cinema plus the titles of its movies screening within the next 30 days."""

    # Computed read-only field; populated by get_movies below.
    movies = serializers.SerializerMethodField()

    class Meta:
        model = Cinema
        fields = ('id', 'name', 'city', 'movies')

    def get_movies(self, cinema):
        """Return titles of this cinema's movies that have a screening from
        now up to (but not including) 30 days ahead.

        NOTE(review): datetime.now() is evaluated twice, so the two bounds
        differ by a few microseconds — harmless here, but worth knowing.
        """
        movies = cinema.movies.filter(screening__date__lt=datetime.now() + timedelta(days=30),
                                      screening__date__gte=datetime.now())
        return [movie.title for movie in movies]


class ScreeningSerializer(serializers.ModelSerializer):
    """Read serializer for screenings: exposes the cinema id and its name."""

    # Cinema referenced by its primary key via a slug on the 'id' field.
    cinema = serializers.SlugRelatedField(slug_field='id', queryset=Cinema.objects.all())
    # Derived, read-only; see get_cinema_name below.
    cinema_name = serializers.SerializerMethodField()

    class Meta:
        model = Screening
        fields = ('id', 'movie', 'cinema_name', 'cinema', 'date')

    def get_cinema_name(self, obj):
        """Human-readable name of the screening's cinema."""
        return obj.cinema.name


class ScreeningSerializerWrite(serializers.ModelSerializer):
    """Write serializer for screenings (no derived cinema_name field)."""

    cinema = serializers.SlugRelatedField(slug_field='id', queryset=Cinema.objects.all())

    class Meta:
        model = Screening
        fields = ('id', 'movie', 'cinema', 'date')
from flask_login import login_required
from views.base_view import BaseView


class HelloView(BaseView):
    """View serving the hello page; access requires an authenticated user."""

    @login_required
    def get(self, name=None):
        """Render hello.html, passing *name* (may be None) to the template."""
        return self.render_template('hello.html', name=name)
# -*- coding: utf-8 -*-
from scrapy import Spider, Request
from anjuke.items import AnjukeItem


class AnjukehouseSpider(Spider):
    """Crawl Anjuke second-hand house listings for Guangzhou.

    ``parse`` walks the paginated listing index; ``parse_detail`` scrapes
    one listing page into an AnjukeItem.
    """

    name = 'anjukeHouse'
    allowed_domains = ['anjuke.com']
    start_urls = ['https://guangzhou.anjuke.com/sale/p1-rd1/#filtersort']

    def parse(self, response):
        """Yield a detail request per listing, then follow the next page."""
        # URLs of all house listings on this index page
        urls = response.xpath('//div[@class="house-title"]/a/@href').extract()
        for url in urls:
            yield Request(url, callback=self.parse_detail)
        # Next page link (renamed from `next`, which shadowed the builtin)
        next_page = response.xpath('//*[@id="content"]/div[4]/div[7]/a[7]/@href').extract()
        if next_page:
            next_page = response.urljoin(next_page[0])
            yield Request(next_page, callback=self.parse)

    def parse_detail(self, response):
        """Scrape a single listing page into an AnjukeItem.

        NOTE(review): the houseInfo fields are picked by fixed positional
        indices into the extracted text nodes, so any page-layout change
        will silently shift every field — verify against live pages.
        """
        item = AnjukeItem()
        item['date'] = response.xpath('//span[@class="house-encode"]/text()').extract()[0].split()
        # 'tittle' is the (misspelled) field name declared on AnjukeItem;
        # it cannot be renamed here without changing the item definition.
        item['tittle'] = response.xpath('//h3[@class="long-title"]/text()').extract()
        item['price'] = response.xpath('//span[@class="light info-tag"]/em/text()').extract_first().split()
        houseInfo = response.xpath('//div[@class="houseInfo-content"]/text()').extract()
        item['huxing'] = houseInfo[2].strip().replace("\n", "").replace("\t", "").split()     # layout
        item['area'] = houseInfo[7].strip().split()                                          # floor area
        item['built'] = houseInfo[9].strip().replace("\n", "").replace("\t", "").split()     # year built
        item['chaoxiang'] = houseInfo[10].strip().split()                                    # orientation
        item['leixing'] = houseInfo[-8].strip().split()                                      # property type
        item['louceng'] = houseInfo[-7].strip().split()                                      # floor
        item['zhuangxiu'] = houseInfo[-6].strip().split()                                    # decoration
        print(item)  # debug output kept from the original
        return item
# Copyright 2014 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Handles converting of formatting.""" import cgi from . import constants class FormattingHandler(object): """Class that handles the conversion of formatting.""" # Links with these URL schemas are auto-linked by GFM. _GFM_AUTO_URL_SCHEMAS = ("http://", "https://") # Images that were inlined automatically by Wiki syntax # had to have these URL schemas and image extensions. _IMAGE_URL_SCHEMAS = ("http://", "https://", "ftp://") _IMAGE_EXTENSIONS = (".png", ".gif", ".jpg", ".jpeg", ".svg") # Template for linking to a video. _VIDEO_TEMPLATE = ( u"<a href='http://www.youtube.com/watch?feature=player_embedded&v={0}' " "target='_blank'><img src='http://img.youtube.com/vi/{0}/0.jpg' " "width='{1}' height={2} /></a>") # Formatting tags for list-to-HTML conversion. _HTML_LIST_TAGS = { "Numeric list": { "ListTag": "ol", "ItemTag": "li", }, "Bulleted list": { "ListTag": "ul", "ItemTag": "li", }, "Blockquote": { "ListTag": "blockquote", "ItemTag": None, }, } # Formatting tags for formatting-to-HTML conversion. _HTML_FORMAT_TAGS = { "Bold": { "Markdown": "**", "HTML": "b", }, "Italic": { "Markdown": "_", "HTML": "i", }, "Strikethrough": { "Markdown": "~~", "HTML": "del", }, } # How a single indentation is outputted. _SINGLE_INDENTATION = " " * 2 def __init__(self, warning_method, project, issue_map, symmetric_headers): """Create a formatting handler. 
Args: warning_method: A function to call to display a warning message. project: The name of the Google Code project for the Wiki page. issue_map: A dictionary of Google Code issues to GitHub issues. symmetric_headers: True if header denotations are symmetric. """ self._warning_method = warning_method self._project = project self._issue_map = issue_map self._symmetric_headers = symmetric_headers # GFM has a quirk with nested blockquotes where a blank line is needed # after closing a nested blockquote while continuing into another. self._last_blockquote_indent = 0 # GFM will not apply formatting if whitespace surrounds the text being # formatted, but Wiki will. To work around this, we maintain a buffer # of text to be outputted, and when the tag is closed we can trim the # buffer before applying formatting. If the trimmed buffer is empty, we # can omit the formatting altogether to avoid GFM rendering issues. self._format_buffer = [] # GitHub won't render formatting within HTML tags. Track if this is the # case so we can issue a warning and try a work-around. self._in_html = 0 # Number of tags currently open. self._in_code_block = False # If we're in a code block in HTML. self._has_written_text = False # If we've written text since the last tag. self._list_tags = [] # If writing HTML for lists, the current list tags. self._table_status = None # Where we are in outputting an HTML table. # GitHub doesn't support HTML comments, so as a workaround we give # a bogus and empty <a> tag, which renders as nothing. self._in_comment = False def HandleHeaderOpen(self, input_line, output_stream, header_level): """Handle the output for opening a header. Args: input_line: Current line number being processed. output_stream: Output Markdown file. header_level: The header level. 
""" if self._in_html: tag = u"h{0}".format(header_level) self.HandleHtmlOpen(input_line, output_stream, tag, {}, False) else: self._Write("#" * header_level + " ", output_stream) def HandleHeaderClose( self, input_line, output_stream, header_level): """Handle the output for closing a header. Args: input_line: Current line number being processed. output_stream: Output Markdown file. header_level: The header level. """ if self._in_html: tag = u"h{0}".format(header_level) self.HandleHtmlClose(input_line, output_stream, tag) else: if self._symmetric_headers: self._Write(" " + "#" * header_level, output_stream) def HandleHRule(self, input_line, output_stream): """Handle the output for a horizontal rule. Args: input_line: Current line number being processed. output_stream: Output Markdown file. """ if self._in_html: self.HandleHtmlOpen(input_line, output_stream, "hr", {}, True) else: # One newline needed before to separate from text, and not make a header. self._Write("\n---\n", output_stream) def HandleCodeBlockOpen(self, input_line, output_stream, specified_language): """Handle the output for starting a code block. Args: input_line: Current line number being processed. output_stream: Output Markdown file. specified_language: Language for the code block, or None. """ if self._in_html: self._PrintHtmlWarning(input_line, "Code") self.HandleHtmlOpen(input_line, output_stream, "pre", {}, False) self.HandleHtmlOpen(input_line, output_stream, "code", {}, False) else: if not specified_language: specified_language = "" self._Write(u"```{0}\n".format(specified_language), output_stream) self._in_code_block = True def HandleCodeBlockClose(self, input_line, output_stream): """Handle the output for ending a code block. Args: input_line: Current line number being processed. output_stream: Output Markdown file. 
""" self._in_code_block = False if self._in_html: self.HandleHtmlClose(input_line, output_stream, "code") self.HandleHtmlClose(input_line, output_stream, "pre") else: self._Write("```", output_stream) def HandleNumericListOpen( self, input_line, output_stream, indentation_level): """Handle the output for the opening of a numeric list item. Args: input_line: Current line number being processed. output_stream: Output Markdown file. indentation_level: The indentation level for the item. """ if self._in_html: self._HandleHtmlListOpen( input_line, output_stream, indentation_level, "Numeric list") else: self._Indent(output_stream, indentation_level) # Just using any number implies a numbered item, # so we take the easy route. self._Write("1. ", output_stream) def HandleBulletListOpen( self, input_line, output_stream, indentation_level): """Handle the output for the opening of a bulleted list item. Args: input_line: Current line number being processed. output_stream: Output Markdown file. indentation_level: The indentation level for the item. """ if self._in_html: self._HandleHtmlListOpen( input_line, output_stream, indentation_level, "Bulleted list") else: self._Indent(output_stream, indentation_level) self._Write("* ", output_stream) def HandleBlockQuoteOpen( self, input_line, output_stream, indentation_level): """Handle the output for the opening of a block quote line. Args: input_line: Current line number being processed. output_stream: Output Markdown file. indentation_level: The indentation level for the item. """ if self._in_html: self._HandleHtmlListOpen( input_line, output_stream, indentation_level, "Blockquote") else: if self._last_blockquote_indent > indentation_level: self._Write("\n", output_stream) self._last_blockquote_indent = indentation_level # Blockquotes are nested not by indentation but by nesting. self._Write("> " * indentation_level, output_stream) def HandleListClose(self, input_line, output_stream): """Handle the output for the closing of a list. 
Args: input_line: Current line number being processed. output_stream: Output Markdown file. """ if self._in_html: self._HandleHtmlListClose(input_line, output_stream) def HandleParagraphBreak(self, unused_input_line, output_stream): """Handle the output for a new paragraph. Args: unused_input_line: Current line number being processed. output_stream: Output Markdown file. """ self._Write("\n", output_stream) def HandleBoldOpen(self, input_line, unused_output_stream): """Handle the output for starting bold formatting. Args: input_line: Current line number being processed. unused_output_stream: Output Markdown file. """ if self._in_html: self._PrintHtmlWarning(input_line, "Bold") # Open up another buffer. self._format_buffer.append("") def HandleBoldClose(self, input_line, output_stream): """Handle the output for ending bold formatting. Args: input_line: Current line number being processed. output_stream: Output Markdown file. """ self._HandleFormatClose(input_line, output_stream, "Bold") def HandleItalicOpen(self, input_line, unused_output_stream): """Handle the output for starting italic formatting. Args: input_line: Current line number being processed. unused_output_stream: Output Markdown file. """ if self._in_html: self._PrintHtmlWarning(input_line, "Italic") # Open up another buffer. self._format_buffer.append("") def HandleItalicClose(self, input_line, output_stream): """Handle the output for ending italic formatting. Args: input_line: Current line number being processed. output_stream: Output Markdown file. """ self._HandleFormatClose(input_line, output_stream, "Italic") def HandleStrikethroughOpen(self, input_line, unused_output_stream): """Handle the output for starting strikethrough formatting. Args: input_line: Current line number being processed. unused_output_stream: Output Markdown file. """ if self._in_html: self._PrintHtmlWarning(input_line, "Strikethrough") # Open up another buffer. 
self._format_buffer.append("") def HandleStrikethroughClose(self, input_line, output_stream): """Handle the output for ending strikethrough formatting. Args: input_line: Current line number being processed. output_stream: Output Markdown file. """ self._HandleFormatClose(input_line, output_stream, "Strikethrough") def HandleSuperscript(self, unused_input_line, output_stream, text): """Handle the output for superscript text. Args: unused_input_line: Current line number being processed. output_stream: Output Markdown file. text: The text to output. """ # Markdown currently has no dedicated markup for superscript. self._Write(u"<sup>{0}</sup>".format(text), output_stream) def HandleSubscript(self, unused_input_line, output_stream, text): """Handle the output for subscript text. Args: unused_input_line: Current line number being processed. output_stream: Output Markdown file. text: The text to output. """ # Markdown currently has no dedicated markup for subscript. self._Write(u"<sub>{0}</sub>".format(text), output_stream) def HandleInlineCode(self, input_line, output_stream, code): """Handle the output for a code block. Args: input_line: Current line number being processed. output_stream: Output Markdown file. code: The code inlined. """ if self._in_html: self.HandleHtmlOpen(input_line, output_stream, "code", {}, False) self.HandleText(input_line, output_stream, cgi.escape(code)) self.HandleHtmlClose(input_line, output_stream, "code") else: # To render backticks within inline code, the surrounding tick count # must be one greater than the number of consecutive ticks in the code. 
# E.g.: # `this is okay, no ticks in the code` # `` `one consecutive tick in the code implies two in the delimiter` `` # ``` `` `and two consecutive ticks in here implies three -> ``` max_consecutive_ticks = 0 consecutive_ticks = 0 for char in code: if char == "`": consecutive_ticks += 1 max_consecutive_ticks = max(max_consecutive_ticks, consecutive_ticks) else: consecutive_ticks = 0 surrounding_ticks = "`" * (max_consecutive_ticks + 1) self._Write(u"{0}{1}{0}".format(surrounding_ticks, code), output_stream) def HandleTableCellBorder(self, input_line, output_stream): """Handle the output for a table cell border. Args: input_line: Current line number being processed. output_stream: Output Markdown file. """ if self._in_html: if not self._table_status: # Starting a new table. self._PrintHtmlWarning(input_line, "Table") self.HandleHtmlOpen(input_line, output_stream, "table", {}, False) self.HandleHtmlOpen(input_line, output_stream, "thead", {}, False) self.HandleHtmlOpen(input_line, output_stream, "th", {}, False) self._table_status = "header" elif self._table_status == "header": # Header cell. Close the previous cell, open the next one. self.HandleHtmlClose(input_line, output_stream, "th") self.HandleHtmlOpen(input_line, output_stream, "th", {}, False) elif self._table_status == "rowstart": # First row cell. self.HandleHtmlOpen(input_line, output_stream, "tr", {}, False) self.HandleHtmlOpen(input_line, output_stream, "td", {}, False) self._table_status = "row" elif self._table_status == "row": # Row cell. Close the previous cell, open the next one. self.HandleHtmlClose(input_line, output_stream, "td") self.HandleHtmlOpen(input_line, output_stream, "td", {}, False) else: self._Write("|", output_stream) def HandleTableRowEnd(self, input_line, output_stream): """Handle the output for a table row end. Args: input_line: Current line number being processed. output_stream: Output Markdown file. """ if self._in_html: if self._table_status == "header": # Closing header. 
Close the previous cell and header, start the body. self.HandleHtmlClose(input_line, output_stream, "th") self.HandleHtmlClose(input_line, output_stream, "thead") self.HandleHtmlOpen(input_line, output_stream, "tbody", {}, False) elif self._table_status == "row": # Closing row. Close the previous cell and row. self.HandleHtmlClose(input_line, output_stream, "td") self.HandleHtmlClose(input_line, output_stream, "tr") self._table_status = "rowstart" else: self._Write("|", output_stream) def HandleTableClose(self, input_line, output_stream): """Handle the output for a table end. Args: input_line: Current line number being processed. output_stream: Output Markdown file. """ if self._in_html: # HandleTableRowEnd will have been called by this point. # All we need to do is close the body and table. self.HandleHtmlClose(input_line, output_stream, "tbody") self.HandleHtmlClose(input_line, output_stream, "table") self._table_status = None def HandleTableHeader(self, input_line, output_stream, columns): """Handle the output for starting a table header. Args: input_line: Current line number being processed. output_stream: Output Markdown file. columns: Column sizes. """ if self._in_html: return self.HandleText(input_line, output_stream, "\n") for column_width in columns: self.HandleTableCellBorder(input_line, output_stream) # Wiki tables are left-aligned, which takes one character to specify. self._Write(u":{0}".format("-" * (column_width - 1)), output_stream) self.HandleTableCellBorder(input_line, output_stream) def HandleLink(self, input_line, output_stream, target, description): """Handle the output of a link. Args: input_line: Current line number being processed. output_stream: Output Markdown file. target: The target URL of the link. description: The description for the target. """ # There are six cases to handle in general: # 1. Image link with image description: # Link to image, using image from description as content. # 2. 
Image link with non-image description: # Link to image, using description text as content. # 3. Image link with no description: # Inline image. # 4. URL link with image description: # Link to URL, using image from description as content. # 5. URL link with non-image description: # Link to URL, using description text as content. # 6. URL link with no description: # Link to URL, using URL as content. # Only in case 3 is no actual link present. is_image = target.endswith(self._IMAGE_EXTENSIONS) is_image_description = (description and description.startswith(self._IMAGE_URL_SCHEMAS) and description.endswith(self._IMAGE_EXTENSIONS)) if self._in_html: self._PrintHtmlWarning(input_line, "Link") # Handle inline image case. if is_image and not description: self.HandleHtmlOpen( input_line, output_stream, "img", {"src": target}, True) else: # Handle link cases. self.HandleHtmlOpen( input_line, output_stream, "a", {"href": target}, False) if is_image_description: self.HandleHtmlOpen( input_line, output_stream, "img", {"src": description}, True) else: description = description or target self._Write(cgi.escape(description), output_stream) self.HandleHtmlClose(input_line, output_stream, "a") else: # If description is None, this means that only the URL was given. We'd # like to let GFM auto-link it, because it's prettier. However, while Wiki # syntax would auto-link a variety of URL schemes, GFM only supports http # and https. In other cases and in the case of images, we explicitly link. is_autolinkable = target.startswith(self._GFM_AUTO_URL_SCHEMAS) autolink = (description is None) and is_autolinkable and (not is_image) if autolink: self._Write(target, output_stream) else: # If the descriptive text looks like an image URL, Wiki syntax would # make the link description an inlined image. We do this by setting # the output description to the syntax used to inline an image. 
if is_image_description: description = u"![]({0})".format(description) elif description: description = self._Escape(description) else: description = target is_image_description = is_image # Prefix ! if linking to an image without a text description. prefix = "!" if is_image and is_image_description else "" output = u"{0}[{1}]({2})".format(prefix, description, target) self._Write(output, output_stream) def HandleWiki(self, input_line, output_stream, target, text): """Handle the output of a wiki link. Args: input_line: Current line number being processed. output_stream: Output Markdown file. target: The target URL of the link. text: The description for the target. """ # A wiki link is just like a regular link, except under the wiki directory. # At this point we make the text equal to the original target if unset. # We do however append ".md", assuming the wiki files now have that extension. self.HandleLink(input_line, output_stream, target + ".md", text or target) def HandleIssue(self, input_line, output_stream, prefix, issue): """Handle the output for an auto-linked issue. Args: input_line: Current line number being processed. output_stream: Output Markdown file. prefix: The text that came before the issue number. issue: The issue number. """ handled = False # Preferred handler is to map the Google Code issue to a GitHub issue. if self._issue_map and issue in self._issue_map: migrated_issue_url = self._issue_map[issue] migrated_issue = migrated_issue_url.rsplit("/", 1)[1] self.HandleLink( input_line, output_stream, migrated_issue_url, u"{0}{1}".format(prefix, migrated_issue)) handled = True instructions = (u"In the output, it has been linked to the migrated issue " "on GitHub: {0}. Please verify this issue on GitHub " "corresponds to the original issue on Google Code. 
" .format(migrated_issue)) elif self._issue_map: instructions = ("However, it was not found in the issue migration map; " "please verify that this issue has been correctly " "migrated to GitHub and that the issue mapping is put " "in the issue migration map file. ") else: instructions = ("However, no issue migration map was specified. You " "can use issue_migration.py to migrate your Google " "Code issues to GitHub, and supply the resulting issue " "migration map file to this converter. Your old issues " "will be auto-linked to your migrated issues. ") # If we couldn't handle it in the map, try linking to the old issue. if not handled and self._project: old_link = (u"https://code.google.com/p/{0}/issues/detail?id={1}" .format(self._project, issue)) self.HandleLink( input_line, output_stream, old_link, u"{0}{1}".format(prefix, issue)) handled = True instructions += (u"As a placeholder, the text has been modified to " "link to the original Google Code issue page:\n\t{0}" .format(old_link)) elif not handled: instructions += ("Additionally, because no project name was specified " "the issue could not be linked to the original Google " "Code issue page.") # Couldn't map it to GitHub nor could we link to the old issue. if not handled: output = u"{0}{1} (on Google Code)".format(prefix, issue) self._Write(output, output_stream) handled = True instructions += (u"The auto-link has been removed and the text has been " "modified from '{0}{1}' to '{2}'." .format(prefix, issue, output)) self._warning_method( input_line, u"Issue {0} was auto-linked. {1}".format(issue, instructions)) def HandleRevision(self, input_line, output_stream, prefix, revision): """Handle the output for an auto-linked issue. Args: input_line: Current line number being processed. output_stream: Output Markdown file. prefix: The text that came before the revision number. revision: The revision number. 
""" # Google Code only auto-linked revision numbers, not hashes, so # revision auto-linking cannot be done for the conversion. if self._project: old_link = (u"https://code.google.com/p/{0}/source/detail?r={1}" .format(self._project, revision)) self.HandleLink( input_line, output_stream, old_link, u"{0}{1}".format(prefix, revision)) instructions = (u"As a placeholder, the text has been modified to " "link to the original Google Code source page:\n\t{0}" .format(old_link)) else: output = u"{0}{1} (on Google Code)".format(prefix, revision) self._Write(output, output_stream) instructions = (u"Additionally, because no project name was specified " "the revision could not be linked to the original " "Google Code source page. The auto-link has been removed " "and the text has been modified from '{0}{1}' to '{2}'." .format(prefix, revision, output)) self._warning_method( input_line, u"Revision {0} was auto-linked. SVN revision numbers are not sensible " "in Git; consider updating this link or removing it altogether. {1}" .format(revision, instructions)) def HandleHtmlOpen( self, unused_input_line, output_stream, html_tag, params, has_end): """Handle the output for an opening HTML tag. Args: unused_input_line: Current line number being processed. output_stream: Output Markdown file. html_tag: The HTML tag name. params: The parameters for the tag. has_end: True if the tag was self-closed. """ core_params = self._SerializeHtmlParams(params) core = u"{0}{1}".format(html_tag, core_params) if has_end: output = u"<{0} />".format(core) else: output = u"<{0}>".format(core) self._in_html += 1 self._Write(output, output_stream) self._has_written_text = False def HandleHtmlClose(self, unused_input_line, output_stream, html_tag): """Handle the output for an closing HTML tag. Args: unused_input_line: Current line number being processed. output_stream: Output Markdown file. html_tag: The HTML tag name. 
""" self._Write(u"</{0}>".format(html_tag), output_stream) self._in_html -= 1 self._has_written_text = False def HandleGPlusOpen(self, input_line, output_stream, unused_params): """Handle the output for opening a +1 button. Args: input_line: Current line number being processed. output_stream: Output Markdown file. unused_params: The parameters for the tag. """ self._warning_method( input_line, "A Google+ +1 button was embedded on this page, but GitHub does not " "currently support this. Should it become supported in the future, " "see https://developers.google.com/+/web/+1button/ for more " "information.\nIt has been removed.") def HandleGPlusClose(self, unused_input_line, unused_output_stream): """Handle the output for closing a +1 button. Args: unused_input_line: Current line number being processed. unused_output_stream: Output Markdown file. """ pass def HandleCommentOpen(self, input_line, output_stream): """Handle the output for opening a comment. Args: input_line: Current line number being processed. output_stream: Output Markdown file. """ self._warning_method( input_line, "A comment was used in the wiki file, but GitHub does not currently " "support Markdown or HTML comments. As a work-around, the comment will " "be placed in a bogus and empty <a> tag.") self._Write("<a href='Hidden comment: ", output_stream) self._in_comment = True def HandleCommentClose(self, unused_input_line, output_stream): """Handle the output for closing a comment. Args: unused_input_line: Current line number being processed. output_stream: Output Markdown file. """ self._in_comment = False self._Write("'></a>", output_stream) def HandleVideoOpen(self, input_line, output_stream, video_id, width, height): """Handle the output for opening a video. Args: input_line: Current line number being processed. output_stream: Output Markdown file. video_id: The video ID to play. width: Width of the resulting widget. height: Height of the resulting widget. 
""" self._warning_method( input_line, "GFM does not support embedding the YouTube player directly. Instead " "an image link to the video is being used, maintaining sizing options.") output = self._VIDEO_TEMPLATE.format(video_id, width, height) self._Write(output, output_stream) def HandleVideoClose(self, unused_input_line, output_stream): """Handle the output for closing a video. Args: unused_input_line: Current line number being processed. output_stream: Output Markdown file. """ # Everything was handled on the open side. pass def HandleText(self, unused_input_line, output_stream, text): """Handle the output of raw text. Args: unused_input_line: Current line number being processed. output_stream: Output Markdown file. text: The text to output. """ self._Write(text, output_stream) self._has_written_text = True def HandleEscapedText(self, input_line, output_stream, text): """Handle the output of text, which should be escaped for Markdown. Args: input_line: Current line number being processed. output_stream: Output Markdown file. text: The text to output. """ # If we're in HTML, Markdown isn't processed anyway. if self._in_html: self.HandleText(input_line, output_stream, text) else: self.HandleText(input_line, output_stream, self._Escape(text)) def _PrintHtmlWarning(self, input_line, kind): """Warn about HTML translation being performed. Args: input_line: Current line number being processed. kind: The kind of tag being changed. """ self._warning_method( input_line, u"{0} markup was used within HTML tags. Because GitHub does not " "support this, the tags have been translated to HTML. Please verify " "that the formatting is correct.".format(kind)) def _HandleHtmlListOpen( self, input_line, output_stream, indentation_level, kind): """Handle the output for opening an HTML list. Args: input_line: Current line number being processed. output_stream: Output Markdown file. indentation_level: The indentation level for the item. kind: The kind of list being opened. 
""" # Determine if this is a new list, and if a previous list was closed. if self._list_tags: top_tag = self._list_tags[-1] if top_tag["indent"] != indentation_level: # Opening a new nested list. Indentation level will always be greater, # because for it to have gone down, the list would have been closed. new_list = True closing = False elif top_tag["kind"] != kind: # Closed the previous list, started a new one. new_list = True closing = True else: # Same list, already opened. new_list = False closing = False else: new_list = True closing = False # If we need to, close the prior list. if closing: self._HandleHtmlListClose(input_line, output_stream) # Grab the tags we'll be using. list_tag = self._HTML_LIST_TAGS[kind]["ListTag"] item_tag = self._HTML_LIST_TAGS[kind]["ItemTag"] # If this is a new list, note it in the stack and open it. if new_list: new_tag = { "indent": indentation_level, "kind": kind, } self._list_tags.append(new_tag) self._PrintHtmlWarning(input_line, kind) self.HandleHtmlOpen(input_line, output_stream, list_tag, {}, False) else: # Not a new list, close the previously outputted item. if item_tag: self.HandleHtmlClose(input_line, output_stream, item_tag) # Open up a new list item if item_tag: self.HandleHtmlOpen(input_line, output_stream, item_tag, {}, False) def _HandleHtmlListClose(self, input_line, output_stream): """Handle the output for closing an HTML list. Args: input_line: Current line number being processed. output_stream: Output Markdown file. """ # Fix index error if list_tags is empty. if len(self._list_tags) == 0: self._warning_method(input_line, "HtmlListClose without list_tags?") self._list_tags = [ { "indent": 0, "kind": "Bulleted list" } ] top_tag = self._list_tags[-1] kind = top_tag["kind"] self._list_tags.pop() # Grab the tags we'll be using. list_tag = self._HTML_LIST_TAGS[kind]["ListTag"] item_tag = self._HTML_LIST_TAGS[kind]["ItemTag"] # Close the previously outputted item and the list. 
if item_tag: self.HandleHtmlClose(input_line, output_stream, item_tag) self.HandleHtmlClose(input_line, output_stream, list_tag) def _HandleFormatClose(self, input_line, output_stream, kind): """Handle the output of a closing format tag. Args: input_line: Current line number being processed. output_stream: Output Markdown file. kind: The formatting kind. """ if self._format_buffer: # End redirection. format_buffer = self._format_buffer[-1] self._format_buffer.pop() # Don't do anything if we didn't buffer, or it was only whitespace. format_buffer = format_buffer.strip() if not format_buffer: return if self._in_html: tag = self._HTML_FORMAT_TAGS[kind]["HTML"] self.HandleHtmlOpen(input_line, output_stream, tag, {}, False) self.HandleText(input_line, output_stream, format_buffer) self.HandleHtmlClose(input_line, output_stream, tag) else: tag = self._HTML_FORMAT_TAGS[kind]["Markdown"] self._Write(u"{0}{1}{0}".format(tag, format_buffer), output_stream) else: self._warning_method(input_line, u"Re-closed '{0}', ignoring.".format(tag)) def _Indent(self, output_stream, indentation_level): """Output indentation. Args: output_stream: Output Markdown file. indentation_level: Number of indentations to output. """ self._Write(self._SINGLE_INDENTATION * indentation_level, output_stream) def _Escape(self, text): """Escape Wiki text to be suitable in Markdown. Args: text: Input Wiki text. Returns: Escaped text for Markdown. """ text = text.replace("*", r"\*") text = text.replace("_", r"\_") # If we find a plugin-like bit of text, escape the angle-brackets. for plugin_re in [constants.PLUGIN_RE, constants.PLUGIN_END_RE]: while plugin_re.search(text): match = plugin_re.search(text) before_match = text[:match.start()] after_match = text[match.end():] escaped_match = match.group(0).replace("<", "&lt;").replace(">", "&gt;") text = u"{0}{1}{2}".format(before_match, escaped_match, after_match) # In Markdown, if a newline is preceeded by two spaces it breaks the line. 
# For Wiki text, this is not the case, so we strip such endings off. while text.endswith(" \n"): text = text[:-len(" \n")] + "\n" return text def _SerializeHtmlParams(self, params): """Serialize parameters for an HTML tag. Args: params: The parameters for the tag. Returns: Serialized parameters. """ core_params = "" for name, value in params.items(): if "'" not in value: quote = "'" else: quote = "\"" core_params += u" {0}={1}{2}{1}".format(name, quote, value) return core_params def _Write(self, text, output_stream): """Write text to the output stream, taking into account any redirection. Args: text: Input raw text. output_stream: Output Markdown file. """ if not text: return if not self._in_comment and self._in_html: if self._in_code_block: text = cgi.escape(text) if self._in_code_block or self._has_written_text: text = text.replace("\n", "<br>\n") if self._in_comment: text = text.replace("'", "\"") if self._format_buffer: # Buffering is occuring, add to buffer. self._format_buffer[-1] += text else: # No buffering occuring, just output it. output_stream.write(text)
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).

from __future__ import annotations

from pants.backend.python.util_rules.package_dists import SetupKwargs, SetupKwargsRequest
from pants.core.util_rules.external_tool import (
    DownloadedExternalTool,
    ExternalTool,
    ExternalToolRequest,
)
from pants.engine.console import Console
from pants.engine.goal import Goal, GoalSubsystem
from pants.engine.internals.options_parsing import _Options
from pants.engine.internals.session import SessionValues
from pants.engine.rules import Get, collect_rules, goal_rule, rule
from pants.engine.target import Target
from pants.engine.unions import UnionRule
from pants.option.alias import CliAlias
from pants.option.config import Config
from pants.option.options_bootstrapper import OptionsBootstrapper
from pants.util.strutil import softwrap
from pants.version import VERSION


class PantsSetupKwargsRequest(SetupKwargsRequest):
    """Marks every `python_distribution` in this repo as using the custom kwargs rule below."""

    @classmethod
    def is_applicable(cls, _: Target) -> bool:
        # We always use our custom `setup()` kwargs generator for `python_distribution` targets in
        # this repo.
        return True


@rule
async def pants_setup_kwargs(request: PantsSetupKwargsRequest) -> SetupKwargs:
    """Validate and augment the explicit `setup()` kwargs for Pants's own distributions.

    Targets under `testprojects` are passed through untouched. For everything else,
    required fields are validated, standard classifiers are prepended, and a set of
    repo-wide kwargs (version, URLs, license, ...) is injected.

    Raises:
        ValueError: if `name` is missing or does not start with 'pantsbuild.pants',
            if `description` is missing, or if any hardcoded kwarg was also set
            explicitly in the BUILD file.
    """
    kwargs = request.explicit_kwargs.copy()
    if request.target.address.path_safe_spec.startswith("testprojects"):
        # Test fixtures are exempt from the pantsbuild.pants conventions below.
        return SetupKwargs(kwargs, address=request.target.address)

    # Validate that required fields are set.
    # Use .get() so a missing `name` raises a helpful ValueError rather than an
    # opaque KeyError (mirroring the `description` check below).
    name = kwargs.get("name")
    if not name or not name.startswith("pantsbuild.pants"):
        raise ValueError(
            f"Invalid `name` kwarg in the `provides` field for {request.target.address}. The name "
            f"must start with 'pantsbuild.pants', but was {name}."
        )
    if "description" not in kwargs:
        raise ValueError(
            f"Missing a `description` kwarg in the `provides` field for {request.target.address}."
        )

    # Add classifiers. We preserve any that were already set.
    standard_classifiers = [
        "Intended Audience :: Developers",
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: MacOS :: MacOS X",
        "Operating System :: POSIX :: Linux",
        "Programming Language :: Python",
        "Topic :: Software Development :: Build Tools",
    ]
    kwargs["classifiers"] = [*standard_classifiers, *kwargs.get("classifiers", [])]

    # Hardcode certain kwargs and validate that they weren't already set.
    hardcoded_kwargs = dict(
        version=VERSION,
        long_description=softwrap(
            """
            Pants is an Apache2 licensed build tool written in Python and Rust.

            The latest documentation can be found at
            [pantsbuild.org](https://www.pantsbuild.org/).
            """
        ),
        long_description_content_type="text/markdown",
        url="https://github.com/pantsbuild/pants",
        project_urls={
            "Documentation": "https://www.pantsbuild.org/",
            "Source": "https://github.com/pantsbuild/pants",
            "Tracker": "https://github.com/pantsbuild/pants/issues",
            "Changelog": "https://www.pantsbuild.org/docs/changelog",
            "Twitter": "https://twitter.com/pantsbuild",
            "Slack": "https://www.pantsbuild.org/docs/getting-help",
            "YouTube": "https://www.youtube.com/channel/UCCcfCbDqtqlCkFEuENsHlbQ",
            "Mailing lists": "https://www.pantsbuild.org/docs/getting-help",
        },
        license="Apache License, Version 2.0",
        zip_safe=True,
    )
    conflicting_hardcoded_kwargs = set(kwargs.keys()).intersection(hardcoded_kwargs.keys())
    if conflicting_hardcoded_kwargs:
        raise ValueError(
            f"These kwargs should not be set in the `provides` field for {request.target.address} "
            "because Pants's internal plugin will automatically set them: "
            f"{sorted(conflicting_hardcoded_kwargs)}"
        )
    kwargs.update(hardcoded_kwargs)

    return SetupKwargs(kwargs, address=request.target.address)


class CheckDefaultToolsSubsystem(GoalSubsystem):
    """Subsystem for the `check-default-tools` goal defined below."""

    name = "check-default-tools"
    help = "Options for checking that external tool default locations are correctly typed."
class CheckDefaultTools(Goal):
    """Goal wrapper for verifying that external-tool default versions download cleanly."""

    subsystem_cls = CheckDefaultToolsSubsystem
    environment_behavior = Goal.EnvironmentBehavior.LOCAL_ONLY  # TODO(#17129) — Migrate this.


@goal_rule
async def check_default_tools(
    console: Console,
    real_opts: _Options,
) -> CheckDefaultTools:
    """For every registered ExternalTool, download each default known version.

    Validates that the hardcoded (url, sha256, filesize) defaults of every tool are
    correct by actually fetching them one at a time, printing per-version progress.
    """
    # The real options know about all the registered tools.
    for scope, si in real_opts.options.known_scope_to_info.items():
        if si.subsystem_cls and issubclass(si.subsystem_cls, ExternalTool):
            tool_cls = si.subsystem_cls
            console.print_stdout(f"Checking {console.cyan(tool_cls.name)}:")
            for known_version in tool_cls.default_known_versions:
                version = tool_cls.decode_known_version(known_version)
                # Note that we don't want to use the real option values here - we want to
                # verify that the *defaults* aren't broken. However the get_request_for() method
                # requires an instance (since it can consult option values, including custom
                # options for specific tools, that we don't know about), so we construct a
                # default one, but we force the --version to the one we're checking (which will
                # typically be the same as the default version, but doesn't have to be, if the
                # tool provides default_known_versions for versions other than default_version).
                args = ("./pants", f"--{scope}-version={version.version}")
                blank_opts = await Get(
                    _Options,
                    SessionValues(
                        {
                            OptionsBootstrapper: OptionsBootstrapper(
                                tuple(), ("./pants",), args, Config(tuple()), CliAlias()
                            )
                        }
                    ),
                )
                instance = tool_cls(blank_opts.options.for_scope(scope))
                req = instance.get_request_for(version.platform, version.sha256, version.filesize)
                console.write_stdout(f"  version {version.version} for {version.platform}... ")
                # TODO: We'd like to run all the requests concurrently, but since we can't catch
                # engine exceptions, we wouldn't have an easy way to output which one failed.
                await Get(DownloadedExternalTool, ExternalToolRequest, req)
                console.print_stdout(console.sigil_succeeded())
    return CheckDefaultTools(exit_code=0)


def rules():
    """Register this plugin's rules and the SetupKwargsRequest union member."""
    return (*collect_rules(), UnionRule(SetupKwargsRequest, PantsSetupKwargsRequest))
#!/usr/bin/env python
# coding: utf-8

# <b> Strings, List, Tuples <b>

# In[2]:

# Code prompts written by Cris Doloc
# Code responses written by Erika Lin

# <b> Transform the input string such that you will create another string by removing
# the characters of odd index values and replace them with the first character of the
# input string. Print the modified string! <b>

# In[11]:


def remove_odd_characters(unreplaced_string):
    """Return a copy of *unreplaced_string* in which every character at an odd
    index is replaced by the string's first character.

    Even-indexed characters are kept unchanged; the empty string maps to itself.
    """
    if not unreplaced_string:
        return ""
    first_char = unreplaced_string[0]
    # enumerate() replaces the original hand-maintained counter; the original
    # also had an unreachable print() after its return statement, now removed.
    return "".join(
        char if index % 2 == 0 else first_char
        for index, char in enumerate(unreplaced_string)
    )


if __name__ == "__main__":
    # Guarded so importing this module does not block on stdin.
    print(remove_odd_characters(input()))


# <b> Reverse the string obtained above by using three different implementations.
# Print the new string! <b>
<b> # In[12]: ####Method 1 unreplaced_string[::-1] # In[13]: ####Method 2 def reversed_string(unreplaced_string): pos_last_char = (len(unreplaced_string) - 1) #position of the last character is the total length of the word subtracted from 1 #since index values start at 0 we want the position to start at one less value to start at index value 0 n = pos_last_char #set the n variable to the pos_last_char variable for simplification purposes reverse_char = "" #the contents of the for loop should loop into this variable for char in unreplaced_string: reverse_char += unreplaced_string[n] #reverse_char is itself added to the character at the position value of n #n should start at 0 and continue to print until the end of the word n = n - 1 #put n inside for loop or else it will print 'ooooo' and tell n to subtract by one to reverse the string from #index values will go from 0,-1,-2,... return reverse_char print(reverse_char) reversed_string(unreplaced_string) # In[14]: ####Method 3 def reversed_string(unreplaced_string): if unreplaced_string == "": return unreplaced_string else: return reversed_string(unreplaced_string[1:])+unreplaced_string[0] reversed_string(unreplaced_string) # <b> Write a Py function that takes the numerical list from the input and returns another list from where the largest and smallest elements were replaced by 1000, respectively -1000. Print the new list! 
# In[16]:


def max_min_in_list(myList):
    """Replace the largest element of *myList* with 1000 and the smallest with
    -1000 (first occurrence of each), in place, and return the list.

    The original removed the extremes and re-inserted the *strings* "1000" /
    "-1000" at index positions equal to the removed *values*, which scrambled
    the list; here the elements are replaced at their own positions instead.

    For a single-element list both replacements target index 0, so the -1000
    (min) replacement wins. An empty list is returned unchanged.
    """
    if not myList:
        return myList
    # Locate both extremes before mutating anything.
    max_index = myList.index(max(myList))
    min_index = myList.index(min(myList))
    myList[max_index] = 1000
    myList[min_index] = -1000
    return myList


# <b> Sort the elements of a tuple in a decreasing order. <b>

# In[17]:

if __name__ == "__main__":
    # Guarded so importing this module does not block on stdin.
    s = input("Introduce a list with numbers: ")
    myList = list(map(int, s.split()))  # do not include commas
    print(max_min_in_list(myList))

    s = input("Introduce a tuple: ")
    my_tuple = tuple(map(int, s.split()))
    # sorted() yields smallest-to-biggest; reverse the slice for descending order.
    new_tuple = tuple(sorted(my_tuple)[::-1])
    print(new_tuple)
    print(type(my_tuple))


# In[ ]:
import scrapy
import json
from scrapy import Request, FormRequest
import csv
import re
from scrapy.shell import inspect_response


class YelpSpider(scrapy.Spider):
    """Scrape the first proxies (ip:port) listed on free-proxy-list.net."""

    name = "proxy"
    # allowed_domains entries must be bare hostnames, not URLs: scrapy's
    # OffsiteMiddleware compares domains, so "https://..." never matches and
    # would misconfigure offsite filtering.
    allowed_domains = ["free-proxy-list.net"]

    def start_requests(self):
        url = 'https://free-proxy-list.net'
        yield scrapy.Request(url, self.parsePage)

    def parsePage(self, response):
        # inspect_response(response, self)  # uncomment for interactive debugging
        rows = response.css('tr')[1:]  # drop the table header row
        # NOTE(review): the original iterated range(1, 11), skipping rows[0]
        # even after the header was already sliced off — preserved here;
        # confirm whether the first data row should really be dropped.
        for i in range(1, 11):
            cells = rows[i].css('td::text').extract()  # renamed from `list`, which shadowed the builtin
            yield {'proxy': cells[0] + ':' + cells[1]}
#################################
# Split the lineage output
################################
# Read lineages.tsv (tab-separated, lineage string in column 2), split each
# semicolon-delimited lineage into its component ranks, and write the ranks
# out as tab-separated columns in split_lineages.tsv.

with open("lineages.tsv") as source:
    records = [
        row.strip('\n').split('\t')[1].split(";")
        for row in source.readlines()
    ]

with open("split_lineages.tsv", 'w') as sink:
    for ranks in records:
        sink.write('\t'.join(ranks) + '\n')
import airflow
from airflow.models import DAG
from airflow.operators.python_operator import PythonOperator
from airflow.operators.bash_operator import BashOperator
from airflow.operators.dummy_operator import DummyOperator
from datetime import datetime

# Daily DAG: print the current date, fan out into three sleep tasks of
# different lengths, then join at a terminal no-op task.

default_args = {
    'owner': 'Miha',
    'start_date': datetime(2019, 11, 17),
}

dag = DAG(
    dag_id='exercise2',
    default_args=default_args,
    schedule_interval="@daily",
)


def print_date(**context):
    """Log the wall-clock date at task run time."""
    print("Date = " + str(datetime.today()))


print_execution_date = PythonOperator(
    task_id="print_execution_date",
    python_callable=print_date,
    provide_context=True,
    dag=dag,
)

# One BashOperator per sleep length; task_ids match the original wait_5/wait_1/wait_10.
sleep_tasks = [
    BashOperator(task_id="wait_{0}".format(secs), bash_command="sleep {0}".format(secs), dag=dag)
    for secs in (5, 1, 10)
]

the_end = DummyOperator(
    task_id='the_end',
    dag=dag,
)

print_execution_date >> sleep_tasks
sleep_tasks >> the_end
/home/ub/cvbridge_build_ws/devel/.private/catkin_tools_prebuild/_setup_util.py
# coding: utf-8 """ Telstra SMS Messaging API The Telstra SMS Messaging API allows your applications to send and receive SMS text messages from Australia's leading network operator. It also allows your application to track the delivery status of both sent and received SMS messages. OpenAPI spec version: 2.1.3 Generated by: https://github.com/swagger-api/swagger-codegen.git """ from __future__ import absolute_import import sys import os import re # python 2 and python 3 compatibility library from six import iteritems from ..configuration import Configuration from ..api_client import ApiClient class SMSApi(object): """ NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. Ref: https://github.com/swagger-api/swagger-codegen """ def __init__(self, api_client=None): config = Configuration() if api_client: self.api_client = api_client else: if not config.api_client: config.api_client = ApiClient() self.api_client = config.api_client def messages_sms_incoming(self, authorization, **kwargs): """ Retrieve the unread incoming SMS messages Returns a list of unread incoming SMS messages that were sent to the mobile phone nubmer registered with the developer's application. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.messages_sms_incoming(authorization, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str authorization: Authorization header in the format 'Bearer {access_token}' - get the token by using the OAuth API with the scope 'SMS' (required) :return: list[InlineResponse200] If the method is called asynchronously, returns the request thread. 
""" kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.messages_sms_incoming_with_http_info(authorization, **kwargs) else: (data) = self.messages_sms_incoming_with_http_info(authorization, **kwargs) return data def messages_sms_incoming_with_http_info(self, authorization, **kwargs): """ Retrieve the unread incoming SMS messages Returns a list of unread incoming SMS messages that were sent to the mobile phone nubmer registered with the developer's application. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.messages_sms_incoming_with_http_info(authorization, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str authorization: Authorization header in the format 'Bearer {access_token}' - get the token by using the OAuth API with the scope 'SMS' (required) :return: list[InlineResponse200] If the method is called asynchronously, returns the request thread. 
""" all_params = ['authorization'] all_params.append('callback') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method messages_sms_incoming" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'authorization' is set if ('authorization' not in params) or (params['authorization'] is None): raise ValueError("Missing the required parameter `authorization` when calling `messages_sms_incoming`") collection_formats = {} resource_path = '/messages/sms'.replace('{format}', 'json') path_params = {} query_params = {} header_params = {} if 'authorization' in params: header_params['Authorization'] = params['authorization'] form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'text/xml']) # Authentication setting auth_settings = [] return self.api_client.call_api(resource_path, 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='list[InlineResponse200]', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def messages_sms_send(self, authorization, payload, **kwargs): """ Send an SMS to a Australian or International mobile phone. Sends a SMS to a single Australian or International mobile phone number. A unique identifier (messageId) returned in the response, which may be used to query for the delivery status of the message. This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.messages_sms_send(authorization, payload, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str authorization: A header in the format 'Bearer {access_token}' - get the token by using the OAuth API with the scope 'SMS' (required) :param Payload payload: A JSON or XML payload containing the recipient's phone number and text message. The recipient number should be in the format '04xxxxxxxx' where x is a digit (required) :return: InlineResponse201 If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.messages_sms_send_with_http_info(authorization, payload, **kwargs) else: (data) = self.messages_sms_send_with_http_info(authorization, payload, **kwargs) return data def messages_sms_send_with_http_info(self, authorization, payload, **kwargs): """ Send an SMS to a Australian or International mobile phone. Sends a SMS to a single Australian or International mobile phone number. A unique identifier (messageId) returned in the response, which may be used to query for the delivery status of the message. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.messages_sms_send_with_http_info(authorization, payload, callback=callback_function) :param callback function: The callback function for asynchronous request. 
(optional) :param str authorization: A header in the format 'Bearer {access_token}' - get the token by using the OAuth API with the scope 'SMS' (required) :param Payload payload: A JSON or XML payload containing the recipient's phone number and text message. The recipient number should be in the format '04xxxxxxxx' where x is a digit (required) :return: InlineResponse201 If the method is called asynchronously, returns the request thread. """ all_params = ['authorization', 'payload'] all_params.append('callback') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method messages_sms_send" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'authorization' is set if ('authorization' not in params) or (params['authorization'] is None): raise ValueError("Missing the required parameter `authorization` when calling `messages_sms_send`") # verify the required parameter 'payload' is set if ('payload' not in params) or (params['payload'] is None): raise ValueError("Missing the required parameter `payload` when calling `messages_sms_send`") collection_formats = {} resource_path = '/messages/sms'.replace('{format}', 'json') path_params = {} query_params = {} header_params = {} if 'authorization' in params: header_params['Authorization'] = params['authorization'] form_params = [] local_var_files = {} body_params = None if 'payload' in params: body_params = params['payload'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'text/xml']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json', 'text/xml']) # Authentication setting auth_settings = [] return self.api_client.call_api(resource_path, 'POST', 
path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='InlineResponse201', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def messages_sms_status(self, authorization, message_id, **kwargs): """ Retrieve the status of a single outgoing SMS message. Retrieve the status of a message by using the 'messageId' that returned as returned in the response from the Send SMS method to get the status. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.messages_sms_status(authorization, message_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str authorization: Authorization header in the format 'Bearer {access_token}' - get the token by using the OAuth API with the scope 'SMS' (required) :param str message_id: Unique identifier of a message - it is the value returned from a previous POST call to https://api.telstra.com/v2/messages/sms (required) :return: InlineResponse2001 If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.messages_sms_status_with_http_info(authorization, message_id, **kwargs) else: (data) = self.messages_sms_status_with_http_info(authorization, message_id, **kwargs) return data def messages_sms_status_with_http_info(self, authorization, message_id, **kwargs): """ Retrieve the status of a single outgoing SMS message. 
Retrieve the status of a message by using the 'messageId' that returned as returned in the response from the Send SMS method to get the status. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.messages_sms_status_with_http_info(authorization, message_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str authorization: Authorization header in the format 'Bearer {access_token}' - get the token by using the OAuth API with the scope 'SMS' (required) :param str message_id: Unique identifier of a message - it is the value returned from a previous POST call to https://api.telstra.com/v2/messages/sms (required) :return: InlineResponse2001 If the method is called asynchronously, returns the request thread. """ all_params = ['authorization', 'message_id'] all_params.append('callback') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method messages_sms_status" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'authorization' is set if ('authorization' not in params) or (params['authorization'] is None): raise ValueError("Missing the required parameter `authorization` when calling `messages_sms_status`") # verify the required parameter 'message_id' is set if ('message_id' not in params) or (params['message_id'] is None): raise ValueError("Missing the required parameter `message_id` when calling `messages_sms_status`") collection_formats = {} resource_path = '/messages/sms/{messageId}/status'.replace('{format}', 'json') path_params = {} if 'message_id' in 
params: path_params['messageId'] = params['message_id'] query_params = {} header_params = {} if 'authorization' in params: header_params['Authorization'] = params['authorization'] form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'text/xml']) # Authentication setting auth_settings = [] return self.api_client.call_api(resource_path, 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='InlineResponse2001', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats)
def summarize_group(people):
    """Summarise a group of people.

    Args:
        people: iterable of (nome, idade, sexo) tuples, with sexo already
            upper-cased to 'M' or 'F'.

    Returns:
        (media, nome_velho, idade_velho, menos_20_anos): the mean age (0.0 for
        an empty group), the name and age of the oldest man ('' / 0 when there
        is none), and the count of women under 20.
    """
    soma = 0
    idade_velho = 0
    nome_velho = ''
    menos_20_anos = 0
    for nome, idade, sexo in people:
        soma += idade
        if sexo == 'F' and idade < 20:
            menos_20_anos += 1
        if sexo == 'M' and idade_velho < idade:
            idade_velho = idade
            nome_velho = nome
    # Generalised from the original hard-coded "soma / 4" so any group size works
    # (identical result for the 4-person interactive flow below).
    media = soma / len(people) if people else 0.0
    return media, nome_velho, idade_velho, menos_20_anos


if __name__ == '__main__':
    # Interactive data entry kept separate from the (testable) computation above.
    pessoas = []
    for p in range(1, 5, 1):
        print(f'----- {p}ª PESSOA -----')
        nome = str(input('Nome: ')).strip()
        idade = int(input('Idade: '))
        sexo = str(input('Sexo [M/F]: ')).upper().strip()
        pessoas.append((nome, idade, sexo))
    media, nome_velho, idade_velho, menos_20_anos = summarize_group(pessoas)
    print(f'A média de idade do grupo é de {media} anos')
    print(f'O homem mais velho tem {idade_velho} anos e se chama {nome_velho}.')
    print(f'Ao todo, são {menos_20_anos} mulheres com menos de 20 anos')
from functools import lru_cache


def number_of_ways_to_climb(stair_height):
    """Count the distinct ways to climb `stair_height` stairs taking steps
    of size 1, 2 or 3.

    Returns 1 for height 0 (the empty climb) and 0 for negative heights,
    matching the recursive definition f(h) = f(h-1) + f(h-2) + f(h-3).
    """
    if stair_height < 0:
        return 0
    # Bottom-up table: ways[h] = number of ways to reach height h.
    ways = [1]  # base case: one way to stand at the bottom
    for height in range(1, stair_height + 1):
        count = 0
        for step in (1, 2, 3):
            if height >= step:
                count += ways[height - step]
        ways.append(count)
    return ways[stair_height]


sample_inputs = [3, 7]
for s in sample_inputs:
    print(number_of_ways_to_climb(s))
import os
import sys
import json

cur_dir = os.path.dirname(os.path.realpath(__file__))


def isnum(s):
    """Return True if *s* parses as a float."""
    try:
        float(s)
        return True
    except ValueError:
        return False


def parse_tmp_file(fp):
    """Parse one benchmark-result JSON file.

    Expects a JSON object with one entry per benchmark key whose
    data[key]['ff'][0]['para-elapsed-time'] holds the parallel elapsed time.
    Returns a dict mapping benchmark name -> elapsed time.
    """
    keys = ['qsort', 'kmeans', 'fib', 'nqueen', 'lu', 'mutex']
    with open(fp, 'r') as f:
        data = json.load(f)
    res = {k: data[k]['ff'][0]['para-elapsed-time'] for k in keys}
    # print(res) was py2's `print res` — kept as a progress trace
    print(res)
    return res


def main():
    """Append this run's (optionally prefixed) results to the JSON result file.

    argv: <tmp-file> <result-file> [key-prefix]
    """
    fp = os.path.join(cur_dir, sys.argv[1])
    res_fp = os.path.join(cur_dir, sys.argv[2])
    prefix = ''
    if len(sys.argv) == 4:
        prefix = str(sys.argv[3])

    # result file accumulates one record per invocation
    old_data = []
    if os.path.exists(res_fp):
        with open(res_fp, 'r') as f:
            old_data = json.load(f)

    d = parse_tmp_file(fp)
    record = {prefix + k: v for k, v in d.items()}
    old_data.append(record)

    with open(res_fp, 'w') as f:
        json.dump(old_data, f)


if __name__ == '__main__':
    main()
import unittest

from hyper2web.http import Stream
from h2.events import DataReceived


class TestStream(unittest.TestCase):
    """Unit tests for Stream construction/update/finalize rules."""

    def test_header_not_empty(self):
        """Stream should refuse to construct if the header is Falsy or not a dict"""
        with self.assertRaises(Exception):
            Stream(stream_id=1, headers={})

    def test_update_on_same_stream_id(self):
        """A Stream should not update on an event with different stream id"""
        stream = Stream(stream_id=1, headers={'method': 'GET'})

        new_event = DataReceived()
        new_event.stream_id = 2
        new_event.data = b''
        with self.assertRaises(Exception):
            stream.update(new_event)

    def test_finalize(self):
        """Should not update a finalized Stream"""
        stream = Stream(stream_id=1, headers={'method': 'GET'})
        stream.finalize()

        new_event = DataReceived()
        # BUGFIX: use the SAME stream id as the Stream. With a mismatched id
        # (the original used 2), update() would raise for the id mismatch —
        # already covered by test_update_on_same_stream_id — so this test
        # would pass even if finalization were broken.
        new_event.stream_id = 1
        new_event.data = b''
        with self.assertRaises(Exception):
            stream.update(new_event)
import sys
from collections import Counter


def load_embeddings_file(file_name, sep=" ", lower=False, vocab=None):
    """Load a word-embedding text file (one `word v1 v2 ...` entry per line).

    :param file_name: path to the embeddings file
    :param sep: field separator between word and vector components
    :param lower: lowercase each word before the vocab check / storage
    :param vocab: optional set of words to keep; if None, keep all words
                  (the original crashed with TypeError on the default None)
    :return: dict word -> list[float]
    """
    emb = {}
    with open(file_name, errors='ignore', encoding='utf-8') as f:
        for line in f:
            try:
                fields = line.strip().split(sep)
                vec = [float(x) for x in fields[1:]]
                word = fields[0]
                if lower:
                    word = word.lower()
                if vocab is None or word in vocab:
                    emb[word] = vec  # only use words which are in vocab
            except ValueError:
                # malformed vector component — report and skip the line
                print("Error converting: {}".format(line))
    print("loaded pre-trained embeddings (word->emb_vec) size: {} (lower: {})".format(len(emb.keys()), lower))
    return emb


def save_embeds(emb, out_filename):
    """Write the embeddings dict back out in `word v1 v2 ...` format."""
    with open(out_filename, "w") as out:
        for word, vec in emb.items():
            out.write("{} {}\n".format(word, " ".join(str(x) for x in vec)))


def main():
    """Restrict GloVe embeddings to the vocabulary found in tokens.txt."""
    # get all types from gweb sancl labeled + unlabeled data
    data = set()
    with open("tokens.txt", 'rb') as f:
        for line in f:
            data.add(line.decode('utf-8', 'ignore').strip())

    print("vocab of size loaded: ", len(data))
    embeds = load_embeddings_file("glove.6B.100d.txt", vocab=data)
    save_embeds(embeds, "glove.6B.100d.restr.txt")


# Guarded so importing this module no longer crashes when tokens.txt is absent.
if __name__ == '__main__':
    main()
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.shortcuts import render, HttpResponse, redirect from time import gmtime, strftime # Create your views here. def index(request): return render(request, "session_words_app/index.html") def result(request): return render(request, "session_words_app/index.html") def add_word(request): try: request.session["words"] except KeyError: request.session["words"] = [] if request.POST.get and 'font_size' in request.POST and request.POST['font_size'] == "on": font = "big" else: font = "" words = { "word": request.POST["new_word"], "color": request.POST["color"], "size": font, "datetime": strftime("%I:%M:%S%p, %B %d, %Y", gmtime()), } temp_words = request.session["words"] temp_words.append(words) request.session["words"] = temp_words print request.session["words"] return redirect("/session_words/result") def clear(request): request.session.clear() return redirect("/session_words/result")
# leap.bank_id OUR_BANK = '' # leap.username USERNAME = '' # leap.password PASSWORD = '+' #leap.consumer_key CONSUMER_KEY = '' # API server URL BASE_URL = '' API_VERSION = "v4.0.0" # API server will redirect your browser to this URL, should be non-functional # You will paste the redirect location here when running the script CALLBACK_URI = 'http://127.0.0.1/cb' # counterparty id (mappedcounterparty.mcounterpartyid) OUR_COUNTERPARTY = '' # counterparty bank id (mappedcounterparty.mthisbankid) COUNTERPARTY_BANK = '' OUR_COUNTERPARTY_ID = '' OUR_COUNTERPARTY_IBAN = '' # Our currency to use OUR_CURRENCY = 'CAD' # Our value to transfer # values below 1000 do not requre challenge request OUR_VALUE = '0.01' OUR_VALUE_LARGE = '1000.00'
import platform, os


def main():
    """Print Python interpreter details and host system information to stdout."""
    # Interpreter information (typo fixed: "Infromation" -> "Information")
    print("\tPlatform Information \n")
    print('\tVersion: ', platform.python_version())
    print('\tVersion Tuple: ', platform.python_version_tuple())
    print('\tCompiler: ', platform.python_compiler())
    print('\tBuild: ', platform.python_build())

    # Host / OS information
    print('\n\tSystem Information\n')
    print("\tUname: ", platform.uname())
    print("\tSystem: ", platform.system())
    print("\tNode: ", platform.node())
    print("\tRelease: ", platform.release())
    print("\tVersion:", platform.version())
    print("\tMachine: ", platform.machine())
    print("\tProcessor: ", platform.processor())


# Guarded so importing this module doesn't spam stdout.
if __name__ == '__main__':
    main()
import numpy as np
import matplotlib.pyplot as plt  # was imported twice; duplicate removed
from matplotlib.ticker import FormatStrFormatter
import os


def plot_pic(x_true, fbp, guess, plot_pic_savepath, plot_title):
    """Save a 1x3 side-by-side image comparison: ground truth, FBP, guess.

    Assumes each argument is a batch whose first element is an image with a
    trailing channel axis of size 1 (hence `[0].squeeze(-1)`) — TODO confirm
    against the callers.
    """
    fig, ax = plt.subplots(nrows=1, ncols=3)
    ax[0].imshow(x_true[0].squeeze(-1))
    ax[0].set_title('x_true')
    ax[1].imshow(fbp[0].squeeze(-1))
    ax[1].set_title('fbp')
    ax[2].imshow(guess[0].squeeze(-1))
    ax[2].set_title('guess')
    plt.suptitle(plot_title)
    plt.savefig(plot_pic_savepath)


def plot_pic_custom(x_true, gen_zero, guess, plot_pic_savepath, plot_title):
    """Same as plot_pic but compares against a generator's zero output."""
    fig, ax = plt.subplots(nrows=1, ncols=3)
    ax[0].imshow(x_true[0].squeeze(-1))
    ax[0].set_title('x_true')
    ax[1].imshow(gen_zero[0].squeeze(-1))
    ax[1].set_title('gen_zero')
    ax[2].imshow(guess[0].squeeze(-1))
    ax[2].set_title('guess')
    plt.suptitle(plot_title)
    plt.savefig(plot_pic_savepath)


def plot_metrics(l2_arr, psnr_arr, ssim_arr, step_arr, plot_metric_savepath, plot_title):
    """Save a 1x3 semilog-y plot of l2 / psnr / ssim against step number."""
    fig, ax = plt.subplots(nrows=1, ncols=3, figsize=(18, 6))
    plt.suptitle(plot_title)

    # One panel per metric: black line with red sample markers.
    for i, (metric, label) in enumerate([(l2_arr, 'l2'),
                                         (psnr_arr, 'psnr'),
                                         (ssim_arr, 'ssim')]):
        ax[i].semilogy(step_arr, metric, c='k')
        ax[i].scatter(step_arr, metric, c='r')
        ax[i].set_title(label)
        ax[i].tick_params(axis='both', which='major')
        ax[i].yaxis.set_major_formatter(FormatStrFormatter('%.2f'))

    plt.savefig(plot_metric_savepath)


def write_to_csv_logmin2(csv_file, step, qualities):
    """Append one (step, l2, psnr, ssim) row, writing the header on first use."""
    if not os.path.exists(csv_file):
        with open(csv_file, 'w') as f:
            f.write('step,l2,psnr,ssim\n')
    with open(csv_file, 'a') as f:
        f.write(f'{step},{qualities[0]},{qualities[1]},{qualities[2]}\n')


def write_to_csv_logopt2(csv_file, mu, step_size, total_steps, qualities):
    """Append one hyperparameter-sweep row, writing the header on first use."""
    if not os.path.exists(csv_file):
        with open(csv_file, 'w') as f:
            f.write('mu,step_size,total_steps,l2,psnr,ssim\n')
    with open(csv_file, 'a') as f:
        f.write(f'{mu},{step_size},{total_steps},{qualities[0]},{qualities[1]},{qualities[2]}\n')
# Generates a patient ID card: a QR code linking to the lab page, pasted onto
# a card template image, with the patient's details drawn on top.
# Hard-coded assets expected in the working directory: 2.jpeg (card template)
# and the three Raleway .ttf font files.
import pyqrcode
import png
from pyqrcode import QRCode
from PIL import Image, ImageFont, ImageDraw
import os

# Build the QR code for the lab URL and save it as a PNG.
s = "https://patients-db-system.herokuapp.com/lab"
url = pyqrcode.create(s)
url.png('myqr.png', scale=6)

# Paste the QR code (resized to 350x350) onto the card template.
logo_file = "myqr.png"
logoIm = Image.open(logo_file)
im = Image.open("2.jpeg")
logoIm = logoIm.resize((350, 350))
logoWidth, logoHeight = logoIm.size
print(logoWidth, logoHeight)
im.paste(logoIm, (610, 120))  # (610, 120): QR position on the template
im.save(os.path.join("qr.jpg"))

# Reopen the composited card and draw the patient's details.
im = Image.open("qr.jpg")
draw = ImageDraw.Draw(im)
extra_bold = ImageFont.truetype('Raleway-ExtraBold.ttf', size=45)
black = ImageFont.truetype('Raleway-Black.ttf', size=25)
light = ImageFont.truetype('Raleway-Light.ttf', size=20)

# Patient name, teal, large
(x, y) = (50, 90)
message = "JANE DOE"
color = 'rgb(58,175,169)'
draw.text((x, y), message, fill=color, font=extra_bold)

# Patient ID, dark
(x, y) = (50, 170)
name = 'Patient - 1A1012'
color = 'rgb(23,37,40)'
draw.text((x, y), name, fill=color, font=black)

# Contact details, small
(x, y) = (50, 400)
name = 'Mobile - 9988776655'
color = 'rgb(23,37,40)'
draw.text((x, y), name, fill=color, font=light)

(x, y) = (50, 430)
name = 'Address - Anywhere street, cityname, 111111'
color = 'rgb(23,37,40)'
draw.text((x, y), name, fill=color, font=light)

im.save('greeting_card.png')
from __future__ import annotations

import re
from collections import namedtuple

from .helpers import prepend_scheme

# HTTP verbs followed by an opening paren, e.g. `get(`
VERBS = r"(get|options|head|post|put|patch|delete)\("
# A dotted identifier prefix, e.g. `requests.` or `session.`
PREFIX = r"[\w_][\w\d_]*\."
PREFIX_VERBS = PREFIX + VERBS
SESSION_SEND = PREFIX + r"send\("
ASSERTIONS = r"assert \{"

# selection: the extracted source text; ordering: its start offset in `s`
Selection = namedtuple("Selection", "selection, ordering")
TypedSelection = namedtuple("TypedSelection", "selection, type")
RequestAssertion = namedtuple("RequestAssertion", "request, assertion")


def parse_requests(s, **kwargs):
    """Parse string for all calls to `{name}.{verb}(`, or simply `{verb}(`.

    Returns a list of request strings.
    """
    selections = parse(s, "(", ")", [PREFIX_VERBS, VERBS, SESSION_SEND], **kwargs)
    return [sel.selection for sel in selections]


def parse_tests(s: str):
    """Parse string and return an ordered list of (request, assertion) strings."""
    requests = [TypedSelection(sel, "request") for sel in parse(s, "(", ")", [PREFIX_VERBS, VERBS, SESSION_SEND])]
    assertions = [TypedSelection(sel, "assertion") for sel in parse(s, "{", "}", [ASSERTIONS])]
    selections = requests + assertions
    # interleave requests and assertions in source order
    selections.sort(key=lambda s: s.selection.ordering)

    tests = []
    for i, sel in enumerate(selections):
        try:
            next_sel = selections[i + 1]
        except IndexError:
            break
        else:
            # a test is a request immediately followed by an assertion block
            if sel.type == "request" and next_sel.type == "assertion":
                tests.append(RequestAssertion(sel.selection.selection, next_sel.selection.selection))
    return tests


def parse(s: str, open_bracket, close_bracket, match_patterns, n=None, es=None):
    """Parse string `s` for selections that begin with at least one of the
    specified match patterns. Continue expanding each selection until its
    opening and closing brackets are balanced.

    Returns a list of `Selection` instances. Optionally stop after `n`
    selections have been parsed.

    Also supports a shorthand syntax for basic, one-line GET requests.
    """
    # Pass 1: find the character offset of every line that starts a selection.
    start_indices = []

    index = 0
    lines = s.splitlines(True)
    for line in lines:
        if n and len(start_indices) >= n:
            break
        for pattern in match_patterns:
            # for multiline requests, match must occur at start of line,
            # to diminish risk of matching something like 'get(' in the middle of a string
            if re.match(pattern, line):
                start_indices.append(index)
                break
        index += len(line)

    if not start_indices and len(lines) == 1:
        # shorthand syntax for basic, one-line GET requests
        # if selection has only one line, we can safely search for start index of first match,
        # even if it's not at start of line
        match = None
        for pattern in match_patterns:
            match = re.search(pattern, line)
            if match:
                break
        if not match:
            # no pattern at all: treat the whole line as a bare URL
            return [Selection("get('{}')".format(prepend_scheme(s.strip().strip('"').strip("'"))), 0)]
        start_indices.append(match.start())

    if es:
        # replace selection with extended selection AFTER `start_indices` have been found
        s = es

    # Pass 2: from each start offset, scan forward tracking string/comment
    # state so brackets inside quotes or `#` comments are ignored, until the
    # bracket count balances back to zero.
    sq, dq, comment, escape = False, False, False, False  # single/double-quote, comment, escape state
    end_indices = []
    for index in start_indices:
        if n and len(end_indices) >= n:
            break
        bc = 0  # bracket count

        while True:
            c = s[index]
            if c == "\n":  # new line always terminates comment
                comment = False
            if c == "\\":  # escape char skips next char, unless it's a new line
                escape = True
                index += 1
                continue
            if escape:
                escape = False
                index += 1
                continue

            if c == "'" and not dq and not comment:
                sq = not sq
            if c == '"' and not sq and not comment:
                dq = not dq
            if c == "#" and not sq and not dq:
                comment = True
            if sq or dq or comment:
                index += 1
                continue

            if c == open_bracket:
                bc += 1
            elif c == close_bracket:
                bc -= 1
            if bc == 0:
                # brackets balanced: this char closes the selection
                end_indices.append(index)
                break
            index += 1

    # make sure there are no "unclosed" selections
    assert len(start_indices) == len(end_indices)

    selections = []
    for pair in zip(start_indices, end_indices):
        selections.append(Selection(s[pair[0] : pair[1] + 1], pair[0]))
    return selections
# -*- coding: utf-8 -*-
# @Author : WangNing
# @Email : 3190193395@qq.com
# @File : db_tool.py
# @Software: PyCharm
import MySQLdb
import threading

from DBUtils.PooledDB import PooledDB

from common_utils.config_parser import DBConfigParser


class DBPool(object):
    """Lazily-initialized, process-wide MySQL connection pool.

    The PooledDB instance is created once (double-checked locking in
    `__new__`); each DBPool() object then borrows one connection/cursor
    from the shared pool.
    """

    _lock = threading.Lock()

    def __init__(self):
        # Borrow a connection from the shared pool for this instance.
        self.conn = DBPool._instance.connection()
        self.cursor = self.conn.cursor()

    def __new__(cls, *args, **kwargs):
        if not hasattr(DBPool, "_instance"):
            with DBPool._lock:
                # re-check under the lock so only one thread builds the pool
                if not hasattr(DBPool, "_instance"):
                    cls.conf = DBConfigParser().get_db_config()
                    DBPool._instance = PooledDB(MySQLdb, maxconnections=15, blocking=True,
                                                host=cls.conf.get("host"),
                                                user=cls.conf.get("user"),
                                                password=cls.conf.get("password"),
                                                db=cls.conf.get("db_name"),
                                                charset="utf8")
        # BUGFIX: object.__new__ must not be forwarded *args/**kwargs —
        # doing so raises TypeError on Python 3 when arguments are passed.
        return object.__new__(cls)

    def get_api_list(self):
        """Return all rows from `api` with status = 1."""
        sql = "select * from api where status = 1"
        self.cursor.execute(sql)
        return self.cursor.fetchall()

    def get_case_list(self, api_id):
        """Return all `api_case` rows for the given api_id as a list.

        BUGFIX: use a parameterized query instead of `%`-formatting the id
        into the SQL string, which was open to SQL injection.
        """
        sql = 'select * from api_case where api_id = %s'
        self.cursor.execute(sql, (api_id,))
        return list(self.cursor.fetchall())

    def close(self):
        """Return this instance's cursor and connection to the pool."""
        self.cursor.close()
        self.conn.close()


if __name__ == '__main__':
    pool = DBPool()
    ret = pool.get_api_list()
    print(ret)