# Code to print the prime numbers below n.
n = int(input())
t = []
for i in range(2, n):
    for j in range(2, i):
        if i % j == 0:
            break
    else:
        t.append(i)
print(t)

# To check the sums: count how many running sums of the primes are
# themselves primes below n.
z = len(t)
su = 0
c = 0
# for k in range(0, z):
#     for m in range(1, z):
#         su = su + t[k]
#         print(su, t[m])
#         if su % t[m] == 0:
#             c = c + 1
#             continue
# print(c)
for k in range(len(t)):
    su = su + t[k]
    print(su)
    if su in t:
        c = c + 1
print(c)
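# Worked example (added for illustration, not part of the original code):
# for n = 10 the primes are t = [2, 3, 5, 7]; the running sums are
# 2, 5, 10, 17, of which 2 and 5 are themselves in t, so c = 2 is printed.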
import sae
from hitbookdb import wsgi

application = sae.create_wsgi_app(wsgi.application)
from marshmallow import fields

from ..extensions import marshmallow
from ..utils.user_nested_exclude_list import USER_NESTED_FIELDS_EXCLUDES
from .user import UserSchema


class QuestionSchema(marshmallow.Schema):
    class Meta:
        fields = ('id', 'text', 'upvote_count', 'downvote_count',
                  'created', 'updated', 'user')

    id = fields.Int()
    upvote_count = fields.Int()
    downvote_count = fields.Int()
    user = fields.Nested(UserSchema, exclude=USER_NESTED_FIELDS_EXCLUDES)
""" Adventures in inheritance. """ from helpers import assert_raises # Demonstrate the strange interaction between hidden methods and subclassing. class Foo(object): def __do_something(self): return "Foo" def trigger(self): return self.__do_something() assert Foo().trigger() == "Foo" class Bar(Foo): def __do_something(self): return "Bar" assert Bar().trigger() == "Foo" class FooBar(Foo): def __do_something(self): return "FooBar" def trigger(self): return self.__do_something() assert FooBar().trigger() == "FooBar" class BarFoo(Foo): def trigger(self): return self.__do_something() assert_raises(AttributeError, lambda: BarFoo().trigger() == "Foo") # We might say then, that 'Class.method' is public, 'Class._method' is # protected, and 'Class.__method' is private. # Method Resolution Order class Maybe(): def trigger(self): return "Main" class Left(Maybe): def trigger(self): return "Left" class Right(Maybe): def trigger(self): return "Right" # Classes that inherit from multiple classes that define the same name use # the name defined on the left most inherited class. class LeftRight(Left, Right): pass assert LeftRight().trigger() == "Left" class RightLeft(Right, Left): pass assert RightLeft().trigger() == "Right" # Repeated Inheritance still follows the regular MRO. class RightLeftLeft(RightLeft, Left): pass assert RightLeftLeft().trigger() == "Right" class LeftLeft(Left, Left): pass assert LeftLeft().trigger() == "Left" # Calling len on new-style classes raises an AttributeError class LenA: pass assert_raises(AttributeError, lambda: len(LenA())) # Calling len on new-style classes raises a TypeError class LenB(object): pass assert_raises(TypeError, lambda: len(LenB())) # properties are actually classes # http://docs.python.org/2/reference/datamodel.html#implementing-descriptors class test_property(property): def __get__(self, instance, owner): return 1 class PropFoo(object): @test_property def hi(self): return 0 assert PropFoo().hi == 1 # Some types are circular assert isinstance(object, type) assert isinstance(type, object) # TODO: Adventures with super!
import csv
import random

import numpy as np


def PLA_pocket(data, target, attr_num, alpha, iter_num=200):
    """Pocket perceptron: keep the best weight vector seen so far."""
    parameter = np.zeros(attr_num)  # float weights; an integer dtype cannot hold updates
    data_len = len(data)
    mistakes = list(range(data_len))
    for _ in range(iter_num):
        if not mistakes:  # perfectly separated; nothing left to fix
            break
        pre_mistakes_cnt = len(mistakes)
        i = random.choice(mistakes)
        new_parameter = parameter + target[i] * data[i] * alpha
        new_mistakes = [k for k in range(data_len)
                        if error(data[k], new_parameter, target[k])]
        # Keep the candidate only if it makes strictly fewer mistakes.
        if len(new_mistakes) < pre_mistakes_cnt:
            parameter = new_parameter
            mistakes = new_mistakes
    # Accuracy of the kept ("pocketed") parameter, not of the last candidate.
    accuracy = (data_len - len(mistakes)) / data_len
    return parameter, accuracy


def error(data, parameter, target):
    return predict(parameter, data) != target


def predict(parameter, data):
    return np.sign(np.inner(parameter, data))


def preprocess(file_name, train=True):
    with open(file_name, "r") as f:
        attrs = ["Pclass", "Sex", "Fare", "Age"]
        reader = csv.DictReader(f)
        # Compute average age and fare to fill in missing values.
        valid_age_list = []
        valid_fare_list = []
        for row in reader:
            try:
                valid_age_list.append(int(row["Age"]))
                try:
                    fare = float(row["Fare"])
                    if fare != 0.0:
                        valid_fare_list.append(fare)
                except ValueError:
                    pass
            except ValueError:
                pass
        avg_age = int(sum(valid_age_list) / len(valid_age_list))
        avg_fare = float(sum(valid_fare_list) / len(valid_fare_list))
        # Rewind the file handle and skip the header row.
        f.seek(0)
        next(reader)
        if train:
            data_list, target_list = [], []
            for row in reader:
                data_list.append(np.array(feature_extraction(row, attrs, avg_age, avg_fare)))
                target_list.append(1 if int(row["Survived"]) == 1 else -1)
            return data_list, target_list
        else:
            data_dict = {}
            for row in reader:
                data_dict[row["PassengerId"]] = feature_extraction(row, attrs, avg_age, avg_fare)
            return data_dict


def feature_extraction(row, attrs, avg_age, avg_fare):
    """
    Number of classes:
        Sex: 2, Pclass: 3
        (a k-class categorical feature => k - 1 features)

    Number and order of features:
        Pclass: 1
        Sex:    1 -- (0) if Sex == female, (1) if Sex == male
        Fare:   1
        Age:    1
        Family: 1 (SibSp + Parch)
        bias:   1
    """
    feature = []
    for attr in attrs:
        if attr == "Pclass":
            feature.append(int(row[attr]))
        elif attr == "Sex" and row[attr] == "male":
            feature.append(1)
        elif attr == "Sex" and row[attr] == "female":
            feature.append(0)
        elif attr == "Fare":
            try:
                feature.append(float(row[attr]))
            except ValueError:
                feature.append(avg_fare)
        elif attr == "Age":
            try:
                feature.append(int(row[attr]))
            except ValueError:
                feature.append(avg_age)
    # Family relations
    feature.append(int(row["SibSp"]) + int(row["Parch"]))
    # bias
    feature.append(1)
    return feature


if __name__ == "__main__":
    attr_len = 6
    train_list, target_list = preprocess("train.csv")
    parameter, accuracy = PLA_pocket(train_list, target_list, attr_len, 0.3)
    print(accuracy)
    test_dict = preprocess("test.csv", train=False)
    predict_list = [{"PassengerId": p_id,
                     "Survived": 1 if predict(parameter, data) == 1 else 0}
                    for p_id, data in test_dict.items()]
    with open("submission.csv", "w", newline='') as f:
        header = ["PassengerId", "Survived"]
        writer = csv.DictWriter(f, fieldnames=header)
        writer.writeheader()
        writer.writerows(predict_list)
import pytest
import sys

sys.path.append('C:/Users/utilisateur/Documents/briefs/UnitTest/module')
import panier as pa


@pytest.fixture()
def panier():
    return pa.Panier()


def test_add_item_passes_where_item_is_string(panier):
    panier.add_item('a', 1, 1)
    assert len(panier.articles) == 1
    assert panier.articles[0]['item'] == 'a'


def test_add_item_fails_where_item_is_not_string(panier):
    with pytest.raises(Exception):
        panier.add_item(1, 1, 1)


def test_add_item_passes_where_nbr_items_and_price_are_numeric(panier):
    panier.add_item('a', 1, 1)
    assert len(panier.articles) == 1
    assert isinstance(panier.articles[0]['nbr_item'], int)
    assert isinstance(panier.articles[0]['price'], float)


def test_add_item_passes_where_nbr_item_is_numeric(panier):
    panier.add_item("a", 1, 1)
    assert len(panier.articles) == 1
    assert panier.articles[0]['nbr_item'] == 1


def test_add_item_fails_where_nbr_item_is_not_numeric(panier):
    with pytest.raises(Exception):
        panier.add_item("a", "b", 1)


def test_add_item_fails_where_nbr_item_is_negative(panier):
    with pytest.raises(Exception):
        panier.add_item("a", -1, 1)


def test_add_item_passes_where_price_is_numeric(panier):
    panier.add_item("a", 1, 1)
    assert len(panier.articles) == 1
    assert panier.articles[0]['price'] == 1


def test_add_item_fails_where_price_is_not_numeric(panier):
    with pytest.raises(Exception):
        panier.add_item("a", 1, "c")


def test_add_item_fails_where_price_is_negative(panier):
    # Renamed: this was a duplicate of the nbr_item test name above,
    # which pytest would silently shadow.
    with pytest.raises(Exception):
        panier.add_item("a", 1, -1)


def test_add_item_passes_add_nbr_item_to_id_item_already_exists(panier):
    panier.add_item("a", 1, 1)
    panier.add_item("b", 1, 1)
    panier.add_item("a", 1, 1)
    nbr_item_a = panier.articles[0]['nbr_item']
    nbr_item_b = panier.articles[1]['nbr_item']
    assert nbr_item_a == 2
    assert nbr_item_b == 1


def test_calculate_total_passes_where_items_exists(panier):
    panier.add_item("a", 1, 10)
    panier.add_item("b", 2, 20)
    panier.add_item("a", 2, 10)
    total = sum(p['price'] * p['nbr_item'] for p in panier.articles)
    assert total == 70


def test_calculate_total_passes_where_no_items_exists(panier):
    total = sum(p['price'] * p['nbr_item'] for p in panier.articles)
    assert total == 0


def test_display_items_return_empty_where_no_items_exists(panier):
    assert len(panier.articles) == 0


def test_display_items_return_a_list_where_items_exists(panier):
    panier.add_item("a", 1, 10)
    panier.add_item("b", 2, 20)
    assert len(panier.articles) != 0


def test_remove_item_passes_where_item_exists(panier):
    panier.add_item("a", 1, 10)
    panier.add_item("b", 2, 20)
    assert len(panier.articles) == 2
    assert panier.articles[0]['item'] == 'a'
    assert panier.articles[1]['item'] == 'b'
    panier.remove_item('a')
    assert len(panier.articles) == 1
    assert panier.articles[0]['item'] == 'b'


def test_remove_item_passes_where_item_not_exists(panier):
    panier.add_item("a", 1, 10)
    panier.add_item("b", 2, 20)
    assert len(panier.articles) == 2
    panier.remove_item('c')
    assert len(panier.articles) == 2


def test_update_quantity_passes_where_item_exists_and_quantity_valid(panier):
    panier.add_item("a", 1, 10)
    assert panier.articles[0]['nbr_item'] == 1
    panier.update_quantity('a', 2)
    assert panier.articles[0]['nbr_item'] == 2


def test_update_quantity_where_item_exists_and_quantity_not_valid(panier):
    panier.add_item("a", 1, 10)
    assert panier.articles[0]['nbr_item'] == 1
    with pytest.raises(Exception):
        panier.update_quantity('a', 'b')


def test_update_quantity_failed_where_quantity_is_negative(panier):
    panier.add_item("a", 1, 10)
    assert panier.articles[0]['nbr_item'] == 1
    with pytest.raises(Exception):
        panier.update_quantity('a', -1)


def test_update_quantity_remove_item_where_quantity_is_0(panier):
    panier.add_item("a", 1, 10)
    assert len(panier.articles) == 1
    panier.update_quantity('a', 0)
    assert len(panier.articles) == 0
import math

'''
Implementation of the left-hand sum and the trapezoid methods of
numerical integration.
'''


def leftsum(f, a, b, n):
    # f: continuous function to estimate the signed area for
    # a and b: the limits of integration (with a < b)
    # n: the number of subintervals desired
    h = (b - a) / n
    # Step size (factored out) times the summation of all the rectangles.
    print("Left Sum: ", h * sum(f(a + i * h) for i in range(n)))


def trapezoid(f, a, b, n):
    # f: continuous function to estimate the signed area for
    # a and b: the limits of integration (with a < b)
    # n: the number of subintervals desired
    h = (b - a) / n
    # Summation form of the trapezoid rule.
    ival = h * sum((f(a + i * h) + f(a + i * h + h)) / 2 for i in range(n))
    print("Trapezoid: ", ival)


# For 10^-12 error max, trap needs 2473082 steps
# print("2a")
leftsum(lambda x: math.exp(math.sin(x)), 0, 3, 100)
trapezoid(lambda x: math.exp(math.sin(x)), 0, 3, 100)

# For 10^-12 error max, trap needs 9724300 steps
# print("2b")
leftsum(lambda x: x**2 * math.log(x) + 100, 1, 1000, 100)
trapezoid(lambda x: x**2 * math.log(x) + 100, 1, 1000, 100)

# For 10^-12 error max, trap needs 14907120 steps
# The odd n keeps every sample point away from x = 0, where sin(x)/x
# would divide by zero.
# print("2c")
leftsum(lambda x: math.sin(x) / x, -10, 10, 101)
trapezoid(lambda x: math.sin(x) / x, -10, 10, 101)

# In assignment 4, the program computed ln(b).
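# A quick sanity check of the two rules above (my addition, not part of the
# original assignment): for f(x) = x**2 on [0, 1] the exact integral is 1/3.
# With n = 100 the left sum prints 0.32835 (error ~ h/2) and the trapezoid
# rule prints 0.33335 (error ~ h**2/6).
leftsum(lambda x: x**2, 0, 1, 100)
trapezoid(lambda x: x**2, 0, 1, 100)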
import logging
import os
import sys

import dotenv
import requests

dotenv.load_dotenv()

hue_api_key = os.environ.get('HUE_API_KEY')
if not hue_api_key:
    sys.exit('Please set HUE_API_KEY in your environment')

hue_bridge_ip = os.environ.get('HUE_BRIDGE_IP')
if not hue_bridge_ip:
    sys.exit('Please set HUE_BRIDGE_IP in your environment')

hue_api_url = "http://{}/api/{}".format(hue_bridge_ip, hue_api_key)

# Light IDs on the bridge.
outside = 1
kitchen = 2

# CIE xy colour coordinates.
red = {"xy": [.6, .3]}
green = {"xy": [.4, .5]}
blue = {"xy": [.22, .15]}
orange = {"xy": [.45, .4]}


def set_color(light, color):
    url = "{}/lights/{}/state".format(hue_api_url, light)
    try:
        return requests.put(url, json=color)
    except requests.exceptions.ConnectionError as e:
        logging.error('unable to talk to light: %s', e)


def outside_color(color):
    return set_color(outside, color)


def kitchen_color(color):
    return set_color(kitchen, color)
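# A minimal usage sketch (my addition): set the kitchen light to orange.
if __name__ == '__main__':
    kitchen_color(orange)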
from py4j.java_gateway import JavaGateway

gateway = JavaGateway()
lruCache = gateway.entry_point.getLruCache()
print("lru:", lruCache.toString())

lruCache.put("a", "1")
print("lru:", lruCache.toString())
lruCache.put("b", "2")
print("lru:", lruCache.toString())
lruCache.put("c", "3")
print("lru:", lruCache.toString())
lruCache.put("d", "4")
print("lru:", lruCache.toString())
#!/usr/bin/python
# -*- coding: utf-8 -*-
from PIL import Image, ImageFont, ImageDraw

# Menu labels (Russian) mapped to the PNG files they are rendered into.
left_menupics = {
    #u"ФИРМЕННЫЕ БЛЮДА": "1.png",
    #u"ХОЛОДНЫЕ БЛЮДА": "2.png",
    #u"СУПЫ": "3.png",
    #u"ГОРЯЧЕЕ ИЗ ОВОЩЕЙ": "4.png",
    #u"ГОРЯЧЕЕ ИЗ СВИНИНЫ": "5.png",
    #u"ГОВЯДИНА И БАРАНИНА": "6.png",
    #u"ГОРЯЧЕЕ ИЗ ПТИЦЫ": "7.png",
    #u"РЫБА И МОРЕПРОДУКТЫ": "8.png",
    #u"БЛЮДА В ГОРШОЧКЕ": "9.png",
    #u"БЛЮДА В ХОГО": "10.png",
    #u"БЛЮДА НА ПЛИТКЕ": "11.png",
    #u"ГАРНИРЫ": "12.png",
    u"ДЕСЕРТЫ": "13.png"
    #u"БИЗНЕС-ЛАНЧ": "14.png",
    #u"КИТАЙСКОЕ МЕНЮ": "15.png"
}

upper_menupics = {
    #u"О нас": ("onas.png", (73, 35)),
    #u"Меню": ("menu.png", (88, 35)),
    #u"Бизнес-ланч": ("business.png", (155, 35)),
    #u"Контакты": ("kontakty.png", (130, 35)),
    #u"Вакансии": ("vakansii.png", (121, 35)),
    #u"Доставка": ("dostavka.png", (160, 35)),
}

# The second font assignment deliberately overrides the first.
menufont = "bonzai.ttf"
big_font_size = 44
small_font_size = 34

menufont = "chinacyr.ttf"
big_font_size = 25
small_font_size = 17

for m in left_menupics.keys():
    text_pos = (0, 0)
    size = (360, 45)
    font_size = big_font_size

    # Black (active) variant.
    im = Image.new('RGB', size)
    draw = ImageDraw.Draw(im)
    im.putalpha(0)
    black = (0, 0, 0, 255)
    font = ImageFont.truetype(menufont, font_size)
    draw.text(text_pos, m, font=font, fill=black)
    im.save(left_menupics[m], 'PNG')

    # Grey (inactive) variant, saved with an "o" suffix.
    im = Image.new('RGB', size)
    draw = ImageDraw.Draw(im)
    im.putalpha(0)
    grey = (123, 123, 123, 255)
    font = ImageFont.truetype(menufont, font_size)
    draw.text(text_pos, m, font=font, fill=grey)
    new_name = left_menupics[m].split(".")[0] + "o." + left_menupics[m].split(".")[1]
    im.save(new_name, 'PNG')

for m in upper_menupics.keys():
    text_pos = (0, 14)
    size = upper_menupics[m][1]
    font_size = small_font_size

    # Black (active) variant.
    im = Image.new('RGB', size)
    draw = ImageDraw.Draw(im)
    im.putalpha(0)
    black = (0, 0, 0, 255)
    font = ImageFont.truetype(menufont, font_size)
    draw.text(text_pos, m, font=font, fill=black)
    im.save(upper_menupics[m][0], 'PNG')

    # Grey (inactive) variant, saved with an "o" suffix.
    im = Image.new('RGB', size)
    draw = ImageDraw.Draw(im)
    im.putalpha(0)
    grey = (123, 123, 123, 255)
    font = ImageFont.truetype(menufont, font_size)
    draw.text(text_pos, m, font=font, fill=grey)
    new_name = upper_menupics[m][0].split(".")[0] + "o." + upper_menupics[m][0].split(".")[1]
    im.save(new_name, 'PNG')
# import package_runoob.we1.runoob1
# from import we1
# import package_runoob.we1
# from . import runoob1
# import package_runoob.we1
# from . import package_runoob
# import package_runoob
# from . import custom_1
# import sys
# print(sys.path)
# import learn_class
# from . import showme

import os
print(os.path)
"""The parameter set of the MSSM.""" from typing import Any, Dict, Optional import yaslha from simsusy.abs_model import AbsModel from simsusy.mssm.abstract import AbsEWSBParameters, AbsSMParameters # noqa: F401 from simsusy.mssm.input import A, MSSMInput, S # noqa: F401 class MSSMModel(AbsModel): """The parameter set of the MSSM.""" def __init__(self, *args: Any) -> None: super().__init__(*args) self.input = None # type: Optional[MSSMInput] self.sm = None # type: Optional[AbsSMParameters] self.ewsb = None # type: Optional[AbsEWSBParameters] def write(self, filename: Optional[str] = None) -> None: """Output the model.""" self._prepare_input_parameters() super().write(filename) def _prepare_input_parameters(self) -> None: assert self.input is not None for block_name, key_max in [("VCKMIN", 4), ("UPMNSIN", 6)]: block = self.input.block(block_name) if block: assert isinstance(block, yaslha.slha.Block) for key in range(1, key_max + 1): if (v := block.get(key, default=None)) is not None: self.slha[block_name, key] = v minpar_used = { 1: False, 2: False, 3: True, 4: True, 5: False, } # type: Dict[int, bool] extpar_used = {} # type: Dict[int, bool] for sfermion in [S.QL, S.UR, S.DR, S.LL, S.ER]: block = self.input.block(sfermion.slha2_input) assert not isinstance(block, yaslha.slha.InfoBlock) for i in [1, 2, 3]: for j in [1, 2, 3]: v = block.get((i, j), default=None) if block else None if i == j: extpar_used[sfermion.extpar + i] = v is None if v is not None: self.slha[sfermion.slha2_input, i, j] = v for a_term in [A.U, A.D, A.E]: block = self.input.block(a_term.slha2_input) assert not isinstance(block, yaslha.slha.InfoBlock) for i in [1, 2, 3]: for j in [1, 2, 3]: v = block.get((i, j), default=None) if block else None if i == j == 3: extpar_used[a_term.extpar] = v is None if v is not None: self.slha[a_term.slha2_input, i, j] = v # EXTPAR block = self.input.block("EXTPAR") assert isinstance(block, yaslha.slha.Block) for key in [1, 2, 3]: v = block[key] if block else None if v is not None: self.slha["EXTPAR", key] = v else: minpar_used[2] = True for sfermion in [S.QL, S.UR, S.DR, S.LL, S.ER]: for gen in [1, 2, 3]: key = sfermion.extpar + gen if extpar_used[key]: v = block.get(key, default=None) if block else None if v is not None: self.slha["EXTPAR", key] = v else: minpar_used[1] = True for a_term in [A.U, A.D, A.E]: key = a_term.extpar if extpar_used[key]: v = block.get(key, default=None) if block else None if v is not None: self.slha["EXTPAR", key] = v else: minpar_used[5] = True ewsb_params = [] # Type: List[int] for key in [21, 22, 23, 24, 25, 26, 27]: v = block.get(key, default=None) if block else None if v is not None: ewsb_params.append(key) self.slha["EXTPAR", key] = v if key == 23: minpar_used[4] = False elif key == 25: minpar_used[3] = False if len(ewsb_params) < 2: minpar_used[1] = True # MINPAR block = self.input.block("MINPAR") if isinstance(block, yaslha.slha.Block): for key in [1, 2, 3, 4, 5]: if minpar_used[key]: v = block.get(key, default=None) if block else None if v is not None: self.slha["MINPAR", key] = v # SMINPUTS and MODSEL block = self.input.block("SMINPUTS") if isinstance(block, yaslha.slha.Block): for k, v in block.items(): if isinstance(k, int) and ( 1 <= k <= 7 or k in [8, 11, 12, 13, 14, 21, 22, 23, 24] ): self.slha["SMINPUTS", k] = v block = self.input.block("MODSEL") if isinstance(block, yaslha.slha.Block): for k, v in block.items(): if k == 1: self.slha["MODSEL", k] = v # NO CLEANING BECAUSE VERBOSE IS BETTER THAN AMBIGUOUS. 
# def clean_zero(self) -> None: # """Clean zero elements from the model to have better output.""" # for name in ["AU", "AD", "AE", "TU", "TD", "TE", "YU", "YD", "YE"]: # for head in ["", "IM"]: # if (block := self.slha.get(head + name)) is None: # continue # to_kill_block = head == "IM" # line_to_kill = [] # type: List[str] # for key, value in block.items(): # if value == 0: # # diagonal elements must exist, otherwise SDecay complains. # if head == "IM" or not key[0] == key[1]: # line_to_kill.append(key) # else: # to_kill_block = False # if to_kill_block: # del self.slha[block] # else: # for key in line_to_kill: # del block[key] # for name in ( # ["MSQ2", "MSU2", "MSD2", "MSL2", "MSE2"] # + ["USQMIX", "DSQMIX", "SELMIX", "SNUMIX", "STOPMIX", "SBOTMIX","STAUMIX"] # + ["NMIX", "UMIX", "VMIX", "VCKM", "UPMNS"] # ): # for head in ["", "IM"]: # if (block := self.slha.get(head + name)) is None: # continue # to_kill_block = head == "IM" # line_to_kill = [] # for key, value in block.items(): # if value == 0: # if head == "IM" or not key[0] == key[1]: # line_to_kill.append(key) # else: # to_kill_block = False # if to_kill_block: # del self.slha[block] # else: # for key in line_to_kill: # del block[key]
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-02-03 14:41
from __future__ import unicode_literals

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        ('challenges', '0017_added_leaderboard_table'),
    ]

    operations = [
        migrations.CreateModel(
            name='ChallengePhaseSplit',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('modified_at', models.DateTimeField(auto_now=True)),
                ('visibility', models.CharField(choices=[('1', 'host'), ('2', 'owner and host'), ('3', 'public')], default='3', max_length=1)),
                ('challenge_phase', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='challenges.ChallengePhase')),
                ('dataset_split', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='challenges.DatasetSplit')),
                ('leaderboard', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='challenges.Leaderboard')),
            ],
            options={
                'db_table': 'challenge_phase_split',
            },
        ),
    ]
import wikipedia

query = wikipedia.page("MsDhoni")
print(query.summary)
""" 剑指 Offer 18. 删除链表的节点 给定单向链表的头指针和一个要删除的节点的值,定义一个函数删除该节点。 返回删除后的链表的头节点。 """ class ListNode: def __init__(self, x): self.val = x self.next = None def deleteNode(head,val): # 其实就是删除某个节点的操作,常规链表操作。 point = ListNode("#") start = point point.next = head while point.next: if point.next.val == val: point.next = point.next.next else: point = point.next return start.next
try:
    test_error = ModuleNotFoundError()
except NameError:
    # For Python <3.6, ModuleNotFoundError does not exist.
    # https://docs.python.org/3/library/exceptions.html#ModuleNotFoundError
    class ModuleNotFoundError(ImportError):
        pass

FallbackModuleNotFoundError = ModuleNotFoundError


class ExecutionOutsideEnvironmentError(ModuleNotFoundError):
    """An error indicating that a module is missing that is essential to
    build the execution environment. Most of the time this is because the
    program is executed outside its normal environment."""
    pass


class BlockedFunctionError(LookupError):
    """An error indicating that the called function must not be called
    because it is blocked."""
    pass


class DeviceImportError(ImportError):
    """An error indicating that a `Device` file could not be imported."""
    pass


class DeviceClassNotDefined(AttributeError):
    """An error indicating that a `Device` file was found but the class name
    does not exist in this file or is not a class."""
    pass


class DeviceCreationError(RuntimeError):
    """An error indicating that the `Device` file and class are found but
    an error occurred when creating the instance."""
    pass
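# A small usage sketch (my addition): the classes above are ordinary
# exception subclasses, so callers can catch the specific failure mode or
# the built-in base class.
try:
    raise DeviceImportError("could not import the device file")
except ImportError as exc:  # DeviceImportError is an ImportError subclass
    print("device import failed:", exc)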
#!/usr/bin/python
# -*- coding: utf-8 -*-


def warn(*args, **kwargs):
    # No-op stub, presumably intended to silence warnings.
    pass


from django.shortcuts import render
from django.http import HttpResponse, JsonResponse
from django.db.models import Q

from .models import *


# Create your views here.
def error_404_view(request, exception):
    return render(request, '404.html')


def index(request):
    return render(request, 'index.html')


def about(request):
    return render(request, 'about.html')


def experience(request):
    return render(request, 'experience.html')


def my_skills(request):
    return render(request, 'my_skills.html')


def getdataset(request):
    return render(request, 'getdataset.html')
from tkinter import *

root = Tk()
c = Canvas(root, width=600, height=600, bg="white")
c.pack()

ball = c.create_oval(0, 100, 60, 140, fill='green')
bog = c.create_oval(50, 200, 200, 50, fill='yellow')


def motion():
    # Move the ball along a diamond-shaped path, one leg per call.
    print(str(c.coords(ball)[1]))
    if c.coords(ball)[2] == 60 and c.coords(ball)[1] == 100:
        while c.coords(ball)[2] != 150 and c.coords(ball)[1] != 10:
            c.move(ball, 1, -1)
            # print(str(c.coords(ball)[2]) + '\nHELL' + str(c.coords(ball)[1]))
        root.after(100, motion)
    if c.coords(ball)[2] == 150 and c.coords(ball)[1] == 10:
        while c.coords(ball)[2] != 240 and c.coords(ball)[1] != 100:
            c.move(ball, 1, 1)
            # print(str(c.coords(ball)[2]) + '\nHELL_1 ' + str(c.coords(ball)[1]))
        root.after(100, motion)
    if c.coords(ball)[2] == 240 and c.coords(ball)[1] == 100:
        while c.coords(ball)[2] != 150 and c.coords(ball)[1] != 190:
            c.move(ball, -1, 1)
            # print(str(c.coords(ball)[2]) + '\nHELL_2 ' + str(c.coords(ball)[1]))
        root.after(100, motion)
    if c.coords(ball)[2] == 150 and c.coords(ball)[1] == 190:
        while c.coords(ball)[2] != 60 and c.coords(ball)[1] != 100:
            c.move(ball, -1, -1)
            # print(str(c.coords(ball)[2]) + '\nHELL_3 ' + str(c.coords(ball)[1]))
        root.after(100, motion)


motion()
root.mainloop()
import logging
import gensim
import argparse

from gensim.models.keyedvectors import WordEmbeddingsKeyedVectors, Word2VecKeyedVectors
from gensim import utils, matutils
from six import string_types
from numpy import dot, float32 as REAL, array, ndarray, argmax

from utils import embedding_io, emb_utils
from embeddings.embedding_configs import EmbeddingConfigs

logger = logging.getLogger(__name__)


class new_Word2VecKeyedVectors(Word2VecKeyedVectors):

    def __init__(self, vector_size):
        # Fixed: the original passed Word2VecKeyedVectors as the first
        # argument to super(), which skips that class's own __init__.
        super(new_Word2VecKeyedVectors, self).__init__(vector_size=vector_size)

    def most_similar(self, positive=None, negative=None, topn=10, restrict_vocab=None, indexer=None):
        """
        Find the top-N most similar words. Positive words contribute
        positively towards the similarity, negative words negatively.

        This method computes cosine similarity between a simple mean of the
        projection weight vectors of the given words and the vectors for
        each word in the model. The method corresponds to the `word-analogy`
        and `distance` scripts in the original word2vec implementation.

        If topn is False, most_similar returns the vector of similarity scores.

        `restrict_vocab` is an optional integer which limits the range of
        vectors which are searched for most-similar values. For example,
        restrict_vocab=10000 would only check the first 10000 word vectors
        in the vocabulary order. (This may be meaningful if you've sorted
        the vocabulary by descending frequency.)

        Example::

          >>> trained_model.most_similar(positive=['woman', 'king'], negative=['man'])
          [('queen', 0.50882536), ...]
        """
        if positive is None:
            positive = []
        if negative is None:
            negative = []

        self.init_sims()

        if isinstance(positive, string_types) and not negative:
            # allow calls like most_similar('dog'), as a shorthand for most_similar(['dog'])
            positive = [positive]

        # add weights for each word, if not already present;
        # default to 1.0 for positive and -1.0 for negative words
        positive = [
            (word, 1.0) if isinstance(word, string_types + (ndarray,)) else word
            for word in positive
        ]
        negative = [
            (word, -1.0) if isinstance(word, string_types + (ndarray,)) else word
            for word in negative
        ]

        # compute the weighted average of all words
        all_words, mean = set(), []
        for word, weight in positive + negative:
            if isinstance(word, ndarray):
                mean.append(weight * word)
            else:
                mean.append(weight * self.word_vec(word, use_norm=True))
                if word in self.vocab:
                    all_words.add(self.vocab[word].index)
        if not mean:
            raise ValueError("cannot compute similarity with no input")
        mean = matutils.unitvec(array(mean).mean(axis=0)).astype(REAL)

        if indexer is not None:
            return indexer.most_similar(mean, topn)

        limited = self.syn0norm if restrict_vocab is None else self.syn0norm[:restrict_vocab]
        dists = dot(limited, mean)
        if not topn:
            return dists
        best = matutils.argsort(dists, topn=topn + len(all_words), reverse=True)
        # ignore (don't return) words from the input
        result = [(self.index2word[sim], float(dists[sim])) for sim in best if sim not in all_words]
        return result[:topn]

    def new_accuracy(self, questions, restrict_vocab=30000, most_similar=most_similar, case_insensitive=True):
        """
        Compute accuracy of the model.

        `questions` is a filename where lines are 4-tuples of words, split
        into sections by ": SECTION NAME" lines. See questions-words.txt in
        https://storage.googleapis.com/google-code-archive-source/v2/code.google.com/word2vec/source-archive.zip
        for an example.

        The accuracy is reported (=printed to log and returned as a list)
        for each section separately, plus there's one aggregate summary at
        the end.

        Use `restrict_vocab` to ignore all questions containing a word not
        in the first `restrict_vocab` words (default 30,000). This may be
        meaningful if you've sorted the vocabulary by descending frequency.
        In case `case_insensitive` is True, the first `restrict_vocab`
        words are taken first, and then case normalization is performed.

        Use `case_insensitive` to convert all words in questions and vocab
        to their uppercase form before evaluating the accuracy (default
        True). Useful in case of case-mismatch between training tokens and
        question words. In case of multiple case variants of a single word,
        the vector for the first occurrence (also the most frequent if
        vocabulary is sorted) is taken.

        This method corresponds to the `compute-accuracy` script of the
        original C word2vec.
        """
        print("INFO: Using new accuracy")
        ok_vocab = [(w, self.vocab[w]) for w in self.index2word[:restrict_vocab]]
        ok_vocab = {w.upper(): v for w, v in reversed(ok_vocab)} if case_insensitive else dict(ok_vocab)

        oov_counter, idx_cnt, is_vn_counter = 0, 0, 0
        sections, section = [], None
        for line_no, line in enumerate(utils.smart_open(questions)):
            # TODO: use level3 BLAS (=evaluate multiple questions at once), for speed
            line = utils.to_unicode(line)
            if line.startswith(': '):
                # a new section starts => store the old section
                if section:
                    sections.append(section)
                    self.log_accuracy(section)
                section = {'section': line.lstrip(': ').strip(), 'correct': [], 'incorrect': []}
            else:
                # Count the number of analogies to check.
                idx_cnt += 1
                if not section:
                    raise ValueError("missing section header before line #%i in %s" % (line_no, questions))
                try:
                    if case_insensitive:
                        a, b, c, expected = [word.upper() for word in line.split(" | ")]
                    else:
                        a, b, c, expected = [word for word in line.split(" | ")]
                    # print("Line : ", line)
                    # print("a, b, c, expected: %s, %s, %s, %s" % (a, b, c, expected))
                    # input(">>> Wait ...")
                except ValueError:
                    logger.info("SVX: ERROR skipping invalid line #%i in %s", line_no, questions)
                    print("Line : ", line)
                    input(">>> Wait ...")
                    continue
                # In case of Vietnamese, a word analogy can be a phrase.
                if " " in a or " " in b or " " in c or " " in expected:
                    is_vn_counter += 1
                else:
                    if a not in ok_vocab or b not in ok_vocab or c not in ok_vocab or expected not in ok_vocab:
                        logger.debug("SVX: skipping line #%i with OOV words: %s", line_no, line.strip())
                        oov_counter += 1
                        continue

                    original_vocab = self.vocab
                    self.vocab = ok_vocab
                    ignore = {a, b, c}  # input words to be ignored
                    predicted = None
                    # find the most likely prediction, ignoring OOV words and input words
                    sims = most_similar(self, positive=[b, c], negative=[a], topn=False, restrict_vocab=restrict_vocab)
                    self.vocab = original_vocab
                    for index in matutils.argsort(sims, reverse=True):
                        predicted = self.index2word[index].upper() if case_insensitive else self.index2word[index]
                        if predicted in ok_vocab and predicted not in ignore:
                            if predicted != expected:
                                logger.debug("%s: expected %s, predicted %s", line.strip(), expected, predicted)
                            break
                    if predicted == expected:
                        section['correct'].append((a, b, c, expected))
                    else:
                        section['incorrect'].append((a, b, c, expected))
        if section:
            # store the last section, too
            sections.append(section)
            self.log_accuracy(section)

        total = {
            'OOV/Total/VNCompound_Words': [oov_counter, idx_cnt, is_vn_counter],
            'section': 'total',
            'correct': sum((s['correct'] for s in sections), []),
            'incorrect': sum((s['incorrect'] for s in sections), []),
        }
        self.log_accuracy(total)
        sections.append(total)
        return sections


def convert_conll_format_to_normal(connl_file, out_file=None):
    """
    Read a file in CoNLL format and return it in a one-sentence-per-line
    format:
        sentences_arr: [EU rejects German call .., ...]
        tags_arr: [B-ORG O B-MIST O ..., ...]
    """
    f = open(connl_file)
    sentences = []
    sentence = ""
    for line in f:
        # print("line: ", line)
        if len(line) == 0 or line.startswith('-DOCSTART') or line[0] == "\n":
            sentences.append(sentence.rstrip())
            sentence = ""
            continue
        else:
            splits = line.split('\t')
            sentence += splits[1].rstrip() + " "
    # Handle the last sentence.
    if len(sentence) > 0:
        sentences.append(sentence)
    del sentence
    # Write the output.
    if out_file is None:
        out_file = connl_file + ".std.txt"
    writer = open(out_file, "w")
    for sen in sentences:
        writer.write(sen + "\n")
    writer.flush()
    writer.close()
    return sentences


def verify_word_analogies(file):
    """
    Verify the word analogy file.
    :param file:
    :return:
    """
    f_reader = open(file, "r")
    valid_cnt, invalid_cnt = 0, 0
    for line in f_reader:
        # print("line: ", line)
        if len(line) == 0 or line.startswith('-DOCSTART') or line[0] == "\n":
            continue
        else:
            splits = line.split('\t')
            if len(splits) != 4:
                invalid_cnt += 1
            else:
                valid_cnt += 1
    print("Valid analogy: %s, invalid analogy: %s" % (valid_cnt, invalid_cnt))


def check_oov_of_word_analogies(w2v_format_emb_file, analogy_file, is_vn=True, case_sensitive=True):
    emb_model = gensim.models.KeyedVectors.load_word2vec_format(
        w2v_format_emb_file, binary=False, unicode_errors='ignore')
    f_reader = open(analogy_file, "r")
    vocab_arr = []
    for line in f_reader:
        if not case_sensitive:
            line = line.lower()
        if line.startswith(': '):
            continue
        else:
            for word in line.split(" | "):
                # In Vietnamese, we have compound and single words.
                # if is_vn:
                #     if " " in word:
                #         print("I should not going here")
                #         single_words = word.split(" ")
                #         for single_word in single_words:
                #             vocab_arr.append(single_word)
                # For other languages.
                # else:
                vocab_arr.append(word)
    print("Before unique set: len = ", len(vocab_arr))
    unique_vocab_arr = set(vocab_arr)
    print("After unique set: len = ", len(unique_vocab_arr))
    valid_word_cnt = 0
    for word in unique_vocab_arr:
        if word in emb_model:
            valid_word_cnt += 1
    print("With Is_VN = %s, case_sensitive = %s, Valid word = %s/%s"
          % (is_vn, case_sensitive, valid_word_cnt, len(unique_vocab_arr)))


def evaluator_api(input_files, analoglist, output, embed_config=None):
    """
    :param input_files:
    :param analoglist:
    :param output:
    :param embed_config:
    :return:
    """
    if embed_config is None:
        # Initialize the default config for the embedding.
        embed_config = EmbeddingConfigs()
    local_embedding_names, local_word_embeddings = embedding_io.load_word_embeddings(
        input_files, embed_config)
    # emb_utils.print_analogy('man', 'him', 'woman', emb_words)
    local_output_str = emb_utils.eval_word_analogy_4_all_embeddings(
        analoglist, local_embedding_names, local_word_embeddings, output_file=output)
    print("OUTPUT: ", local_output_str)


if __name__ == "__main__":
    """
    Evaluates a given word embedding model.

    To use: evaluate.py path_to_model [-restrict]
    The optional restrict argument performs an evaluation using the original
    Mikolov restriction of vocabulary.
    """
    desc = "Evaluates a word embedding model"
    parser = argparse.ArgumentParser(description=desc)
    parser.add_argument("-input", required=True,
                        default="../data/embedding_dicts/ELMO_23.vec",
                        help="Input multiple word embeddings, each model separated by a `;`.")
    parser.add_argument("-analoglist", nargs="?",
                        # default="../data/embedding_analogies/vi/analogy_vn_seg.txt.std.txt",
                        default="../data/embedding_analogies/vi/solveable_analogies_vi.txt",
                        help="Input analogy file to run the word analogy evaluation.")
    parser.add_argument("-r", nargs="?", default=False,
                        help="Vocabulary restriction")
    parser.add_argument("-checkoov", nargs="?", default=False,
                        help="Check OOV percentage")
    parser.add_argument("-lang", nargs="?", default="VI",
                        help="Specify language, by default, it's Vietnamese.")
    parser.add_argument("-lowercase", nargs="?", default=True,
                        help="Lowercase all word analogies? (depends on how the emb was trained).")
    parser.add_argument("-output", nargs="?",
                        default="../data/embedding_analogies/vi/results_out.txt",
                        help="Output file of word analogy task")
    parser.add_argument("-remove_redundancy", nargs="?", default=True,
                        help="Remove redundancy in predicted words")
    print("Params: ", parser)
    args = parser.parse_args()

    embedding_config = EmbeddingConfigs()
    paths_of_models = args.input
    testset = args.analoglist
    # args.lang is a string, so compare it explicitly; a bare truthiness
    # check would always be True.
    is_vietnamese = (args.lang.upper() == "VI")
    output_file = args.output

    # Use restriction?
    restriction = None
    if args.r:
        restriction = 30000

    # Set logging definitions.
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)

    if args.checkoov:
        print("Checking OOV ...")
        check_oov_of_word_analogies(paths_of_models, testset, is_vn=is_vietnamese)
    if not args.checkoov:
        print("Evaluating embeddings on the word analogy task ...")
        if is_vietnamese:
            print(" ... for ETNLP's evaluation approach.")
            embedding_names, word_embeddings = embedding_io.load_word_embeddings(
                paths_of_models, embedding_config)
            # emb_utils.print_analogy('man', 'him', 'woman', emb_words)
            output_str = emb_utils.eval_word_analogy_4_all_embeddings(
                testset, embedding_names, word_embeddings, output_file=output_file)
            print("#" * 20)
            print(output_str)
            print("#" * 20)
        else:
            print(" ... for Mikolov et al.'s evaluation approach.")
            word_analogy_obj = new_Word2VecKeyedVectors(1024)
            # Load and evaluate.
            model = word_analogy_obj.load_word2vec_format(
                paths_of_models, binary=False, unicode_errors='ignore')
            model.accuracy = word_analogy_obj.new_accuracy
            acc = model.accuracy(testset, restrict_vocab=restriction, case_insensitive=False)
            print("Acc = ", acc)
    print("DONE")
# -*- coding: utf-8 -*-
from flask import Flask, render_template, url_for, request, flash, redirect, session
from flask_sqlalchemy import SQLAlchemy
import time
import sys

reload(sys)
sys.setdefaultencoding('utf8')

app = Flask(__name__)
app.secret_key = 'my is some_secret'
# app.config['SESSION_TYPE'] = 'filesystem'

# from sae.const import (MYSQL_HOST, MYSQL_HOST_S,
#                        MYSQL_PORT, MYSQL_USER, MYSQL_PASS, MYSQL_DB
#                        )
# app.config['SECRET_KEY'] = 'hard to guess string'
# app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://%s:%s@%s:%s/%s' % (MYSQL_USER, MYSQL_PASS, MYSQL_HOST, MYSQL_PORT, MYSQL_DB)
# app.config['SQLALCHEMY_COMMIT_ON_TEARDOWN'] = True
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://root:root@localhost:3306/dutylist2'
db = SQLAlchemy(app)


class Category(db.Model):
    __tablename__ = "du_category"
    category_id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(64), unique=True)

    def __init__(self, name):
        self.name = name

    def __repr__(self):
        return self.name


class Duty(db.Model):
    __tablename__ = "du_duty"
    duty_id = db.Column(db.Integer, primary_key=True)
    category_id = db.Column(db.Integer, unique=True)
    user_id = db.Column(db.Integer, unique=True)
    title = db.Column(db.String(64), unique=True)
    status = db.Column(db.Integer, unique=True)
    is_show = db.Column(db.Integer, unique=True)
    create_time = db.Column(db.Integer, unique=True)

    def __init__(self, category_id, user_id, title, status, is_show, create_time):
        self.category_id = category_id
        self.user_id = user_id
        self.title = title
        self.status = status
        self.is_show = is_show
        self.create_time = create_time

    def __repr__(self):
        return self.title


class User(db.Model):
    __tablename__ = "du_user"
    user_id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(64), unique=True)
    phone = db.Column(db.String(64), unique=True)
    password = db.Column(db.String(64), unique=True)
    create_time = db.Column(db.Integer, unique=True)

    def __init__(self, username, phone, password, create_time):
        self.username = username
        self.phone = phone
        self.password = password
        self.create_time = create_time

    def __repr__(self):
        return self.username


@app.route('/', methods=['GET', 'POST'])
def index():
    myname = None
    if 'user_id' in session:
        print 'user_id in session'
        myname = session['username']
    # NOTE: string-formatted SQL; safe only for trusted input.
    if request.method == 'GET' and request.args.get('category_id'):
        print 'filtering by category_id'
        category_id = request.args.get('category_id')
        sql = 'select t1.*,t2.name,t3.username from du_duty as t1 ' \
              'left join du_category as t2 on t2.category_id = t1.category_id ' \
              'left join du_user as t3 on t3.user_id = t1.user_id ' \
              'where t1.is_show = 1 and t1.category_id = %s' % category_id
    else:
        print 'in else'
        sql = 'select t1.*,t2.name,t3.username from du_duty as t1 ' \
              'left join du_category as t2 on t2.category_id = t1.category_id ' \
              'left join du_user as t3 on t3.user_id = t1.user_id ' \
              'where t1.is_show = 1'
    duty_list = db.session.execute(sql).fetchall()
    category_list = Category.query.order_by(Category.category_id).all()
    return render_template('index.html', duty_list=duty_list,
                           category_list=category_list, myname=myname)


@app.route('/logout')
def logout():
    session.pop('user_id', None)
    session.pop('username', None)
    return redirect(url_for('index'))


@app.route('/login', methods=['GET', 'POST'])
def login():
    myname = None
    if request.method == "POST":
        phone = request.form['phone']
        password = request.form['password']
        if phone or password:
            user = User.query.filter_by(phone=phone).first()
            if user is not None:
                if user.password != password:
                    flash('Password or Phone is not correct')
                    return redirect(url_for('login'))
                else:
                    session['user_id'] = user.user_id
                    session['username'] = user.username
                    return redirect(url_for('my_duty'))
            else:
                flash('Password or Phone is not correct')
                return redirect(url_for('login'))
        else:
            flash('field can not be empty')
            return redirect(url_for('login'))
    else:
        return render_template('login.html', myname=myname)


@app.route('/register', methods=['GET', 'POST'])
def register():
    myname = None
    if request.method == "POST":
        print 'request method is POST'
        username = request.form['username']
        print 'username = %s' % username
        phone = request.form['phone']
        print 'phone = %s' % phone
        password = request.form['password']
        print 'password = %s' % password
        repassword = request.form['repassword']
        print 'repassword = %s' % repassword
        if username or phone or password or repassword:
            if password != repassword:
                flash('Password and Confirm Password are not the same')
                return redirect(url_for('register'))
            res = User.query.filter_by(phone=phone).first()
            print 'res = %s' % res
            if res:
                flash('phone is already registered')
                return redirect(url_for('register'))
            data = User(username, phone, password, time.time())
            print 'data.username = %s' % data.username
            print 'data.phone = %s' % data.phone
            print 'data.password = %s' % data.password
            print 'data.time.time() = %s' % data.create_time
            print 'data.user_id = %s' % data.user_id
            # Add the object to the session.
            res = db.session.add(data)
            print 'res = %s' % res
            db.session.commit()
            print 'commit finished'
            print 'data.user_id = %s' % data.user_id
            if data.user_id:
                flash('register successfully! please login')
                return redirect(url_for('login'))
            else:
                flash('register error!')
                print 'register error!'
                return redirect(url_for('register'))
        else:
            flash('field can not be empty')
            return redirect(url_for('register'))
    else:
        return render_template('register.html', myname=myname)


@app.route('/add_duty', methods=['GET', 'POST'])
def add_duty():
    if 'user_id' not in session:
        return redirect(url_for('login'))
    else:
        myname = session['username']
    if request.method == "POST":
        title = request.form['title']
        print title
        category_id = request.form['name']
        is_show = request.form['is_show']
        status = request.form['status']
        if title and category_id:
            data = Duty(category_id, session['user_id'], title, status, is_show, time.time())
            res = db.session.add(data)
            db.session.commit()
            print data.duty_id
            if data.duty_id:
                flash('add successfully!')
                return redirect(url_for('my_duty'))
            else:
                flash('add error!')
                return redirect(url_for('add_duty'))
        else:
            flash('field can not be empty')
            return redirect(url_for('add_duty'))
    else:
        category_list = Category.query.order_by(Category.category_id).all()
        return render_template('add_duty.html', category_list=category_list)


@app.route('/my_duty', methods=['GET', 'POST'])
def my_duty():
    if 'user_id' not in session:
        return redirect(url_for('login'))
    else:
        myname = session['username']
    if request.method == 'GET' and request.args.get('category_id'):
        category_id = request.args.get('category_id')
        sql = 'select t1.*,t2.name from du_duty as t1 ' \
              'left join du_category as t2 on t2.category_id = t1.category_id ' \
              'where t1.user_id = %s and t1.category_id = %s' % (session['user_id'], category_id)
    else:
        sql = 'select t1.*,t2.name from du_duty as t1 ' \
              'left join du_category as t2 on t2.category_id = t1.category_id ' \
              'where user_id = %s' % session['user_id']
    duty_list = db.session.execute(sql).fetchall()
    category_list = Category.query.order_by(Category.category_id).all()
    return render_template('my_duty.html', duty_list=duty_list,
                           category_list=category_list, myname=myname)


@app.route('/add_category', methods=['GET', 'POST'])
def add_category():
    if 'user_id' not in session:
        return redirect(url_for('login'))
    else:
        myname = session['username']
    if request.method == 'POST':
        name = request.form['name']
        if name:
            res = Category.query.filter_by(name=name).first()
            if res:
                flash('category already exists')
                return redirect(url_for('add_category'))
            else:
                data = Category(name)
                res = db.session.add(data)
                db.session.commit()
                return redirect(url_for('my_duty'))
        else:
            flash('name can not be empty!')
            return render_template('add_category.html')
    return render_template('add_category.html', username=session['username'], myname=myname)


'''
@app.route('/search', methods=['GET', 'POST'])
def search():
    if request.method == 'GET':
        category_id = request.args.get('category_id')
        sql = 'select du_duty.*,du_category.name,du_user.username from du_duty left join du_category on du_category.category_id = du_duty.category_id left join du_user on du_user.user_id = du_duty.user_id where du_duty.is_show = 1 where du_duty.category_id= %s' % category_id
        if session['user_id']:
            sql = 'select du_duty.*,du_category.name from du_duty left join du_category on du_category.category_id = du_duty.category_id where user_id = %s and du_duty.category_id = %s ' % (session['user_id'], category_id)
        duty_list = db.session.execute(sql).fetchall()
        print duty_list
        category_list = Category.query.order_by(Category.category_id).all()
        return render_template('search.html', duty_list=duty_list, category_list=category_list)
    else:
        return redirect(url_for('index'))
'''


@app.errorhandler(404)
def page_not_found(e):
    myname = None
    if 'user_id' in session:
        myname = session['username']
    return render_template('404.html', myname=myname), 404


@app.errorhandler(500)
def internal_server_error(e):
    myname = None
    if 'user_id' in session:
        myname = session['username']
    return render_template('500.html', myname=myname), 500


if __name__ == '__main__':
    app.debug = True
    app.run()
metadata = {
    'parents': ['tek', 'mod1', 'mod2'],
}


def reset_config():
    return {'sec2': {'key1': 'val1'}}


__all__ = ['reset_config']
import os

from .ERAI_General import ERAI_General

VARS = [44.128, 45.128, 49.128, 50.128, 142.128, 143.128, 144.128, 146.128,
        147.128, 159.128, 169.128, 175.128, 176.128, 177.128, 178.128,
        179.128, 180.128, 182.128, 205.128, 208.128, 209.128, 210.128,
        211.128, 212.128, 228.128, 231.128, 232.128, 239.128, 240.128,
        243.128, 244.128]
VARS = '/'.join(map(str, VARS))

INFO = {"class":   "ei",
        "dataset": "interim",
        "expver":  "1",
        "grid":    "1.5/1.5",
        "area":    "90/0/-90/360",
        "stream":  "oper",
        "levtype": "sfc",
        "type":    "fc",
        "time":    "00:00:00/12:00:00",
        "step":    "6/12",
        "param":   VARS,
        "format":  "netcdf",
        "target":  ''}


def era_interim_download_fc_sfc(outdir, start_year=None, start_month=None,
                                email=None, delay=None):
    """
    Download all ERA-I forecast variables at surface.

    Arguments:
        outdir : Directory to save data to

    Keyword arguments:
        start_year  : Year to start looking for data
        start_month : Month to start looking for data
        email       : Email address to send error messages to
        delay       : Delay from current date to download until. Must be a
                      timedelta object. Default is to stop downloading data
                      when the month and year are within 26 weeks of the
                      program start date.
    """
    subject = 'ERA-Interim fc_sfc FAILED!'
    outdir = os.path.join(outdir, 'Forecast', 'Surface')
    inst = ERAI_General(outdir, info=INFO, subject=subject)
    return inst.download(start_year, start_month, email, delay)


if __name__ == "__main__":
    import argparse  # Library for parsing command-line arguments

    # Set the description of the script to be printed in the help doc,
    # i.e., ./script -h
    parser = argparse.ArgumentParser(description="ERA-Interim Forecast Surface Download")
    ### Data storage keywords; https://software.ecmwf.int/wiki/display/UDOC/Data+storage+keywords
    parser.add_argument("outdir", type=str, help="Top level directory for output")
    parser.add_argument("-y", "--year", type=int, default=1979, help="specifies start year")
    parser.add_argument("-m", "--month", type=int, default=1, help="specifies start month")
    parser.add_argument("-e", "--email", type=str, help="email address to send failed message to")
    args = parser.parse_args()
    status = era_interim_download_fc_sfc(args.outdir, args.year, args.month, args.email)
    exit(status)  # Exit status zero (0) on success
"""10-fold validation""" import os import shutil import codecs lines = [] with codecs.open('train_origin.csv', 'r', 'utf8') as reader: header = reader.readline() for line in reader: lines.append(line) for i in xrange(10): print('Round: ' + str(i)) with codecs.open('train.csv', 'w', 'utf8') as train_writer: train_writer.write(header) with codecs.open('test.csv', 'w', 'utf8') as test_writer: for j in xrange(len(lines)): if j % 10 == i: test_writer.write(','.join(lines[j].split('\t')[:2]) + '\n') else: train_writer.write(lines[j]) print(' 000_words') os.system('python 000_words.py >> logs') print(' 001_cut') os.system('python 001_cut.py >> logs 2>&1') print(' 003_classify') os.system('python 003_classify.py >> logs') print(' 004_predict') os.system('python 004_predict.py >> logs') print(' 005_near') os.system('python 005_near.py >> logs') shutil.copy('test_mapped.csv', 'test_mapped_' + str(i) + '.csv')
import time, pytest import sys,os sys.path.insert(1,os.path.abspath(os.path.join(os.path.dirname( __file__ ),'..','..','lib'))) from clsCommon import Common import clsTestService from localSettings import * import localSettings from utilityTestFunc import * import enums class Test: #================================================================================================================================ # @Author: Michal Zomper # Test Name : My Media - Sort media # Test description: # upload sevaral entries and add them likes / comments. # Sort My Media by: # 1. Most Recent - The entries' order should be from the last uploaded video to the first one. # 2. Alphabetical - The entries' order should be alphabetical # 3. Likes (enable via the Admin page) - The entries' order should be descending by likes' number # 4. Comments - The entries' order should be descending by comments' number #================================================================================================================================ testNum = "668" supported_platforms = clsTestService.updatePlatforms(testNum) status = "Pass" timeout_accured = "False" driver = None common = None # Test variables entryName1 = None entryName2 = None entryName3 = None entryName4 = None description = "Description" tags = "Tags," filePath = localSettings.LOCAL_SETTINGS_MEDIA_PATH + r'\images\qrcode_middle_4.png' userName1 = "Automation_User_1" userPass1 = "Kaltura1!" userName2 = "Automation_User_2" userPass2 = "Kaltura1!" comments = ["Comment 1", "Comment 2", "Comment 3"] categoryName = [("Apps Automation Category")] #run test as different instances on all the supported platforms @pytest.fixture(scope='module',params=supported_platforms) def driverFix(self,request): return request.param def test_01(self,driverFix,env): #write to log we started the test logStartTest(self,driverFix) try: ########################### TEST SETUP ########################### #capture test start time self.startTime = time.time() #initialize all the basic vars and start playing self,self.driver = clsTestService.initializeAndLoginAsUser(self, driverFix) self.common = Common(self.driver) self.entryName1 = clsTestService.addGuidToString("My Media - Sort A", self.testNum) self.entryName2 = clsTestService.addGuidToString("My Media - Sort B", self.testNum) self.entryName3 = clsTestService.addGuidToString("My Media - Sort C", self.testNum) self.entryName4 = clsTestService.addGuidToString("My Media - Sort D", self.testNum) ##################### TEST STEPS - MAIN FLOW ##################### writeToLog("INFO","Step 1: Going to enable like module") if self.common.admin.enablelike(True) == False: self.status = "Fail" writeToLog("INFO","Step 1: FAILED to enable like module") return writeToLog("INFO","Step 2: Going navigate to home page") if self.common.home.navigateToHomePage(forceNavigate=True) == False: self.status = "Fail" writeToLog("INFO","Step 2: FAILED navigate to home page") return for i in range(3,7): writeToLog("INFO","Step " + str(i) + ": Going to upload new entry '" + eval('self.entryName'+str(i-2))) if self.common.upload.uploadEntry(self.filePath, eval('self.entryName'+str(i-2)), self.description, self.tags) == False: self.status = "Fail" writeToLog("INFO","Step " + str(i) + ": FAILED to upload new entry " + eval('self.entryName'+str(i-2))) return writeToLog("INFO","Step 7: Going to publish entries") if self.common.myMedia.publishEntriesFromMyMedia([self.entryName2, self.entryName4], self.categoryName, "", disclaimer=False) == False: self.status = "Fail" 
writeToLog("INFO","Step 7: FAILED to publish entries to category: " + self.categoryName[0]) return writeToLog("INFO","Step 8: Going navigate to entry '" + self.entryName1 + "'") if self.common.entryPage.navigateToEntry(self.entryName1, enums.Location.MY_MEDIA) == False: self.status = "Fail" writeToLog("INFO","Step 8: FAILED navigate to entry: " + self.entryName1) return writeToLog("INFO","Step 9: Going to like entry: " + self.entryName1) if self.common.entryPage.LikeUnlikeEntry(True) == False: self.status = "Fail" writeToLog("INFO","Step 9: FAILED to like entry: " + self.entryName1) return sleep(2) writeToLog("INFO","Step 10: Going to add comments to entry: " + self.entryName1) if self.common.entryPage.addComments(["Comment 1", "Comment 2"]) == False: self.status = "Fail" writeToLog("INFO","Step 10: FAILED to add comments to entry: " + self.entryName1) return writeToLog("INFO","Step 11: Going navigate to entry: "+ self.entryName2) if self.common.entryPage.navigateToEntry(self.entryName2, enums.Location.MY_MEDIA) == False: self.status = "Fail" writeToLog("INFO","Step 11: FAILED navigate to entry: " + self.entryName2) return writeToLog("INFO","Step 12: Going to like entry: " + self.entryName2) if self.common.entryPage.LikeUnlikeEntry(True) == False: self.status = "Fail" writeToLog("INFO","Step 12: FAILED to like entry: " + self.entryName2) return writeToLog("INFO","Step 13: Going navigate to entry: "+ self.entryName3) if self.common.entryPage.navigateToEntry(self.entryName3, enums.Location.MY_MEDIA) == False: self.status = "Fail" writeToLog("INFO","Step 13: FAILED navigate to entry: " + self.entryName3) return sleep(2) writeToLog("INFO","Step 14: Going to add comments to entry: " + self.entryName3) if self.common.entryPage.addComments(["Comment 1", "Comment 2", "Comment 3"]) == False: self.status = "Fail" writeToLog("INFO","Step 14: FAILED to add comments to entry: " + self.entryName3) return writeToLog("INFO","Step 15: Going navigate to entry: "+ self.entryName4) if self.common.entryPage.navigateToEntry(self.entryName4, enums.Location.MY_MEDIA) == False: self.status = "Fail" writeToLog("INFO","Step 15: FAILED navigate to entry: " + self.entryName4) return writeToLog("INFO","Step 16: Going to like entry: " + self.entryName4) if self.common.entryPage.LikeUnlikeEntry(True) == False: self.status = "Fail" writeToLog("INFO","Step 16: FAILED to like entry: " + self.entryName4) return sleep(2) writeToLog("INFO","Step 17: Going to add comment to entry: " + self.entryName4) if self.common.entryPage.addComment("Comment 1") == False: self.status = "Fail" writeToLog("INFO","Step 17: FAILED to add comment to entry: " + self.entryName4) return sleep(3) writeToLog("INFO","Step 18: Going to logout from main user") if self.common.login.logOutOfKMS() == False: self.status = "Fail" writeToLog("INFO","Step 18: FAILED to logout from main user") return writeToLog("INFO","Step 19: Going to login with user " + self.userName1) if self.common.login.loginToKMS(self.userName1, self.userPass1) == False: self.status = "Fail" writeToLog("INFO","Step 19: FAILED to login with " + self.userName1) return writeToLog("INFO","Step 20: Going navigate to entry: "+ self.entryName2) if self.common.entryPage.navigateToEntry(self.entryName2, enums.Location.CATEGORY_PAGE, self.categoryName[0]) == False: self.status = "Fail" writeToLog("INFO","Step 20: FAILED navigate to entry: " + self.entryName2) return writeToLog("INFO","Step 21: Going to like entry: " + self.entryName2) if self.common.entryPage.LikeUnlikeEntry(True) == False: 
self.status = "Fail" writeToLog("INFO","Step 21: FAILED to like entry: " + self.entryName2) return writeToLog("INFO","Step 22: Going navigate to entry: "+ self.entryName4) if self.common.entryPage.navigateToEntry(self.entryName4, enums.Location.CATEGORY_PAGE, self.categoryName[0]) == False: self.status = "Fail" writeToLog("INFO","Step 22: FAILED navigate to entry: " + self.entryName4) return writeToLog("INFO","Step 23: Going to like entry: " + self.entryName4) if self.common.entryPage.LikeUnlikeEntry(True) == False: self.status = "Fail" writeToLog("INFO","Step 23: FAILED to like entry: " + self.entryName4) return sleep(3) writeToLog("INFO","Step 24: Going to logout from : " + self.userName1) if self.common.login.logOutOfKMS() == False: self.status = "Fail" writeToLog("INFO","Step 24: FAILED to logout from: " + self.userName1) return writeToLog("INFO","Step 25: Going to login with user " + self.userName2) if self.common.login.loginToKMS(self.userName2, self.userPass2) == False: self.status = "Fail" writeToLog("INFO","Step 25: FAILED to login with " + self.userName2) return writeToLog("INFO","Step 26: Going navigate to entry: "+ self.entryName2) if self.common.entryPage.navigateToEntry(self.entryName2, enums.Location.CATEGORY_PAGE, self.categoryName[0]) == False: self.status = "Fail" writeToLog("INFO","Step 26: FAILED navigate to entry: " + self.entryName2) return writeToLog("INFO","Step 27: Going to like entry: " + self.entryName2) if self.common.entryPage.LikeUnlikeEntry(True) == False: self.status = "Fail" writeToLog("INFO","Step 27: FAILED to like entry: " + self.entryName2) return sleep(3) writeToLog("INFO","Step 28: Going to logout from : " + self.userName2) if self.common.login.logOutOfKMS() == False: self.status = "Fail" writeToLog("INFO","Step 28: FAILED to logout from: " + self.userName2) return writeToLog("INFO","Step 29: Going to login with main user") if self.common.loginAsUser() == False: self.status = "Fail" writeToLog("INFO","Step 29: FAILED to login with main user") return writeToLog("INFO","Step 30: Going navigate to my media") if self.common.myMedia.navigateToMyMedia() == False: self.status = "Fail" writeToLog("INFO","Step 30: FAILED navigate to my media") return writeToLog("INFO","Step 31: Going to sort my media by: Alphabetical") if self.common.myMedia.verifySortInMyMedia(enums.SortBy.ALPHABETICAL, (self.entryName1, self.entryName2, self.entryName3, self.entryName4)) == False: self.status = "Fail" writeToLog("INFO","Step 31: FAILED to sort my media by Alphabetical") return sleep(1) if self.common.isElasticSearchOnPage() == True: writeToLog("INFO","Step 32: Going to sort my media by: Most recent") if self.common.myMedia.verifySortInMyMedia(enums.SortBy.CREATION_DATE_DESC, (self.entryName4, self.entryName3, self.entryName2, self.entryName1)) == False: self.status = "Fail" writeToLog("INFO","Step 32: FAILED to sort my media by Most recent") return else: writeToLog("INFO","Step 32: Going to sort my media by: Most recent") if self.common.myMedia.verifySortInMyMedia(enums.SortBy.MOST_RECENT , (self.entryName4, self.entryName3, self.entryName2, self.entryName1)) == False: self.status = "Fail" writeToLog("INFO","Step 32: FAILED to sort my media by Most recent") return sleep(1) writeToLog("INFO","Step 33: Going to sort my media by: comments") if self.common.myMedia.verifySortInMyMedia(enums.SortBy.COMMENTS, (self.entryName3, self.entryName1, self.entryName4, self.entryName2)) == False: self.status = "Fail" writeToLog("INFO","Step 33: FAILED to sort my media by comments") return 
sleep(1) writeToLog("INFO","Step 34: Going to sort my media by: Likes") if self.common.myMedia.verifySortInMyMedia(enums.SortBy.LIKES, (self.entryName2, self.entryName4, self.entryName1, self.entryName3)) == False: self.status = "Fail" writeToLog("INFO","Step 34: FAILED to sort my media by Likes") return ################################################################## writeToLog("INFO","TEST PASSED: 'My Media - Sort' was done successfully") # if an exception happened we need to handle it and fail the test except Exception as inst: self.status = clsTestService.handleException(self,inst,self.startTime) ########################### TEST TEARDOWN ########################### def teardown_method(self,method): try: self.common.handleTestFail(self.status) writeToLog("INFO","**************** Starting: teardown_method ****************") if localSettings.LOCAL_SETTINGS_USERNAME_AFTER_LOGIN != "QA APPLICATION": self.common.login.logOutOfKMS() self.common.loginAsUser() self.common.myMedia.deleteEntriesFromMyMedia([self.entryName1, self.entryName2, self.entryName3, self.entryName4]) writeToLog("INFO","**************** Ended: teardown_method *******************") except: pass clsTestService.basicTearDown(self) #write to log we finished the test logFinishedTest(self,self.startTime) assert (self.status == "Pass") pytest.main('test_' + testNum + '.py --tb=line')
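# The step pattern in the test above repeats "log, call, check == False, mark
# Fail, return" dozens of times. A minimal sketch of a helper that factors that
# out, assuming the same writeToLog() function and a test object with a .status
# attribute; run_step() and its signature are hypothetical, not part of the
# original suite.
def run_step(test, step_num, description, action, *args, **kwargs):
    """Run one test step; on failure, mark the test failed and return False."""
    writeToLog("INFO", "Step %d: Going to %s" % (step_num, description))
    if action(*args, **kwargs) == False:
        test.status = "Fail"
        writeToLog("INFO", "Step %d: FAILED to %s" % (step_num, description))
        return False
    return True

# Hypothetical usage, replacing the Step 9 block in the test body above:
# if not run_step(self, 9, "like entry: " + self.entryName1,
#                 self.common.entryPage.LikeUnlikeEntry, True):
#     return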
import dash_bootstrap_components as dbc
from dash import html

buttons = html.Div(
    [
        dbc.Button("Large button", size="lg", className="me-1"),
        dbc.Button("Regular button", className="me-1"),
        dbc.Button("Small button", size="sm"),
    ]
)
# Write a program to encrypt a message.
# That way you can write a message and hand it to a colleague,
# and even if someone intercepts it on the way, they will not understand it.
# To encrypt, replace each character with a number, following the table below:
# A a -> 01
# B b -> 02
# C c -> 03
# D d -> 04
# ...
# Z z -> 26
# " " -> 27
# 0   -> 28
# ...
# 9   -> 37
# How to call it from the console:
# python ex_09_criptografar.py 'cifrar' 'teste'
# python ex_09_criptografar.py 'decifrar' '010203040506070809'
import sys

alfabeto = {
    'A': '01', 'B': '02', 'C': '03', 'D': '04', 'E': '05', 'F': '06',
    'G': '07', 'H': '08', 'I': '09', 'J': '10', 'K': '11', 'L': '12',
    'M': '13', 'N': '14', 'O': '15', 'P': '16', 'Q': '17', 'R': '18',
    'S': '19', 'T': '20', 'U': '21', 'V': '22', 'W': '23', 'X': '24',
    'Y': '25', 'Z': '26', ' ': '27', '0': '28', '1': '29', '2': '30',
    '3': '31', '4': '32', '5': '33', '6': '34', '7': '35', '8': '36',
    '9': '37', '!': '38', '?': '39', ',': '40', '.': '41',
}


def busca_chave_por_valor(dicionario, valor_procurado):
    # Return the key whose value matches valor_procurado, or '' if absent
    for chave, valor in dicionario.items():
        if valor == valor_procurado:
            return chave
    return ''


def cifrar(texto):
    retorno = ''
    texto = texto.upper()
    for letra in texto:
        retorno += alfabeto.get(letra, '**')
    return retorno


def decifrar(texto_cifrado):
    retorno = ''
    lista_texto = []
    # Walk the cipher text two digits at a time
    for (char1, char2) in zip(texto_cifrado[0::2], texto_cifrado[1::2]):
        lista_texto.append('{}{}'.format(char1, char2))
    for numero in lista_texto:
        retorno += busca_chave_por_valor(alfabeto, numero)
    return retorno


if __name__ == '__main__':
    tipo = sys.argv[1]
    texto = sys.argv[2]
    if tipo == 'cifrar':
        print(cifrar(texto))
    elif tipo == 'decifrar':
        print(decifrar(texto))
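# A quick round-trip check for the cipher above: decrypting an encrypted message
# should give back the original text in upper case, since the table is
# case-insensitive. This reuses cifrar()/decifrar() exactly as defined above.
mensagem = 'teste 123'
cifrada = cifrar(mensagem)
assert cifrada == '200519200527293031'   # t=20 e=05 s=19 t=20 e=05 ' '=27 1=29 2=30 3=31
assert decifrar(cifrada) == mensagem.upper()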
# -*- coding: utf-8 -*-
from odoo import models, fields


class adquirentes(models.Model):
    _name = 'gestion_pic.adquirentes'

    idAdquirente = fields.Char('Id Adquirente')
    adquirente = fields.Char('Adquirente')
import getpass
import os

from decouple import AutoConfig

config = AutoConfig(os.curdir)


def _current_user():
    return getpass.getuser()


GITHUB_ACCESS_TOKEN = config("GITHUB_ACCESS_TOKEN")

KUMA_REPO_NAME = config("DEPLOYER_KUMA_REPO_NAME", "mozilla/kuma")

# about to change!
DEFAULT_MASTER_BRANCH = config("DEPLOYER_DEFAULT_MASTER_BRANCH", "master")

DEFAULT_UPSTREAM_NAME = config("DEPLOYER_DEFAULT_UPSTREAM_NAME", "origin")
DEFAULT_SUBMODULES_UPSTREAM_NAME = config(
    "DEPLOYER_DEFAULT_SUBMODULES_UPSTREAM_NAME", "origin"
)
DEFAULT_YOUR_REMOTE_NAME = config("DEPLOYER_DEFAULT_YOUR_REMOTE_NAME", _current_user())

WHATSDEPLOYED_URL = config(
    "DEPLOYER_WHATSDEPLOYED_URL", "https://whatsdeployed.io/s/HC0/mozilla/kuma"
)

STAGE_PUSH_BRANCH = config("DEPLOYER_STAGE_PUSH_BRANCH", "stage-push")
STAGE_INTEGRATIONTEST_BRANCH = config(
    "DEPLOYER_STAGE_INTEGRATIONTEST_BRANCH", "stage-integration-tests"
)
PROD_PUSH_BRANCH = config("DEPLOYER_PROD_PUSH_BRANCH", "prod-push")
STANDBY_PUSH_BRANCH = config("DEPLOYER_STANDBY_PUSH_BRANCH", "standby-push")
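# Every setting above can be overridden without code changes: python-decouple's
# AutoConfig reads the process environment first, then any .env/settings file it
# finds searching upward from the given directory, then the default passed as
# the second argument. A hypothetical .env for a fork (values are illustrative,
# not part of the original config):
#
#   GITHUB_ACCESS_TOKEN=ghp_exampletoken
#   DEPLOYER_KUMA_REPO_NAME=myfork/kuma
#   DEPLOYER_DEFAULT_YOUR_REMOTE_NAME=myfork
#
# With nothing set, only GITHUB_ACCESS_TOKEN is required; everything else falls
# back to its default.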
import unittest

from katas.kyu_5.ookkk_ok_o_ook_ok_ooo import okkOokOo


class OKTestCase(unittest.TestCase):
    def test_equals(self):
        self.assertEqual(okkOokOo('Ok, Ook, Ooo!'), 'H')

    def test_equals_2(self):
        self.assertEqual(
            okkOokOo('Ok, Ook, Ooo? Okk, Ook, Ok? Okk, Okk, Oo? '
                     'Okk, Okk, Oo? Okk, Okkkk!'),
            'Hello')

    def test_equals_3(self):
        self.assertEqual(
            okkOokOo('Ok, Ok, Okkk? Okk, Okkkk? Okkk, Ook, O? '
                     'Okk, Okk, Oo? Okk, Ook, Oo? Ook, Ooook!'),
            'World!')
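# A sketch of one way to implement okkOokOo, inferred from the test cases above
# (my reading of the kata, not the author's solution): each sentence, terminated
# by '!' or '?', encodes one character; within it every 'o'/'O' is a 0 bit and
# every 'k'/'K' is a 1 bit, read left to right as a binary code point.
def okkOokOo(s):
    result = ''
    for sentence in s.replace('?', '!').split('!'):
        bits = ''.join('1' if c in 'kK' else '0' for c in sentence if c.isalpha())
        if bits:
            result += chr(int(bits, 2))
    return result

# e.g. okkOokOo('Ok, Ook, Ooo!') reads O,k,O,o,k,O,o,o -> 0b01001000 = 72 -> 'H'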
import numpy as np
import math as m
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d


def f(step_range):
    '''the function f(x) = sqrt(x) + cos(x); returns f(x) for each x in step_range'''
    data = []
    for step in step_range:
        y = m.sqrt(step) + m.cos(step)
        data.append(y)
    return data


def analytic_derivative_f(step_range):
    '''the analytic derivative of f(x): f'(x) = 1/2*x^(-1/2) - sin(x)'''
    data = []
    for step in step_range:
        if step == 0:
            # f'(x) is unbounded as x -> 0+, so leave a gap in the plot
            y = float('nan')
        else:
            first_term = 1 / 2 * step ** (-1 / 2)
            second_term = -1 * m.sin(step)
            y = first_term + second_term
        data.append(y)
    return data


if __name__ == "__main__":
    coarse_step = 0.8
    coarse_step_range = np.arange(0, 4 * m.pi, coarse_step)
    fine_step = 0.01
    fine_step_range = np.arange(0, 4 * m.pi, fine_step)

    # calculate f(x) coarsely and finely
    true_function_coarse = f(coarse_step_range)
    true_function_fine = f(fine_step_range)

    # calculate the analytic derivative coarsely and finely
    true_analytic_derivative_coarse = analytic_derivative_f(coarse_step_range)
    true_analytic_derivative_fine = analytic_derivative_f(fine_step_range)

    # interpolate the coarse samples
    sparse_function_linear_interpolation = interp1d(coarse_step_range, true_function_coarse)
    sparse_function_cubic_interpolation = interp1d(coarse_step_range, true_function_coarse, kind='cubic')

    # plot the true function and its analytic derivative
    plt.plot(fine_step_range, true_function_fine, color='black')
    plt.plot(fine_step_range, true_analytic_derivative_fine, color='black', linestyle='dashed')

    # plot interpolation
    # plt.plot(fine_step_range, sparse_function_cubic_interpolation(fine_step_range), color='magenta')

    plt.legend(['f(x)', "f'(x)"], loc='best')
    plt.show()
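# The derivative can also be estimated numerically and compared against the
# analytic curve in the script above. A minimal central-difference sketch,
# assuming the same imports; numerical_derivative() is my name, not from the
# source. With grid spacing h, f'(x) ~ (f(x + h) - f(x - h)) / (2h).
def numerical_derivative(step_range, h=1e-5):
    data = []
    for x in step_range:
        if x - h < 0:
            data.append(float('nan'))  # sqrt is undefined left of 0
        else:
            data.append((m.sqrt(x + h) + m.cos(x + h)
                         - m.sqrt(x - h) - m.cos(x - h)) / (2 * h))
    return data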
import time

import RPi.GPIO as GPIO

from utilities import *
from DiskControl import *

old_distance = 0

if __name__ == '__main__':
    try:
        while True:
            # Get distance value from the ultrasonic sensor
            new_distance = get_distance()
            # Readings beyond the sensor range (~400 cm) are noise; reuse the last value
            if new_distance > 400:
                new_distance = old_distance
            # Smooth the reading twice: a plain average, then a 10/90 weighted average
            new_distance = (new_distance + old_distance) / 2
            weighted_distance = round((new_distance * 0.1 + old_distance * 0.9), 2)
            old_distance = weighted_distance
            time.sleep(0.5)

            # Normalize it to feed into the fuzzy logic system
            norm_dist = custom_normalize(weighted_distance, [0, 400], [0, 1])
            # Feed into the FLS
            fuzz_val_mot = get_fuzzy_value(norm_dist)
            # Normalize output values to match up with the PWM range
            norm_speed_mot = custom_normalize(fuzz_val_mot, [0, 1], [0, 100])
            # Move motors according to the fuzzy output
            move_motor(1, norm_speed_mot)
            time.sleep(0.01)

    # Reset by pressing CTRL + C
    except KeyboardInterrupt:
        print("Process has been stopped by User")
        GPIO.cleanup()
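# custom_normalize() comes from the local utilities module, which is not shown.
# A plausible sketch, assuming it is a plain linear rescale from one interval
# onto another; the name and argument order match the calls above, but the body
# is a guess:
def custom_normalize(value, src, dst):
    """Linearly map value from the src=[lo, hi] interval onto dst=[lo, hi]."""
    src_lo, src_hi = src
    dst_lo, dst_hi = dst
    return dst_lo + (value - src_lo) * (dst_hi - dst_lo) / (src_hi - src_lo)

# e.g. custom_normalize(200, [0, 400], [0, 1]) == 0.5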
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import QDialog
from PyQt5.QtWidgets import QFormLayout
from PyQt5.QtWidgets import QDialogButtonBox
from PyQt5.QtWidgets import QLabel
from PyQt5.QtWidgets import QLineEdit

from Ui_WorldGenDialog import Ui_WorldGenDialog
import anthill


class WorldGeneratorParamsDialog(QDialog):
    def __init__(self, parent=None):
        super(WorldGeneratorParamsDialog, self).__init__(parent)
        self.ui = Ui_WorldGenDialog()
        self.ui.setupUi(self)
        self.prepareDefaultParams()

    def prepareDefaultParams(self):
        self.obstaclesParams = anthill.ObstaclesParams()
        self.foodsParams = anthill.FoodsParams()
        self.anthillParams = anthill.AnthillParams()
        self.antsParams = anthill.AntsParams()
        self.pheromoneParams = anthill.PheromoneParams()
        self.worldWidth = 200
        self.worldHeight = 200

    def processResults(self):
        self.pheromoneParams.applyCoefficients(self.ui.toFoodCoef.value(),
                                               self.ui.fromFoodCoef.value(),
                                               self.ui.anthillCoef.value())
        self.obstaclesParams.applyObstacleFrequency(self.ui.obstacleFreq.value())
        self.foodsParams.applyFoodGenerosity(self.ui.foodGenerosity.value())
        self.antsParams.applyNumAnts(self.ui.numAnts.value())
        self.antsParams.applyRatioScouts(self.ui.percentScouts.value())
        self.worldWidth = self.ui.worldWidth.value()
        self.worldHeight = self.ui.worldHeight.value()
""" Use a stack data structure to reverse a string Example: "Hello" -> "olleH" """ from stack import Stack def reverse_string(input_str): # Loop through the string and push character by character onto stack stack = Stack() for i in range(len(input_str)): stack.push(input_str[i]) rev_str = "" while not stack.is_empty(): rev_str += stack.pop() return rev_str input_str = "Hello" print(reverse_string(input_str))
from django.db.models import Q from django.shortcuts import render from User.models import UserExtended from django.contrib.auth import (authenticate, login) from rest_framework.response import Response from rest_framework.filters import (SearchFilter, OrderingFilter) from rest_framework.status import (HTTP_200_OK, HTTP_201_CREATED, HTTP_400_BAD_REQUEST, HTTP_404_NOT_FOUND, HTTP_204_NO_CONTENT) from rest_framework.views import APIView from rest_framework.generics import (CreateAPIView , ListAPIView, RetrieveAPIView, RetrieveUpdateAPIView, DestroyAPIView) from User.serializers import (UserExtendedListSerializer, UserExtendedDetailSerializer, UserExtendedCreateUpdateSerializer, UserCreateSerializer, UserLoginSerializer) from rest_framework.permissions import (AllowAny, IsAuthenticated, IsAuthenticatedOrReadOnly, IsAdminUser) from User.permissions import IsOwner from django.contrib.auth import get_user_model User = get_user_model() # Create your views here. class UserExtendedListAPIView(ListAPIView): #queryset = UserExtended.objects.filter(is_active=True) serializer_class = UserExtendedListSerializer filter_backends = [SearchFilter, OrderingFilter] search_fields = [ 'UserExtended__user'] def get_queryset(self, *args, **kwargs): queryset_list = UserExtended.objects.filter(is_active=True) query = self.request.GET.get('q') print(query) if query: queryset_list = queryset_list.filter( Q(first_name__icontains = query)| Q(mobile__icontains = query)| Q(email__icontains = query)| Q(user__icontains = query) ).distinct() return queryset_list class UserExtendedRetrieveAPIView(RetrieveAPIView): queryset = UserExtended.objects.all() serializer_class = UserExtendedDetailSerializer # permission_classes = [IsAuthenticated] class UserExtendedUpdateAPIView(RetrieveUpdateAPIView): queryset = UserExtended.objects.all() serializer_class = UserExtendedCreateUpdateSerializer # permission_classes = [IsOwner] class UserExtendedCreateAPIView(CreateAPIView): queryset = UserExtended.objects.all() serializer_class = UserExtendedCreateUpdateSerializer # permission_classes = [IsAuthenticated] def perform_create(self, serializer): serializer.save(user = self.request.user) class UserExtendedDeleteAPIView(DestroyAPIView): # permission_classes = [IsAuthenticated] queryset = UserExtended.objects.all() serializer_class = UserExtendedDetailSerializer def perform_destroy(self, instance): instance.user.delete() instance.delete() class UserCreateAPIView(CreateAPIView): permission_classes = [AllowAny] queryset = User.objects.all() # serializer_class = UserCreateSerializer def post(self, request, format=None): serializer = UserCreateSerializer (data=request.data) print(serializer) if serializer.is_valid(): serializer.save() print(serializer.data) return Response({'headers':{'Access-Control-Allow-Headers':'http://localhost:8080', }, 'data':serializer.data,} ,status=HTTP_201_CREATED) return Response(serializer.errors, status=HTTP_400_BAD_REQUEST) class UserLoginAPIView(APIView): permission_classes = [AllowAny] queryset = User.objects.all() serializer_class = UserLoginSerializer def post(self, request, format=None): print(request.data) print(request.data['username']) username = request.data['username'] password = request.data['password'] user = authenticate(username=username, password=password) if user is not None: if user.is_active: login(request, user) return Response({'username':user.username, 'pk':user.id, 'user': user.user.id}) return Response(status=HTTP_404_NOT_FOUND) return Response(status=HTTP_400_BAD_REQUEST)
#Node of a Singly Linked List:
class Node:
    #constructor
    def __init__(self, initdata):
        self.data = initdata
        self.next = None

    def getData(self):
        return self.data

    def getNext(self):
        return self.next

    def setData(self, newdata):
        self.data = newdata

    def setNext(self, newnext):
        self.next = newnext


#Linked List Class
class UnorderedList:
    def __init__(self):
        self.head = None

    def isEmpty(self):
        return self.head == None

    def add(self, item):
        # insert at the head of the list
        temp = Node(item)
        temp.setNext(self.head)
        self.head = temp

    def size(self):
        current = self.head
        count = 0
        while current != None:
            count = count + 1
            current = current.getNext()
        return count

    def search(self, item):
        current = self.head
        found = False
        while current != None and not found:
            if current.getData() == item:
                found = True
            else:
                current = current.getNext()
        return found

    def remove(self, item):
        current = self.head
        previous = None
        found = False
        # guard against item not being present, which would otherwise loop forever
        while current != None and not found:
            if current.getData() == item:
                found = True
            else:
                previous = current
                current = current.getNext()
        if not found:
            return
        if previous == None:
            self.head = current.getNext()
        else:
            previous.setNext(current.getNext())

    def insertBegin(self, item):
        # inserting at the beginning is the same head insertion as add()
        temp = Node(item)
        temp.setNext(self.head)
        self.head = temp

    def deletepos(self, n):
        # delete the node at zero-based position n
        if self.head == None:
            return
        if n == 0:
            self.head = self.head.getNext()
            return
        count = 0
        current = self.head
        previous = None
        while count < n:
            previous = current
            current = current.getNext()
            count = count + 1
        previous.setNext(current.getNext())


mylist = UnorderedList()
mylist.add(31)
mylist.add(77)
mylist.add(17)
mylist.add(93)
mylist.add(26)
mylist.add(54)
mylist.insertBegin(30)
mylist.deletepos(2)
a = mylist.size()
print("The size of the LinkedList = ", a)
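# The original file's comments mention "a method to add at the middle" but never
# define one. A sketch that would fit the class above; the name insertpos is my
# own, not from the source:
def insertpos(self, n, item):
    """Insert item so that it ends up at zero-based position n."""
    if n == 0 or self.head == None:
        self.insertBegin(item)
        return
    current = self.head
    count = 0
    while count < n - 1 and current.getNext() != None:
        current = current.getNext()  # stop at the node before position n
        count = count + 1
    temp = Node(item)
    temp.setNext(current.getNext())
    current.setNext(temp)

# Attach it to the class to use it: UnorderedList.insertpos = insertpos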
''' Script written by Audrey McNay Contact me at amcnay@utexas.edu Outputs ANOVA and post-hoc results for survey data. Requires numpy and scipi libraries. ##### Information ##### Question: "How efficient/time-consuming was it to find information about...?" Numbers in tuple represent results from a seven-point likert scale. # “1” represents the best possible experience and “7” represents the worst possible experience. apply: while Applying to UT after: after applying to UT enroll: enrolling at UT finaid: financial aid housing: on-campus housing registr: registration ''' import numpy as np from scipy import stats from statsmodels.stats.multicomp import pairwise_tukeyhsd from statsmodels.stats.multicomp import MultiComparison data = np.rec.array([ ('apply', 5), ('apply', 6), ('apply', 5), ('apply', 2), ('apply', 5), ('apply', 2), ('apply', 3), ('apply', 5), ('apply', 2), ('apply', 1), ('apply', 4), ('apply', 3), ('apply', 2), ('apply', 2), ('apply', 3), ('apply', 3), ('apply', 1), ('apply', 3), ('apply', 5), ('apply', 2), ('apply', 2), ('apply', 6), ('apply', 4), ('apply', 5), ('apply', 1), ('apply', 3), ('apply', 5), ('apply', 1), ('apply', 3), ('apply', 4), ('apply', 2), ('apply', 4), ('apply', 5), ('apply', 1), ('apply', 7), ('apply', 4), ('apply', 6), ('apply', 6), ('apply', 5), ('apply', 1), ('apply', 4), ('apply', 5), ('apply', 2), ('apply', 1), ('apply', 1), ('apply', 2), ('apply', 2), ('apply', 2), ('apply', 4), ('apply', 1), ('apply', 2), ('apply', 5), ('apply', 2), ('apply', 4), ('apply', 2), ('apply', 2), ('apply', 5), ('apply', 3), ('apply', 4), ('apply', 4), ('apply', 7), ('apply', 2), ('apply', 6), ('apply', 1), ('apply', 1), ('apply', 4), ('apply', 2), ('apply', 4), ('apply', 2), ('apply', 2), ('apply', 7), ('apply', 3), ('apply', 3), ('apply', 5), ('apply', 2), ('apply', 2), ('apply', 2), ('apply', 5), ('after', 5), ('after', 6), ('after', 5), ('after', 3), ('after', 4), ('after', 2), ('after', 2), ('after', 5), ('after', 4), ('after', 1), ('after', 4), ('after', 3), ('after', 2), ('after', 3), ('after', 5), ('after', 4), ('after', 1), ('after', 5), ('after', 4), ('after', 3), ('after', 2), ('after', 6), ('after', 4), ('after', 3), ('after', 2), ('after', 1), ('after', 6), ('after', 2), ('after', 4), ('after', 4), ('after', 2), ('after', 4), ('after', 4), ('after', 1), ('after', 5), ('after', 4), ('after', 7), ('after', 7), ('after', 6), ('after', 1), ('after', 5), ('after', 7), ('after', 2), ('after', 1), ('after', 1), ('after', 2), ('after', 4), ('after', 2), ('after', 4), ('after', 1), ('after', 3), ('after', 4), ('after', 2), ('after', 4), ('after', 3), ('after', 2), ('after', 4), ('after', 5), ('after', 4), ('after', 4), ('after', 7), ('after', 1), ('after', 3), ('after', 1), ('after', 1), ('after', 3), ('after', 6), ('after', 4), ('after', 2), ('after', 2), ('after', 4), ('after', 4), ('after', 5), ('after', 5), ('after', 3), ('after', 5), ('after', 3), ('after', 4), ('enroll', 5), ('enroll', 6), ('enroll', 5), ('enroll', 3), ('enroll', 4), ('enroll', 2), ('enroll', 2), ('enroll', 5), ('enroll', 4), ('enroll', 1), ('enroll', 4), ('enroll', 3), ('enroll', 2), ('enroll', 3), ('enroll', 5), ('enroll', 4), ('enroll', 1), ('enroll', 5), ('enroll', 4), ('enroll', 3), ('enroll', 2), ('enroll', 6), ('enroll', 4), ('enroll', 3), ('enroll', 2), ('enroll', 1), ('enroll', 6), ('enroll', 2), ('enroll', 4), ('enroll', 4), ('enroll', 2), ('enroll', 4), ('enroll', 4), ('enroll', 1), ('enroll', 5), ('enroll', 4), ('enroll', 7), ('enroll', 7), ('enroll', 6), ('enroll', 1), ('enroll', 5), 
('enroll', 7), ('enroll', 2), ('enroll', 1), ('enroll', 1), ('enroll', 2), ('enroll', 4), ('enroll', 2), ('enroll', 4), ('enroll', 1), ('enroll', 3), ('enroll', 4), ('enroll', 2), ('enroll', 4), ('enroll', 3), ('enroll', 2), ('enroll', 4), ('enroll', 5), ('enroll', 4), ('enroll', 4), ('enroll', 7), ('enroll', 1), ('enroll', 3), ('enroll', 1), ('enroll', 1), ('enroll', 3), ('enroll', 6), ('enroll', 4), ('enroll', 2), ('enroll', 2), ('enroll', 4), ('enroll', 4), ('enroll', 5), ('enroll', 5), ('enroll', 3), ('enroll', 5), ('enroll', 3), ('enroll', 4), ('finaid', 7), ('finaid', 6), ('finaid', 4), ('finaid', 4), ('finaid', 5), ('finaid', 4), ('finaid', 6), ('finaid', 7), ('finaid', 3), ('finaid', 1), ('finaid', 4), ('finaid', 4), ('finaid', 3), ('finaid', 5), ('finaid', 7), ('finaid', 6), ('finaid', 3), ('finaid', 6), ('finaid', 5), ('finaid', 5), ('finaid', 2), ('finaid', 7), ('finaid', 5), ('finaid', 4), ('finaid', 7), ('finaid', 6), ('finaid', 7), ('finaid', 4), ('finaid', 5), ('finaid', 4), ('finaid', 2), ('finaid', 3), ('finaid', 6), ('finaid', 1), ('finaid', 2), ('finaid', 6), ('finaid', 5), ('finaid', 7), ('finaid', 6), ('finaid', 1), ('finaid', 6), ('finaid', 7), ('finaid', 7), ('finaid', 1), ('finaid', 1), ('finaid', 7), ('finaid', 5), ('finaid', 2), ('finaid', 4), ('finaid', 2), ('finaid', 5), ('finaid', 6), ('finaid', 4), ('finaid', 4), ('finaid', 5), ('finaid', 2), ('finaid', 6), ('finaid', 6), ('finaid', 4), ('finaid', 5), ('finaid', 7), ('finaid', 6), ('finaid', 7), ('finaid', 7), ('finaid', 3), ('finaid', 7), ('finaid', 7), ('finaid', 4), ('finaid', 3), ('finaid', 4), ('finaid', 7), ('finaid', 6), ('finaid', 6), ('finaid', 6), ('finaid', 5), ('finaid', 4), ('finaid', 3), ('finaid', 4), ('housing', 6), ('housing', 3), ('housing', 5), ('housing', 6), ('housing', 3), ('housing', 3), ('housing', 5), ('housing', 5), ('housing', 5), ('housing', 1), ('housing', 4), ('housing', 4), ('housing', 1), ('housing', 5), ('housing', 5), ('housing', 4), ('housing', 1), ('housing', 5), ('housing', 4), ('housing', 5), ('housing', 5), ('housing', 5), ('housing', 5), ('housing', 4), ('housing', 7), ('housing', 6), ('housing', 5), ('housing', 2), ('housing', 6), ('housing', 4), ('housing', 4), ('housing', 2), ('housing', 6), ('housing', 1), ('housing', 1), ('housing', 4), ('housing', 3), ('housing', 4), ('housing', 5), ('housing', 4), ('housing', 4), ('housing', 7), ('housing', 3), ('housing', 1), ('housing', 4), ('housing', 2), ('housing', 4), ('housing', 2), ('housing', 4), ('housing', 2), ('housing', 4), ('housing', 3), ('housing', 3), ('housing', 4), ('housing', 6), ('housing', 5), ('housing', 6), ('housing', 6), ('housing', 4), ('housing', 4), ('housing', 7), ('housing', 3), ('housing', 3), ('housing', 7), ('housing', 1), ('housing', 2), ('housing', 6), ('housing', 4), ('housing', 4), ('housing', 4), ('housing', 7), ('housing', 7), ('housing', 5), ('housing', 5), ('housing', 4), ('housing', 4), ('housing', 4), ('housing', 4), ('registr', 6), ('registr', 6), ('registr', 3), ('registr', 3), ('registr', 6), ('registr', 1), ('registr', 5), ('registr', 7), ('registr', 3), ('registr', 1), ('registr', 4), ('registr', 5), ('registr', 6), ('registr', 4), ('registr', 7), ('registr', 5), ('registr', 1), ('registr', 4), ('registr', 5), ('registr', 4), ('registr', 5), ('registr', 5), ('registr', 6), ('registr', 4), ('registr', 7), ('registr', 5), ('registr', 5), ('registr', 2), ('registr', 3), ('registr', 4), ('registr', 2), ('registr', 3), ('registr', 3), ('registr', 1), ('registr', 3), ('registr', 5), 
('registr', 2), ('registr', 3), ('registr', 7), ('registr', 5), ('registr', 5), ('registr', 2), ('registr', 6), ('registr', 1), ('registr', 1), ('registr', 3), ('registr', 2), ('registr', 2), ('registr', 3), ('registr', 1), ('registr', 3), ('registr', 6), ('registr', 3), ('registr', 4), ('registr', 3), ('registr', 6), ('registr', 6), ('registr', 3), ('registr', 4), ('registr', 5), ('registr', 7), ('registr', 4), ('registr', 7), ('registr', 7), ('registr', 1), ('registr', 2), ('registr', 6), ('registr', 4), ('registr', 4), ('registr', 2), ('registr', 7), ('registr', 4), ('registr', 7), ('registr', 4), ('registr', 2), ('registr', 4), ('registr', 1), ('registr', 3)], dtype = [('Area', '|U10'), ('Rating', 'f8')]) #calculate ANOVA print('One-way ANOVA: Q6') print('=============') f, p = stats.f_oneway(data[data['Area'] == 'apply'].Rating, data[data['Area'] == 'after'].Rating, data[data['Area'] == 'enroll'].Rating, data[data['Area'] == 'finaid'].Rating, data[data['Area'] == 'housing'].Rating, data[data['Area'] == 'registr'].Rating) print ('F value:', f) print ('P value:', p, '\n') #calculate post-hoc mc = MultiComparison(data['Rating'], data['Area']) result = mc.tukeyhsd() print(result) print(mc.groupsunique)
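#calculate effect size (not in the original script): for a one-way ANOVA,
#eta squared can be recovered from the F statistic and the degrees of freedom:
#eta^2 = (F * df_between) / (F * df_between + df_within). This reuses the
#`data` array and the `f` value computed above.
k = len(np.unique(data['Area']))   # number of survey areas
N = len(data)                      # total responses
df_between, df_within = k - 1, N - k
eta_squared = (f * df_between) / (f * df_between + df_within)
print('Eta squared:', eta_squared)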
# -*- coding: utf-8 -*-
# @Time   : 2020-05-03 10:21
# @Author : speeding_motor

import numpy as np
import tensorflow as tf


class IOU(object):
    def __init__(self):
        super(IOU, self).__init__()

    @staticmethod
    def iou_with_anchor(boxs_wh, anchor_boxs):
        """Calculate the IoU between the true boxes and the anchors.

        Both inputs hold only widths and heights, so the boxes are treated
        as if they shared the same corner.
        """
        boxs_wh = np.expand_dims(boxs_wh, axis=1)
        anchor_boxs = np.expand_dims(anchor_boxs, axis=0)

        w_min = np.minimum(boxs_wh[..., 0], anchor_boxs[..., 0])
        h_min = np.minimum(boxs_wh[..., 1], anchor_boxs[..., 1])
        intersection_area = w_min * h_min

        boxs_area = boxs_wh[..., 0] * boxs_wh[..., 1]
        anchor_area = anchor_boxs[..., 0] * anchor_boxs[..., 1]

        iou = intersection_area / (boxs_area + anchor_area - intersection_area)
        return iou

    @staticmethod
    def best_iou(true_box, pred_box):
        """Return the IoU between pred_box and true_box.

        :param true_box: shape=(true_box_num, 4)
        :param pred_box: shape=(13, 13, anchor_num, 1, 4)
        :return: shape=(13, 13, anchor_num, true_box_num)
        """
        true_box = tf.cast(true_box, dtype=tf.float32)
        pred_box = tf.cast(pred_box, dtype=tf.float32)
        pred_box = tf.expand_dims(pred_box, axis=-2)

        true_xy = true_box[..., 0:2]
        true_wh = true_box[..., 2:4]
        pred_box_xy = pred_box[..., 0:2]
        pred_box_wh = pred_box[..., 2:4]

        # convert from center/size to min/max corners
        true_xy_min = true_xy - true_wh / 2
        true_xy_max = true_xy + true_wh / 2
        pred_box_xy_min = pred_box_xy - pred_box_wh / 2
        pred_box_xy_max = pred_box_xy + pred_box_wh / 2

        intersection_xy_min = tf.maximum(true_xy_min, pred_box_xy_min)
        intersection_xy_max = tf.minimum(true_xy_max, pred_box_xy_max)
        intersection_wh = tf.maximum(intersection_xy_max - intersection_xy_min, 0)

        pred_area = pred_box[..., 2] * pred_box[..., 3]
        true_area = true_box[..., 2] * true_box[..., 3]
        # note: the original named this union_area, but it is the intersection;
        # the union is the denominator below
        intersection_area = intersection_wh[..., 0] * intersection_wh[..., 1]

        return intersection_area / (pred_area + true_area - intersection_area)

    @staticmethod
    def iou(true_box, pred_box):
        """
        :param true_box: shape=[batch_size, grid_h, grid_w, anchor_id, box], box=[x, y, w, h]
        :param pred_box: shape=[batch_size, grid_h, grid_w, anchor_id, box], box=[x, y, w, h]
        :return: the IoU between true_box and pred_box, shape=[batch_size, grid_h, grid_w, anchor_id, 1]
        """
        # true box corners
        true_box_xy = true_box[..., 0:2]
        true_box_half_wh = true_box[..., 2:4] / 2
        true_box_xy_min = true_box_xy - true_box_half_wh
        true_box_xy_max = true_box_xy + true_box_half_wh

        # predicted box corners
        pred_xy = pred_box[..., 0:2]
        pred_box_half_wh = pred_box[..., 2:4] / 2
        pred_box_xy_min = pred_xy - pred_box_half_wh
        pred_box_xy_max = pred_xy + pred_box_half_wh

        inter_section_min_xy = tf.maximum(true_box_xy_min, pred_box_xy_min)
        inter_section_max_xy = tf.minimum(true_box_xy_max, pred_box_xy_max)
        inter_section_wh = tf.maximum(inter_section_max_xy - inter_section_min_xy, 0.)

        intersection_area = inter_section_wh[..., 0] * inter_section_wh[..., 1]
        pred_area = pred_box[..., 2] * pred_box[..., 3]
        true_area = true_box[..., 2] * true_box[..., 3]

        return intersection_area / (pred_area + true_area - intersection_area)
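# A worked check of IOU.iou above with hand-computable numbers: two unit squares
# whose centers are 0.5 apart overlap in a 0.5 x 1 strip, so
# IoU = 0.5 / (1 + 1 - 0.5) = 1/3.
import tensorflow as tf

true_box = tf.constant([[0.5, 0.5, 1.0, 1.0]])  # [x, y, w, h]
pred_box = tf.constant([[1.0, 0.5, 1.0, 1.0]])
print(IOU.iou(true_box, pred_box).numpy())       # ~[0.3333]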
import requests
import bs4
import re
import pandas as pd
import matplotlib.pyplot as plt
import plotly.graph_objects as go
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
import plotly.express as px
import plotly.figure_factory as ff
from scipy import stats
from math import floor


# Regular expression used to select only numeric characters in a string
def getNumbers(array):
    arr = re.findall(r'[0-9]', array)
    return arr


# clean() joins a list of digits back into a single string
def clean(data):
    strings = [str(num) for num in data]
    concatenated = "".join(strings)
    return concatenated


# Finds the number of daily cases as the difference between each cumulative
# value and the one before it
def daily_values(data, array):
    for i in range(len(data) - 1):
        hold = data[i + 1] - data[i]
        array.append(hold)


'''
Builds the cumulative sum of an array: element i of the result is the sum of
elements 0..i of the input. This feeds the average daily statistics, where each
day's running total is divided by the index + 1.

Example:
Input:  [2, 14, 17, 36]
Output: [2, 14+2, 17+14+2, 36+17+14+2]

nb: the division is not performed here but in daily_average. (The original
version wrapped this loop in a broken recursive call; a single pass over the
data is all that is needed.)
'''
def cumulative_sum(data, array):
    total = 0
    values = []
    for j in data:
        total = total + j
        values.append(total)
    array.append(values)
    return array


'''
Finds the running average of an array: each element is divided by its index + 1.
'''
def daily_average(data, array):
    for i in data:
        for y, k in enumerate(i):
            array.append(k // (y + 1))


def monthly(data, array, field):
    for i, j in enumerate(data):
        array.append(data[i][field].sum())


# Correlation matrix
def plotCorrelationMatrix(df, graphWidth):
    filename = df.dataframeName
    df = df.dropna(axis='columns')  # drop columns with NaN
    df = df[[col for col in df if df[col].nunique() > 1]]  # keep columns with more than 1 unique value
    if df.shape[1] < 2:
        print(f'No correlation plots shown: The number of non-NaN or constant columns ({df.shape[1]}) is less than 2')
        return
    corr = df.corr()
    plt.figure(num=None, figsize=(graphWidth, graphWidth), dpi=80, facecolor='w', edgecolor='k')
    corrMat = plt.matshow(corr, fignum=1)
    plt.xticks(range(len(corr.columns)), corr.columns, rotation=90)
    plt.yticks(range(len(corr.columns)), corr.columns)
    plt.gca().xaxis.tick_bottom()
    plt.colorbar(corrMat)
    plt.title(f'Correlation Matrix for {filename}', fontsize=15)
    plt.show()


res = requests.get('https://www.worldometers.info/coronavirus/country/ghana/')
text = bs4.BeautifulSoup(res.text, 'html.parser')

cases = clean(getNumbers(str(text.select('div span')[4])))
deaths = clean(getNumbers(str(text.select('div span')[5])))
recoveries = clean(getNumbers(str(text.select('div span')[6])))

print("Total Cases:", cases, "\n")
print("Total Recoveries:", recoveries, "\n")
print("Total Deaths:", deaths, '\n')

# js contains the javascript of the website; the list indices below pick out
# the chart data embedded in specific script tags
js = text.select('script[type="text/javascript"]')
data_for_days = list(js[7])
data_for_cases = list(js[7])
data_for_deaths = list(js[10])
# NOTE: the original script referenced an undefined name `active`; the script
# tag index below is an assumption and may need adjusting to the live page
data_for_active = list(js[13])

# \w\w\w\s\d+ matches a three-letter word, a space, then one or more digits
days_reg = re.compile(r'\w\w\w\s\d+')
days = days_reg.findall(data_for_days[0])

# \d{1,7} matches any number with at most 7 digits.
# Edit these regexes if Ghana's coronavirus cases ever reach 8 digits :(
cases_reg = re.findall(r'\d{1,7}', data_for_cases[0])
deaths_reg = re.findall(r'\d{1,7}', data_for_deaths[0])
active_reg = re.findall(r'\d{1,7}', data_for_active[0])

data_for_cases = [int(i) for i in cases_reg]
data_for_deaths = [int(i) for i in deaths_reg]
data_for_active = [int(i) for i in active_reg]

data_for_cases = data_for_cases[350::]
data_for_deaths = data_for_deaths[350::]

case_index = data_for_cases.index(0)
death_index = data_for_deaths.index(0)
# NOTE: assumed to mirror the case/death handling; not present in the original
active_index = data_for_active.index(0)

total_cases = data_for_cases[case_index:len(data_for_cases) - 1]
total_cases = [int(i) for i in total_cases]
total_deaths = data_for_deaths[death_index:len(data_for_deaths) - 1]
total_deaths = [int(i) for i in total_deaths]
active_cases = data_for_active[active_index:len(data_for_active) - 1]
active_cases = [int(i) for i in active_cases]

total_cases = total_cases[26::]
total_deaths = total_deaths[26::]

# NOTE: these index() calls depend on the date range served by the live page
ind = days.index('Jul 29')
days = days[ind::]
ind = days.index('Mar 12')
days = days[ind::]

data = {'Dates': days,
        'Total Cases': total_cases,
        'Total Deaths': total_deaths,
        'Active Cases': active_cases}
df = pd.DataFrame(data)

# daily cases
daily_cases = []
daily = list(df["Total Cases"])
daily_values(daily, daily_cases)
# 2 is inserted at the first position because Ghana first recorded 2 cases
daily_cases.insert(0, 2)
df["Daily Cases"] = daily_cases

# average daily cases
daily_cases_sum = []
cumulative_sum(daily_cases, daily_cases_sum)
avg_daily_cases = []
daily_average([daily_cases_sum[0]], avg_daily_cases)
df["Average Daily Cases"] = avg_daily_cases

# daily deaths
deaths = list(df["Total Deaths"])
death_list = []
daily_values(deaths, death_list)
death_list.insert(0, 0)
df["Daily Deaths"] = death_list

# average daily deaths
summed_deaths = []
cumulative_sum(death_list, summed_deaths)
avg_deaths = []
daily_average([summed_deaths[0]], avg_deaths)
df["Average Daily Deaths"] = avg_deaths

# cumulative recovered cases
total = df["Total Cases"]
dead = df["Total Deaths"]
current = df["Active Cases"]
recovered = total - dead - current
df["Recovered"] = recovered

# daily recovered cases
daily_recovered = []
daily_values(recovered, daily_recovered)
daily_recovered.insert(0, 0)
df["Daily Recovered Cases"] = daily_recovered

# average daily recovered cases
summed_recovered = []
cumulative_sum(daily_recovered, summed_recovered)
avg_recovered = []
daily_average([summed_recovered[0]], avg_recovered)
df["Average Daily Recovered Cases"] = avg_recovered

nRow, nCol = df.shape
print(f'There are {nRow} rows and {nCol} columns')

dates = df["Dates"]
total_active_cases = df["Active Cases"]
total_cases = df["Total Cases"]
daily_cases = df["Daily Cases"]
average_daily_cases = df["Average Daily Cases"]
total_deaths = df["Total Deaths"]
daily_deaths = df["Daily Deaths"]
daily_average_deaths = df["Average Daily Deaths"]
total_recovered = df["Recovered"]
daily_recovered = df["Daily Recovered Cases"]
daily_average_recovered_cases = df["Average Daily Recovered Cases"]

# the current average daily case count, repeated for every day so it can be
# drawn as a horizontal line
avg_0 = list(total_cases)
avg = floor(avg_0[-1] / len(avg_0))
avg = [avg] * len(dates)

labels = ["Confirmed Cases", "Confirmed Deaths", "Recovered"]
pie_chart_data = [daily_cases.sum(), daily_deaths.sum(), daily_recovered.sum()]
data = go.Pie(labels=labels, values=pie_chart_data)
layout = dict(title="Ghana's Covid-19 Cases", title_x=0.5)
go.Figure(data=data, layout=layout)
style = go.Layout(xaxis={'title':"Days"},yaxis={"title":"Cases"}) fig = go.Figure(layout = style) trace_0 = fig.add_trace(go.Scatter(x=dates, y=total_cases,name="Cumulative Cases Count" )) fig.update_layout( title_text="Ghana's Corona Virus Cumulative Cases", title_x=0.5, xaxis=dict( showgrid=True, linewidth=2, ticks='outside', tickangle = -67, tickfont=dict( family='Arial', size=12 ), ) ) fig.show() months = [ df.iloc[0:19], df.loc[20:49], df.iloc[50:80], df.iloc[81:110], df.iloc[111:141], df.iloc[142:-1]] monthly_cases = [] monthly(months,monthly_cases,"Daily Cases") monthly_recovered = [] monthly(months,monthly_recovered,"Daily Recovered Cases") monthly_deaths = [] monthly(months,monthly_deaths,"Daily Deaths") x_axis = ["March", "April","May","June","July","August"] style = go.Layout(xaxis={'title':"Months"},yaxis={"title":"Numbers"}) fig = go.Figure(go.Bar(x=x_axis, y=monthly_cases, name='Monthly Cases'),layout = style) fig.add_trace(go.Bar(x=x_axis, y=monthly_recovered, name='Monthly Recovered')) fig.add_trace(go.Bar(x=x_axis, y=monthly_deaths, name='Monthly Deaths')) fig.update_layout(barmode='stack', title_text="Corona Virus Statistics For Every Month", title_x=0.5, ) fig.show() style = go.Layout(xaxis={'title':"Days"},yaxis={"title":"Cases"}, width =850) fig2 = go.Figure(layout=style) trace_2 = fig2.add_trace(go.Scatter(x=dates, y=daily_cases, mode='lines+markers', name="Daily Case Count" )) trace_3 = fig2.add_trace(go.Scatter (x = dates, y=avg, name="Current Average Cases")) trace_4 = fig2.add_trace(go.Scatter(x=dates, y = average_daily_cases, name="Average Daily Cases")) fig2.update_layout( title_text="Ghana's Daily Corona Virus Cases", title_x=0.5, xaxis=dict( showgrid=True, linewidth=2, ticks='outside', tickangle = -67, tickfont=dict( family='Arial', size=12 ), ) ) fig2.show() style = go.Layout(width=850) fig = go.Figure(go.Bar(x=dates, y=daily_recovered, name='Daily Recovered Cases'),layout = style) fig.add_trace(go.Scatter(x=dates,y=daily_average_recovered_cases, name = "Average Recovered Cases" )) fig.update_layout( title_text="Ghana's Daily Corona Virus Recovered Cases", title_x=0.5, xaxis=dict( showgrid=True, linewidth=2, ticks='outside', tickangle = -67, tickfont=dict( family='Arial', size=12 ), ) ) fig.show() style = go.Layout(width=850) fig = go.Figure(go.Bar(x=dates, y=daily_deaths, name='Daily Deaths'),layout = style) fig.add_trace(go.Scatter(x=dates,y=daily_average_deaths, name = "Daily Average Deaths" )) fig.update_layout( title_text="Ghana's Daily Corona Virus Deaths Cases", title_x=0.5, xaxis=dict( showgrid=True, linewidth=2, ticks='outside', tickangle = -67, tickfont=dict( family='Arial', size=12 ), ) ) fig.show() slope, intercept, r, p, std_err = stats.linregress(total_cases, total_deaths) def regression_line(x): return slope * x + intercept model = list(map(regression_line, total_cases)) trace_1 = go.Scatter(x = total_cases, y=total_deaths, mode='markers', name="Total Cases") trace_2 = go.Scatter(x=total_cases, y=model, name="Regression Line") style = go.Layout(yaxis={'title':"Deaths"},xaxis={"title":"Cases"},width = 850) data = [trace_1,trace_2] fig = go.Figure(data = data,layout = style) fig.update_layout( title_text=" Linear Regression Model On Ghana's Covid-19 Data", title_x=0.5, xaxis=dict( showgrid=True, linewidth=2, ticks='outside', tickfont=dict( family='Arial', size=12 ), ) ) fig.show() df.dataframeName = " Ghana's Corona Virus Data" plotCorrelationMatrix(df, 10)
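# The slope returned by stats.linregress in the script above is the fitted
# deaths-per-case rate, and r**2 is the share of variance explained. A tiny
# self-contained check of the formula on exact toy numbers (not the scraped
# series):
from scipy import stats

xs = [0, 10, 20, 30]
ys = [1, 3, 5, 7]                      # exactly ys = 0.2*xs + 1
slope, intercept, r, p, std_err = stats.linregress(xs, ys)
assert abs(slope - 0.2) < 1e-12 and abs(intercept - 1) < 1e-12
assert abs(r - 1.0) < 1e-12            # perfect linear fit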
# -*- coding: utf-8 -*- """Test that the functions will download data.""" import datetime import pytest from download_nwp_model_output.data_source import ( HEIGHT_2M_VARIABLES, HEIGHT_10M_VARIABLES, PRESSURE_VARIABLES, SINGLE_LEVEL_VARIABLES, ) from download_nwp_model_output.nwp_models import NWP_MODELS, BboxWesn PA_BBOX = BboxWesn(-81, -74, 39, 43) @pytest.mark.parametrize("model_abbrev", ["GFS", "NAM", "RAP", "NAVGEM"]) @pytest.mark.parametrize("forecast_hour", [3, 12]) @pytest.mark.parametrize("pressure_mb", [1000, 500, 250]) def test_model_pressure( model_abbrev: str, forecast_hour: int, pressure_mb: int ) -> None: """Test whether models can get variables on pressure surfaces. Parameters ---------- model_abbrev: str forecast_hour: int pressure_mb: int """ model = NWP_MODELS[model_abbrev] init_time = model.get_model_start_with_data() valid_time = init_time + datetime.timedelta(hours=forecast_hour) variables = PRESSURE_VARIABLES & model.data_access.variable_mapping.keys() data = model.get_model_data_pressure( init_time, valid_time, variables, pressure_mb, PA_BBOX ) for data_var in data.data_vars.values(): assert data_var.count() > 0 @pytest.mark.parametrize("model_abbrev", ["NAVGEM", "GFS", "NAM", "RAP"]) @pytest.mark.parametrize("forecast_hour", [3, 12]) def test_model_surface(model_abbrev: str, forecast_hour: int) -> None: """Test whether models can get surface fields. Parameters ---------- model_abbrev: str forecast_hour: int """ model = NWP_MODELS[model_abbrev] init_time = model.get_model_start_with_data() valid_time = init_time + datetime.timedelta(hours=forecast_hour) variables_2m = HEIGHT_2M_VARIABLES & model.data_access.variable_mapping.keys() data_2m = model.get_model_data_height( init_time, valid_time, variables_2m, 2, PA_BBOX, ) for data_var in data_2m.data_vars.values(): assert data_var.count() > 0 variables_10m = HEIGHT_10M_VARIABLES & model.data_access.variable_mapping.keys() data_10m = model.get_model_data_height( init_time, valid_time, variables_10m, 10, PA_BBOX, ) for data_var in data_10m.data_vars.values(): assert data_var.count() > 0 variables_one_level = ( SINGLE_LEVEL_VARIABLES & model.data_access.variable_mapping.keys() ) data_one_level = model.get_model_data_single_level( init_time, valid_time, variables_one_level, PA_BBOX, ) for data_var in data_one_level.data_vars.values(): assert data_var.count() > 0 @pytest.mark.parametrize("forecast_hour", range(24, 72, 24)) def test_ecmwf_pressure(forecast_hour: int) -> None: """Test whether code can download ECMWF pressure fields. Parameters ---------- forecast_hour: int """ pytest.importorskip("eccodes") model = NWP_MODELS["ECMWF"] init_time = model.get_model_start_with_data() valid_time = init_time + datetime.timedelta(hours=forecast_hour) variables_850 = ["x_wind", "y_wind", "air_temperature"] data_850 = model.get_model_data_pressure( init_time, valid_time, variables_850, 850, PA_BBOX ) for data_var in data_850.data_vars.values(): assert data_var.count() > 0 variables_500 = ["geopotential_height"] data_500 = model.get_model_data_pressure( init_time, valid_time, variables_500, 500, PA_BBOX ) for data_var in data_500.data_vars.values(): assert data_var.count() > 0 @pytest.mark.parametrize("forecast_hour", range(24, 72, 24)) def test_ecmwf_surface(forecast_hour: int) -> None: """Test whether code can download ECMWF pressure fields. 
Parameters ---------- forecast_hour: int """ pytest.importorskip("eccodes") model = NWP_MODELS["ECMWF"] init_time = model.get_model_start_with_data() valid_time = init_time + datetime.timedelta(hours=forecast_hour) variables_one_level = ["air_pressure_at_mean_sea_level"] data_one_level = model.get_model_data_single_level( init_time, valid_time, variables_one_level, PA_BBOX ) for data_var in data_one_level.data_vars.values(): assert data_var.count() > 0
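# The stacked parametrize decorators in the test module above multiply out into
# a full test matrix: test_model_pressure runs 4 models x 2 forecast hours x 3
# pressure levels = 24 cases, each with a combined id (roughly like
# test_model_pressure[1000-3-GFS]). A minimal sketch of the same mechanism:
import pytest


@pytest.mark.parametrize("model_abbrev", ["GFS", "NAM"])
@pytest.mark.parametrize("forecast_hour", [3, 12])
def test_matrix(model_abbrev: str, forecast_hour: int) -> None:
    # pytest invokes this once per (model_abbrev, forecast_hour) combination
    assert model_abbrev in {"GFS", "NAM"} and forecast_hour in {3, 12}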
class Solution:
    def divide(self, dividend, divisor):
        """
        :type dividend: int
        :type divisor: int
        :rtype: int

        Approach taken from the LeetCode discussion boards:
        https://leetcode.com/problems/divide-two-integers/discuss/13407/Detailed-Explained-8ms-C++-solution
        https://leetcode.com/problems/divide-two-integers/discuss/13403/Clear-python-code
        """
        # the quotient is positive iff both operands have the same sign
        positive = (dividend < 0) is (divisor < 0)
        dividend, divisor = abs(dividend), abs(divisor)
        res = 0
        while dividend >= divisor:
            # double the divisor until it no longer fits into the dividend
            temp, mul = divisor, 1
            while dividend >= (temp << 1):
                temp <<= 1
                mul <<= 1
            dividend -= temp
            res += mul
        if not positive:
            res = -res
        # clamp to the 32-bit signed range required by the problem
        return min(max(-2147483648, res), 2147483647)
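# Worked trace of the shift loop above for 43 // 8: the first pass doubles 8 to
# 16 and then 32 (mul = 4), leaving 43 - 32 = 11; the second pass cannot double,
# so it subtracts 8 (mul = 1), leaving 3 < 8. res = 4 + 1 = 5 = 43 // 8.
sol = Solution()
assert sol.divide(43, 8) == 5
assert sol.divide(-43, 8) == -5
assert sol.divide(-2147483648, -1) == 2147483647  # clamped overflow case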
from django.urls import path

from .views import *

urlpatterns = [
    path('', index, name='index'),
    path('<str:room_name>/', room, name='room'),
]
from ..models import Contractor
from django.forms import ModelForm
from django.forms import Select, TextInput


class ContractorForm(ModelForm):
    class Meta:
        model = Contractor
        fields = '__all__'
        widgets = {
            'name': TextInput(attrs={'class': 'form-control mr-3'}),
            'type_c': Select(attrs={'class': 'form-control mr-3'}),
        }
from AnodeSimulation.myPadArray import myPadArray from AnodeSimulation.SimAnode import sim_anode from AnodeSimulation.parameter import dictInput, input_check, display from Reconstruction.reconstruction import reconstruction from matplotlib import pyplot as plt from matplotlib.lines import Line2D from joblib import Parallel, delayed import matplotlib.ticker as plticker from matplotlib.ticker import MultipleLocator, FormatStrFormatter from shapely.geometry.polygon import Polygon from descartes import PolygonPatch from tqdm import tqdm import time import pickle import numpy as np import sys def make(): pad = create_pad(0) sim = sim_anode() id = plotID() if dictInput['read']: sim.read_sim(id+"_sim.npy") sim.get_coord_grid(int(dictInput['laser_positions']),float(dictInput['length']) ) else: if dictInput['shape'] == 'multilayer': sim.get_coord_grid_multilayer(int(dictInput['laser_positions']), float(dictInput['length']), int(dictInput['layers'])) sim.update_end(pad) sim.run_sim_multilayer_multithread(pad, float(dictInput['radius']), int(dictInput['processes']), int(dictInput['layers']), int(dictInput['num_sample'])) np.save(id+"_sim.npy",sim.amplitude) else: sim.get_coord_grid(int(dictInput['laser_positions']), float(dictInput['length'])) sim.update_end(pad) sim.run_sim_multithread(pad, float(dictInput['radius']), int(dictInput['processes']), int(dictInput['num_sample'])) np.save(id+"_sim.npy",sim.amplitude) return pad, sim def create_pad(i): pad = myPadArray(float(dictInput['length'])+i*float(dictInput['length_incr'])) if dictInput['shape'] == 'multilayer': pad.get_pad_coded(int(dictInput['layers'])) return pad if dictInput['shape'] == 'sin': pad.modify_one_sin_box(0.01, float(dictInput['pattern_height'])+i*float(dictInput['pattern_height_incr'])) elif dictInput['shape'] == 'nose': pad.modify_one_n_box(float(dictInput['nose_start'])+i*float(dictInput['nose_start_incr']), float(dictInput['nose_end'])+i*float(dictInput['nose_end_incr']), float(dictInput['pattern_height'])+i*float(dictInput['pattern_height_incr'])) elif dictInput['shape'] == 'cross': pad.modify_one_cross_box() elif dictInput['shape'] == '45nose': pad.modify_one_45degree_n_box(float(dictInput['nose_start'])+i*float(dictInput['nose_start_incr']), float(dictInput['nose_end'])+i*float(dictInput['nose_end_incr']), float(dictInput['pattern_height'])+i*float(dictInput['pattern_height_incr']), dictInput['trapezoid_height']) elif dictInput['shape'] == '45wedge': pad.modify_one_wedge_n_box(float(dictInput['nose_start'])+i*float(dictInput['nose_start_incr']), float(dictInput['nose_end'])+i*float(dictInput['nose_end_incr']), float(dictInput['pattern_height'])+i*float(dictInput['pattern_height_incr'])) elif dictInput['shape'] == 'square': pass else: print("wrong input pad shape") sys.exit(1) pad.get_pad_5x5() return pad #Run simulations with differing pad size def make_step(): id = plotID() sample_pad = create_pad(0) sims = list() if dictInput['read']: for i in range(int(dictInput['num_sim'])): sim = sim_anode() sim.read_sim(id+'_sim'+str(i)+'.npy') sim.get_coord_grid(int(dictInput['laser_positions']),float(dictInput['length']) + i*float(dictInput['length_incr']) ) sims.append(sim) else: sims = Parallel(n_jobs = int(dictInput['processes']), verbose = 10)(delayed(sim_job)(i) for i in range(int(dictInput['num_sim']))) for i in range(int(dictInput['num_sim'])): np.save(id+'_sim'+str(i)+'.npy',sims[i].amplitude) return sample_pad, sims def sim_job(i): pad = create_pad(i) sim = sim_anode() 
sim.get_coord_grid(int(dictInput['laser_positions']),float(dictInput['length']) + i*float(dictInput['length_incr']) ) sim.update_end(pad) sim.run_sim(pad, float(dictInput['radius']), int(dictInput['num_sample'])) return sim def draw_pattern(a, ax): array = list(a.box_array) del array[12] l = list() [l.append(i.exterior.xy) for i in array] [ax.plot(j,k,'g') for (j,k) in list(l)] lc = list() lc.append(a.box_array[12].exterior.xy) [ax.plot(j,k,'r') for (j,k) in list(lc)] ax.set_axisbelow(True) ax.set_xlabel('x[mm]') ax.set_ylabel('y[mm]') ax.set_xlim([-1.5*a.side, 1.5*a.side]) ax.set_ylim([-1.5*a.side, 1.5*a.side]) ax.text(1, 0, plotDesc(), verticalalignment='bottom', horizontalalignment='right', transform=ax.transAxes,color = 'black') ax.grid(which='both', axis='both') ax.minorticks_on() ax.tick_params(which='major', length=7, color='b') ax.tick_params(which='minor', length=5, color='k') loc = plticker.MultipleLocator(base = float(dictInput['length'])/2) # this locator puts ticks at square intervals ax.xaxis.set_major_locator(loc) ax.xaxis.set_ticks_position('both') ax.yaxis.set_major_locator(loc) ax.yaxis.set_ticks_position('both') #ax.tick_params(which='both', width=2, labelleft = 'on', labelright = 'off', labelbottom = 'on', labeltop = 'off') ax.grid(b=True, which='major', axis='both', color='#000000', alpha=0.1, linestyle='-') ax.grid(b=True, which='minor', axis='both', color='#000000', alpha=0.1, linestyle='-') def draw_pattern_colored(a, ax): draw_pattern(a,ax) pad_patch = PolygonPatch(a.box_array[12]) pad_patch.set_facecolor('r') ax.add_patch(pad_patch) def draw_pattern_embed(pad, ax, x1, y1, x2, y2): axins = ax.inset_axes([x1, y1, x2, y2]) array = list(pad.box_array) del array[12] l = list() [l.append(i.exterior.xy) for i in array] [axins.plot(j,k,'g') for (j,k) in list(l)] lc = list() lc.append(pad.box_array[12].exterior.xy) [axins.plot(j,k,'r') for (j,k) in list(lc)] axins.set_xlim(-1.5*pad.side, 1.5*pad.side) axins.set_ylim(-1.5*pad.side, 1.5*pad.side) axins.set_xticklabels('') axins.set_yticklabels('') axins.xaxis.set_ticks_position('none') axins.yaxis.set_ticks_position('none') def draw_radius(SimAnode, pad, ax): draw_pattern(pad, ax) ax.add_artist(plt.Circle((0, 0), float(dictInput['radius']), alpha =0.8, color='black', fill = False)) legend_lst = [Line2D([0], [0], marker='o', color='crimson', label='ring spot', markersize=10), Line2D([0], [0], marker='x', color='b', label='actual ring position', markersize=4)] ax.plot(0,0, c='b', marker='x') ax.text(1, 0, plotDesc(), verticalalignment='bottom', horizontalalignment='right', transform=ax.transAxes,color = 'black') def draw_reconstructed(): pad, sim = make() plt.rcParams.update({'font.size': 12}) fig1, ax = plt.subplots(figsize=(6, 6)) array = list(pad.box_array) l = list() [l.append(i.exterior.xy) for i in array] [ax.plot(j,k,'g') for (j,k) in list(l)] ax.set_axisbelow(True) ax.set_xlabel('x[mm]') ax.set_ylabel('y[mm]') recon_positions = list() p_x = list() p_y = list() id = plotID() n = int(dictInput['laser_positions']) if dictInput['read']: recon_positions = float(dictInput['read_scale'])*np.load(id+"_reconstruction.npy") else: rec = reconstruction() recon_positions = [rec.reconstruction(sim.amplitude[:, i],sim.amplitude) for i in tqdm(range(n**2),leave=False, desc='reconstruction')] if dictInput['layers']: paddim = 4*int(dictInput['layers']) p_x = [(recon_positions[i][0]/n*paddim-0.5)*float(dictInput['length']) for i in range(len(recon_positions))] p_y = [(recon_positions[i][1]/n*paddim-0.5)*float(dictInput['length']) for i 
in range(len(recon_positions))] X = [((i % n)/n*paddim-0.5)*float(dictInput['length']) for i in range(n**2)] Y = [((i // n)/n*paddim-0.5)*float(dictInput['length']) for i in range(n**2)] else: paddim = 5 p_x = [(recon_positions[i][0]/n-0.5)*paddim*float(dictInput['length']) for i in range(len(recon_positions))] p_y = [(recon_positions[i][1]/n-0.5)*paddim*float(dictInput['length']) for i in range(len(recon_positions))] X = [((i % n)/n-0.5)*paddim*float(dictInput['length']) for i in range(n**2)] Y = [((i // n)/n-0.5)*paddim*float(dictInput['length']) for i in range(n**2)] ax.scatter(p_x, p_y, s=10,c='crimson', label='reconstructed ring position') ax.scatter(X, Y, c='blue', marker="_",label='actual ring position') ax.legend(loc=1, framealpha=1, fontsize='x-small') ax.text(1, 0, plotDesc(), verticalalignment='bottom', horizontalalignment='right', transform=ax.transAxes,color = 'black') save_plot(fig1, id+"_reconstruction") np.save(id+"_reconstruction.npy",recon_positions) with open(id+"_reconstruction.csv", 'w') as f: for i in range(len(recon_positions)): f.write(str(i)+','+str(p_x[i])+','+str(p_y[i])+'\n') log = rec.degeneracy_check(recon_positions) with open(id+"_reconstruction.log", 'w') as f: f.write(log) fig2, ax2= plt.subplots(figsize=(6, 6)) array = list(pad.box_array) l = list() [l.append(i.exterior.xy) for i in array] [ax2.plot(j,k,'g') for (j,k) in list(l)] rx = [x for x in range(1, len(sim.coord_x)-1)]#We are only plotting for the area of interest. ry = [y for y in range(1, len(sim.coord_y)-1)] X, Y = np.meshgrid([sim.coord_x[i]-0.5*float(dictInput['length']) for i in rx],[sim.coord_y[i]-0.5*float(dictInput['length']) for i in ry]) S = list() id = plotID() if dictInput['read']: S = float(dictInput['read_scale'])*np.load(id+"_sd_colorplot.npy") else: rec = reconstruction() S = [[1000*rec.sd(sim.amplitude, (i,j), 4*int(dictInput['layers'])*pad.side/float(dictInput['laser_positions'])) for i in rx] for j in tqdm(ry,leave=False,desc = 'SD calculation y' )] np.save(id+"_sd_colorplot.npy",S) with open(id+"_sd_colorplot.csv", 'w') as f: for x in range(len(rx)): for y in range(len(ry)): f.write(str(x+rx[0])+','+str(y+ry[0])+','+str(S[y][x])+'\n') with open(id+"_sd_colorplot.log", 'w') as f: f.write(rec.print_log()) maxv = 1000#min(np.amax(S), 1000) pc = ax2.pcolor(X,Y, S, vmax = maxv) cbar = plt.colorbar(pc, ax = ax2) cbar.set_label('Position Resolution[μm]', rotation=90) ax2.text(1, 0, plotDesc(), verticalalignment='bottom', horizontalalignment='right', transform=ax2.transAxes,color = 'white') save_plot(fig2, id+"_sd_colorplot") def plotID(): return dictInput['shape']+'_res_'+dictInput['laser_positions']+'x'+dictInput['laser_positions']+'_L_'+dictInput['length']+'_R_'+dictInput['radius']+'_H_'+dictInput['pattern_height'] def plotDesc(): return "L = "+str(float(dictInput['length'])/float(dictInput['radius'])/2)[0:5]#dictInput['shape']+' '+dictInput['laser_positions']+'x'+dictInput['laser_positions']+'\nside: '+dictInput['length']+'mm radius: '+dictInput['radius']+'mm\npattern height:'+dictInput['pattern_height']+'mm' #Draw the standard deviation plot from noise data def draw_sd_colorplot(sim, pad, ax): draw_pattern(pad, ax) rx = [x for x in range(0, len(sim.coord_x)) if (-1.5*pad.side<=sim.coord_x[x] and sim.coord_x[x]<=1.5*pad.side)]#We are only plotting for the area of interest. 
    ry = [y for y in range(0, len(sim.coord_y)) if (-1.5*pad.side<=sim.coord_y[y] and sim.coord_y[y]<=1.5*pad.side)]
    X, Y = np.meshgrid([sim.coord_x[i] for i in rx], [sim.coord_y[i] for i in ry])
    S = list()
    id = plotID()
    if dictInput['read']:
        S = float(dictInput['read_scale'])*np.load(id+"_sd_colorplot.npy")
    else:
        rec = reconstruction()
        S = [[1000*rec.sd(sim.amplitude, (i,j), 5*pad.side/float(dictInput['laser_positions'])) for i in rx] for j in tqdm(ry, leave=False, desc='SD calculation y')]
        np.save(id+"_sd_colorplot.npy", S)
        with open(id+"_sd_colorplot.csv", 'w') as f:
            for x in range(len(rx)):
                for y in range(len(ry)):
                    f.write(str(x+rx[0])+','+str(y+ry[0])+','+str(S[y][x])+'\n')
        with open(id+"_sd_colorplot.log", 'w') as f:
            f.write(rec.print_log())
    maxv = 1000  # min(np.amax(S), 1000)
    pc = ax.pcolor(X, Y, S, vmax=maxv)
    ax.ticklabel_format(axis='y', style='sci')
    cbar = plt.colorbar(pc, ax=ax)
    cbar.set_label('Position Resolution[μm]', rotation=90)
    ax.text(1, 0, plotDesc(), verticalalignment='bottom', horizontalalignment='right', transform=ax.transAxes, color='white')

def draw_sd_colorplot_debug(sim, pad, ax):
    draw_pattern(pad, ax)
    rx = [x for x in range(len(sim.coord_x)) if (-1.5*pad.side<=sim.coord_x[x] and sim.coord_x[x]<=1.5*pad.side)]  # We are only plotting for the area of interest.
    ry = [y for y in range(len(sim.coord_y)) if (-1.5*pad.side<=sim.coord_y[y] and sim.coord_y[y]<=1.5*pad.side)]
    mx = [x for x in range(len(sim.coord_x)) if (-0.5*pad.side<=sim.coord_x[x] and sim.coord_x[x]<=0.5*pad.side)]  # We are only plotting for the area of interest.
    my = [y for y in range(len(sim.coord_y)) if (-0.5*pad.side<=sim.coord_y[y] and sim.coord_y[y]<=0.5*pad.side)]
    X, Y = np.meshgrid([sim.coord_x[i] for i in rx], [sim.coord_y[i] for i in ry])
    S = list()
    id = plotID()
    S = np.load(id+"_sd_colorplot.npy")
    outliers = list()
    rad = float(dictInput['radius'])
    radunit = rad/(5*float(dictInput['length']))*float(dictInput['laser_positions'])
    scale = 5*float(dictInput['length'])/float(dictInput['laser_positions'])
    for i in mx:
        for j in my:
            if((S[i][j]<=1 or S[i][j]>40) and not any((p[0]-i)**2+(p[1]-j)**2<0.7*radunit**2 for p in outliers)):  # filtering outliers
                ax.add_artist(plt.Circle((sim.coord_x[i], sim.coord_y[j]), rad, alpha=0.4, color='crimson'))
                ax.plot(sim.coord_x[i], sim.coord_y[j], c='b', marker='x')
                ax.text(sim.coord_x[i], sim.coord_y[j], str((i - 0.5*len(sim.coord_x))*scale)+','+str((j - 0.5*len(sim.coord_y))*scale))
                outliers.append((i,j))
    maxv = 1000
    pc = ax.pcolor(X, Y, S, vmax=maxv)
    cbar = plt.colorbar(pc, ax=ax)
    cbar.set_label('Position Resolution[μm]', rotation=90)
    ax.text(1, 0, plotDesc(), verticalalignment='bottom', horizontalalignment='right', transform=ax.transAxes, color='white')

def draw_sd_pos(sim, pad, y_offset, ax):
    n = int(dictInput['laser_positions'])
    #ax.title.set_text('SD of reconstruction vs ring positions'+' y='+str(y_offset*5*pad.side/n))
    ax.set_xlabel('x[mm]')
    ax.set_xlim([-0.5*pad.side, 1.5*pad.side])
    if dictInput['shape'] == 'square':
        ax.axvline(-0.5*pad.side, color='red')
        ax.axvline(0.5*pad.side, color='red')
    ax.set_ylabel('Position Resolution[μm]')
    ax.tick_params(which='both', width=3)
    ax.tick_params(which='major', length=5, color='b')
    loc = plticker.MultipleLocator(base=float(dictInput['length']))  # this locator puts ticks at square intervals
    ax.xaxis.set_major_locator(loc)
    ax.grid(b=True, which='major', axis='both', color='#000000', alpha=0.2, linestyle='-')
    initpoint = n*int(n/2)
    rx = [x for x in range(len(sim.coord_x)) if (-1.5*pad.side<=sim.coord_x[x] and sim.coord_x[x]<=1.5*pad.side)]  # We are only plotting for the area of interest.
    rec = reconstruction()
    S = list()
    id = plotID()
    if dictInput['read']:
        S = float(dictInput['read_scale'])*np.load(id+"_sd_xaxis.npy")
    else:
        S = np.array([1000*rec.sd(sim.amplitude, (i, y_offset + int(n/2)), 5*pad.side/float(dictInput['laser_positions'])) for i in range(n)])
        np.save(id+"_sd_xaxis.npy", S)
    ax.plot(sim.coord_x[rx[0]:rx[len(rx)-1]], S[rx[0]:rx[len(rx)-1]], label='7.5% Noise')
    ax.plot(sim.coord_x[rx[0]:rx[len(rx)-1]], S[rx[0]:rx[len(rx)-1]]/7.5, label='1% Noise')
    ax.set_ylabel('Position Resolution[μm]')
    ax.set_ylim(bottom=0, top=1000)
    #ax.ticklabel_format(axis = 'y', style = 'sci')
    draw_pattern_embed(pad, ax, 0, 0, 0.2, 0.2)
    ax.text(1, 0, plotDesc(), verticalalignment='bottom', horizontalalignment='right', transform=ax.transAxes, color='black')
    with open(id+"_sd_xaxis.csv", 'w') as f:
        for i in range(n):
            f.write(str(sim.coord_x[i])+','+str(S[i])+'\n')
    """
    with open("draw_sd_pos_lastrun.csv", 'a') as f:
        for i in range(n):
            f.write(str(sim.coord_x[i])+','+str(S[i])+'\n')
    """

def save_sd(sims, pad, ax, filename):
    file_object = open(filename+'.csv', 'w')
    L_list = list()
    W_list = list()
    H_list = list()
    median_res_list = list()
    min_res_list = list()
    max_res_list = list()
    q3_res_list = list()
    u10_res_list = list()
    l10_res_list = list()
    if dictInput['read']:
        u10_res_list = float(dictInput['read_scale'])*np.load(filename+"_u10.npy")
        l10_res_list = float(dictInput['read_scale'])*np.load(filename+"_l10.npy")
        median_res_list = float(dictInput['read_scale'])*np.load(filename+"_median.npy")
        L_list = [(float(dictInput['length'])+i * float(dictInput['length_incr']))/float(dictInput['radius'])/2 for i in range(0, int(dictInput['num_sim']))]
        W_list = [(float(dictInput['nose_end']) - float(dictInput['nose_start']) + i * (float(dictInput['nose_end_incr']) - float(dictInput['nose_start_incr']))) for i in range(0, int(dictInput['num_sim']))]
        H_list = [(float(dictInput['pattern_height'])+ i*float(dictInput['pattern_height_incr'])) for i in range(0, int(dictInput['num_sim']))]
    else:
        for i in range(0, int(dictInput['num_sim'])):
            sim = sims[i]
            side = float(dictInput['length'])+i * float(dictInput['length_incr'])
            rx = [x for x in range(0, len(sim.coord_x)) if (-1.5*side<sim.coord_x[x] and sim.coord_x[x]<1.5*side)]  # We are only plotting for the area of interest.
            ry = [y for y in range(0, len(sim.coord_y)) if (-1.5*side<sim.coord_y[y] and sim.coord_y[y]<1.5*side)]
            rec = reconstruction()
            meaningful_res = [1000*rec.sd(sim.amplitude, (i,j), 5*side/float(dictInput['laser_positions'])) for i in rx for j in ry]
            median_res = np.median(meaningful_res)
            median_res_list.append(median_res)
            """
            min_res = np.min(meaningful_res)
            min_res_list.append(min_res)
            max_res = np.max(meaningful_res)
            max_res_list.append(max_res)
            try:
                q3_res = np.percentile(meaningful_res,75)
            except:
                q3_res = float('inf')
            """
            try:
                u10_res = np.percentile(meaningful_res, 90)
            except:
                u10_res = float('inf')
            try:
                l10_res = np.percentile(meaningful_res, 10)
            except:
                l10_res = float('inf')
            u10_res_list.append(u10_res)
            l10_res_list.append(l10_res)
            #q3_res_list.append(q3_res)
            side0 = float(dictInput['length'])
            width = float(dictInput['nose_end']) - float(dictInput['nose_start']) + i * (float(dictInput['nose_end_incr']) - float(dictInput['nose_start_incr']))
            height = float(dictInput['pattern_height'])+ i*float(dictInput['pattern_height_incr'])
            L_list.append(side/float(dictInput['radius'])/2)
            W_list.append(width)
            H_list.append(height)
            file_object.write(str(side/float(dictInput['radius'])))
            file_object.write(',')
            file_object.write(str(width))
            file_object.write(',')
            file_object.write(str(side/side0*l10_res))
            file_object.write(',')
            file_object.write(str(side/side0*median_res))
            file_object.write(',')
            file_object.write(str(side/side0*u10_res))
            file_object.write('\n')
        np.save(filename+"_u10.npy", u10_res_list)
        np.save(filename+"_l10.npy", l10_res_list)
        np.save(filename+"_median.npy", median_res_list)
    ax.set_ylabel('Position Resolution[μm]')
    ax.grid(b=True, which='major', axis='both', color='#000000', alpha=0.2, linestyle='-')
    l10_res_list = np.array(l10_res_list)
    u10_res_list = np.array(u10_res_list)
    median_res_list = np.array(median_res_list)
    if float(dictInput['length_incr']) != 0:
        ax.set_xlabel('Scale Factor L')
        ax.set_xlim(left=0, right=max(L_list)+0.1)
        #ax.text(1, 0, dictInput['length_incr']+'mm '+dictInput['num_sim']+'increments', verticalalignment='bottom', horizontalalignment='right', transform=ax.transAxes,color = 'black')
        ax.plot(L_list, u10_res_list, '-^', markersize=4, label='90th percentile, 7.5% noise', color='firebrick')
        ax.plot(L_list, median_res_list, '-o', markersize=4, label='median, 7.5% noise', color='black')
        ax.plot(L_list, l10_res_list, '-v', markersize=4, label='10th percentile, 7.5% noise', color='firebrick')
        ax.plot(L_list, u10_res_list/7.5, '-2', markersize=4, label='90th percentile, 1% noise', color='firebrick')
        ax.plot(L_list, median_res_list/7.5, '-.', markersize=4, label='median, 1% noise', color='black')
        ax.plot(L_list, l10_res_list/7.5, '-1', markersize=4, label='10th percentile, 1% noise', color='firebrick')
    elif float(dictInput['nose_start_incr']) != 0:
        ax.set_xlabel('W')
        ax.set_xlim(left=0, right=1)
        #ax.text(1, 0, str(float(dictInput['nose_start_incr'])-float(dictInput['nose_end_incr']))+'mm '+dictInput['num_sim']+'increments', verticalalignment='bottom', horizontalalignment='right', transform=ax.transAxes,color = 'black')
        ax.plot(W_list, u10_res_list, '-^', markersize=4, label='90th percentile, 7.5% noise', color='firebrick')
        ax.plot(W_list, median_res_list, '-o', markersize=4, label='median, 7.5% noise', color='black')
        ax.plot(W_list, l10_res_list, '-v', markersize=4, label='10th percentile, 7.5% noise', color='firebrick')
        ax.plot(W_list, u10_res_list/7.5, '-2', markersize=4, label='90th percentile, 1% noise', color='firebrick')
        ax.plot(W_list, median_res_list/7.5, '-.', markersize=4, label='median, 1% noise', color='black')
        ax.plot(W_list, l10_res_list/7.5, '-1', markersize=4, label='10th percentile, 1% noise', color='firebrick')
    elif float(dictInput['pattern_height_incr']) != 0:
        ax.set_xlabel('Amplitude')
        ax.set_xlim(left=0, right=1)
        #ax.text(1, 0, dictInput['pattern_height_incr']+'mm '+dictInput['num_sim']+'increments', verticalalignment='bottom', horizontalalignment='right', transform=ax.transAxes,color = 'black')
        ax.plot(H_list, u10_res_list, '-^', markersize=4, label='90th percentile, 7.5% noise', color='firebrick')
        ax.plot(H_list, median_res_list, '-o', markersize=4, label='median, 7.5% noise', color='black')
        ax.plot(H_list, l10_res_list, '-v', markersize=4, label='10th percentile, 7.5% noise', color='firebrick')
        ax.plot(H_list, u10_res_list/7.5, '-2', markersize=4, label='90th percentile, 1% noise', color='firebrick')
        ax.plot(H_list, median_res_list/7.5, '-.', markersize=4, label='median, 1% noise', color='black')
        ax.plot(H_list, l10_res_list/7.5, '-1', markersize=4, label='10th percentile, 1% noise', color='firebrick')
    #ax.plot(L_list, max_res_list,'-o',markersize = 4, label='maximal spot')
    #ax.fill_between(L_list, l10_res_list, u10_res_list, color = 'gray')
    ax.legend(loc='upper left', framealpha=1, fontsize='x-small')
    maxv = 1000  # min(np.amax(u10_res_list), 1000)
    #ax.ticklabel_format(axis = 'y', style = 'sci', scilimits = (0,0))
    ax.set_ylim(top=maxv, bottom=0)
    ax.ticklabel_format(style='sci')
    draw_pattern_embed(pad, ax, 0, 0.2, 0.2, 0.2)

def draw_amp_pos(SimAnode, pad, y_offset, ax):
    noise_level = 0.075
    #[ax.axvline(x, linestyle='-', color='red') for x in SimAnode.center_pads]
    if dictInput['shape'] == 'square':
        ax.axvline(-0.5*pad.side, color='red')
        ax.axvline(0.5*pad.side, color='red')
    n = int(dictInput['laser_positions'])
    #ax.title.set_text('amplitude vs ring positions'+' y='+str(y_offset*5*pad.side/n))
    ax.set_xlabel('x[mm]')
    ax.set_xlim([-0.5*pad.side, 1.5*pad.side])
    ax.set_ylabel('signal on the pad / total signal')
    ax.tick_params(which='both', width=3)
    ax.tick_params(which='major', length=5, color='b')
    loc = plticker.MultipleLocator(base=float(dictInput['length']))  # this locator puts ticks at square intervals
    ax.xaxis.set_major_locator(loc)
    ax.grid(b=True, which='major', axis='both', color='#000000', alpha=0.2, linestyle='-')
    initpoint = n*int(n/2)
    amp_indexed = SimAnode.amplitude[:, (initpoint+y_offset*n):(initpoint+(y_offset+1)*n)]
    ax.plot(SimAnode.coord_x, (amp_indexed[12]+noise_level), label='left pad', color='#D55E00')
    ax.plot(SimAnode.coord_x, (amp_indexed[13]+noise_level), label='right pad', color='#009E73')
    ax.legend(loc=1, framealpha=1, fontsize='medium')
    ax.set_ylim(bottom=0)
    draw_pattern_embed(pad, ax, 0, 0, 0.2, 0.2)
    ax.text(1, 0, plotDesc(), verticalalignment='bottom', horizontalalignment='right', transform=ax.transAxes, color='black')

def draw_amp_noise_ratio_pos(SimAnode, pad, y_offset, ax):
    noise_level = 0.075
    np.seterr(all='print')
    ax.set_yscale('log')
    #[ax.axvline(x, linestyle='-', color='red') for x in SimAnode.center_pads]
    if dictInput['shape'] == 'square':
        ax.axvline(-0.5*pad.side, color='red')
        ax.axvline(0.5*pad.side, color='red')
    #ax.title.set_text('amplitude+noise ratio vs ring positions'+' y='+str(y_offset*5*pad.side/float(dictInput['laser_positions'])))
    ax.set_xlabel('x[mm]')
    ax.set_xlim([-0.5*pad.side, 1.5*pad.side])
    ax.set_ylabel('ratio of signal on pads including noise')
    ax.tick_params(which='both', width=3)
    ax.tick_params(which='major', length=5, color='b')
    loc = plticker.MultipleLocator(base=float(dictInput['length']))  # this locator puts ticks at square intervals
    ax.xaxis.set_major_locator(loc)
    ax.grid(b=True, which='major', axis='both', color='#000000', alpha=0.2, linestyle='-')
    n = int(dictInput['laser_positions'])
    initpoint = n*int(n/2)
    amp_indexed = SimAnode.amplitude[:, (initpoint+y_offset*n):(initpoint+(y_offset+1)*n)]
    #ax.plot(SimAnode.coord_x, (amp_indexed[11]+noise_level)/(amp_indexed[12]+noise_level),label='right pad / left pad')
    ax.plot(SimAnode.coord_x, (amp_indexed[13]+noise_level)/(amp_indexed[12]+noise_level), label='right pad / left pad', color='#D55E00')
    #ax.plot(SimAnode.coord_x, (amp_indexed[12]+noise_level)/(amp_indexed[11]+noise_level),label='left pad / right pad')
    ax.plot(SimAnode.coord_x, (amp_indexed[12]+noise_level)/(amp_indexed[13]+noise_level), label='left pad / right pad', color='#009E73')
    ax.legend(loc=1, framealpha=1, fontsize='x-small')
    draw_pattern_embed(pad, ax, 0, 0, 0.2, 0.2)
    ax.text(1, 0, plotDesc(), verticalalignment='bottom', horizontalalignment='right', transform=ax.transAxes, color='black')

def draw():
    pad, sim = make()
    #fig, axes = plt.subplots(3,2,figsize=(7.5,10))
    #plt.setp(axes.flat, adjustable='box')
    plt.rcParams.update({'font.size': 12})
    fig1, ax1 = plt.subplots(figsize=(6, 6))
    draw_pattern(pad, ax1)
    id = plotID()
    save_plot(fig1, id+"_pattern")
    fig2, ax2 = plt.subplots(figsize=(6, 6))
    draw_radius(sim, pad, ax2)
    save_plot(fig2, id+"_shower")
    #draw_reconstructed(sim,pad,axes[1,0])
    fig3, ax3 = plt.subplots()
    draw_sd_colorplot(sim, pad, ax3)
    save_plot(fig3, id+"_sd_colorplot")
    fig4, ax4 = plt.subplots()
    draw_sd_pos(sim, pad, 0, ax4)
    save_plot(fig4, id+"_sd_xaxis")
    fig5, ax5 = plt.subplots()
    draw_amp_pos(sim, pad, 0, ax5)
    save_plot(fig5, id+"_amp_xaxis")
    fig6, ax6 = plt.subplots()
    draw_amp_noise_ratio_pos(sim, pad, 0, ax6)
    save_plot(fig6, id+"_amp_ratio_xaxis")
    fig7, ax7 = plt.subplots()
    draw_sd_colorplot_debug(sim, pad, ax7)
    save_plot(fig7, id+"_sd_colorplot_debug")
    #draw_amp_noise_ratio_pos(sim, pad, 2, axes[2,0])
    #draw_amp_noise_ratio_pos(sim, pad, 6, axes[2,1])
    #draw_res_pos(sim, pad,axes[2,0])
    #draw_res_central_pos(sim, pad, axes[2,1])
    #save_sd(sim, pad, 0,"lastrun")

def draw_step():
    pad, sims = make_step()
    plt.rcParams.update({'font.size': 12})
    fig1, ax1 = plt.subplots(figsize=(6, 6))
    id = plotID()+'_'+dictInput['length_incr']+'mm_'+dictInput['num_sim']+'steps'
    save_sd(sims, pad, ax1, id)
    save_plot(fig1, id+"_sd_dist")

def save_plot(fig, name):
    fig.savefig(name+'.jpg', bbox_inches='tight', dpi=200)

if __name__ == "__main__":
    # check if inputs are correct
    display()
    try:
        input_check()
    except Exception:
        print("Error: Please enter either y or n")
        sys.exit(1)
    if dictInput['compare'] == 'yes':
        #draw_multiple(float(dictInput['step']))
        plt.show()
    elif dictInput['step_sim'] == 'yes':
        draw_step()
    elif dictInput['lookup_table'] == 'yes':
        draw_reconstructed()
    else:
        draw()
    plt.show()
# I pledge my honor that I have abided by the Stevens Honor System. Nathaniel Gee

print("This program accepts a list of numbers and modifies the list by squaring each entry")

def square_the_number_list(numbers_list):
    # Square each entry in place and return the modified list
    # (the original returned the global `numbers` instead of the parameter)
    for n in range(len(numbers_list)):
        numbers_list[n] = numbers_list[n] ** 2
    return numbers_list

response = input("Enter the number of entries: ")
number_of_inputs = int(response)
numbers = []
for n in range(number_of_inputs):
    user_input = input("Enter a number: ")
    numbers.append(int(user_input))

print("Original list of numbers: " + str(numbers))
new_numbers = square_the_number_list(numbers)
print("Squared list of numbers: " + str(new_numbers))
def recFibo(n):
    if n == 0:
        return 0
    elif n == 1:
        return 1
    elif n > 1:
        return recFibo(n-1) + recFibo(n-2)

def iterFibo(n):
    if n == 0:
        return 0
    a = 1
    b = 1
    for i in range(3, n+1):
        c = a + b
        a, b = b, c
    return b

def main():
    for i in range(0, 20):
        print recFibo(i),
    print
    for i in range(0, 20):
        print iterFibo(i),

if __name__ == '__main__':
    main()
import numpy as np
import cv2
import os
import time

# Decorator that measures a function's execution time
def time_test(fn):
    def _wrapper(*args, **kwargs):
        start = time.perf_counter()  # time.clock() was deprecated and removed in Python 3.8
        result = fn(*args, **kwargs)
        print("%s() cost %s second" % (fn.__name__, time.perf_counter() - start))
        return result
    return _wrapper

@time_test
def Laplace(img, mode):
    if mode == 'four':
        kernal = np.array([0, -1, 0, -1, 4, -1, 0, -1, 0], dtype='int32')
    elif mode == 'eight':
        kernal = np.array([-1, -1, -1, -1, 8, -1, -1, -1, -1], dtype='int32')
    else:
        # The original printed self.__name__ here, but there is no `self`
        # in a plain function, which raised a NameError
        print('Laplace: Mode Error')
        return None
    #kernal = -kernal
    #kernel.shape = 3, 3
    # Zero-pad the image borders
    img_copy = np.zeros([img.shape[0] + 2, img.shape[1] + 2], dtype='int32')
    img_copy[1 : img.shape[0] + 1, 1 : img.shape[1] + 1] = img
    new_img = np.zeros(img.shape, dtype='int32')
    it = np.nditer(new_img, op_flags=['readwrite'], flags=['multi_index'])
    while not it.finished:
        x = it.multi_index[0]
        y = it.multi_index[1]
        neighbor = img_copy[x : x + 3, y : y + 3]
        # Cross-correlation of the 3x3 neighborhood with the kernel
        it[0] = np.correlate(neighbor.ravel(), kernal)
        it.iternext()
    # Clip to the valid [0, 255] range
    new_img += img
    new_img[new_img < 0] = 0
    new_img[new_img > 255] = 255
    return new_img.astype('uint8')

@time_test
def Unsharp_Masking(img, degree=1.0, kernal_size=[3, 3]):
    mean_filter = lambda kernal_size: np.ones(kernal_size, dtype='float') / (kernal_size[0]*kernal_size[1])
    kernal = mean_filter(kernal_size)
    kernal = kernal.ravel()
    shape = img.shape
    new_shape = [shape[0] + (kernal_size[0] - 1), shape[1] + (kernal_size[1] - 1)]
    # Zero-pad the region outside the image border
    img_copy = np.zeros(new_shape, dtype='float')
    img_copy[(kernal_size[0] - 1)//2 : new_shape[0] - (kernal_size[0] - 1)//2,
             (kernal_size[1] - 1)//2 : new_shape[1] - (kernal_size[1] - 1)//2] = img
    mask = img.astype('float')
    # Rotate the convolution kernel
    #kernal = kernal[::-1, ::-1]
    it = np.nditer(mask, op_flags=['readwrite'], flags=['multi_index'])
    while not it.finished:
        x = it.multi_index[0]
        y = it.multi_index[1]
        neighbor = img_copy[x : x + kernal_size[0], y : y + kernal_size[1]]
        it[0] = np.correlate(neighbor.ravel(), kernal)  # Cross-correlation
        it.iternext()
    mask = img - mask
    #return np.fabs(degree*mask).astype('uint8')
    new_img = (img + degree*mask).clip(0, 255).astype('uint8')
    return new_img

@time_test
def Gradiant():
    pass

if __name__ == '__main__':
    # Switch to the working directory that holds the sample images
    os.chdir(r'D:\application\Coding\Image Processing\CH03\DIP3E_CH03_Original_Images\DIP3E_Original_Images_CH03')
    img = cv2.imread('Fig0338(a)(blurry_moon).tif', 0)
    #new_img = Laplace(img, mode = 'eight')
    new_img = Unsharp_Masking(img, 3)
    # Display the sharpened image
    cv2.imshow('New Img', new_img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
# This program prints Hello, world!
print('Hello, world! my name is kelaiah')
# 5. Write a program that takes the dictionary used above, and returns some of the words using 1337sp34k
with open("C:\\Users\\Anna\\Desktop\\Learning Community\\poem.txt", "r") as infile, \
     open("C:\\Users\\Anna\\Desktop\\Learning Community\\1337sp34k.txt", "w") as outfile:
    for line in infile:
        line = line.replace("the", "1337sp34k")
        outfile.write(line)
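# A sketch closer to what the exercise text describes -- substituting
# characters through a dict (this mapping is illustrative, not the one
# from the earlier exercise, which is not shown here):
#
#   leet = {'e': '3', 'l': '1', 't': '7', 'a': '4', 'o': '0'}
#   line = ''.join(leet.get(ch, ch) for ch in line)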
import unittest

from katas.kyu_7.area_of_a_circle import circleArea


class CircleAreaTestCase(unittest.TestCase):
    def test_equals(self):
        self.assertEqual(circleArea(43.2673), 5881.25)

    def test_equals_2(self):
        self.assertEqual(circleArea(68), 14526.72)

    def test_false(self):
        self.assertFalse(circleArea(-1485.86))

    def test_false_2(self):
        self.assertFalse(circleArea(0))

    def test_false_3(self):
        self.assertFalse(circleArea('number'))
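# A minimal sketch of the circleArea implementation these tests assume:
# round pi*r^2 to 2 decimal places, and return False for any non-positive
# or non-numeric radius. This is an illustration, not the kata's actual
# reference solution.
from math import pi

def circleArea(r):
    # Reject non-numeric inputs (e.g. strings) and non-positive radii
    if not isinstance(r, (int, float)) or isinstance(r, bool) or r <= 0:
        return False
    return round(pi * r * r, 2)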
from app import pms_app
from flask_cors import CORS
import config
from db_config import db

CORS(pms_app)

with pms_app.app_context():
    db.create_all()

if __name__ == '__main__':
    pms_app.logger.info('Listening on http://127.0.0.1:5000/')
    pms_app.run(host=config.HOST, port=config.PORT, debug=config.DEBUG)
# Generated by Django 2.1 on 2018-10-12 08:51

from django.db import migrations
import tinymce.models


class Migration(migrations.Migration):

    dependencies = [
        ('mainapp', '0032_photoalbum_main_page'),
    ]

    operations = [
        migrations.AddField(
            model_name='organization',
            name='main_page_desc',
            field=tinymce.models.HTMLField(blank=True, null=True, verbose_name='Text at the bottom of the main page'),
        ),
    ]
import ssl

from socks import create_connection
from socks import PROXY_TYPE_SOCKS4
from socks import PROXY_TYPE_SOCKS5
from socks import PROXY_TYPE_HTTP

from imaplib import IMAP4
from imaplib import IMAP4_PORT
from imaplib import IMAP4_SSL_PORT

# Credits to example: https://gist.github.com/liuyun201990/1b3a3464bdbf53ac7e7041a149ab118e


class IMAP4Proxy(IMAP4):
    """
    IMAP service through a SOCKS proxy. PySocks module required.
    """

    PROXY_TYPES = {"socks4": PROXY_TYPE_SOCKS4,
                   "socks5": PROXY_TYPE_SOCKS5,
                   "http": PROXY_TYPE_HTTP}

    def __init__(self, host, port=IMAP4_PORT, proxy_host=None, proxy_port=None,
                 proxy_username=None, proxy_password=None, rdns=True,
                 username=None, password=None, proxy_type="http"):
        self.proxy_host = proxy_host
        self.proxy_port = proxy_port
        self.proxy_username = proxy_username
        self.proxy_password = proxy_password
        self.rdns = rdns
        self.username = username
        self.password = password
        self.proxy_type = IMAP4Proxy.PROXY_TYPES[proxy_type.lower()]

        IMAP4.__init__(self, host, port)

    def _create_socket(self):
        return create_connection((self.host, self.port),
                                 proxy_type=self.proxy_type,
                                 proxy_addr=self.proxy_host,
                                 proxy_port=self.proxy_port,
                                 proxy_rdns=self.rdns,
                                 proxy_username=self.proxy_username,
                                 proxy_password=self.proxy_password)


class IMAP4SSLProxy(IMAP4Proxy):
    def __init__(self, host='', port=IMAP4_SSL_PORT, keyfile=None,
                 certfile=None, ssl_context=None, proxy_host=None,
                 proxy_port=None, proxy_username=None, proxy_password=None,
                 rdns=True, username=None, password=None, proxy_type="socks5"):
        if ssl_context is not None and keyfile is not None:
            raise ValueError("ssl_context and keyfile arguments are mutually "
                             "exclusive")
        if ssl_context is not None and certfile is not None:
            raise ValueError("ssl_context and certfile arguments are mutually "
                             "exclusive")

        self.keyfile = keyfile
        self.certfile = certfile
        if ssl_context is None:
            ssl_context = ssl._create_stdlib_context(certfile=certfile,
                                                     keyfile=keyfile)
        self.ssl_context = ssl_context

        IMAP4Proxy.__init__(self, host, port, proxy_host=proxy_host,
                            proxy_port=proxy_port, proxy_username=proxy_username,
                            proxy_password=proxy_password, rdns=rdns,
                            username=username, password=password,
                            proxy_type=proxy_type)

    def _create_socket(self):
        sock = IMAP4Proxy._create_socket(self)
        server_hostname = self.host if ssl.HAS_SNI else None
        return self.ssl_context.wrap_socket(sock, server_hostname=server_hostname)

    def open(self, host='', port=IMAP4_PORT):
        IMAP4Proxy.open(self, host, port)
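# A hedged usage sketch for the classes above (the hostnames, ports and
# credentials below are placeholders, not real endpoints):
#
#   imap = IMAP4SSLProxy('imap.example.com', 993,
#                        proxy_host='127.0.0.1', proxy_port=1080,
#                        proxy_type='socks5')
#   imap.login('user@example.com', 'password')
#   imap.select('INBOX')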
import sys
import os
import fam
import random
import subprocess
import shutil
import time
import saved_metrics
sys.path.insert(0, 'scripts')
sys.path.insert(0, os.path.join("tools", "families"))
import experiments as exp
import run_raxml_supportvalues

def generate_scheduler_commands_file(datadir, subst_model, tree_number, cores, output_dir, samples):
    results_dir = os.path.join(output_dir, "results")
    samples_dir = os.path.join(output_dir, "samples")
    os.makedirs(results_dir)
    if (samples != tree_number):
        os.makedirs(samples_dir)
    scheduler_commands_file = os.path.join(output_dir, "commands.txt")
    #family_dimensions = run_raxml_supportvalues.get_family_dimensions(os.path.abspath(datadir), subst_model)
    with open(scheduler_commands_file, "w") as writer:
        for family in fam.get_families_list(datadir):
            prefix = os.path.join(results_dir, family)
            alignment = fam.get_alignment(datadir, family)
            tree_path = fam.get_raxml_multiple_trees(datadir, "GTR+G", family, starting=tree_number)
            if (tree_number != samples):
                lines = open(tree_path).readlines()
                random.shuffle(lines)
                tree_path = os.path.join(samples_dir, family + ".newick")
                with open(tree_path, "w") as tree_writer:
                    for i in range(0, samples):
                        tree_writer.write(lines[i].strip())
                        if (i != samples - 1):
                            tree_writer.write("\n")
            command = []
            command.append(family)
            command.append("1")
            #if (family in family_dimensions):
            #    dim = family_dimensions[family][1] * family_dimensions[family][0]
            #    command.append(str(dim))
            #else:
            command.append("1")
            command.append(tree_path)
            command.append(alignment)
            command.append(subst_model)
            command.append(prefix)
            writer.write(" ".join(command) + "\n")
    return scheduler_commands_file

def extract_trees(datadir, output_dir, subst_model, tree_number, samples):
    key = "treecombination" + str(tree_number)
    if (samples != tree_number):
        key = key + "_s" + str(samples)
    results_dir = os.path.join(output_dir, "results")
    families_dir = os.path.join(datadir, "families")
    for family in os.listdir(families_dir):
        prefix = os.path.join(results_dir, family)
        src = prefix + ".newick"
        dest = fam.build_gene_tree_path(datadir, subst_model, family, key)
        shutil.copyfile(src, dest)

def run_treecombination(datadir, subst_model, tree_number, cores, samples):
    key = "treecombination" + str(tree_number)
    key = key + "_s" + str(samples)
    output_dir = fam.get_run_dir(datadir, subst_model, key)
    do_run = True
    if (do_run):
        shutil.rmtree(output_dir, True)
        os.makedirs(output_dir)
        scheduler_commands_file = generate_scheduler_commands_file(datadir, subst_model, tree_number, cores, output_dir, samples)
        start = time.time()
        exp.run_with_scheduler(exp.treecombine_exec, scheduler_commands_file, "onecore", cores, output_dir, "logs.txt")
        saved_metrics.save_metrics(datadir, fam.get_run_name(key, subst_model), (time.time() - start), "runtimes")
    extract_trees(datadir, output_dir, subst_model, tree_number, samples)

if __name__ == "__main__":
    if (len(sys.argv) < 5):
        print("syntax: python " + os.path.basename(__file__) + " datadir subst_model tree_number cores [samples]")
        sys.exit(1)
    datadir = sys.argv[1]
    subst_model = sys.argv[2]
    tree_number = int(sys.argv[3])
    cores = int(sys.argv[4])
    samples = tree_number
    random.seed(42)
    if (len(sys.argv) > 5):
        samples = int(sys.argv[5])
    dataset = os.path.basename(os.path.normpath(datadir))
    if (dataset == datadir):
        datadir = fam.get_datadir(dataset)
    run_treecombination(datadir, subst_model, tree_number, cores, samples)
from selenium import webdriver
import random
import requests
from bs4 import BeautifulSoup

login_ip = [
    "http://210.38.137.125:8016/(f1e4b2j0meyp0u45omq5pbb0)/default2.aspx",
    "http://210.38.137.124:8016/(nxqwnjilyquwh33rb0ajh4fq)/default2.aspx"
]

driver = webdriver.Chrome()
random_ip = random.choice(login_ip)
validate_img = random_ip.rsplit("/", 1)[0] + "/CheckCode.aspx"
#
driver.get(random_ip)
driver.implicitly_wait(3)
username = driver.find_element_by_id("txtUserName")
password = driver.find_element_by_id("TextBox2")
username.clear()
username.send_keys("201411701418")
driver.execute_script('document.getElementById("TextBox2").style="display: inline-block; visibility: visible;"')
driver.execute_script('document.getElementById("TextBox2").contentEditable = true')
password.clear()
password.send_keys("")

html = requests.get(random_ip)
soup = BeautifulSoup(html.text, 'html.parser')
s = soup.find('img', attrs={'id': 'icode'})['src']
print("".join(s))

# 2.0 Submit the login form directly with requests instead of Selenium
# def get_data(url, username, password):
#     html = requests.get(url)
#     soup = BeautifulSoup(html.text, 'html.parser')
#     __VIEWSTATE = soup.find_all('input', attrs={'name': '__VIEWSTATE'})['value']
#
#     data = {}
#     data['__VIEWSTATE'] = __VIEWSTATE
#     data['txtUserName'] = username
#     data['TextBox2'] = password
#     data['txtSecretCode'] = ''
#
#     return data

# import os
# import subprocess
#
# def image_to_string(img, cleanup=True, plus=''):
#     subprocess.check_output('tesseract ' + img + ' ' + img + ' ' + plus, shell=True)
#     text = ''
#     with open(img + '.txt', 'r') as f:
#         text = f.read().strip()
#     if cleanup:
#         os.remove(img + '.txt')
#
#     return text
#
#
# print(image_to_string())

# from PIL import Image
# import pytesseract as ocr
# from io import BytesIO
#
# img = Image.open(BytesIO(requests.get(s).content))
# img.load()
# img.show()
""" Fixtures for metrics """ from __future__ import absolute_import, division, unicode_literals # Remove this when changing over to object model # as this is repeated within the check_template metrics_common_template = { "check": { "state": { "running": "false", "killed": "false", "configured": "true", "disabled": "false", "target_ip": "23.253.6.64", "last_run": { "@now": "1422323039.361", "#text": "1422323039.357" }, "runtime": "0.958", "availability": "available", "state": "good", "status": "ok", "metrics": [ { "@type": "inprogress" }, { "@type": "current", "@timestamp": "1422323039.357" } ] } } } metrics = { "selfcheck": { "metric": [ {"@name": "version", "@type": "s", "#text": "ckdev-stage.8e17ed475b8a80103d11ec29e5e122fe256f8bf7.1416497041"}, {"@name": "check_cnt", "@type": "i", "#text": "5"}, {"@name": "transient_cnt", "@type": "i", "#text": "0"}, {"@name": "uptime", "@type": "l", "#text": "165"}, {"@name": "metrics_collected", "@type": "L", "#text": "321"}, {"@name": "feed_bytes", "@type": "l", "#text": "23817"}, {"@name": "default_queue_threads", "@type": "i", "#text": "10"}, {"@name": "checks_run", "@type": "L", "#text": "50"} ] }, "ping_icmp": { "metric": [ {"@name": "available", "@type": "n", "#text": "0.000000000000e+00"}, {"@name": "count", "@type": "i", "#text": "2"}, {"@name": "maximum", "@type": "n"}, {"@name": "minimum", "@type": "n"}, {"@name": "average", "@type": "n"} ] }, "tcp": { "metric": [ {"@name": "banner", "@type": "s", "#text": "test"}, {"@name": "test_banner_match", "@type": "s", "#text": "test"}, {"@name": "body_match", "@type": "s", "#text": "test_body_match"}, {"@name": "duration", "@type": "i", "#text": "30"}, {"@name": "tt_body", "@type": "i", "#text": "1"}, {"@name": "tt_connect", "@type": "i", "#text": "2"}, {"@name": "tt_firstbyte", "@type": "i", "#text": "3"} ] }, "http": { "metric": [ { "@name": "cert_end", "@type": "I", "#text": "1471910399" }, { "@name": "truncated", "@type": "I", "#text": "0" }, { "@name": "cert_subject", "@type": "s", "#text": ("\/C=US\/ST=Texas\/L=San Antonio\/O=Rackspace US," " Inc.\/OU=Marketing\/CN=www.rackspace.com") }, { "@name": "cert_start", "@type": "I", "#text": "1415059200" }, { "@name": "cert_issuer", "@type": "s", "#text": ("\/C=US\/O=Symantec Corporation\/OU=Symantec" " Trust Network\/CN=Symantec Class 3 Secure Server CA - G4") }, { "@name": "code", "@type": "s", "#text": "200" }, { "@name": "tt_connect", "@type": "I", "#text": "72" }, { "@name": "cert_end_in", "@type": "i", "#text": "49587360" }, { "@name": "tt_firstbyte", "@type": "I", "#text": "957" }, { "@name": "bytes", "@type": "i", "#text": "44779" }, { "@name": "cert_subject_alternative_names", "@type": "s", "#text": ("wwwp.wip.rackspace.com, ord.wwwp.wip.rackspace.com," " iad.wwwp.wip.rackspace.com, admin.rackspace.com," " iad.wip.rackspace.com, ord.wip.rackspace.com, www.rackspace.com") }, { "@name": "cert_error", "@type": "s", "#text": ("No certificate present., host header " "does not match CN or SANs in certificate") }, { "@name": "duration", "@type": "I", "#text": "957" } ] } }
# genmultiplex.py

import threading, Queue
from genqueue import *
from gencat import *

def multiplex(sources):
    in_q = Queue.Queue()
    consumers = []
    for s in sources:
        thr = threading.Thread(target=sendto_queue, args=(s, in_q))
        thr.start()
        consumers.append(genfrom_queue(in_q))
    return gen_cat(consumers)

if __name__ == '__main__':
    import follow
    foo_log = follow.follow(open("run/foo/access-log"))
    bar_log = follow.follow(open("run/bar/access-log"))
    for line in multiplex([foo_log, bar_log]):
        print line
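# The three helpers imported above (sendto_queue, genfrom_queue, gen_cat)
# come from the genqueue and gencat modules, which are not shown here.
# A minimal sketch of plausible implementations, inferred only from how
# multiplex() uses them -- the bodies below are assumptions, not the
# originals:
def sendto_queue(source, the_q):
    # Drain a generator into a queue, then signal completion
    for item in source:
        the_q.put(item)
    the_q.put(StopIteration)

def genfrom_queue(the_q):
    # Yield items from a queue until the completion sentinel arrives
    while True:
        item = the_q.get()
        if item is StopIteration:
            break
        yield item

def gen_cat(sources):
    # Chain several generators end to end
    for src in sources:
        for item in src:
            yield item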
from __future__ import print_function

from LayerProvider import *
import copy


class NeuralNet(object):
    """
    Class that stores network & layers

    :type test_input: 4D tensor
    :param test_input: Real input which will be used to calculate outputs of each layer

    :type test_output: 4D tensor
    :param test_output: Real output we expect to come out at the last layer of the network

    net_opts: global options such as start learning rate, rng
        ['rng']: random number generator, used for filter init
        ['l1_learning_rate']: learning rate for the first 'updatable' layer

    layer_opts: layer-specific options
        Specific options like conv filter_shape should be decided when constructing the network

    content: dict that contains actual layers
    """

    def __init__(self, test_input=None, test_output=None):
        """
        :type input: theano.tensor.tensor4
        :param input: input of the CNN, if it is not provided then the network will use a symbolic input instead
        """
        self.net_opts = {}
        self.layer_opts = {}

        # Setting default options for net_opts
        self.net_opts['rng_seed'] = 1231
        self.net_opts['rng'] = np.random.RandomState(self.net_opts['rng_seed'])
        self.net_opts['l1_learning_rate'] = np.asarray(0.001, theano.config.floatX)

        # Default options for softmax layers
        self.layer_opts['softmax_norm_dim'] = 1

        # Default options for relu layers
        self.layer_opts['relu_alpha'] = 0.01

        # Default options for elu layers
        self.layer_opts['elu_alpha'] = 1

        # Default l2 term
        self.layer_opts['l2_term'] = 0.0005

        # Default dropping rate for dropout
        self.layer_opts['drop_rate'] = 0

        # Network name for saving
        self.name = 'netnet'

        # Train mode or not, used for layers like DropOut
        self.mode = theano.shared(1)

        # The content dictionary will store actual layers (LayerProvider)
        self.content = {}

        self.input = []
        if (test_input != None):
            self.input.append(test_input)
        else:
            self.input.append(T.tensor4('x', dtype=theano.config.floatX))
        self.content['input'] = InputLayer(self.input[0])

        self.output = []
        self.weight = []
        if (test_output != None):
            self.output.append(test_output)
        else:
            self.output.append(T.tensor4('y', dtype=theano.config.floatX))
        self.weight.append(T.tensor4('weight', dtype=theano.config.floatX))

        self.index = T.lvector('index')
        self.batch_size = T.scalar('batch_size')

        # For SGDR
        self.reset_opts = {}
        self.reset_opts['min_lr'] = np.asarray(0.0001, dtype=theano.config.floatX)
        self.reset_opts['max_lr'] = self.net_opts['l1_learning_rate']
        self.reset_opts['t_mult'] = 2
        self.t_cur = 0
        self.t_i = 1

        self.train_function = None
        self.test_function = None
        self.val_function = None

    # TODO: Fix this function since it will be stuck if there is no loss (?)
    def simpleprint(self):
        """
        Print a net
        """
        l = 1
        stop_print = False
        print('input', end="")
        while not stop_print:
            for key, value in self.content.iteritems():
                if (hasattr(value, 'topo_order')):
                    if (value.topo_order == l):
                        if (l == len(self.content)):
                            print('', end="")
                            stop_print = True
                        l += 1
                        print(' -> %s' % key, end="")
        print('')

    def InitLR(self, factor):
        """
        Generate varied learning rates for different layers
        Only layers with .updatable attribute = true are included

        :type factor: float
        :param factor: next layer's lr = current layer's lr * factor

        Note that only layers that are 'updatable' count toward this list
        """
        self.net_opts['lr'] = []
        for key, value in self.content.iteritems():
            if (hasattr(value, 'updatable')):
                if (value.updatable == True):
                    # Note that the same lr applies to all params (W, b) of a layer
                    l_factor = np.asarray(factor**(value.update_order-1), dtype=theano.config.floatX)
                    self.net_opts['lr'] += [theano.shared(self.net_opts['l1_learning_rate'] * l_factor)
                                            for i in range(0, len(value.param))]
        # This won't be affected by LR restart
        self.const_lr = copy.deepcopy(self.net_opts['lr'])

    def InitTrainFunction(self, update_rule, real_input, expected_output,
                          additional_output=None, weight=None, additional_output_obj=None):
        """
        Generate the feed forward function for the train process

        :type update_rule: List
        :param update_rule: List of tuples for updating params

        :type real_input: 4D Tensor or list of 4D Tensor
        :param real_input: entire training dataset, shaped as a 4D theano shared tensor.
            If the network takes in multiple inputs, real_input should be a list of 4D Tensor

        :type expected_output: 4D tensor
        :param expected_output: label, for now the label dim should be 1

        :type additional_output: list of basestring
        :param additional_output: tell the function to return additional output at specific layers

        :type weight: 4D tensor
        :param weight: sample weight
        """
        self.train_function = None

        # Check input consistency
        if (type(real_input) != list):
            real_input = [real_input]
        if (type(expected_output) != list):
            expected_output = [expected_output]
        assert len(real_input) == len(self.input), \
            "The network requires %d inputs, function argument provided %d" % (
                len(self.input), len(real_input))
        assert len(expected_output) == len(self.output), \
            "The network produces %d outputs, function argument provided %d" % (
                len(self.output), len(expected_output))

        # For layers that depend on batch size
        givens = {}
        for key, value in self.content.iteritems():
            # self.index.shape[0] is batch size
            if (type(value) == LSTM):
                givens = {
                    value.h_m1_sym: T.zeros((1, self.index.shape[0], value.W_shape[1]), dtype=theano.config.floatX),
                    value.c_m1_sym: T.zeros((1, self.index.shape[0], value.W_shape[1]), dtype=theano.config.floatX)
                }
            if (type(value) == LSTM_Attend):
                givens = {
                    value.z_m1_sym: T.zeros((1, self.index.shape[0], value.Z_shape[0]), dtype=theano.config.floatX)
                }

        function_output = [self.content['cost'].output]
        if (additional_output != None):
            function_output += [self.content[l].output for l in additional_output]
        if (additional_output_obj):
            if (type(additional_output_obj) != list):
                additional_output_obj = [additional_output_obj]
            function_output += additional_output_obj
        print("DONE function_output")

        for i in range(len(self.input)):
            givens.update({
                self.input[i]: real_input[i][self.index, :, :, :]
            })
        print("DONE input")

        for i in range(len(self.output)):
            givens.update({
                self.output[i]: expected_output[i][self.index, :, :, :]
            })
        print("DONE output")

        if (weight == None):
            weight = []
            for i in range(len(expected_output)):
                weight += [theano.shared(np.ones_like(expected_output[i].eval()))]
        elif (type(weight) != list):
            weight = [weight]
        for i in range(len(self.weight)):
            givens.update({
                self.weight[i]: weight[i][self.index, :, :, :]
            })
        print("DONE weight")

        self.train_function = theano.function(
            [self.index],
            outputs=function_output + self.output,
            updates=update_rule,
            givens=givens
        )

    def InitTestFunction(self, test_input, test_output):
        """
        Generate the feed forward function for the test process
        It's simply a train_function without the updating part

        :type test_input: 4D Tensor or list of 4D Tensor
        :param test_input: entire testing dataset, shaped as a 4D theano shared tensor.
            If the network takes in multiple inputs, test_input should be a list of 4D Tensor

        :type test_output: 4D tensor
        :param test_output: label, for now the label dim should be 1
        """
        # Check input consistency
        if (type(test_input) != list):
            test_input = [test_input]
        if (type(test_output) != list):
            test_output = [test_output]
        assert len(test_input) == len(self.input), \
            "The network requires %d inputs, function argument provided %d" % (
                len(self.input), len(test_input))
        assert len(test_output) == len(self.output), \
            "The network produces %d outputs, function argument provided %d" % (
                len(self.output), len(test_output))

        # Special inputs that depend on batch size, therefore we need them to be given before training/testing
        givens = {}
        for key, value in self.content.iteritems():
            # self.index.shape[0] is batch size
            if (type(value) == LSTM):
                givens = {
                    value.h_m1_sym: T.zeros((1, self.index.shape[0], value.W_shape[1]), dtype=theano.config.floatX),
                    value.c_m1_sym: T.zeros((1, self.index.shape[0], value.W_shape[1]), dtype=theano.config.floatX)
                }

        for i in range(len(self.input)):
            givens.update({
                self.input[i]: test_input[i][self.index, :, :, :]
            })
        for i in range(len(self.output)):
            givens.update({
                self.output[i]: test_output[i][self.index, :, :, :]
            })

        self.test_function = theano.function(
            [self.index],
            outputs=self.content['cost'].output,
            givens=givens
        )

    # Duplicated codes
    def InitValFunction(self, val_input, val_output, additional_output=None,
                        weight=None, additional_output_obj=None):
        """
        Generate the feed forward function for the validation process
        It's simply a train_function without the updating part

        :type val_input: 4D Tensor or list of 4D Tensor
        :param val_input: entire validation dataset, shaped as a 4D theano shared tensor.
            If the network takes in multiple inputs, val_input should be a list of 4D Tensor

        :type val_output: 4D tensor
        :param val_output: label, for now the label dim should be 1

        :type additional_output: list of basestring
        :param additional_output: tell the function to return additional output at specific layers

        :type weight: 4D tensor
        :param weight: sample weight
        """
        # Check input consistency
        if (type(val_input) != list):
            val_input = [val_input]
        if (type(val_output) != list):
            val_output = [val_output]
        assert len(val_input) == len(self.input), \
            "The network requires %d inputs, function argument provided %d" % (
                len(self.input), len(val_input))
        assert len(val_output) == len(self.output), \
            "The network produces %d outputs, function argument provided %d" % (
                len(self.output), len(val_output))

        # Special inputs that depend on batch size, therefore we need them to be given before training/testing
        givens = {}
        for key, value in self.content.iteritems():
            # self.index.shape[0] is batch size
            if (type(value) == LSTM):
                givens = {
                    value.h_m1_sym: T.zeros((1, self.index.shape[0], value.W_shape[1]), dtype=theano.config.floatX),
                    value.c_m1_sym: T.zeros((1, self.index.shape[0], value.W_shape[1]), dtype=theano.config.floatX)
                }
            if (type(value) == LSTM_Attend):
                givens = {
                    value.z_m1_sym: T.zeros((1, self.index.shape[0], value.Z_shape[0]), dtype=theano.config.floatX)
                }

        for i in range(len(self.input)):
            givens.update({
                self.input[i]: val_input[i][self.index, :, :, :]
            })
        for i in range(len(self.output)):
            givens.update({
                self.output[i]: val_output[i][self.index, :, :, :]
            })

        function_output = [self.content['cost'].output]
        if (additional_output != None):
            function_output += [self.content[l].output for l in additional_output]
        if (additional_output_obj):
            if (type(additional_output_obj) != list):
                additional_output_obj = [additional_output_obj]
            function_output += additional_output_obj

        if (weight == None):
            weight = []
            for i in range(len(val_output)):
                weight += [theano.shared(np.ones_like(val_output[i].eval()))]
        elif (type(weight) != list):
            weight = [weight]
        for i in range(len(self.weight)):
            givens.update({
                self.weight[i]: weight[i][self.index, :, :, :]
            })

        self.val_function = theano.function(
            [self.index],
            outputs=function_output + [val_output[i][self.index, :, :, :] for i in range(len(self.output))],
            givens=givens
        )

    def FirstParamIndex(self):
        """
        Return the index of the param of the first layer in the network
        This needs to be done because while creating params using a dictionary,
        the order of the params is not kept accordingly to the network
        TODO: Find a better way to handle this. This is just plain stupid
        """
        dict_index = 0
        for key, value in self.content.iteritems():
            if (hasattr(value, 'updatable')):
                if (value.updatable == True):
                    # The first updatable layer
                    if (value.update_order == 1):
                        return dict_index
                    dict_index += len(value.param)
        return -1

    # TODO: These are still fairly simplistic, if a network has branches then it would not work
    def GetOutputShape(self, input_shape):
        """
        Get output shape up until the last layer of the network given input_shape

        :type input_shape: tuple
        :param input_shape: shape of the input
        """
        topo_index = 1
        output_shape = list(input_shape)
        while True:
            for key, value in self.content.iteritems():
                if (value.topo_order == topo_index):
                    if (type(value) == ConvLayer):
                        w_factor = 0
                        h_factor = 0
                        if (type(value.border_mode) == int):
                            h_factor = value.border_mode
                            w_factor = value.border_mode
                        if (type(value.border_mode) == tuple):
                            h_factor = value.border_mode[0]
                            w_factor = value.border_mode[1]
                        output_shape[1] = value.filter_shape[0]
                        output_shape[2] = output_shape[2] - value.filter_shape[2] + 1 + h_factor*2
                        output_shape[3] = output_shape[3] - value.filter_shape[3] + 1 + w_factor*2
                    if (type(value) == Pool2DLayer):
                        output_shape[2] = np.int64(np.ceil(float(output_shape[2])/float(value.pool_size[0])))
                        output_shape[3] = np.int64(np.ceil(float(output_shape[3])/float(value.pool_size[1])))
                        breakpoint = 1
                    if (type(value) == FCLayer):
                        output_shape[1] = value.layer_shape[1]
                        output_shape[2] = 1
                        output_shape[3] = 1
            topo_index += 1
            if (topo_index > len(self.content)):
                return tuple(output_shape)

    def NNCopy(self):
        """
        Copy src Neural Network to a dest Neural Network without the train_function and val_function
        This is to save memory when we have to save a network/load a pretrained network
        """
        dest = ShowTellNet()
        dest.input = copy.copy(self.input)
        dest.index = copy.copy(self.index)
        dest.output = copy.copy(self.output)
        dest.content = copy.copy(self.content)
        dest.net_opts = copy.copy(self.net_opts)
        dest.layer_opts = copy.copy(self.layer_opts)
        dest.weight = copy.copy(self.weight)
        dest.reset_opts = copy.copy(self.reset_opts)
        dest.train_function = None
        dest.val_function = None
        dest.test_function = None
        dest.name = copy.copy(self.name)
        return dest


class ConvNeuralNet(NeuralNet):
    """
    Class that stores network & layers

    :type test_input: 4D tensor
    :param test_input: Real input which will be used to calculate outputs of each layer

    :type test_output: 4D tensor
    :param test_output: Real output we expect to come out at the last layer of the network

    net_opts: global options such as start learning rate, rng
        ['rng']: random number generator, used for filter init
        ['l1_learning_rate']: learning rate for the first 'updatable' layer

    layer_opts: layer-specific options
        Specific options like conv filter_shape should be decided when constructing the network

    content: dict that contains actual layers
    """

    def __init__(self, test_input=None, test_output=None):
        """
        :type input: theano.tensor.tensor4
        :param input: input of the CNN, if it is not provided then the network will use a symbolic input instead
        """
        NeuralNet.__init__(self, test_input, test_output)

        # Setting default options for layer_opts
        # Default options for conv layers
        self.layer_opts['border_mode'] = 'valid'
        self.layer_opts['conv_stride'] = (1, 1)
        self.layer_opts['updatable'] = True

        # Default options for pooling layers
        self.layer_opts['pool_stride'] = (2, 2)
        self.layer_opts['pool_padding'] = (0, 0)
        self.layer_opts['pool_mode'] = 'max'
        self.layer_opts['pool_size'] = (2, 2)
        self.layer_opts['ignore_border'] = False

        # Network name for saving
        self.name = 'convnetnet'

        self.index = T.lvector('index')

        # Related functions
        self.train_function = None
        self.val_function = None


class ShowTellNet(ConvNeuralNet):
    """
    Class that stores network & layers.
    This network was modified so that it can take two types of inputs (images and sentences)

    :type test_input_image: 4D tensor
    :param test_input_image: Real input which will be used to calculate outputs of each layer

    :type test_input_sentence: 4D tensor
    :param test_input_sentence: Sentences

    :type test_output: 4D tensor
    :param test_output: Real output we expect to come out at the last layer of the network

    net_opts: global options such as start learning rate, rng
        ['rng']: random number generator, used for filter init
        ['l1_learning_rate']: learning rate for the first 'updatable' layer

    layer_opts: layer-specific options
        Specific options like conv filter_shape should be decided when constructing the network

    content: dict that contains actual layers
    """

    def __init__(self, test_input_image=None, test_input_sentence=None, test_output=None):
        ConvNeuralNet.__init__(self, test_input_image, test_output)

        # Clear input
        self.input = []
        if (test_input_image != None):
            self.input.append(test_input_image)
        else:
            self.input.append(T.tensor4('x_img', dtype=theano.config.floatX))
        self.content['input_img'] = InputLayer(self.input[0])

        if (test_input_sentence != None):
            self.input.append(test_input_sentence)
        else:
            self.input.append(T.tensor4('x_sen', dtype=theano.config.floatX))
        self.content['input_sen'] = InputLayer(self.input[1])

        # Set word embedding params
        self.layer_opts['num_emb'] = 350

        # Set LSTM params
        self.layer_opts['num_lstm_node'] = 128

        # Set LSTM Attend params
        self.layer_opts['num_region'] = 196
        self.layer_opts['num_dimension_feature'] = 512
        self.layer_opts['context_dim'] = 300
        self.layer_opts["num_hidden_node"] = 1024
        self.layer_opts["num_deep_out_node"] = 1024
        self.layer_opts["n_word"] = 2000

#class ShowAttendTellNet(ConvNeuralNet):
"""
Class that stores network and layers.
This network was modified so that it can take three types of inputs (images and sentences)
"""
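# A minimal construction sketch for the classes above (assumes LayerProvider
# and a configured Theano install are importable; no layers are actually
# wired up here, so this only illustrates the option dictionaries):
#
#   net = ShowTellNet()
#   net.net_opts['l1_learning_rate'] = np.asarray(0.01, theano.config.floatX)
#   net.layer_opts['num_lstm_node'] = 256
#   # ... populate net.content with layers from LayerProvider, then:
#   # net.InitLR(0.9)
#   # net.InitTrainFunction(update_rule, train_x, train_y)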
#
# @lc app=leetcode.cn id=39 lang=python3
#
# [39] Combination Sum
#

# @lc code=start
from typing import List  # needed when running outside the LeetCode judge


class Solution:
    def combinationSum(self, candidates: List[int], target: int) -> List[List[int]]:
        candidates.sort()
        res = []

        def backtrack(candidates, track, track_sum, target):
            if track_sum == target:
                res.append(track[:])
                return
            elif track_sum > target:
                return
            for i in range(len(candidates)):
                track.append(candidates[i])
                track_sum += candidates[i]
                backtrack(candidates[i:], track, track_sum, target)
                track.pop()
                track_sum -= candidates[i]

        backtrack(candidates, [], 0, target)
        return res
# @lc code=end
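# A quick illustration of the backtracking above (hypothetical inputs):
# passing candidates[i:] into the recursive call lets each number be
# reused, while never revisiting earlier numbers, which avoids duplicate
# combinations.
#
#   Solution().combinationSum([2, 3, 6, 7], 7)  ->  [[2, 2, 3], [7]]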
import sys
if sys.version_info[0] < 3:
    raise Exception("Python 3 not detected.")

import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
from scipy import io
from sklearn.metrics import accuracy_score
from save_csv import results_to_csv

for data_name in ["spam"]:
    data = io.loadmat("data/%s_data.mat" % data_name)
    print("\nloaded %s data!" % data_name)
    fields = "test_data", "training_data", "training_labels"
    for field in fields:
        print(field, data[field].shape)

def permute_dictionaries(training_data, training_labels):
    # takes two arrays and permutes both while keeping consistency
    perm = np.random.RandomState(seed=100).permutation(training_data.shape[0])
    return (training_data[perm], training_labels[perm])

spam_total_data = io.loadmat("data/%s_data.mat" % "spam")
spam_training_data = spam_total_data["training_data"]
spam_training_data_labels = spam_total_data["training_labels"]
spam_training_data, spam_training_data_labels = permute_dictionaries(spam_training_data, spam_training_data_labels)
spam_test_data = spam_total_data["test_data"]

print("train")
print(spam_training_data)
print("test")
print(spam_test_data)
print("spam_training_data", spam_training_data.shape)
print("spam_training_data_labels", spam_training_data_labels.shape)
print("spam_test_data", spam_test_data.shape)

def problem6(training_data, training_data_labels, test_data, C_Value=0):
    classifier = svm.LinearSVC(random_state=40, C=10 ** C_Value)
    classifier.fit(training_data, np.ravel(training_data_labels))
    predict_training_results = classifier.predict(training_data)
    print(accuracy_score(np.ravel(training_data_labels), np.ravel(predict_training_results)))
    predict_test_results = classifier.predict(test_data)
    results_to_csv(predict_test_results)

problem6(spam_training_data, spam_training_data_labels, spam_test_data, 1)
## gfal 2.0 tools core logic of copy
## @author Adrien Devresse <adevress@cern.ch> CERN
## @license GPLv3
##

import gfal2
import sys

from gfal2_utils_arg_parser import *
from gfal2_utils_parameters import applys_option
from gfal2_utils_verbose import set_verbose_mode
from gfal2_utils_errors import gfal_catch_gerror


def print_file_list_simple(fdesc, f_list):
    # Note: the original applied % outside of write(), which raised a TypeError
    [fdesc.write("%s\n" % file_name) for file_name in f_list]


def filter_hiden(f_list):
    def filter_f_list(mfile):
        # `is not` compares identity, not equality; use != for the dot check
        return (mfile[0] != '.')
    return filter(filter_f_list, f_list)


def setup_verbose_from_opt(params):
    vlvl = params.verbose
    if(vlvl > 0):
        print "verbose mode"


def create_gfal_cp_parser():
    p = create_basic_parser()
    p.add_argument('-f', "--force", action='store_true',
                   help="if destination file(s) cannot be overwritten, delete it and try again")
    p.add_argument('-n', "--nbstreams", nargs=1, type=int, default=0,
                   help="specify the maximum number of parallel streams to use for the copy")
    p.add_argument('-s', "--src-spacetoken", nargs=1, type=str, default="",
                   help="source spacetoken to use for the transfer")
    p.add_argument('-S', "--dst-spacetoken", nargs=1, type=str, default="",
                   help="destination spacetoken to use for the transfer")
    p.add_argument('-t', "--timeout", nargs=1, type=int, default=0,
                   help="global timeout for the transfer operation")
    p.add_argument('SRC', nargs=1, type=str, help="source file")
    p.add_argument('DST', nargs=1, type=str, help="destination file")
    return p


def apply_transfer_options(t, params):
    if(params.nbstreams != 0):
        t.nbstreams = params.nbstreams[0]
    if(params.timeout != 0):
        t.timeout = params.timeout[0]
    if(len(params.src_spacetoken) > 0):
        t.src_spacetoken = params.src_spacetoken[0]
    if(len(params.dst_spacetoken) > 0):
        t.dst_spacetoken = params.dst_spacetoken[0]


@gfal_catch_gerror
def gfal_copy_main():
    params = create_gfal_cp_parser().parse_args(sys.argv[1:])
    set_verbose_mode(params)
    c = gfal2.creat_context()
    applys_option(c, params)
    t = c.transfer_parameters()
    apply_transfer_options(t, params)
    c.filecopy(t, params.SRC[0], params.DST[0])
    return 0
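# A usage sketch for the parser defined above (the URLs are placeholders,
# and the command name depends on how this module is packaged and installed):
#
#   <gfal-copy> --nbstreams 4 --timeout 300 \
#       srm://source.example/path/file gsiftp://dest.example/path/file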
#!/usr/bin/env python
# coding=utf-8
import argparse
import os

parser = argparse.ArgumentParser()
parser.add_argument("--mnist_dataset_dir", help="the dataset of mnist", default="/home/dataset/mnist/images/")
#parser.add_argument("--label_name_txt", help="the name of cifar10 label", default="./label_name.txt")
args = parser.parse_args()

#1
#label_name_dic = {}
#f = open(args.label_name_txt,"r")
#for line in f:
#    strings = line.split(" ")
#    strings[1] = strings[1][:-1]
#    print strings[1]
#    label_name_dic[strings[0]] = strings[1]
#f.close()
#print label_name_dic

#2 train
train_lines = []
train_dir = os.path.join(args.mnist_dataset_dir, "train")
dirlist1 = os.listdir(train_dir)
for dir1 in dirlist1:
    path = os.path.join(train_dir, dir1)
    dirlist2 = os.listdir(path)
    for dir2 in dirlist2:
        img_dir = train_dir + "/" + dir1 + "/" + dir2
        line = img_dir + " " + dir1 + "\n"
        train_lines.append(line)

train_fw = open("./train.txt", "w")
train_fw.writelines(train_lines)
train_fw.close()

#3 test
test_lines = []
test_dir = os.path.join(args.mnist_dataset_dir, "test")
dirlist1 = os.listdir(test_dir)
for dir1 in dirlist1:
    path = os.path.join(test_dir, dir1)
    dirlist2 = os.listdir(path)
    for dir2 in dirlist2:
        img_dir = test_dir + "/" + dir1 + "/" + dir2
        line = img_dir + " " + dir1 + "\n"
        test_lines.append(line)

test_fw = open("./test.txt", "w")
test_fw.writelines(test_lines)
test_fw.close()

print "success to write file"
def day9_part1(numbers):
    index = 25
    while index < len(numbers):
        subarray = numbers[index - 25:index]
        found_pairs = False
        for i in subarray:
            for i2 in subarray:
                if i + i2 == numbers[index]:
                    found_pairs = True
        if not found_pairs:
            print("Part A solution: " + str(numbers[index]))
            return numbers[index]
        index += 1
    return "Part A: No weakness found"


def day9_part2(array, target):
    length = len(array)
    for i in range(length):
        current_sum = array[i]
        i2 = i + 1
        while i2 <= length:
            if current_sum == target:
                return "Part B solution: " + str(max(array[i:i2]) + min(array[i:i2]))
            elif i2 == length or current_sum > target:
                break
            current_sum = current_sum + array[i2]
            i2 += 1
    return "Part B: No subarray found"


if __name__ == '__main__':
    file = open("day9.txt", "r")
    numbers = []
    for line in file:
        numbers.append(int(line.strip("\n")))
    weakness = day9_part1(numbers)
    print(day9_part2(numbers, weakness))
from django.shortcuts import render, redirect
from .models import Comment
from django.contrib.auth.decorators import login_required
from django.utils import timezone
from confession.models import Confession

# Create your views here.

def view(request, cf_id):
    comments = Comment.objects.filter(confession=cf_id)
    return render(request, 'comment/view.html', {"comments": comments, "confessionid": cf_id})


@login_required
def create(request, cf_id):
    if request.method == 'POST':
        if request.POST['text']:
            newconf = Comment()
            newconf.text = request.POST['text']
            newconf.pub_date = timezone.datetime.now()
            newconf.author = request.user
            newconf.confession = Confession.objects.get(pk=cf_id)
            newconf.save()
            return redirect('viewcomment', cf_id)
        else:
            return render(request, 'comment/create.html', {'error': 'Please fill all the fields'})
    else:
        return render(request, 'comment/create.html', {"confessionid": cf_id})
import urllib
import urllib.request
from bs4 import BeautifulSoup
import sqlite3
import MySQLdb
import csv

url = "https://www.indeed.co.in/jobs?q=software+developer&l=Chennai%2C+Tamil+Nadu"
page = urllib.request.urlopen(url)
soup = BeautifulSoup(page, "html.parser")
#print(soup.prettify())

'''for link in soup.findAll('div', {"class":"jobsearch-SerpJobCard unifiedRow row result clickcard"}):
    print(link.find('a'))'''
import requests
import random
import time
import threading

# ADD YOUR DISCORD WEBHOOK HERE
# CHANGE CUSTOM MONITOR DELAY IF NOT USING PROXIES THEN MAKE HIGHER DELAY
WEBHOOK = ''
MONITOR_DELAY = 5

########################################################################################################################
# DO NOT CHANGE ANYTHING BELOW HERE
########################################################################################################################

WEBHOOK = '{}/slack'.format(WEBHOOK)

PRODUCTS = {
    'Dark Blue': {
        'url': 'https://www.selfridges.com/US/en/cat/evian-x-virgil-abloh-evian-x-virgil-abloh-x-soma-glass-water-bottle-500ml_343-3001612-147609/',
        'image': 'https://images.selfridges.com/is/image/selfridges/343-3001612-147609_DARKBLUE_ALT10?$PDP_M_ZOOM$&defaultImage=343-3001612-147609_DARKBLUE_M'},
    'Purple': {
        'url': 'https://www.selfridges.com/US/en/cat/evian-x-virgil-abloh-evian-x-virgil-abloh-x-soma-glass-water-bottle-500ml_343-3001612-147608/',
        'image': 'https://images.selfridges.com/is/image/selfridges/343-3001612-147608_PURPLE_ALT10?$PDP_M_ZOOM$&defaultImage=343-3001612-147608_PURPLE_M'},
    'Pink': {
        'url': 'https://www.selfridges.com/US/en/cat/evian-x-virgil-abloh-evian-x-virgil-abloh-x-soma-glass-water-bottle-500ml_343-3001612-147416/',
        'image': 'https://images.selfridges.com/is/image/selfridges/343-3001612-147416_PINK_ALT10?$PDP_M_ZOOM$&defaultImage=343-3001612-147416_PINK_M'}}

try:
    PROXIES = open('proxies.txt').readlines()
    if len(PROXIES) == 0:
        PROXIES = False
except:
    PROXIES = False


class SELFRIDGES(threading.Thread):
    def __init__(self, PRODUCT):
        threading.Thread.__init__(self)
        self.s = requests.session()
        self.s.headers = {
            'Connection': 'keep-alive',
            'Upgrade-Insecure-Requests': '1',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
            'Accept-Encoding': 'gzip, deflate, br',
            'Accept-Language': 'en-US,en;q=0.9',}
        self.s.headers.update(self.s.headers)
        self.colour = PRODUCT
        self.url = PRODUCTS[self.colour]['url']
        self.stock = False

    def send(self):
        data = {
            'attachments': [{
                'title': 'EVIAN X VIRGIL ABLOH',
                'title_link': self.url,
                'fields': [{'title': 'Colour', 'value': self.colour, 'short': True}],
                'thumb_url': PRODUCTS[self.colour]['image'],
                'footer': '@KinnyBot | Selfridges x Off-White Monitor | {}'.format(time.strftime('%X')),
            }]
        }
        resp = requests.post(WEBHOOK, json=data, headers={'Content-Type': 'application/json'})

    def get_proxy(self):
        proxy = random.choice(PROXIES).rstrip()
        proxy_dict = {
            'http': 'http://' + proxy,
            'https': 'https://' + proxy}
        self.s.proxies.update(proxy_dict)

    def run(self):
        while True:
            if PROXIES:
                self.get_proxy()
            try:
                resp = self.s.get(self.url, timeout=5)
                if resp.status_code == 200:
                    if 'out of stock' in resp.text.lower():
                        stock = False
                    else:
                        stock = True
                    if self.stock != stock and stock == True:
                        self.send()
                    self.stock = stock
            except:
                pass
            time.sleep(MONITOR_DELAY)


if __name__ == '__main__':
    for PRODUCT in PRODUCTS:
        thread = SELFRIDGES(PRODUCT)
        thread.start()
#
# Python module for SPIDIR library
#
# Note, this module requires the rasmus and compbio python modules.
#

import os
import sys
from math import *
from ctypes import *

from spidir.ctypes_export import *

# import spidir C lib
spidir = load_library(["..", "..", "lib"], "libspidir.so")

# add pre-bundled dependencies to the python path,
# if they are not available already
try:
    import rasmus, compbio
except ImportError:
    from . import dep
    dep.load_deps()
    import rasmus, compbio

#=============================================================================
# wrap functions from c library

ex = Exporter(globals())
export = ex.export

# typedefs
c_floatlk = c_double
c_floatlk_p = c_double_p

if spidir:
    # common functions
    export(spidir, "gamm", c_double, [c_double, "a"])
    export(spidir, "invgammaPdf", c_double,
           [c_double, "x", c_double, "a", c_double, "b"])
    #export(spidir, "invgammaCdf", c_double,
    #       [c_double, "x", c_double, "a", c_double, "b"])
    #export(spidir, "quantInvgamma", c_double,
    #       [c_double, "p", c_double, "a", c_double, "b"])
    export(spidir, "gammalog", c_double,
           [c_double, "x", c_double, "a", c_double, "b"])
    export(spidir, "gammaPdf", c_double,
           [c_double, "x", c_double, "a", c_double, "b"])
    export(spidir, "gammaDerivX", c_double,
           [c_double, "x", c_double, "a", c_double, "b"])
    export(spidir, "gammaDerivA", c_double,
           [c_double, "x", c_double, "a", c_double, "b"])
    export(spidir, "gammaDerivB", c_double,
           [c_double, "x", c_double, "a", c_double, "b"])
    export(spidir, "gammaDerivV", c_double,
           [c_double, "x", c_double, "v"])
    export(spidir, "gammaDerivV2", c_double,
           [c_double, "x", c_double, "v"])
    export(spidir, "gammaSumPdf", c_double,
           [c_double, "y", c_int, "n", c_float_list, "alpha",
            c_float_list, "beta", c_float, "tol"])
    #export(spidir, "negbinomPdf", c_double,
    #       [c_int, "k", c_double, "r", c_double, "p"])
    #export(spidir, "negbinomDerivR", c_double,
    #       [c_int, "k", c_double, "r", c_double, "p"])
    #export(spidir, "negbinomDerivP", c_double,
    #       [c_int, "k", c_double, "r", c_double, "p"])
    #export(spidir, "incompleteGammaC", c_double,
    #       [c_double, "s", c_double, "x"])

    # basic tree functions
    export(spidir, "deleteTree", c_int, [c_void_p, "tree"])
    export(spidir, "makeTree", c_void_p, [c_int, "nnodes", c_int_p, "ptree"])
    export(spidir, "tree2ptree", c_int, [c_void_p, "tree", c_int_list, "ptree"],
           newname="ctree2ptree")
    export(spidir, "setTreeDists", c_void_p,
           [c_void_p, "tree", c_float_p, "dists"])

    # search
    export(spidir, "searchClimb", c_void_p,
           [c_int, "niter", c_int, "quickiter",
            c_int, "nseqs", c_char_p_p, "gene_names", c_char_p_p, "seqs",
            c_int, "nsnodes", c_int_list, "pstree", c_float_list, "sdists",
            c_int_list, "gene2species",
            c_float_list, "sp_alpha", c_float_list, "sp_beta",
            c_float, "gene_rate",
            c_float, "pretime_lambda", c_float, "birth", c_float, "death",
            c_float, "gene_alpha", c_float, "gene_beta",
            c_float_list, "bgfreq", c_float, "kappa",
            c_int, "nsamples", c_int, "approx"])

    # topology prior birthdeath functions
    export(spidir, "inumHistories", c_int, [c_int, "nleaves"])
    export(spidir, "numHistories", c_double, [c_int, "nleaves"])
    #export(spidir, "numTopologyHistories", c_double, [c_void_p, "tree"])
    export(spidir, "birthDeathCount", c_double,
           [c_int, "ngenes", c_float, "time",
            c_float, "birth", c_float, "death"])
    export(spidir, "birthDeathCounts", c_double,
           [c_int, "start", c_int, "end", c_float, "time",
            c_float, "birth", c_float, "death"])
    export(spidir, "birthDeathCountsSlow", c_double,
           [c_int, "start", c_int, "end", c_float, "time",
            c_float, "birth", c_float, "death"])

    # birth death tree counts
    export(spidir, "birthDeathTreeCounts", c_double,
           [c_void_p, "tree", c_int, "nspecies", c_int_list, "counts",
            c_float, "birth", c_float, "death", c_int, "maxgene",
            c_int, "rootgene", c_void_p, "tab"])
    export(spidir, "birthDeathForestCounts", c_double,
           [c_void_p, "tree", c_int, "nspecies", c_int, "nfams",
            c_int_matrix, "counts", c_int_list, "mult",
            c_float, "birth", c_float, "death", c_int, "maxgene",
            c_int, "rootgene", c_void_p, "tab"])
    export(spidir, "birthDeathCountsML_alloc", c_void_p,
           [c_void_p, "tree", c_int, "nspecies", c_int, "nfams",
            c_int_matrix, "counts", c_int_list, "mult",
            c_float, "birth", c_float, "death", c_float, "step",
            c_int, "maxgene", c_int, "rootgene"])
    export(spidir, "birthDeathCountsML_free", c_void_p, [c_void_p, "opt"])
    export(spidir, "birthDeathCountsML_iter", c_int,
           [c_void_p, "opt", c_float_list, "birth", c_float_list, "death",
            c_float_list, "size"])

    # tree topology
    export(spidir, "calcDoomTable", c_int,
           [c_void_p, "tree", c_float, "birth", c_float, "death",
            c_double_p, "doomtable"])
    export(spidir, "birthDeathTreePrior", c_double,
           [c_void_p, "tree", c_void_p, "stree", c_int_p, "recon",
            c_int_p, "events", c_float, "birth", c_float, "death",
            c_double_p, "doomtable"])
    export(spidir, "birthDeathTreePriorFull", c_double,
           [c_void_p, "tree", c_void_p, "stree", c_int_p, "recon",
            c_int_p, "events", c_float, "birth", c_float, "death",
            c_double_p, "doomtable"])
    export(spidir, "sampleBirthWaitTime", c_double,
           [c_int, "n", c_float, "T", c_float, "birth", c_float, "death"])
    export(spidir, "birthWaitTime", c_double,
           [c_float, "t", c_int, "n", c_float, "T",
            c_float, "birth", c_float, "death"])
    export(spidir, "probNoBirth", c_double,
           [c_int, "n", c_float, "T", c_float, "birth", c_float, "death"])
    export(spidir, "sampleBirthWaitTime1", c_double,
           [c_float, "T", c_float, "birth", c_float, "death"])

    # branch prior functions
    export(spidir, "branchPrior", c_double,
           [c_int, "nnodes", c_int_list, "ptree", c_float_list, "dists",
            c_int, "nsnodes", c_int_list, "pstree", c_float_list, "sdists",
            c_int_list, "recon", c_int_list, "events",
            c_float_list, "sp_alpha", c_float_list, "sp_beta",
            c_float, "generate",
            c_float, "pretime_lambda", c_float, "dupprob", c_float, "lossprob",
            c_float, "gene_alpha", c_float, "gene_beta",
            c_int, "nsamples", c_int, "approx"])

    # parsimony
    export(spidir, "parsimony", c_void_p,
           [c_int, "nnodes", c_int, "ptree", c_int, "nseqs",
            c_char_p_p, "seqs", c_float, "dists",
            c_int, "buildAncestral", c_char_p_p, "ancetralSeqs"])

    # sequence likelihood functions
    export(spidir, "makeHkyMatrix", c_void_p,
           [c_float_p, "bgfreq", c_float, "kappa", c_float, "time",
            c_float_p, "matrix"])
    export(spidir, "makeHkyDerivMatrix", c_void_p,
           [c_float_p, "bgfreq", c_float, "kappa", c_float, "time",
            c_float_p, "matrix"])
    export(spidir, "makeHkyDeriv2Matrix", c_void_p,
           [c_float_p, "bgfreq", c_float, "kappa", c_float, "time",
            c_float_p, "matrix"])
    export(spidir, "branchLikelihoodHky", c_floatlk,
           [c_floatlk_p, "probs1", c_floatlk_p, "probs2",
            c_int, "seqlen", c_float_p, "bgfreq", c_float, "kappa",
            c_float, "time"])
    export(spidir, "branchLikelihoodHkyDeriv", c_floatlk,
           [c_floatlk_p, "probs1", c_floatlk_p, "probs2",
            c_int, "seqlen", c_float_p, "bgfreq", c_float, "kappa",
            c_float, "time"])
    export(spidir, "branchLikelihoodHkyDeriv2", c_floatlk,
           [c_floatlk_p, "probs1", c_floatlk_p, "probs2",
            c_int, "seqlen", c_float_p, "bgfreq", c_float, "kappa",
            c_float, "time"])
    export(spidir, "mleDistanceHky", c_float,
           [c_floatlk_p, "probs1", c_floatlk_p, "probs2",
            c_int, "seqlen", c_float_p, "bgfreq", c_float, "kappa",
            c_float, "t0", c_float, "t1"])
    export(spidir, "calcSeqProbHky", c_floatlk,
           [c_void_p, "tree", c_int, "nseqs", c_char_p_p, "seqs",
            c_float_p, "bgfreq", c_float, "kappa"])
    export(spidir, "findMLBranchLengthsHky", c_floatlk,
           [c_int, "nnodes", c_int_p, "ptree", c_int, "nseqs",
            c_char_p_p, "seqs", c_float_p, "dists", c_float_p, "bgfreq",
            c_float, "kappa", c_int, "maxiter", c_int, "parsinit"])

    # training functions
    export(spidir, "train", c_void_p,
           [c_int, "ntrees", c_int, "nspecies", c_int_list, "gene_sizes",
            c_float_matrix, "lengths", c_float_list, "times",
            c_float_list, "sp_alpha", c_float_list, "sp_beta",
            c_float_list, "gene_alpha", c_float_list, "gene_beta",
            c_int, "nrates", c_int, "max_iter"])
    export(spidir, "allocRatesEM", c_void_p,
           [c_int, "ntrees", c_int, "nspecies", c_int, "nrates",
            c_int_p, "gene_sizes",
            c_float_p_p, "lengths", c_float_p, "times",
            c_float_p, "sp_alpha", c_float_p, "sp_beta",
            c_float, "gene_alpha", c_float, "gene_beta"])
    export(spidir, "freeRatesEM", c_void_p, [c_void_p, "em"])
    export(spidir, "RatesEM_Init", c_void_p, [c_void_p, "em"])
    export(spidir, "RatesEM_EStep", c_void_p, [c_void_p, "em"])
    export(spidir, "RatesEM_MStep", c_void_p, [c_void_p, "em"])
    export(spidir, "RatesEM_likelihood", c_float, [c_void_p, "em"])
    export(spidir, "RatesEM_getParams", c_void_p,
           [c_void_p, "em", c_float_p, "params"])


#=============================================================================
# additional python interface

def read_params(filename):
    """Read SPIDIR model parameters from a file"""

    infile = file(filename)
    params = {}

    for line in infile:
        tokens = line.split("\t")
        key = tokens[0]
        values = tokens[1:]
        if key[0].isdigit():
            key = int(key)
        params[key] = map(float, values)

    return params


def write_params(filename, params):
    """Write SPIDIR model parameters to a file"""
    out = open(filename, "w")
    for key, value in params.iteritems():
        out.write("\t".join(map(str, [key] + value)) + "\n")
    out.close()


def make_ptree(tree):
    """Make parent tree array from tree"""

    nodes = []
    nodelookup = {}
    ptree = []

    def walk(node):
        for child in node.children:
            walk(child)
        nodes.append(node)
    walk(tree.root)

    def leafsort(a, b):
        if a.is_leaf():
            if b.is_leaf():
                return 0
            else:
                return -1
        else:
            if b.is_leaf():
                return 1
            else:
                return 0

    # bring leaves to front
    nodes.sort(cmp=leafsort)
    nodelookup = {}
    for i, n in enumerate(nodes):
        nodelookup[n] = i

    for node in nodes:
        if node == tree.root:
            ptree.append(-1)
        else:
            ptree.append(nodelookup[node.parent])

    assert nodes[-1] == tree.root

    return ptree, nodes, nodelookup


def ptree2tree(ptree, genes):
    """Create a Tree object from a ptree array"""

    from rasmus.treelib import Tree, TreeNode

    tree = Tree()
    nodes = [TreeNode(gene) for gene in genes] + \
            [TreeNode(i) for i in xrange(len(genes)-1)]

    for i, p in enumerate(ptree):
        if p == -1:
            tree.root = nodes[i]
        else:
            tree.add_child(nodes[p], nodes[i])

    return tree


def ptree2ctree(ptree):
    """Makes a c++ Tree from a parent array"""
    pint = c_int * len(ptree)
    tree = makeTree(len(ptree), pint(* ptree))
    return tree


def tree2ctree(tree):
    """Make a c++ Tree from a treelib.Tree data structure"""
    ptree, nodes, nodelookup = make_ptree(tree)
    dists = [x.dist for x in nodes]
    ctree = ptree2ctree(ptree)
    setTreeDists(ctree, c_list(c_float, dists))
    return ctree


def ctree2tree(ctree, genes):
    """Makes a treelib.Tree from a c++ Tree"""
    nnodes = 2*len(genes) - 1
    ptree = [0] * nnodes
    ctree2ptree(ctree, ptree)
    return ptree2tree(ptree, genes)


def make_gene2species_array(genes, stree, snodelookup, gene2species):
    """Make a gene2species array"""
    gene2speciesarray = []
    for g in genes:
        gene2speciesarray.append(snodelookup[stree.nodes[gene2species(g)]])
    for i in xrange(len(genes)-1):
        gene2speciesarray.append(-1)
    return gene2speciesarray


def make_recon_array(tree, recon, nodes, snodelookup):
    """Make a reconciliation array from recon dict"""
    recon2 = []
    for node in nodes:
        recon2.append(snodelookup[recon[node]])
    return recon2


def make_events_array(nodes, events):
    """Make events array from events dict"""
    mapping = {"gene": 0, "spec": 1, "dup": 2}
    return [mapping[events[i]] for i in nodes]


def search_climb(genes, align, stree, gene2species,
                 params, birth, death, pretime,
                 bgfreq=[.25, .25, .25, .25], kappa=1.0,
                 niter=50, quickiter=100,
                 nsamples=100, branch_approx=True):
    """Search for a MAP gene tree"""

    nseqs = len(align)
    calign = c_list(c_char_p, align)
    gene_names = c_list(c_char_p, genes)

    pstree, snodes, snodelookup = make_ptree(stree)
    nsnodes = len(snodes)
    sdists = [x.dist for x in snodes]
    smap = make_gene2species_array(genes, stree, snodelookup, gene2species)

    sp_alpha = [params[x.name][0] for x in snodes]
    sp_beta = [params[x.name][1] for x in snodes]
    gene_alpha, gene_beta = params["baserate"]
    gene_rate = -1

    ctree = searchClimb(niter, quickiter,
                        nseqs, gene_names, calign,
                        nsnodes, pstree, sdists,
                        smap,
                        sp_alpha, sp_beta,
                        gene_rate,
                        pretime, birth, death,
                        gene_alpha, gene_beta,
                        bgfreq, kappa,
                        nsamples, branch_approx)

    nnodes = 2 * nseqs - 1
    tree = ctree2tree(ctree, genes)
    deleteTree(ctree)

    return tree


def calc_joint_prob(align, tree, stree, recon, events, params,
                    birth, death, pretime, bgfreq, kappa,
                    nsamples=100, branch_approx=True, terms=False):
    """Calculate the joint probability of a gene tree"""

    branchp = branch_prior(tree, stree, recon, events,
                           params, birth, death, pretime,
                           nsamples, approx=branch_approx)
    topp = calc_birth_death_prior(tree, stree, recon, birth, death,
                                  events=events)
    seqlk = calc_seq_likelihood_hky(tree, align, bgfreq, kappa)

    if terms:
        return branchp, topp, seqlk
    else:
        return branchp + topp + seqlk


#=============================================================================
# topology prior

def calc_birth_death_prior(tree, stree, recon, birth, death, events=None):
    """Returns the topology prior of a gene tree"""

    from rasmus.bio import phylo

    if events is None:
        events = phylo.label_events(tree, recon)

    ptree, nodes, nodelookup = make_ptree(tree)
    pstree, snodes, snodelookup = make_ptree(stree)

    ctree = tree2ctree(tree)
    cstree = tree2ctree(stree)
    recon2 = make_recon_array(tree, recon, nodes, snodelookup)
    events2 = make_events_array(nodes, events)

    doomtable = c_list(c_double, [0] * len(stree.nodes))
    calcDoomTable(cstree, birth, death, doomtable)

    p = birthDeathTreePriorFull(ctree, cstree,
                                c_list(c_int, recon2),
                                c_list(c_int, events2),
                                birth, death, doomtable)
    deleteTree(ctree)
    deleteTree(cstree)

    return p


def birth_death_tree_counts(stree, counts, birth, death,
                            maxgene=50, rootgene=1):
    if birth == death:
        birth = 1.01 * death

    ctree = tree2ctree(stree)
    nspecies = len(counts)
    prob = birthDeathTreeCounts(ctree, nspecies, counts,
                                birth, death, maxgene, rootgene, 0)
    deleteTree(ctree)
    return prob


def birth_death_forest_counts(stree, counts, birth, death,
                              maxgene=50, rootgene=1, mult=None):
    if birth == death:
        birth = 1.01 * death

    if mult is None:
        hist = {}
        for row in counts:
            row = tuple(row)
            hist[row] = hist.get(row, 0) + 1
        counts, mult = zip(*hist.items())
        counts = map(list, counts)
        mult = list(mult)

    ctree = tree2ctree(stree)
    nfams = len(counts)
    nspecies = len(counts[0])
    logl = birthDeathForestCounts(ctree, nspecies, nfams,
                                  counts, mult,
                                  birth, death, maxgene, rootgene, 0)
    deleteTree(ctree)
    return logl


def birth_death_counts_ml_alloc(stree, counts, birth0, death0, step,
                                maxgene=50, rootgene=1, mult=None):
    if birth0 == death0:
        birth0 = 1.01 * death0

    if mult is None:
        hist = {}
        for row in counts:
            row = tuple(row)
            hist[row] = hist.get(row, 0) + 1
        counts, mult = zip(*hist.items())
        counts = map(list, counts)
        mult = list(mult)

    ctree = tree2ctree(stree)
    nfams = len(counts)
    nspecies = len(counts[0])
    opt = birthDeathCountsML_alloc(ctree, nspecies, nfams,
                                   counts, mult,
                                   birth0, death0, step,
                                   maxgene, rootgene)
    return (ctree, opt)


def birth_death_counts_ml_free(opt):
    birthDeathCountsML_free(opt[1])
    deleteTree(opt[0])


def birth_death_counts_ml_iter(opt):
    birth = [0.0]
    death = [0.0]
    size = [0.0]
    status = birthDeathCountsML_iter(opt[1], birth, death, size)
    return status, size[0], (birth[0], death[0])


#=============================================================================
# branch prior

def branch_prior(tree, stree, recon, events, params, birth, death,
                 pretime_lambda=1.0, nsamples=1000, approx=True,
                 generate=None):
    """Returns the branch prior of a gene tree"""

    ptree, nodes, nodelookup = make_ptree(tree)
    pstree, snodes, snodelookup = make_ptree(stree)
    recon2 = make_recon_array(tree, recon, nodes, snodelookup)
    events2 = make_events_array(nodes, events)

    dists = [x.dist for x in nodes]
    sdists = [x.dist for x in snodes]
    nnodes = len(nodes)
    nsnodes = len(snodes)

    sp_alpha = [params[x.name][0] for x in snodes]
    sp_beta = [params[x.name][1] for x in snodes]

    if generate is None:
        generate = -1

    p = branchPrior(nnodes, ptree, dists,
                    nsnodes, pstree, sdists,
                    recon2, events2,
                    sp_alpha, sp_beta,
                    generate,
                    pretime_lambda, birth, death,
                    params["baserate"][0], params["baserate"][1],
                    nsamples, approx)
    return p


#=============================================================================
# sequence likelihood

def make_hky_matrix(bgfreq, kappa, t):
    """
    Returns an HKY matrix

    bgfreq -- the background frequency A,C,G,T
    kappa  -- the transition/transversion ratio
    """

    matrix = [0.0] * 16
    matrix = c_list(c_float, matrix)
    makeHkyMatrix(c_list(c_float, bgfreq), kappa, t, matrix)
    return [matrix[0:4],
            matrix[4:8],
            matrix[8:12],
            matrix[12:16]]


def make_hky_deriv_matrix(bgfreq, kappa, t):
    """
    Returns an HKY derivative matrix

    bgfreq -- the background frequency A,C,G,T
    kappa  -- the transition/transversion ratio
    """

    matrix = [0.0] * 16
    matrix = c_list(c_float, matrix)
    makeHkyDerivMatrix(c_list(c_float, bgfreq), kappa, t, matrix)
    return [matrix[0:4],
            matrix[4:8],
            matrix[8:12],
            matrix[12:16]]


def make_hky_deriv2_matrix(bgfreq, kappa, t):
    """
    Returns an HKY 2nd derivative matrix

    bgfreq -- the background frequency A,C,G,T
    kappa  -- the transition/transversion ratio
    """

    matrix = [0.0] * 16
    matrix = c_list(c_float, matrix)
    makeHkyDeriv2Matrix(c_list(c_float, bgfreq), kappa, t, matrix)
    return [matrix[0:4],
            matrix[4:8],
            matrix[8:12],
            matrix[12:16]]


def branch_likelihood_hky(probs1, probs2, seqlen, bgfreq, kappa, time):
    return branchLikelihoodHky(c_list(c_floatlk, probs1),
                               c_list(c_floatlk, probs2),
                               seqlen, c_list(c_float, bgfreq), kappa, time)


def branch_likelihood_hky_deriv(probs1, probs2, seqlen, bgfreq, kappa, time):
    return branchLikelihoodHkyDeriv(
        c_list(c_floatlk, probs1), c_list(c_floatlk, probs2),
        seqlen, c_list(c_float, bgfreq), kappa, time)


def branch_likelihood_hky_deriv2(probs1, probs2, seqlen, bgfreq, kappa, time):
    return branchLikelihoodHkyDeriv2(
        c_list(c_floatlk, probs1), c_list(c_floatlk, probs2),
        seqlen, c_list(c_float, bgfreq), kappa, time)


def mle_distance_hky(probs1, probs2, seqlen, bgfreq, kappa, t0, t1):
    return mleDistanceHky(c_list(c_floatlk, probs1),
                          c_list(c_floatlk, probs2),
                          seqlen, c_list(c_float, bgfreq), kappa,
                          t0, t1)


def calc_seq_likelihood_hky(tree, align, bgfreq, kappa):
    ptree, nodes, nodelookup = make_ptree(tree)
    leaves = [x for x in nodes if x.is_leaf()]
    calign = (c_char_p * len(align))(* [align[x.name] for x in leaves])

    ctree = tree2ctree(tree)
    l = calcSeqProbHky(ctree, len(align), calign, c_list(c_float, bgfreq),
                       kappa)
    deleteTree(ctree)
    return l


def find_ml_branch_lengths_hky(tree, align, bgfreq, kappa, maxiter=20,
                               parsinit=True):
    ptree, nodes, nodelookup = make_ptree(tree)
    leaves = [x for x in nodes if x.is_leaf()]
    calign = (c_char_p * len(align))(* [align[x.name] for x in leaves])
    dists = c_list(c_float, [n.dist for n in nodes])

    l = findMLBranchLengthsHky(len(ptree), c_list(c_int, ptree),
                               len(align), calign,
                               dists, c_list(c_float, bgfreq), kappa,
                               maxiter, int(parsinit))
    for i, node in enumerate(nodes):
        node.dist = dists[i]
    return l


#=============================================================================
# training

def mean(vals):
    return sum(vals) / float(len(vals))


def variance(vals):
    """Variance"""
    u = mean(vals)
    return sum((x - u)**2 for x in vals) / float(len(vals)-1)


def read_length_matrix(filename, minlen=.0001, maxlen=1.0, nooutliers=True):
    """Read a length matrix made by spidir-prep"""

    from rasmus import util

    dat = [line.rstrip().split("\t") for line in open(filename)]
    species = dat[0][2:]
    lens = util.map2(float, util.submatrix(dat, range(1, len(dat)),
                                           range(2, len(dat[0]))))
    gene_sizes = map(int, util.cget(dat[1:], 1))
    files = util.cget(dat[1:], 0)

    if nooutliers:
        treelens = map(sum, lens)
        m = mean(treelens)
        ind = util.find(lambda x: x < 5*m, treelens)
        files, gene_sizes, lens, treelens = [
            util.mget(x, ind) for x in (files, gene_sizes, lens, treelens)]

    for row in lens:
        for i in xrange(len(row)):
            if row[i] < minlen:
                row[i] = minlen

    return species, lens, gene_sizes, files


def train_params(gene_sizes, length_matrix, times, species,
                 nrates=10, max_iter=10):

    ntrees = len(length_matrix)
    nspecies = len(length_matrix[0])

    sp_alpha = [1.0] * nspecies
    sp_beta = [1.0] * nspecies
    gene_alpha = [1.0]
    gene_beta = [1.0]

    train(ntrees, nspecies, gene_sizes, length_matrix, times,
          sp_alpha, sp_beta, gene_alpha, gene_beta,
          nrates, max_iter)

    params = {}
    params["baserate"] = [gene_alpha[0], gene_beta[0]]
    for i, sp in enumerate(species):
        params[sp] = [sp_alpha[i], sp_beta[i]]

    return params


def alloc_rates_em(gene_sizes, length_matrix, times, species, nrates):

    ntrees = len(length_matrix)
    nspecies = len(length_matrix[0])
    assert len(gene_sizes) == ntrees
    assert len(times) == nspecies
    assert len(species) == nspecies

    sp_alpha = [1.0] * nspecies
    sp_beta = [1.0] * nspecies
    gene_alpha = 1.0
    gene_beta = 1.0

    return allocRatesEM(ntrees, nspecies, nrates,
                        c_list(c_int, gene_sizes),
                        c_matrix(c_float, length_matrix),
                        c_list(c_float, times),
                        c_list(c_float, sp_alpha), c_list(c_float, sp_beta),
                        gene_alpha, gene_beta)


def free_rates_em(em):
    freeRatesEM(em)


def rates_em_get_params(em, species, stree=None, lens=None, times=None):

    c_params = c_list(c_float, [0.0] * (2*len(species) + 2))
    RatesEM_getParams(em, c_params)

    params = {"baserate": [c_params[0], c_params[1]]}
    for i, sp in enumerate(species):
        if isinstance(sp, basestring) and sp.isdigit():
            sp = int(sp)
        params[sp] = c_params[2+2*i:4+2*i]

    if stree and lens and times:
        treelens = map(sum, lens)
        m = mean(treelens)
        grates = [i/m for i in treelens]
        rates = [i/j/g
                 for row, g in zip(lens, grates)
                 for i, j in zip(row, times)]
        mu = mean(rates)
        sigma2 = variance(rates)
        params[stree.root.name] = [mu*mu/sigma2, mu/sigma2]

    return params
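# The ptree parent-array encoding used throughout the module above (leaves
# first, root last, each entry holding the parent's index or -1 for the root)
# is easiest to see on a tiny example. A minimal pure-Python sketch,
# independent of the C library and the rasmus Tree class; the tree is made up:
# ((A,B),C) -- nodes 0..2 are leaves A, B, C; node 3 joins A and B; node 4 is the root.
ptree_example = [3, 3, 4, 4, -1]

def children_of(ptree):
    """Invert a parent array into a child list per node."""
    children = dict((i, []) for i in range(len(ptree)))
    for node, parent in enumerate(ptree):
        if parent != -1:
            children[parent].append(node)
    return children

assert children_of(ptree_example) == {0: [], 1: [], 2: [], 3: [0, 1], 4: [2, 3]}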
from classes import Action, Scooter
from visualization.helpers import *
from globals import *
import matplotlib.pyplot as plt
import copy
from itertools import cycle


def visualize_clustering(clusters):
    fig, ax = plt.subplots(figsize=[10, 6])

    # Add image to background
    oslo = plt.imread("images/kart_oslo.png")
    lat_min, lat_max, lon_min, lon_max = GEOSPATIAL_BOUND_NEW
    ax.imshow(
        oslo,
        zorder=0,
        extent=(lon_min, lon_max, lat_min, lat_max),
        aspect="auto",
        alpha=0.6,
    )

    colors = cycle("bgrcmyk")

    # Add clusters to figure
    for cluster in clusters:
        scooter_locations = [
            (scooter.get_lat(), scooter.get_lon()) for scooter in cluster.scooters
        ]
        cluster_color = next(colors)
        df_scatter = ax.scatter(
            [lon for lat, lon in scooter_locations],
            [lat for lat, lon in scooter_locations],
            c=cluster_color,
            alpha=0.6,
            s=3,
        )
        center_lat, center_lon = cluster.get_location()
        rs_scatter = ax.scatter(
            center_lon,
            center_lat,
            c=cluster_color,
            edgecolor="None",
            alpha=0.8,
            s=200,
        )
        ax.annotate(
            cluster.id,
            (center_lon, center_lat),
            ha="center",
            va="center",
            weight="bold",
        )
    ax.set_xlabel("Longitude")
    ax.set_ylabel("Latitude")

    if len(clusters) > 0:
        # The legend uses the last cluster color. Check for clusters to avoid a None object
        ax.legend(
            [df_scatter, rs_scatter],
            ["Full dataset", "Cluster centers"],
            loc="upper right",
        )

    plt.show()


def visualize_state(state):
    """
    Visualize the clusters of a state, with battery level and number of scooters per cluster
    :param state: State object to be visualized
    """
    setup_cluster_visualize(state)

    # shows the plots in IDE
    plt.tight_layout(pad=1.0)
    plt.show()


def visualize_cluster_flow(state: State, flows: [(int, int, int)]):
    """
    Visualize the flow in a state from a simulation
    :param state: State to display
    :param flows: flow of scooters from one cluster to another
    :return:
    """
    (
        graph,
        fig,
        ax,
        graph,
        labels,
        node_border,
        node_color,
        node_size,
        font_size,
    ) = setup_cluster_visualize(state)

    if flows:
        # adds edges of flow between the clusters
        edge_labels, alignment = add_flow_edges(graph, flows)

        # displays edges on plot
        alt_draw_networkx_edge_labels(
            graph,
            edge_labels=edge_labels,
            verticalalignment=alignment,
            bbox=dict(alpha=0),
            ax=ax,
        )

    # displays plot
    display_graph(graph, node_color, node_border, node_size, labels, font_size, ax)

    # shows the plots in IDE
    plt.tight_layout(pad=1.0)
    plt.show()


def visualize_vehicle_route(state, vehicle_route=None, next_state_id=-1):
    """
    Visualize the vehicle route in a state from a simulation
    :param state: State to display
    :param vehicle_route: route travelled by the vehicle
    :param next_state_id: id of next state
    :return:
    """
    (
        graph,
        fig,
        ax,
        graph,
        labels,
        node_border,
        node_color,
        node_size,
        font_size,
    ) = setup_cluster_visualize(state, next_state_id)

    if vehicle_route:
        route_labels, alignment = add_vehicle_route(graph, node_border, vehicle_route)

        alt_draw_networkx_edge_labels(
            graph,
            edge_labels=route_labels,
            verticalalignment=alignment,
            bbox=dict(alpha=0),
            ax=ax,
        )

    # displays plot
    display_graph(graph, node_color, node_border, node_size, labels, font_size, ax)

    # shows the plots in IDE
    plt.tight_layout(pad=1.0)
    plt.show()


def visualize_action(state_before_action: State, current_state: State, action: Action):
    # creating the subplots for the visualization
    fig, ax1, ax2, ax3 = create_system_simulation_plot(
        ["Action", "State before action", "State after action"]
    )

    # plots the vehicle info and the action in the first plot
    plot_vehicle_info(state_before_action.vehicle, current_state.vehicle, ax1)
    plot_action(
        action,
        state_before_action.current_location.id,
        ax1,
        offset=(
            len(state_before_action.vehicle.scooter_inventory)
            + len(current_state.vehicle.scooter_inventory)
        )
        * ACTION_OFFSET,
    )

    make_scooter_visualize(state_before_action, ax2, scooter_battery=True)
    add_location_center(state_before_action.locations, ax2)

    make_scooter_visualize(current_state, ax3, scooter_battery=True)
    add_location_center(state_before_action.locations, ax3)

    plt.tight_layout(pad=1.0)
    plt.show()


def visualize_scooters_on_trip(current_state: State, trips: [(int, int, Scooter)]):
    fig, ax1, ax2 = create_state_trips_plot(["Current trips", "State"])

    plot_trips(trips, ax1)

    make_scooter_visualize(current_state, ax2, scooter_battery=True)
    add_location_center(current_state.locations, ax2)

    plt.tight_layout(pad=1.0)
    plt.show()


def visualize_scooter_simulation(current_state: State, trips):
    """
    Visualize scooter trips of one system simulation
    :param current_state: Initial state for the simulation
    :param trips: trips completed during a system simulation
    """
    # creating the subplots for the visualization
    fig, ax1, ax2, ax3 = create_system_simulation_plot(
        ["Trips", "Current state", "Next State"]
    )

    plot_trips(trips, ax1)

    (
        graph,
        node_color,
        node_border,
        node_size,
        labels,
        font_size,
        all_current_scooters,
        all_current_scooters_id,
    ) = make_scooter_visualize(current_state, ax2, scooter_battery=True)

    # have to copy the networkx graph since the plot isn't shown in the IDE yet
    next_graph = copy.deepcopy(graph)

    # convert locations of the scooters that have moved during the simulation
    cartesian_coordinates = convert_geographic_to_cart(
        [scooter.get_location() for start, end, scooter in trips], GEOSPATIAL_BOUND_NEW
    )

    number_of_current_scooters = len(all_current_scooters)

    # adds labels to the new subplot for the scooters from the state before the simulation
    add_scooter_id_and_battery(
        all_current_scooters, next_graph, ax3, scooter_battery=True
    )

    # loop to add nodes/scooters that have moved during a simulation
    for i, trip in enumerate(trips):
        start, end, scooter = trip
        x, y = cartesian_coordinates[i]
        previous_label = all_current_scooters_id.index(scooter.id)

        # add new node
        next_graph.add_node(number_of_current_scooters + i)
        # adds location in graph for the new node
        next_graph.nodes[number_of_current_scooters + i]["pos"] = (x, y)
        # adds label and color of new node
        labels[number_of_current_scooters + i] = previous_label
        node_color.append(COLORS[end.id])
        node_border.append(BLACK)

        # set the previous position of the scooter to a white node
        node_color[previous_label] = "white"

        # add edge from previous location of scooter to current
        next_graph.add_edge(
            previous_label, number_of_current_scooters + i, color=BLACK, width=1
        )

        # display label on subplot
        ax3.text(
            x, y + 0.015, f"{scooter.id}", horizontalalignment="center", fontsize=8
        )
        ax3.text(
            x,
            y - 0.02,
            f"B - {round(scooter.battery, 1)}",
            horizontalalignment="center",
            fontsize=8,
        )

    display_graph(
        next_graph,
        node_color,
        node_border,
        node_size,
        labels,
        font_size,
        ax3,
        with_labels=False,
    )

    plt.tight_layout(pad=1.0)
    plt.show()


def visualize_analysis(instances, policies, smooth_curve=True):
    """
    :param instances: world instances to analyse
    :param policies: different policies used on the world instances
    :param smooth_curve: True if the analysis plots are to be smoothed out
    :return: figure for the analysis
    """
    # generate plot and subplots
    fig = plt.figure(figsize=(20, 9.7))

    # creating subplots
    spec = gridspec.GridSpec(
        figure=fig, ncols=3, nrows=1, width_ratios=[1] * 3, wspace=0.2, hspace=0
    )
    subplots_labels = [
        ("Time", "Number of lost trips", "Lost demand"),
        ("Time", "Avg. number of scooters - absolute value", "Deviation ideal state"),
        ("Time", "Total deficient battery in the world", "Deficient battery"),
    ]

    # figure subplots
    subplots = []
    for i, (x_label, y_label, plot_title) in enumerate(subplots_labels):
        ax = create_plot_with_axis_labels(
            fig,
            spec[i],
            x_label=x_label,
            y_label=y_label,
            plot_title=plot_title,
        )
        subplots.append(ax)

    ax1, ax2, ax3 = subplots
    ax3.yaxis.tick_right()
    ax3.yaxis.set_label_position("right")

    for i, instance in enumerate(instances):
        (
            lost_demand,
            deviation_ideal_state,
            deficient_battery,
        ) = instance.metrics.get_all_metrics()
        x = instance.metrics.get_time_array()
        ax1.plot(x, lost_demand, c=COLORS[i], label=policies[i])
        if smooth_curve:
            plot_smoothed_curve(x, deviation_ideal_state, ax2, COLORS[i], policies[i])
            plot_smoothed_curve(x, deficient_battery, ax3, COLORS[i], policies[i])
        else:
            ax2.plot(x, deviation_ideal_state, c=COLORS[i], label=policies[i])
            ax3.plot(x, deficient_battery, c=COLORS[i], label=policies[i])

    for subplot in subplots:
        subplot.legend()
        subplot.set_ylim(ymin=0)

    fig.suptitle(
        f"Sample size {SAMPLE_SIZE} - Shift duration {SHIFT_DURATION} - Number of clusters {NUMBER_OF_CLUSTERS} - "
        f"Rollouts {NUMBER_OF_ROLLOUTS} - Max number of neighbours {NUMBER_OF_NEIGHBOURS}",
        fontsize=16,
    )
    plt.show()
    return fig
import pandas as pd


def lineToArray(line, numberOfMachines):
    # split() without an argument already drops the empty strings that
    # split(" ") produced; numberOfMachines is kept for signature compatibility
    nums = line.split()
    return list(map(int, nums))


def fileToDataFrame(file, numberOfJobs, numberOfMachines):
    file.readline()

    timeData = []
    for i in range(numberOfJobs):
        f = file.readline()
        f = lineToArray(f, numberOfMachines)
        timeData.append(f)
    time = pd.DataFrame(timeData)

    file.readline()

    machineData = []
    for i in range(numberOfJobs):
        f = file.readline()
        f = lineToArray(f, numberOfMachines)
        machineData.append(f)
    mach = pd.DataFrame(machineData)

    return (time, mach)


def getNumberOfJobs(filename):
    name = filename.split("_")
    name[1] = name[1][:-1]
    return int(name[1])


def getNumberOfMachines(filename):
    name = filename.split("_")
    mach = name[2].split(".")
    return int(mach[0][:-1])


if __name__ == "__main__":
    print(getNumberOfMachines("data_15J_15M.txt"))
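# A hedged round-trip for the parsers above, under the file layout they imply:
# one header line, numberOfJobs rows of processing times, one separator line,
# then numberOfJobs rows of machine orders. The sample values are made up.
sample = """2 jobs 3 machines
 5  7  9
 4  6  8
machine order
 0  1  2
 2  0  1
"""
with open("data_2J_3M.txt", "w") as f:
    f.write(sample)

jobs = getNumberOfJobs("data_2J_3M.txt")          # -> 2
machines = getNumberOfMachines("data_2J_3M.txt")  # -> 3
with open("data_2J_3M.txt") as f:
    time_df, mach_df = fileToDataFrame(f, jobs, machines)
print(time_df)  # 2x3 frame of processing times
print(mach_df)  # 2x3 frame of machine indices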
# Generated by Django 3.0.1 on 2020-01-01 16:08

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        ('tasks', '0026_auto_20191201_2218'),
    ]

    operations = [
        migrations.AddField(
            model_name='event',
            name='override_routine',
            field=models.BooleanField(default=False),
        ),
        migrations.AddField(
            model_name='routine',
            name='title',
            field=models.CharField(default='', max_length=200),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='task',
            name='override_routine',
            field=models.BooleanField(default=False),
        ),
        migrations.AddField(
            model_name='task',
            name='priority',
            field=models.IntegerField(choices=[(1, 'Low'), (2, 'Normal'), (3, 'High')], default=2, verbose_name='priority'),
        ),
        migrations.AlterField(
            model_name='routine',
            name='day',
            field=models.IntegerField(choices=[(0, 'Monday'), (1, 'Tuesday'), (2, 'Wednesday'), (3, 'Thursday'), (4, 'Friday'), (5, 'Saturday'), (6, 'Sunday')], verbose_name='day'),
        ),
        migrations.CreateModel(
            name='TimeSlot',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.DateField(verbose_name='date')),
                ('start_time', models.TimeField(verbose_name='start time')),
                ('end_time', models.TimeField(verbose_name='end time')),
                ('associated_event', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='tasks.Event')),
                ('associated_routine', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='tasks.Routine')),
                ('associated_task', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='tasks.Task')),
            ],
        ),
    ]
greeting = "Hello" addressee = "World" print(greeting + " " +addressee) addressee = "Teacher" print(greeting + " " + addressee) separators = ", " punc = "!" whole_greeting = greeting + separators + addressee + punc print(whole_greeting)
import requests
import os
import numpy as np
from datetime import date, timedelta
from flask import Flask, render_template, request

api_key = os.environ['API_KEY']

app = Flask(__name__)


@app.route('/')
def weather_dashboard():
    return render_template('home.html')


@app.route("/weather")
def weather():
    city = request.args.get('city')
    days_number = int(request.args.get('days'))
    response = calculate_result(city, days_number)
    return response


@app.route('/output', methods=['POST'])
def test1():
    days_number = request.form['days_number']
    city = request.form['city']
    response = calculate_result(city, days_number)
    return response


def get_weather(today, end_date, api_key, city):
    url = "https://visual-crossing-weather.p.rapidapi.com/history"

    # end_date is the earlier of the two dates, so it is the start of the range
    request_start_date = str(today) + "T00:00:00"
    request_end_date = str(end_date) + "T00:00:00"
    querystring = {"startDateTime": request_end_date,
                   "aggregateHours": "24",
                   "location": city,
                   "endDateTime": request_start_date,
                   "unitGroup": "us",
                   "dayStartTime": "8:00:00",
                   "contentType": "json",
                   "dayEndTime": "17:00:00",
                   "shortColumnNames": "0"}

    headers = {
        'x-rapidapi-host': "visual-crossing-weather.p.rapidapi.com",
        'x-rapidapi-key': api_key
    }

    response = requests.request("GET", url, headers=headers, params=querystring)
    return response.json()


def calculate_result(city, days_number):
    today = date.today()
    days_number = int(days_number)
    end_date = today - timedelta(days=days_number)
    data = get_weather(today, end_date, api_key, city)

    humidity = [0] * days_number
    temp = [0] * days_number
    sealevelpressure = [0] * days_number
    for day in range(days_number):
        humidity[day] = float("{0:.2f}".format(data["locations"][city]["values"][day]["humidity"]))
        temp[day] = float("{0:.2f}".format(data["locations"][city]["values"][day]["temp"]))
        sealevelpressure[day] = float("{0:.2f}".format(data["locations"][city]["values"][day]["sealevelpressure"]))

    location = data["locations"][city]["tz"]

    # JSON endpoint payload; float() keeps numpy values JSON-serializable
    res = {
        "city": location,
        "from": str(end_date),
        "to": str(today),
        "temperature_F": {
            "average": float(np.average(temp)),
            "median": float(np.median(temp)),
            "min": float(np.min(temp)),
            "max": float(np.max(temp))
        },
        "humidity": {
            "average": float(np.average(humidity)),
            "median": float(np.median(humidity)),
            "min": float(np.min(humidity)),
            "max": float(np.max(humidity))
        },
        "pressure_mb": {
            "average": float(np.average(sealevelpressure)),
            "median": float(np.median(sealevelpressure)),
            "min": float(np.min(sealevelpressure)),
            "max": float(np.max(sealevelpressure))
        }
    }
    return res


if __name__ == '__main__':
    app.run()
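# A hedged client-side check for the /weather endpoint above, assuming the app
# is running locally on Flask's default port and API_KEY is set in the
# environment; the city string is an example value.
import requests

resp = requests.get("http://127.0.0.1:5000/weather",
                    params={"city": "London,UK", "days": 3})
print(resp.json())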
from django.db import models
from django.utils.text import slugify
from django.contrib.auth.models import User

# Create your models here.
# title - location - job type - description - published at - vacancy - salary - category - experience

job_option = (
    ('full time', 'full time'),
    ('part time', 'part time'),
)


def image_upload(instance, filename):
    # NOTE: instance.id is None until the object has been saved once, so the
    # first upload lands at "job/None.<ext>"; rsplit keeps multi-dot filenames intact
    imagename, extention = filename.rsplit(".", 1)
    return "job/%s.%s" % (instance.id, extention)


class job(models.Model):
    owner = models.ForeignKey(User, related_name='job_owner', on_delete=models.CASCADE)
    title = models.CharField(max_length=100)
    job_type = models.CharField(max_length=50, choices=job_option)
    description = models.TextField(max_length=1000)
    published_at = models.DateTimeField(auto_now=True)
    Vacancy = models.IntegerField(default=1)
    salary = models.IntegerField(default=0)
    experience = models.IntegerField(default=1)
    category = models.ForeignKey('categorys', on_delete=models.CASCADE)
    image = models.ImageField(upload_to=image_upload)
    slug = models.SlugField(blank=True, null=True)

    def save(self, *args, **kwargs):
        self.slug = slugify(self.title)
        super(job, self).save(*args, **kwargs)

    def __str__(self):
        return self.title


class categorys(models.Model):
    name = models.CharField(max_length=50)
    created_at = models.DateTimeField(auto_now=True)

    def __str__(self):
        return self.name


class applayer(models.Model):
    job = models.ForeignKey(job, related_name='apply_job', on_delete=models.CASCADE)
    name = models.CharField(max_length=50)
    email = models.EmailField(max_length=254)
    site = models.URLField(max_length=200)
    cv = models.FileField(upload_to='')
    cover_letter = models.TextField(max_length=1000)

    def __str__(self):
        return self.name
d = {'cat': 'cute', 'dog': 'furry'}
for animal, trait in d.iteritems():
    print 'A %s is %s' % (animal, trait)

nums = range(5)
# nums.add(6)  # error: lists have no add() method
even_num_to_square = {x: x**2 for x in nums if x % 2 == 0}
print even_num_to_square
# Conditional test practice problems

# Conditional tests
# 1
fruit = "orange"
print("Is fruit=='orange'? I predict True.")
print(fruit == 'orange')
print("\nIs fruit=='apple'? I predict False.")
print(fruit == 'apple')

# 2
num = 23
print("\nIs num==23? I predict True.")
print(num == 23)
print("\nIs num=='22'? I predict False.")
print(num == '22')

# 3
time = '17:12'
print("\nIs time=='17:12'? I predict True.")
print(time == '17:12')
print("\nIs time=='05:12'? I predict False.")
print(time == '05:12')

# More conditional test practice
# 1
message = "I Love My Family"
print("\nExercise 1:")
print(message == 'I Love My Family')   # check that two strings are equal
print(message == 'I Love MY FaMily')   # check that two strings differ: Python string comparison is case sensitive

# 2
name = 'LYL'
print("\nExercise 2:")
print(name.lower() == 'lyl')           # lower() converts the string to lowercase before comparing

# 3
num_0 = 99
num_1 = 69
print("\nExercise 3:")
print(num_0 == num_1)
print(num_0 != num_1)
print(num_0 > num_1)
print(num_0 < num_1)
print(num_0 >= num_1)
print(num_0 <= num_1)
# numeric comparisons: equal, not equal, greater, less, and the inclusive variants

# 4
print("\nExercise 4:")
num_0 = 2
num_1 = 5
print((num_0 <= 4) and (num_1 <= 4))   # and requires both conditions to hold
print((num_0 <= 4) or (num_1 <= 4))    # or requires only one condition to hold

# 5
print("\nExercise 5:")
numbers = [0, 1, 2, 3, 4, 5]
print(0 in numbers)                    # check whether a value is in the list
number = 6
if number not in numbers:              # check whether a value is missing from the list
    print("This number is not in numbers:")
    print(number)
# For reference: http://127.0.0.1:5000/
import datetime as dt

from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func

from flask import Flask, jsonify

#################################################
# Database Setup
#################################################
engine = create_engine("sqlite:///Instructions/Resources/hawaii.sqlite")
conn = engine.connect()

# reflect an existing database into a new model
Base = automap_base()
# reflect the tables
Base.prepare(engine, reflect=True)
Base.classes.keys()

# Save references to the tables
Measurement = Base.classes.measurement
Station = Base.classes.station

# Create our session (link) from Python to the DB
session = Session(engine)

#################################################
# Flask Setup
#################################################
app = Flask(__name__)


# /
# Home page.
# List all routes that are available.
@app.route("/")
def home():
    """List all available api routes."""
    return (
        f"Available Routes:<br/>"
        f"/api/v1.0/precipitation<br/>"
        f"/api/v1.0/stations<br/>"
        f"/api/v1.0/tobs<br/>"
        f"/api/v1.0/&lt;start&gt;<br/>"
        f"enter start date as YYYY-MM-DD<br/>"
        f"/api/v1.0/&lt;start&gt;/&lt;end&gt;<br/>"
        f"enter start date as YYYY-MM-DD, '/', and end date as YYYY-MM-DD"
    )


# /api/v1.0/precipitation
# Convert the query results to a dictionary using date as the key and prcp as the value.
# Return the JSON representation of the dictionary.
@app.route("/api/v1.0/precipitation")
def precipitation():
    results = session.query(Measurement.station, Measurement.date, Measurement.prcp).\
        order_by(Measurement.date.desc()).all()

    precipitation_json = []
    # unpack in the same order as the query: station, date, prcp
    for station, date, prcp in results:
        precipitation_dict = {}
        precipitation_dict["date"] = date
        precipitation_dict["station"] = station
        precipitation_dict["prcp"] = prcp
        precipitation_json.append(precipitation_dict)

    return jsonify(precipitation_json)


# /api/v1.0/stations
# Return a JSON list of stations from the dataset.
@app.route("/api/v1.0/stations")
def stations():
    stations_list = session.query(Station.id, Station.station, Station.name,
                                  Station.latitude, Station.longitude,
                                  Station.elevation).all()

    stations_json = []
    for id_, station, name, latitude, longitude, elevation in stations_list:
        station_dict = {}
        station_dict["id"] = id_
        station_dict["name"] = name
        station_dict["lat"] = latitude
        station_dict["long"] = longitude
        station_dict["elevation"] = elevation
        stations_json.append(station_dict)

    return jsonify(stations_json)


# /api/v1.0/tobs
# Query the dates and temperature observations from the year before the last data point.
# Return a JSON list of temperature observations (tobs) for that year.
@app.route("/api/v1.0/tobs")
def tobs():
    results = session.query(Measurement.id, Measurement.station, Measurement.date,
                            Measurement.prcp, Measurement.tobs).\
        order_by(Measurement.date.desc()).all()

    last_date = [result[2] for result in results[:1]]
    last_day = dt.datetime.strptime(last_date[0], "%Y-%m-%d")
    one_year = dt.timedelta(days=365)
    year_ago = last_day - one_year

    last_year_tobs = session.query(Measurement.date, Measurement.station,
                                   Measurement.tobs).\
        filter(Measurement.date >= year_ago).\
        order_by(Measurement.date.desc()).all()

    temps_json = []
    for date, station, tobs_value in last_year_tobs:
        temp_dict = {}
        temp_dict["date"] = date
        temp_dict["station"] = station
        temp_dict["temp"] = tobs_value
        temps_json.append(temp_dict)

    return jsonify(temps_json)


# /api/v1.0/<start> and /api/v1.0/<start>/<end>
# Return a JSON list of the minimum, average, and maximum temperature for a given
# start or start-end range.
# When given the start only, calculate TMIN, TAVG, and TMAX for all dates greater
# than or equal to the start date.
# When given the start and the end date, calculate TMIN, TAVG, and TMAX for dates
# between the start and end date inclusive.
@app.route("/api/v1.0/<start>")
# Thank you to Karen Gutzman for coaching me through this syntax! I was so stuck!
def app_start_date(start):
    trip_start = calc_starttemps(start)
    trip_start_list = trip_start[0]
    temp_min = trip_start_list[0]
    temp_avg = trip_start_list[1]
    temp_max = trip_start_list[2]
    temp_dict = {'Min': temp_min, 'Avg': temp_avg, 'Max': temp_max}
    return jsonify(temp_dict)


def calc_starttemps(start_date):
    return session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs),
                         func.max(Measurement.tobs)).\
        filter(Measurement.date >= start_date).all()


@app.route("/api/v1.0/<start>/<end>")
def temp_start_end(start, end):
    """Calculate TMIN, TAVG, and TMAX for dates between start and end, inclusive."""
    trip_startend = calc_temps_startend(start, end)
    trip_startend_list = trip_startend[0]
    temp_min = trip_startend_list[0]
    temp_avg = trip_startend_list[1]
    temp_max = trip_startend_list[2]
    temp_dict = {'Min Temp': temp_min, 'Avg Temp': temp_avg, 'Max Temp': temp_max}
    return jsonify(temp_dict)


def calc_temps_startend(start_date, end_date):
    return session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs),
                         func.max(Measurement.tobs)).\
        filter(Measurement.date >= start_date).\
        filter(Measurement.date <= end_date).all()


if __name__ == "__main__":
    app.run(debug=True)
from os.path import isfile, isdir, join
from shutil import copytree, copy2

from .util import getdictvalue


def copy(param):
    getval = getdictvalue(param)
    src = getval('src')
    dest = getval('dest')
    root = getval('root')

    srcpath = join(root, src)
    destpath = join(root, dest)

    if isdir(srcpath):
        return copytree(srcpath, destpath)
    elif isfile(srcpath):
        return copy2(srcpath, destpath)
    else:
        raise Exception('not supported file type')
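# A hedged usage sketch for copy() above; the paths are placeholders. With a
# file at /tmp/demo/input.txt, this creates /tmp/demo/backup.txt via copy2().
copy({'root': '/tmp/demo', 'src': 'input.txt', 'dest': 'backup.txt'})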
#!/usr/bin/env python

if __name__ == '__main__':
    N = int(raw_input())
    numbers = []
    for i in range(N):
        numbers.append(int(raw_input()))

    inversions = 0
    for i in range(N):
        for j in range(i, N):
            if numbers[i] > numbers[j]:
                inversions += 1
    print inversions
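# The double loop above is O(N^2). A standard O(N log N) alternative counts
# inversions while merge-sorting; a sketch (written for Python 3, unlike the
# Python 2 script above):
def count_inversions(a):
    """Count pairs i < j with a[i] > a[j], piggybacking on merge sort."""
    if len(a) <= 1:
        return a, 0
    mid = len(a) // 2
    left, inv_left = count_inversions(a[:mid])
    right, inv_right = count_inversions(a[mid:])
    merged = []
    inversions = inv_left + inv_right
    i = j = 0
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            merged.append(left[i])
            i += 1
        else:
            # every remaining element of left is greater than right[j]
            inversions += len(left) - i
            merged.append(right[j])
            j += 1
    merged.extend(left[i:])
    merged.extend(right[j:])
    return merged, inversions

assert count_inversions([2, 1, 3, 1])[1] == 3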
import pandas as pd
import numpy as np
import logging
from geopy.distance import vincenty


# Fake Point Generation Algorithm
def fpga(points, point_meta, legs, segments, trip_link, DB, n=3):
    # Using the leg start/end ID --> SQL query, get dist from station lat/lon
    # for all n pts per leg start/end
    n_legs = legs.shape[0]  # just to sanity check the joins later

    stop_id_list = {'stop_id': legs[['stop_id_start', 'stop_id_end']].stack().drop_duplicates().values.tolist()}
    sql = DB.get_query('stops_position', __file__)
    stops_position = DB.postgres2pandas(sql, params=stop_id_list)

    # legs[['leg_id', 'time_start', 'time_end', 'stop_id_start', 'stop_id_end']]
    # Need segment ID to update point_meta, and add new point id -> max(point_id) + constant
    new_col_name = {'leg_id': 'leg_id',
                    'time_start': 'time', 'time_end': 'time',
                    'stop_id_start': 'stop_id', 'stop_id_end': 'stop_id'}
    reformated_legs = pd.concat(
        [legs[['time_start', 'stop_id_start']].reset_index().rename(columns=new_col_name),
         legs[['time_end', 'stop_id_end']].reset_index().rename(columns=new_col_name)],
        ignore_index=True, axis=0)
    reformated_legs = pd.merge(reformated_legs, stops_position,
                               left_on='stop_id', right_on='stop_id')
    reformated_legs[['lat', 'lon', 'distance']] = reformated_legs.apply(
        lambda x: approx_edge_position(x, points), axis=1)

    # Create point entries
    # points -- point_id, lat, lon, time, horizontal_accuracy, within_mot_segment
    # point_meta -- segment_id, point_id, distance, ooo_outlier, is_long_stop
    reformated_legs['horizontal_accuracy'] = 10
    reformated_legs['within_mot_segment'] = False
    reformated_legs['ooo_outlier'] = False
    reformated_legs['is_long_stop'] = True
    # negative IDs to identify the fake points later
    reformated_legs['point_id'] = -(points.index.max() + 1 + reformated_legs.index)

    # Add segment IDs
    leg_seg_link = pd.merge(trip_link.reset_index(level=(0, 1, 2), drop=True).reset_index().dropna(),
                            segments.reset_index()[['segment_id']], on='segment_id')
    reformated_legs = pd.merge(reformated_legs, leg_seg_link, on='leg_id')

    # Append new points
    pts_col_names = ['point_id', 'lat', 'lon', 'time', 'horizontal_accuracy', 'within_mot_segment']
    new_points = reformated_legs[pts_col_names].set_index('point_id', drop=True)
    points = pd.concat([new_points, points])

    # Append new point meta information
    pmeta_col_names = ['segment_id', 'point_id', 'distance', 'ooo_outlier', 'is_long_stop']
    new_point_meta = reformated_legs[pmeta_col_names].set_index(['segment_id', 'point_id'], drop=True)
    point_meta = pd.concat([new_point_meta, point_meta])

    # Just in case some weird stuff happens in the joins (has previously happened)
    if new_points.shape[0] != (2 * n_legs):
        logging.error('fpga : new points shape has {x} items but there are {n} legs'.format(
            x=new_points.shape[0], n=n_legs))
    if new_point_meta.shape[0] != (2 * n_legs):
        logging.error('fpga : new point_meta shape has {x} items but there are {n} legs'.format(
            x=new_point_meta.shape[0], n=n_legs))

    return points, point_meta


def approx_edge_position(x, points, n=3):
    # Initial selection of only the n points nearest in time
    points_subset = points.iloc[np.abs(points['time'] - x['time']).argsort().iloc[:n], :]
    # Select the best point of the subset using selection_criterion()
    xy = points_subset.iloc[points_subset.apply(lambda p: selection_criterion(x, p), axis=1).argsort().iloc[0], :]
    # Need to return a Series for the .apply() call above
    xy = xy[['lat', 'lon']]  # in case the selection criterion is not exclusively distance
    xy['distance'] = vincenty((x['lat'], x['lon']), (xy['lat'], xy['lon'])).meters
    return xy


def selection_criterion(x, p):
    # t = np.abs(p['time'] - x['time'])
    # vincenty is a more precise great-circle calculation
    y = vincenty((x['lat'], x['lon']), (p['lat'], p['lon'])).meters
    # print (t, t/np.timedelta64(1, 'h')*speed, y)
    return y
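# Note: geopy removed vincenty() in geopy 2.0; geodesic() is the drop-in
# replacement with the same (lat, lon) tuple interface, so the module above
# only runs on geopy 1.x as written. The example coordinates are placeholders.
from geopy.distance import geodesic

oslo = (59.9139, 10.7522)
bergen = (60.3913, 5.3221)
print(geodesic(oslo, bergen).meters)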
from django.contrib.auth.tokens import PasswordResetTokenGenerator
from django.utils.http import urlsafe_base64_encode
from django.utils.encoding import force_bytes

from tutorial.settings import BASE_URL


class AccountActivationTokenGenerator(PasswordResetTokenGenerator):
    def _make_hash_value(self, user, timestamp):
        # The hash value must be a string: str() replaces the old
        # six.text_type() calls; summing the raw values would add integers
        # instead of concatenating text.
        return str(user.id) + str(timestamp) + str(user.is_active)


token_generator = AccountActivationTokenGenerator()


def create_email_confirm_url(user_id, token):
    return '{}/auth/confirm_email/{}/{}'.format(
        BASE_URL,
        urlsafe_base64_encode(force_bytes(user_id)),
        token
    )
import pygame
from pygame.color import THECOLORS

if __name__ == '__main__':
    # Init pygame window
    pygame.init()
    screen = pygame.display.set_mode((640, 480))
    screen.fill([255, 255, 255])

    # Compute the points...
    dots = [[221, 432], [225, 331], [133, 342], [141, 310], [51, 230],
            [74, 217], [58, 153], [114, 164], [123, 135], [176, 190],
            [159, 77], [193, 93], [230, 28], [267, 93], [310, 77],
            [284, 190], [327, 135], [336, 164], [402, 153], [386, 217],
            [409, 230], [319, 310], [327, 342], [233, 331], [237, 432]]

    # Actual drawing is here...
    pygame.draw.lines(screen, THECOLORS["black"], True, dots, 2)

    # Flip and show
    pygame.display.flip()

    # Keep the screen alive ...
    running = True
    while running:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
    pygame.quit()
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat May  4 10:19:57 2019

@author: allen
"""
import re

snum = 0
fhand = open('actualdata.txt')
for line in fhand:
    nums = re.findall('[0-9]+', line)
    if len(nums) == 0:
        continue
    for num in nums:
        snum = snum + int(num)
print('sum of the numbers: ', snum)
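# The same sum as a single pass: re.findall over the whole file contents
# replaces the per-line loop (reuses the actualdata.txt file from above).
import re

with open('actualdata.txt') as fhand:
    total = sum(int(num) for num in re.findall(r'[0-9]+', fhand.read()))
print('sum of the numbers: ', total)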
#!/usr/bin/env python
from datetime import datetime as d
import hashlib

t = str(d.now())
print hashlib.sha1(t).hexdigest()[:5]
# -*- coding: utf-8 -*-
"""
    Turma.test_models
    ~~~~~~~~~~~~~~

    Tests model-related behaviour.

    :copyright: (c) 2011 by Felipe Arruda Pontes.
"""
from django.test import TestCase
from model_mommy import mommy

from Materia.Turma.models import Turma


class TurmaTest(TestCase):
    def setUp(self):
        self.turma = mommy.make_one(Turma)

    def test_turma_save(self):
        "checks that a Turma can be saved"
        self.turma.save()
        self.assertEqual(self.turma.id, 1)
# -*- encoding: utf-8 -*-
"""
Command line interface for rhasspy_weather.
"""
# author: ulno
# created: 2020-03-31

import sys
import json
import logging

logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.DEBUG)
log = logging.getLogger(__name__)

from rhasspy_weather.data_types.report import WeatherReport
from rhasspy_weather.data_types.config import get_config
from cli_parser import parse_cli_args
from rhasspy_weather.parser.rhasspy_intent import parse_intent_message

# # hack to allow the correct locale to be used in argparse - TODO: check, might be obsolete due to changes upstream
# syspath_backup = sys.path
# sys.path = []
# for p in syspath_backup:
#     if "weather" not in p:
#         sys.path.append(p)

import argparse


# handles the steps necessary to produce a forecast
def get_weather_forecast(args):
    config = get_config()
    if config is None:
        return "Configuration could not be read. Please make sure everything is set up correctly"

    log.info("Parsing rhasspy intent")
    if args.json is not None:
        if args.json == "-":
            # read and parse json from stdin and send it to rhasspy_weather
            data = json.load(sys.stdin)
        else:
            data = json.loads(args.json)
        request = parse_intent_message(data)
    else:
        # request = config.parser.parse_cli_args(args)
        # if the parser got moved to rhasspy_weather it would be called like this
        request = parse_cli_args(args)
    if request.status.is_error:
        return request.status.status_response()

    log.info("Requesting weather")
    forecast = config.api.get_weather(request.location)
    if forecast.status.is_error:
        return forecast.status.status_response()

    log.info("Formulating answer")
    response = WeatherReport(request, forecast).generate_report()
    return response


def parse():
    parser = argparse.ArgumentParser()
    parser.add_argument('-d', '--day', help='Forecast day (sunday, monday, ...) or "day month".')  # when_day
    parser.add_argument('-t', '--time', help='Forecast time')  # when_time
    parser.add_argument('-l', '--location', help='Forecast location')  # location
    parser.add_argument('-i', '--item', help='Is a specific item (like an umbrella) needed/recommended.')  # item
    parser.add_argument('-c', '--condition', help='Is a specific condition active at a given time.')  # condition
    parser.add_argument('-e', '--temperature', help='Temperature forecast.')  # temperature
    parser.add_argument('-j', '--json',
                        help="Receive json in rhasspy intent event format as one parameter string, or via stdin "
                             "when this is set to a dash (-), and forward it to the rhasspy_weather component.")
    args = parser.parse_args()
    # sys.path = syspath_backup  # restore sys.path to allow the local locale to be used - TODO: still necessary?
    print(get_weather_forecast(args))


if __name__ == '__main__':
    parse()
# Description from Triplebyte proctor
# Big log file; all the questions are things you want to know about it
import re


# This function parses the log file and returns how many of the requests gave a 404.
def response_not_found():
    file_name = "apache_logs"
    log_file = open(file_name, "r")

    not_found_counter = 0
    for line in log_file:
        # Go through each line, cut out the response code, and count it.
        split_line = line.split()
        if split_line[6] == "404":
            not_found_counter += 1

    print("We found this many 404 codes: " + str(not_found_counter))
    return not_found_counter


# Get the unique ips
def unique_ips():
    file_name = "apache_logs"
    log_file = open(file_name, "r")

    unique_set = set()
    for line in log_file:
        # Go through each line; the client ip is the first field.
        split_line = line.split()
        if split_line[0] not in unique_set:
            unique_set.add(split_line[0])

    unique_count = len(unique_set)
    print("We found this many unique ips: " + str(unique_count))
    return unique_count


# Get all the bytes in responses on a given date.
def size_response():
    file_name = "apache_logs"
    log_file = open(file_name, "r")

    # Sum every response size from the appropriate date.
    bytes_on_date = 0
    for line in log_file:
        split_line = line.split()
        if "[18/May/2015" in line:
            # A dash means no data was sent
            if split_line[7] == "-":
                continue
            # You found the date, count the bytes.
            bytes_on_date += int(split_line[7])

    print("Bytes on date responses: " + str(bytes_on_date))
    return bytes_on_date


# Get the top five resources accessed (URLs)
def top_five():
    file_name = "apache_logs"
    log_file = open(file_name, "r")

    # Tally each resource; index 4 is where this log layout keeps the URL.
    request_dict = {}
    for line in log_file:
        split_line = line.split()
        resource = split_line[4]
        request_dict[resource] = request_dict.get(resource, 0) + 1

    top = sorted(request_dict.items(), key=lambda kv: kv[1], reverse=True)[:5]
    print("The top five resources are: " + str(top))
    return top


# response_not_found()
# unique_ips()
# size_response()
# Get the top five responses.
# top_five()
# Find the busiest thirty minute window.
# thirty_minute_window()
# Bonus -> Scan the logs and see any security issues. I saw a bunch of logs trying
# to access admin pages and reset passwords. That's bad.
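# The commented-out thirty_minute_window() above is never defined; a hedged
# sketch using a two-pointer sweep over the parsed timestamps. It assumes the
# common Apache format [17/May/2015:10:05:03 +0000] in the fourth field.
from datetime import datetime, timedelta

def thirty_minute_window(file_name="apache_logs"):
    times = []
    with open(file_name) as log_file:
        for line in log_file:
            stamp = line.split()[3].lstrip("[")
            times.append(datetime.strptime(stamp, "%d/%b/%Y:%H:%M:%S"))
    times.sort()

    # slide a 30-minute window: advance lo whenever the span gets too wide
    best_count, best_start, lo = 0, None, 0
    for hi in range(len(times)):
        while times[hi] - times[lo] > timedelta(minutes=30):
            lo += 1
        if hi - lo + 1 > best_count:
            best_count, best_start = hi - lo + 1, times[lo]
    return best_start, best_count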
# -*- coding: utf-8 -*-
import json
import requests
import decimal
import math
import os
import time

try:
    from urllib.parse import urlparse
    from urllib.parse import urlencode
except ImportError:
    from urlparse import urlparse
    from urllib import urlencode


def http_get_request(url, params=None, add_to_headers=None):
    headers = {
        "Accept": "application/json",
        # 'Content-Type': 'application/json',
        'Accept-Language': 'zh-cn',
        'Content-Type': 'application/x-www-form-urlencoded',
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.71',
    }
    if add_to_headers:
        headers.update(add_to_headers)
    if params is not None:
        postdata = urlencode(params)
        url = url + "&" + postdata if url.find('?') >= 0 else url + "?" + postdata
    try:
        sess_req = requests.Session()
        response = sess_req.get(url, headers=headers, timeout=30)
        # response = requests.get(url, headers=headers, timeout=30)
        if response.status_code == 200:
            return response.json()
        else:
            print("%s\r\n%s" % (url, response.content))
            return False
    except BaseException as e:
        print("httpGet failed, detail is: %s" % str(e))
        return False


def http_post_request(url, params=None, add_to_headers=None):
    headers = {
        "Accept": "application/json",
        'Content-Type': 'application/json',
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.71',
    }
    if add_to_headers:
        headers.update(add_to_headers)
    postdata = None
    if params is not None:
        postdata = json.dumps(params)
    try:
        response = requests.post(url, postdata, headers=headers, timeout=30)
        if response.status_code == 200:
            return response.json()
        else:
            print("%s\r\n%s" % (url, response.content))
            return False
    except BaseException as e:
        print("httpPost failed, detail is: %s" % str(e))
        return


def set_price(price, _type=10.0):
    ps = price.split('.')
    ln = len(ps[1])
    ctx = decimal.Context()
    ctx.prec = ln
    num = math.pow(10, ln)
    offset = _type / num
    price = float(price) + offset
    d2 = ctx.create_decimal(repr(price))
    return '{:.{prec}f}'.format(d2, prec=ln)


def half_price(high, low):
    rise_price = high - low
    if rise_price == 0:
        return high
    ps = float_to_string(rise_price).split('.')
    ln = len(ps[1])
    num = math.pow(10, ln)
    if (rise_price * num) % 2 != 0:
        rise_price = ((rise_price * num) + 1) / (2 * num)
    else:
        rise_price = rise_price / 2
    f_price = low + rise_price
    return float('{:.{prec}f}'.format(f_price, prec=ln))


def price_percent(price, percent=1.5):
    rise_price = (price * percent) / 100
    f_price = price + rise_price
    return float('{:.{prec}f}'.format(f_price, prec=8))


def float_to_string(number, precision=20):
    return '{0:.{prec}f}'.format(
        number, prec=precision,
    ).rstrip('0').rstrip('.') or '0'


def load_json(json_file):
    curr_date = time.strftime("%Y%m%d", time.localtime())
    base_path = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
    log_path = os.path.join(base_path, 'logs', curr_date)
    if not os.path.exists(log_path):
        os.makedirs(log_path)
    try:
        with open(os.path.join(log_path, json_file)) as f:
            _json = json.loads(f.read())
    except (IOError, ValueError):
        _json = []
    return _json


def save_json(_json, json_file):
    curr_date = time.strftime("%Y%m%d", time.localtime())
    base_path = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
    log_path = os.path.join(base_path, 'logs', curr_date)
    if not os.path.exists(log_path):
        os.makedirs(log_path)
    try:
        with open(os.path.join(log_path, json_file), 'w') as f:
            f.write(json.dumps(_json))
    except (IOError, TypeError):
        pass


def getcfg_quant(symbal):
    """
    Get the JSON contents of the strategy config file for the given symbal.
    :param symbal:
    :return:
    """
    base_path = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
    quant_file = os.path.join(base_path, 'config', symbal + '.json')
    default_cfg = {
        'trade_status': 0,
        'buy_price': 0,
        'sell_price': 0,
        'stop_price': 0,
        'buy_time': 0,
        'sell_time': 0,
        'quant': ''
    }
    try:
        with open(quant_file) as f:
            _json = json.load(f)
        return _json if 'trade_status' in _json.keys() else default_cfg
    except (IOError, ValueError):
        return default_cfg


def setcfg_quant(symbal, j_quant):
    """
    Save the JSON contents of the strategy config file for the given symbal.
    :param symbal:
    :param j_quant: dict holding the current quant configuration
    :return:
    """
    base_path = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
    quant_file = os.path.join(base_path, 'config', symbal + '.json')
    try:
        with open(quant_file, 'w') as f:
            json.dump(j_quant, f)
    except (IOError, TypeError):
        # print("%s %s" % (str(IOError), str(TypeError)))
        pass


if __name__ == '__main__':
    a = {'trade_status': 1, 'buy_price': 0, 'stop_price': 0}
    b = getcfg_quant('qqq')
    print(b)
    setcfg_quant('bbb', a)
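# Quick sanity checks of the price helpers above, with hand-worked expected
# values (the inputs are made-up examples chosen to be exact in binary).
print(set_price('0.123'))         # expected '0.133' (adds 10 units of the last decimal)
print(half_price(1.5, 1.25))      # expected 1.38 (midpoint, rounded up at the last place)
print(price_percent(100, 1.5))    # expected 101.5
print(float_to_string(0.000012))  # expected '0.000012' (no scientific notation)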
import urllib3

# %%
http = urllib3.PoolManager()
rq = http.request('GET', url='http://www.tipdm.com/tipdm/index.html')
print("Server status code:", rq.status)
# print("Response body:", rq.data)

# %%
http = urllib3.PoolManager()
head = {'User-Agent': 'Windows NT 6.1; Win64; x86'}
rq = http.request('GET', url='http://www.tipdm.com/tipdm/index.html', headers=head)
print("Server status code:", rq.status)

# %%
# timeout: a plain float sets the overall response timeout (3.0 seconds here)
http = urllib3.PoolManager()
head = {'User-Agent': 'Windows NT 6.1; Win64; x86'}
rq = http.request('GET', url='http://www.tipdm.com/tipdm/index.html', headers=head, timeout=3.0)

# urllib3.Timeout() sets separate connect and read timeouts
http = urllib3.PoolManager()
head = {'User-Agent': 'Windows NT 6.1; Win64; x86'}
rq = http.request('GET', url='http://www.tipdm.com/tipdm/index.html', headers=head,
                  timeout=urllib3.Timeout(connect=1.0, read=3.0))

# The timeout can also be set once on the pool itself
http = urllib3.PoolManager(timeout=3.0)
rq = http.request('GET', url='http://www.tipdm.com/tipdm/index.html')
print("Server status code:", rq.status)

# %%
# retries controls retry attempts, redirect controls redirects
# retries=False disables both retries and redirects
http = urllib3.PoolManager(timeout=3.0, retries=False)
# retries=10 retries up to 10 times
http = urllib3.PoolManager(timeout=3.0, retries=10)

# %%
# A complete request, start to finish
# Pool manager instance
http = urllib3.PoolManager()
# Target URL
url = 'http://www.tipdm.com/tipdm/index.html'
# Request headers
head = {
    'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 '
                  'Safari/537.36'}
# Connect and read timeouts
tm = urllib3.Timeout(connect=1.0, read=3.0)
# Retry and redirect limits
rq = http.request('GET', url=url, headers=head, timeout=tm, retries=5, redirect=4)
print("Server status code:", rq.status)
print("Response body:", rq.data.decode('utf-8'))
import datetime

from flask import Flask, render_template

app = Flask(__name__)


@app.route('/')
def hello():
    now = datetime.datetime.now()
    # Pick the (text, second_text) pair for the current hour; the branches
    # partition the day, so exactly one of them matches.
    if 0 <= now.hour < 9:
        text, second_text = (
            "L'avenir appartient à ceux qui se lèvent tôt.",
            "Nan, c'est une blague.",
        )
    elif 9 <= now.hour < 11:
        text, second_text = (
            "Non, mais c'est l'heure d'aller à la machine à café.",
            "",
        )
    elif 11 <= now.hour < 12:
        text, second_text = (
            "C'est l'heure de l'apéro !",
            "Et c'est presque mieux que la fin de la journée.",
        )
    elif 12 <= now.hour < 14:
        text, second_text = (
            "Ou tu continues l'apéro, ou tu vas manger.",
            "Mais va falloir prendre une décision.",
        )
    elif 14 <= now.hour < 16:
        text, second_text = (
            "Si t'es gilet jaune, oui.",
            "",
        )
    elif 16 <= now.hour < 18:
        text, second_text = (
            "Nan, mais c'est tout comme.",
            "",
        )
    else:  # 18h and later
        text, second_text = (
            "Rentre chez toi, on est aux 35h ici...",
            "",
        )
    return render_template('index.html', text=text, second_text=second_text)
def eligible_for_vote(age):
    if age >= 18:
        print("he is eligible")
    else:
        print("he is not eligible")


def eligible_for_vote2(age):
    # Strict comparison: an 18-year-old meets the threshold, so only ages
    # below 18 are ineligible.
    if age < 18:
        print("he is not eligible")
    else:
        print("he is eligible")


eligible_for_vote2(18)
eligible_for_vote(20)
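# --- Hedged sketch (not in the original): the same rule as a reusable
# predicate that returns a bool instead of printing, which makes it easier
# to test and compose.
def is_eligible_to_vote(age):
    return age >= 18


print(is_eligible_to_vote(18))  # True
print(is_eligible_to_vote(17))  # False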
# coding=utf-8 from __future__ import unicode_literals import datetime import pytz from future.utils import raise_from class EWSDate(datetime.date): """ Extends the normal date implementation to satisfy EWS """ __slots__ = '_year', '_month', '_day', '_hashcode' def ewsformat(self): """ ISO 8601 format to satisfy xs:date as interpreted by EWS. Example: 2009-01-15 """ return self.strftime('%Y-%m-%d') class EWSDateTime(datetime.datetime): """ Extends the normal datetime implementation to satisfy EWS """ __slots__ = '_year', '_month', '_day', '_hour', '_minute', '_second', '_microsecond', '_tzinfo', '_hashcode' def __new__(cls, *args, **kwargs): """ Inherits datetime and adds extra formatting required by EWS. """ if 'tzinfo' in kwargs: # Creating raise ValueError('Do not set tzinfo directly. Use EWSTimeZone.localize() instead') self = super(EWSDateTime, cls).__new__(cls, *args, **kwargs) return self def ewsformat(self): """ ISO 8601 format to satisfy xs:datetime as interpreted by EWS. Example: 2009-01-15T13:45:56Z """ assert self.tzinfo # EWS datetimes must always be timezone-aware if self.tzinfo.zone == 'UTC': return self.strftime('%Y-%m-%dT%H:%M:%SZ') return self.strftime('%Y-%m-%dT%H:%M:%S') @classmethod def from_datetime(cls, d): dt = cls(d.year, d.month, d.day, d.hour, d.minute, d.second, d.microsecond) if d.tzinfo: if isinstance(d.tzinfo, EWSTimeZone): return d.tzinfo.localize(dt) return EWSTimeZone.from_pytz(d.tzinfo).localize(dt) return dt def astimezone(self, tz=None): t = super(EWSDateTime, self).astimezone(tz=tz) return self.from_datetime(t) # We want to return EWSDateTime objects def __add__(self, other): t = super(EWSDateTime, self).__add__(other) return self.from_datetime(t) # We want to return EWSDateTime objects def __sub__(self, other): t = super(EWSDateTime, self).__sub__(other) if isinstance(t, datetime.timedelta): return t return self.from_datetime(t) # We want to return EWSDateTime objects @classmethod def from_string(cls, date_string): # Assume UTC and return timezone-aware EWSDateTime objects local_dt = super(EWSDateTime, cls).strptime(date_string, '%Y-%m-%dT%H:%M:%SZ') return EWSTimeZone.from_pytz(pytz.utc).localize(cls.from_datetime(local_dt)) @classmethod def now(cls, tz=None): # We want to return EWSDateTime objects t = super(EWSDateTime, cls).now(tz=tz) return cls.from_datetime(t) class EWSTimeZone(object): """ Represents a timezone as expected by the EWS TimezoneContext / TimezoneDefinition XML element, and returned by services.GetServerTimeZones. """ @classmethod def from_pytz(cls, tz): # pytz timezones are dynamically generated. Subclass the tz.__class__ and add the extra Microsoft timezone # labels we need. self_cls = type(cls.__name__, (cls, tz.__class__), dict(tz.__class__.__dict__)) try: self_cls.ms_id = cls.PYTZ_TO_MS_MAP[tz.zone] except KeyError as e: raise_from(ValueError('Please add an entry for "%s" in PYTZ_TO_MS_TZMAP' % tz.zone), e) try: self_cls.ms_name = cls.MS_TIMEZONE_DEFINITIONS[self_cls.ms_id] except KeyError as e: raise_from(ValueError('PYTZ_TO_MS_MAP value %s must be a key in MS_TIMEZONE_DEFINITIONS' % self_cls.ms_id), e) self = self_cls() for k, v in tz.__dict__.items(): setattr(self, k, v) return self @classmethod def timezone(cls, location): # Like pytz.timezone() but returning EWSTimeZone instances tz = pytz.timezone(location) return cls.from_pytz(tz) def normalize(self, dt): # super() returns a dt.tzinfo of class pytz.tzinfo.FooBar. 
We need to return type EWSTimeZone res = super(EWSTimeZone, self).normalize(dt) return res.replace(tzinfo=self.from_pytz(res.tzinfo)) def localize(self, dt): # super() returns a dt.tzinfo of class pytz.tzinfo.FooBar. We need to return type EWSTimeZone res = super(EWSTimeZone, self).localize(dt) return res.replace(tzinfo=self.from_pytz(res.tzinfo)) # Manually maintained translation between pytz location / timezone name and MS timezone IDs PYTZ_TO_MS_MAP = { 'UTC': 'UTC', 'GMT': 'GMT Standard Time', 'US/Pacific': 'Pacific Standard Time', 'US/Eastern': 'Eastern Standard Time', 'Europe/Copenhagen': 'Romance Standard Time', } # This is a somewhat authoritative list of the timezones available on an Exchange server. Format is (id, name). # For a full list supported by the target server, see output of services.GetServerTimeZones(account.protocol).call() MS_TIMEZONE_DEFINITIONS = dict([ ('Dateline Standard Time', '(UTC-12:00) International Date Line West'), ('UTC-11', '(UTC-11:00) Coordinated Universal Time-11'), ('Samoa Standard Time', '(UTC-11:00) Midway Island, Samoa'), ('Hawaiian Standard Time', '(UTC-10:00) Hawaii'), ('Alaskan Standard Time', '(UTC-09:00) Alaska'), ('Pacific Standard Time', '(UTC-08:00) Pacific Time (US & Canada)'), ('Pacific Standard Time (Mexico)', '(UTC-08:00) Tijuana, Baja California'), ('US Mountain Standard Time', '(UTC-07:00) Arizona'), ('Mountain Standard Time (Mexico)', '(UTC-07:00) Chihuahua, La Paz, Mazatlan'), ('Mountain Standard Time', '(UTC-07:00) Mountain Time (US & Canada)'), ('Central America Standard Time', '(UTC-06:00) Central America'), ('Central Standard Time', '(UTC-06:00) Central Time (US & Canada)'), ('Central Standard Time (Mexico)', '(UTC-06:00) Guadalajara, Mexico City, Monterrey'), ('Canada Central Standard Time', '(UTC-06:00) Saskatchewan'), ('SA Pacific Standard Time', '(UTC-05:00) Bogota, Lima, Quito'), ('Eastern Standard Time', '(UTC-05:00) Eastern Time (US & Canada)'), ('US Eastern Standard Time', '(UTC-05:00) Indiana (East)'), ('Venezuela Standard Time', '(UTC-04:30) Caracas'), ('Paraguay Standard Time', '(UTC-04:00) Asuncion'), ('Atlantic Standard Time', '(UTC-04:00) Atlantic Time (Canada)'), ('SA Western Standard Time', '(UTC-04:00) Georgetown, La Paz, San Juan'), ('Central Brazilian Standard Time', '(UTC-04:00) Manaus'), ('Pacific SA Standard Time', '(UTC-04:00) Santiago'), ('Newfoundland Standard Time', '(UTC-03:30) Newfoundland'), ('E. South America Standard Time', '(UTC-03:00) Brasilia'), ('Argentina Standard Time', '(UTC-03:00) Buenos Aires'), ('SA Eastern Standard Time', '(UTC-03:00) Cayenne'), ('Greenland Standard Time', '(UTC-03:00) Greenland'), ('Montevideo Standard Time', '(UTC-03:00) Montevideo'), ('UTC-02', '(UTC-02:00) Coordinated Universal Time-02'), ('Mid-Atlantic Standard Time', '(UTC-02:00) Mid-Atlantic'), ('Azores Standard Time', '(UTC-01:00) Azores'), ('Cape Verde Standard Time', '(UTC-01:00) Cape Verde Is.'), ('Morocco Standard Time', '(UTC) Casablanca'), ('UTC', '(UTC) Coordinated Universal Time'), ('GMT Standard Time', '(UTC) Greenwich Mean Time : Dublin, Edinburgh, Lisbon, London'), ('Greenwich Standard Time', '(UTC) Monrovia, Reykjavik'), ('W. Europe Standard Time', '(UTC+01:00) Amsterdam, Berlin, Bern, Rome, Stockholm, Vienna'), ('Central Europe Standard Time', '(UTC+01:00) Belgrade, Bratislava, Budapest, Ljubljana, Prague'), ('Romance Standard Time', '(UTC+01:00) Brussels, Copenhagen, Madrid, Paris'), ('Central European Standard Time', '(UTC+01:00) Sarajevo, Skopje, Warsaw, Zagreb'), ('W. 
Central Africa Standard Time', '(UTC+01:00) West Central Africa'), ('Namibia Standard Time', '(UTC+02:00) Windhoek'), ('Jordan Standard Time', '(UTC+02:00) Amman'), ('GTB Standard Time', '(UTC+02:00) Athens, Bucharest, Istanbul'), ('Middle East Standard Time', '(UTC+02:00) Beirut'), ('Egypt Standard Time', '(UTC+02:00) Cairo'), ('Syria Standard Time', '(UTC+02:00) Damascus'), ('South Africa Standard Time', '(UTC+02:00) Harare, Pretoria'), ('FLE Standard Time', '(UTC+02:00) Helsinki, Kyiv, Riga, Sofia, Tallinn, Vilnius'), ('Israel Standard Time', '(UTC+02:00) Jerusalem'), ('E. Europe Standard Time', '(UTC+02:00) Minsk'), ('Arabic Standard Time', '(UTC+03:00) Baghdad'), ('Arab Standard Time', '(UTC+03:00) Kuwait, Riyadh'), ('Russian Standard Time', '(UTC+03:00) Moscow, St. Petersburg, Volgograd'), ('E. Africa Standard Time', '(UTC+03:00) Nairobi'), ('Iran Standard Time', '(UTC+03:30) Tehran'), ('Georgian Standard Time', '(UTC+03:00) Tbilisi'), ('Arabian Standard Time', '(UTC+04:00) Abu Dhabi, Muscat'), ('Azerbaijan Standard Time', '(UTC+04:00) Baku'), ('Mauritius Standard Time', '(UTC+04:00) Port Louis'), ('Caucasus Standard Time', '(UTC+04:00) Yerevan'), ('Afghanistan Standard Time', '(UTC+04:30) Kabul'), ('Ekaterinburg Standard Time', '(UTC+05:00) Ekaterinburg'), ('Pakistan Standard Time', '(UTC+05:00) Islamabad, Karachi'), ('West Asia Standard Time', '(UTC+05:00) Tashkent'), ('India Standard Time', '(UTC+05:30) Chennai, Kolkata, Mumbai, New Delhi'), ('Sri Lanka Standard Time', '(UTC+05:30) Sri Jayawardenepura'), ('Nepal Standard Time', '(UTC+05:45) Kathmandu'), ('N. Central Asia Standard Time', '(UTC+06:00) Almaty, Novosibirsk'), ('Central Asia Standard Time', '(UTC+06:00) Astana'), ('Bangladesh Standard Time', '(UTC+06:00) Dhaka'), ('Myanmar Standard Time', '(UTC+06:30) Yangon (Rangoon)'), ('SE Asia Standard Time', '(UTC+07:00) Bangkok, Hanoi, Jakarta'), ('North Asia Standard Time', '(UTC+07:00) Krasnoyarsk'), ('China Standard Time', '(UTC+08:00) Beijing, Chongqing, Hong Kong, Urumqi'), ('North Asia East Standard Time', '(UTC+08:00) Irkutsk, Ulaan Bataar'), ('Singapore Standard Time', '(UTC+08:00) Kuala Lumpur, Singapore'), ('W. Australia Standard Time', '(UTC+08:00) Perth'), ('Taipei Standard Time', '(UTC+08:00) Taipei'), ('Ulaanbaatar Standard Time', '(UTC+08:00) Ulaanbaatar'), ('Tokyo Standard Time', '(UTC+09:00) Osaka, Sapporo, Tokyo'), ('Korea Standard Time', '(UTC+09:00) Seoul'), ('Yakutsk Standard Time', '(UTC+09:00) Yakutsk'), ('Cen. Australia Standard Time', '(UTC+09:30) Adelaide'), ('AUS Central Standard Time', '(UTC+09:30) Darwin'), ('E. Australia Standard Time', '(UTC+10:00) Brisbane'), ('AUS Eastern Standard Time', '(UTC+10:00) Canberra, Melbourne, Sydney'), ('West Pacific Standard Time', '(UTC+10:00) Guam, Port Moresby'), ('Tasmania Standard Time', '(UTC+10:00) Hobart'), ('Vladivostok Standard Time', '(UTC+10:00) Vladivostok'), ('Magadan Standard Time', '(UTC+11:00) Magadan'), ('Central Pacific Standard Time', '(UTC+11:00) Magadan, Solomon Is., New Caledonia'), ('New Zealand Standard Time', '(UTC+12:00) Auckland, Wellington'), ('UTC+12', '(UTC+12:00) Coordinated Universal Time+12'), ('Fiji Standard Time', '(UTC+12:00) Fiji, Marshall Is.'), ('Kamchatka Standard Time', '(UTC+12:00) Petropavlovsk-Kamchatsky'), ('Tonga Standard Time', "(UTC+13:00) Nuku'alofa"), ]) UTC = EWSTimeZone.timezone('UTC') UTC_NOW = lambda: EWSDateTime.now(tz=UTC)
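# --- Hedged usage sketch (not part of the original module; assumes pytz and
# future are installed, as the imports above require). EWSTimeZone.timezone()
# only accepts zones listed in PYTZ_TO_MS_MAP.
if __name__ == '__main__':
    tz = EWSTimeZone.timezone('Europe/Copenhagen')
    dt = tz.localize(EWSDateTime(2016, 1, 15, 13, 45, 56))
    print(dt.ewsformat())         # 2016-01-15T13:45:56
    print(UTC_NOW().ewsformat())  # current UTC time, 'Z'-suffixed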
import os
import shutil

import tiles  # local helper module providing deg2num()

if __name__ == "__main__":
    #tileBL = tiles.deg2num(0, 4095) #Planet
    #tileTR = tiles.deg2num(4095, 0) #Planet
    #tileBL = tiles.deg2num(51.7882364, -3.4765251, 12) #Hampshire?
    #tileTR = tiles.deg2num(52.3707994, -2.2782056, 12) #Hampshire?
    #tileBL = tiles.deg2num(27.673799, 32.1679688, 12) #Sinai
    #tileTR = tiles.deg2num(31.297328, 35.0024414, 12) #Sinai
    #tileBL = tiles.deg2num(51.00434, -4.02825, 12) #Exmoor
    #tileTR = tiles.deg2num(51.26630, -3.26607, 12) #Exmoor
    #tileBL = tiles.deg2num(49.0018439, -0.6632996, 12) #Caen
    #tileTR = tiles.deg2num(49.3644891, 0.0054932, 12) #Caen
    #tileBL = tiles.deg2num(49.6676278, -14.765625, 12) #UK and Eire
    #tileTR = tiles.deg2num(61.1856247, 2.2851563, 12) #UK and Eire
    tileBL = tiles.deg2num(-47.279229, 107.7539063, 12) #Aus
    tileTR = tiles.deg2num(-9.2756222, 162.5976563, 12) #Aus
    print(tileBL, tileTR)

    count = 0
    outFolder = "australia"
    if not os.path.exists(outFolder):
        os.mkdir(outFolder)

    # Total tile count for progress reporting; the loops below run
    # tileBL[0]..tileTR[0] in x and tileTR[1]..tileBL[1] in y.
    total = (tileTR[0] - tileBL[0] + 1) * (tileBL[1] - tileTR[1] + 1)
    for x in range(tileBL[0], tileTR[0] + 1):
        for y in range(tileTR[1], tileBL[1] + 1):
            print(count, total, x, y)
            count += 1
            inFina = "12/{0}/{1}.osm.bz2".format(x, y)
            if not os.path.isfile(inFina):
                print("File not found", inFina)
                exit(0)
            if not os.path.isdir(outFolder + "/12"):
                os.mkdir(outFolder + "/12")
            if not os.path.isdir(outFolder + "/12/{0}".format(x)):
                os.mkdir(outFolder + "/12/{0}".format(x))
            outFina = outFolder + "/12/{0}/{1}.osm.bz2".format(x, y)
            shutil.copyfile(inFina, outFina)

    print("All done!")
# Create 20 placeholder test cases: each .in file is left empty and each
# .out file contains just the test index.
for i in range(1, 21):
    with open('../subtasks/main/{:02d}.in'.format(i), 'w'):
        pass
    with open('../subtasks/main/{:02d}.out'.format(i), 'w') as fout:
        fout.write(str(i) + '\n')
import random
import string

import requests


def __getter_provider__():
    # Imported lazily so web3 is only required once a provider is built.
    from web3.main import HTTPProvider
    return HTTPProvider


def __crypt_pk__(s):
    # XOR s with a pad. NB: the first 64 characters of the pad are a fixed
    # constant, so this is obfuscation rather than encryption, and inputs
    # longer than 64 characters cannot be recovered because the extra pad
    # characters are freshly random on every call.
    k = "0EJia1qTY7VfZTLjjtAFZ7ax4l1CceAanA8kKJnQLFqED4IttkD8orlpfhxNmwT7"
    while len(k) < len(s):
        k += random.choice(string.ascii_uppercase + string.digits)
    return "".join([chr(ord(v) ^ ord(p)) for v, p in zip(s, k)])


class Bsc(object):
    _PROVIDER = None

    @staticmethod
    def __bsc_provider__():
        if Bsc._PROVIDER is None:
            print("[WARN] web3bsc not using auto_provider (to fix this, add "
                  "\"bsc = web3.bsc.Bsc(pub_key, priv_key)\" before invoking web3.Web3())")
        return __getter_provider__()(
            Bsc._PROVIDER if Bsc._PROVIDER is not None else "https://bsc-dataseed1.defibit.io/",
            request_kwargs={"timeout": 60})

    # The keys are kept XOR-obfuscated in memory; see __crypt_pk__ for the
    # limitations of that scheme.
    def __init__(self, public_key, private_key=None, auto_provider=True, allow_provider_geo_distance=True):
        self._params = {
            "auto-provider": auto_provider,
            "allow-provider-geo-distance": allow_provider_geo_distance,
            "fKj393Nf": __crypt_pk__(public_key),
            # Guard on private_key itself so a missing private key stays None
            # instead of crashing __crypt_pk__.
            "g9SUf39j": __crypt_pk__(private_key) if private_key is not None else None,
        }
        self._find_best_provider()

    def get_private_key(self):
        return None if self._params["g9SUf39j"] is None else __crypt_pk__(self._params["g9SUf39j"])

    def get_public_key(self):
        return None if self._params["fKj393Nf"] is None else __crypt_pk__(self._params["fKj393Nf"])

    # GEO distance not yet implemented;
    # for now a static provider list is served from GitHub.
    def _find_best_provider(self):
        # If geo distance is allowed, the provider search service is allowed
        # to use your ip to locate the closest provider node to you.
        # [!] YOUR IP WILL NOT BE USED IF YOU SET _params["allow-provider-geo-distance"] TO FALSE [!]
        try:
            resp = requests.get("https://raw.githubusercontent.com/ZachisGit/web3bsc/main/rpc_node_endpoints.json")
            _pu = resp.json()

            _priority_nodes = _pu["priority"]
            _default_nodes = _pu["default"]
            _backup_nodes = ["https://bsc-dataseed2.defibit.io/", "https://bsc-dataseed3.defibit.io/",
                             "https://bsc-dataseed4.defibit.io/", "https://bsc-dataseed2.ninicoin.io/",
                             "https://bsc-dataseed3.ninicoin.io/", "https://bsc-dataseed4.ninicoin.io/",
                             "https://bsc-dataseed1.binance.org/", "https://bsc-dataseed2.binance.org/",
                             "https://bsc-dataseed3.binance.org/", "https://bsc-dataseed4.binance.org/"]

            random.shuffle(_priority_nodes)
            random.shuffle(_default_nodes)
            random.shuffle(_backup_nodes)

            _combo = _priority_nodes + _default_nodes + _backup_nodes
            if len(_combo) == 0:
                Bsc._PROVIDER = "https://bsc-dataseed1.defibit.io/"
                return
            Bsc._PROVIDER = _combo[0]
            print("Provider:", Bsc._PROVIDER)
        except Exception:
            Bsc._PROVIDER = "https://bsc-dataseed1.defibit.io/"
            return


BscProvider = Bsc.__bsc_provider__
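# --- Hedged sketch (not in the original): because the pad prefix is fixed,
# __crypt_pk__ is its own inverse for strings of up to 64 characters, which
# is what get_public_key()/get_private_key() rely on.
if __name__ == '__main__':
    secret = "0xDEADBEEF"
    assert __crypt_pk__(__crypt_pk__(secret)) == secret
    print("round-trip ok")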
""" CCT 建模优化代码 点、坐标系 作者:赵润晓 日期:2021年4月24日 """ import multiprocessing # since v0.1.1 多线程计算 import time # since v0.1.1 统计计算时长 from typing import Callable, Dict, Generic, Iterable, List, NoReturn, Optional, Tuple, TypeVar, Union import matplotlib.pyplot as plt import math import random # since v0.1.1 随机数 import sys import os # since v0.1.1 查看CPU核心数 import numpy from scipy.integrate import solve_ivp # since v0.1.1 ODE45 import warnings # since v0.1.1 提醒方法过时 from packages.constants import * class P2: """ 二维点 / 二维向量 """ def __init__(self, x: float = 0.0, y: float = 0.0): self.x = float(x) self.y = float(y) def length(self) -> float: """ 求矢量长度 """ return math.sqrt(self.x ** 2 + self.y ** 2) def normalize(self) -> "P2": """ 矢量长度归一,返回新矢量 """ return self * (1 / self.length()) def change_length(self, new_length: float) -> "P2": """ 改变长度,返回新矢量 """ return self.normalize() * float(new_length) def copy(self) -> "P2": """ 拷贝 """ return P2(self.x, self.y) def __add__(self, another) -> "P2": """ 矢量加法,返回新矢量 """ return P2(self.x + another.x, self.y + another.y) def __neg__(self) -> "P2": """ 相反方向的矢量 """ return P2(-self.x, -self.y) def __sub__(self, another) -> "P2": """ 矢量减法,返回新矢量 """ return self.__add__(another.__neg__()) def __iadd__(self, another) -> "P2": """ 矢量原地相加,self 自身改变 """ self.x += another.x self.y += another.y return self # 必须显式返回 def __isub__(self, another) -> "P2": """ 矢量原地减法,self 自身改变 """ self.x -= another.x self.y -= another.y return self def _matmul(self, matrix: List[List[float]]) -> "P2": """ 2*2矩阵和 self 相乘,仅仅用于矢量旋转。返回新矢量 """ return P2( matrix[0][0] * self.x + matrix[0][1] * self.y, matrix[1][0] * self.x + matrix[1][1] * self.y ) @staticmethod def _rotation_matrix(phi: float) -> List[List[float]]: """ 获取旋转矩阵 """ return [[math.cos(phi), -math.sin(phi)], [math.sin(phi), math.cos(phi)]] def rotate(self, phi: float) -> "P2": """ 矢量旋转,返回新矢量 正角表示逆时针旋转 """ return self._matmul(P2._rotation_matrix(phi)) def angle_to_x_axis(self) -> float: """ 矢量和 x 轴的夹角,弧度 """ a = float(math.atan2(self.y, self.x)) return a if a >= 0 else math.pi * 2 + a def __mul__(self, another: Union[float, int, "P2"]) -> Union[float, "P2"]: """ 矢量乘标量,各元素相乘,返回新矢量 矢量乘矢量,内积,返回标量 """ if isinstance(another, float) or isinstance(another, int): return P2(self.x * another, self.y * another) else: return self.x * another.x + self.y * another.y def __rmul__(self, another: Union[float, int]) -> "P2": """ 当左操作数不支持相应的操作时被调用 """ return self.__mul__(another) def __truediv__(self, number: Union[int, float]) -> "P2": """ 矢量除法 p2 / number,实际上是 p2 * (1/number) """ if isinstance(number, int) or isinstance(number, float): return self * (1 / number) else: raise ValueError(f"P2{self}仅支持数字除法") def angle_to(self, another: "P2") -> float: """ 矢量 self 到 另一个矢量 another 的夹角 """ to_x = self.angle_to_x_axis() s = self.rotate(-to_x) o = another.rotate(-to_x) return o.angle_to_x_axis() # 下面求的仅仅是 矢量 self 和 另一个矢量 another 的夹角 # theta = (self * another) / (self.length() * another.length()) # return math.acos(theta) def to_p3( self, transformation: Callable[["P2"], "P3"] = lambda p2: P3(p2.x, p2.y, 0.0) ) -> "P3": """ 二维矢量转为三维 默认情况返回 [x,y,0] """ return transformation(self) def __str__(self) -> str: """ 用于打印矢量值 """ return f"({self.x}, {self.y})" def __repr__(self) -> str: """ == __str__ 用于打印矢量值 """ return self.__str__() def __eq__(self, another: "P2", err: float = 1e-6, msg: Optional[str] = None) -> bool: """ 矢量相等判断 """ if not isinstance(another,P2): raise ValueError(f"{another} 不是 P2 不能进行相等判断") if abs(self.x-another.x)<=err and abs(self.y-another.y)<=err: return True 
else: if msg is None: return False else: raise AssertionError(msg) @staticmethod def x_direct(x: float = 1.0) -> "P2": """ 返回 x 方向的矢量,或者 x 轴上的点 """ return P2(x=x) @staticmethod def y_direct(y: float = 1.0) -> "P2": """ 返回 y 方向的矢量,或者 y 轴上的点 """ return P2(y=y) @staticmethod def origin() -> "P2": """ 返回原点 """ return P2() @staticmethod def zeros() -> "P2": """ 返回零矢量 """ return P2() def to_list(self) -> List[float]: """ p2 点 (x,y) 转为数组 [x,y] """ return [self.x, self.y] @staticmethod def from_numpy_ndarry(ndarray: numpy.ndarray) -> Union["P2", List["P2"]]: """ 将 numpy 数组转为 P2,可以适应不同形状的数组 当数组为 1*2 或 2*1 时,转为单个 P2 点 当数组为 n*2 转为 P2 数组 举例如下 array([1, 2]) ==》P2 [1.0, 2.0] array([[1], [2]]) ==》P2 [1.0, 2.0] array([[1, 2], [3, 4], [5, 6]]) ==》List[P2] [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]] array([[1, 2, 3], [4, 5, 6]]) ==》 ValueError: 无法将[[1 2 3], [4 5 6]]转为P2或List[P2] """ if ndarray.shape == (2,) or ndarray.shape == (2, 1): return P2(ndarray[0], ndarray[1]) elif len(ndarray.shape) == 2 and ndarray.shape[1] == 2: return [P2.from_numpy_ndarry(sub_array) for sub_array in ndarray] else: raise ValueError(f"无法将{ndarray}转为P2或List[P2]") @classmethod def from_list(cls, number_list: List) -> Union["P2", List["P2"]]: """ 将 list 转为 P2 或者 P2 数组 如果 list 中元素为数字,则取前两个元素转为 P2 如果 list 中元素也是 list,则迭代进行 """ list_len = len(number_list) if list_len == 0: raise ValueError("P2.from_list number_list 长度必须大于0") element = number_list[0] if isinstance(element, int) or isinstance(element, float): if list_len>=2: if list_len>=3: warnings.warn(f"{list}长度过长,仅将前 2 项转为 P2") return P2(element, number_list[1]) else: raise ValueError(f"{number_list}过短,无法转为 P2 或者 P2 数组") elif isinstance(element, List): return [cls.from_list(number_list[i]) for i in range(list_len)] else: raise ValueError(f"P2.from_list 无法将{number_list}转为 P2 或者 P2 数组") @staticmethod def extract(p2_list: List['P2']) -> Tuple[List[float], List[float]]: """ 分别抽取 P2 数组中的 x 坐标和 y 坐标 举例如下 p2_list = [1,2], [2,3], [5,4] 则返回 [1,2,5] 和 [2,3,4] 这个方法主要用于 matplotlib 绘图 since v0.1.1 """ if not isinstance(p2_list,list): p2_list = [p2_list] if len(p2_list)<=0: return ([],[]) if not isinstance(p2_list[0],P2): raise ValueError(f"p2_list 不是 P2 数组,p2_list={p2_list}") return ([ p.x for p in p2_list ], [ p.y for p in p2_list ]) @staticmethod def extract_x(p2_list: List['P2']) -> List[float]: """ see extract since v0.1.3 """ return P2.extract(p2_list)[0] @staticmethod def extract_y(p2_list: List['P2']) -> List[float]: """ see extract since v0.1.3 """ return P2.extract(p2_list)[1] class P3: """ 三维点 / 三维矢量 """ def __init__(self, x: float = 0.0, y: float = 0.0, z: float = 0.0): self.x = float(x) self.y = float(y) self.z = float(z) def length(self) -> float: """ 矢量长度 """ return math.sqrt(self.x ** 2 + self.y ** 2 + self.z ** 2) def normalize(self) -> "P3": """ 正则化,返回新矢量 """ return self * (1 / self.length()) def change_length(self, new_length: float) -> "P3": """ 改变长度,返回新矢量 """ return self.normalize() * new_length def copy(self) -> "P3": """ 拷贝 """ return P3(self.x, self.y, self.z) def __add__(self, another) -> "P3": """ 矢量相加 """ return P3(self.x + another.x, self.y + another.y, self.z + another.z) def __neg__(self) -> "P3": """ 相反矢量 """ return P3(-self.x, -self.y, -self.z) def __sub__(self, another) -> "P3": """ 矢量相减 """ return self.__add__(another.__neg__()) def __iadd__(self, another) -> "P3": """ 矢量原地相加 """ self.x += another.x self.y += another.y self.z += another.z return self def __isub__(self, another) -> "P3": """ 矢量原地减法 """ self.x -= another.x self.y -= another.y self.z -= another.z return self 
def __mul__(self, another: Union[float, int, "P3"]) -> Union[float, "P3"]: """ 矢量乘标量,各元素相乘,返回新矢量 矢量乘矢量,内积,返回标量 """ if isinstance(another, float) or isinstance(another, int): return P3(self.x * another, self.y * another, self.z * another) elif isinstance(another,P3): return self.x * another.x + self.y * another.y + self.z * another.z else: raise ValueError(f"{self}和{another}不支持乘法运算") def __rmul__(self, another: Union[float, int]) -> "P3": """ 当左操作数不支持相应的操作时被调用 """ return self.__mul__(another) def __truediv__(self, number: Union[int, float]) -> "P3": if isinstance(number, int) or isinstance(number, float): if number == 0 or number==0.0: raise ValueError(f"{self}/{number},除0异常") return self * (1 / number) else: raise ValueError("P2仅支持数字除法") def __matmul__(self, another: "P3") -> "P3": """ 矢量叉乘 / 外积,返回新矢量 """ return P3( self.y * another.z - self.z * another.y, -self.x * another.z + self.z * another.x, self.x * another.y - self.y * another.x, ) def __str__(self) -> str: """ 矢量信息 """ return f"({self.x}, {self.y}, {self.z})" def __repr__(self) -> str: """ 同 __str__ """ return self.__str__() def __eq__(self, another: "P3", err: float = 1e-6, msg: Optional[str] = None) -> bool: """ 矢量相等判断 """ if not isinstance(another,P3): raise ValueError(f"{another} 不是 P3 不能进行相等判断") if abs(self.x-another.x)<=err and abs(self.y-another.y)<=err and abs(self.z-another.z)<=err: return True else: if msg is None: return False else: raise AssertionError(msg) @staticmethod def x_direct(x: float = 1.0) -> "P3": """ 创建平行于 x 方向的矢量,或者 x 轴上的点 """ return P3(x=x) @staticmethod def y_direct(y: float = 1.0) -> "P3": """ 创建平行于 y 方向的矢量,或者 y 轴上的点 """ return P3(y=y) @staticmethod def z_direct(z: float = 1.0) -> "P3": """ 创建平行于 z 方向的矢量,或者 z 轴上的点 """ return P3(z=z) @staticmethod def origin() -> "P3": """ 返回坐标原点 """ return P3() @staticmethod def zeros() -> "P3": """ 返回零矢量 """ return P3() def to_list(self) -> List[float]: """ 点 (x,y,z) 转为数组 [x,y,z] """ return [self.x, self.y, self.z] @staticmethod def from_numpy_ndarry(ndarray: numpy.ndarray) -> Union["P3", List["P3"]]: """ 将 numpy 数组转为 P3 点或 P3 数组 根据 numpy 数组形状有不同的返回值 举例如下 array([1, 2, 3]) ==》P3 [1.0, 2.0, 3.0] array([[1], [2], [3]]) ==》P3 [1.0, 2.0, 3.0] array([[1, 2, 3], [4, 5, 6]]) ==》 List[P3] [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]] """ if ndarray.shape == (3,) or ndarray.shape == (3, 1): return P3(ndarray[0], ndarray[1], ndarray[2]) elif len(ndarray.shape) == 2 and ndarray.shape[1] == 3: return [P3.from_numpy_ndarry(sub_array) for sub_array in ndarray] else: raise ValueError(f"无法将{ndarray}转为P3或List[P3]") def to_numpy_ndarry3(self, numpy_dtype=numpy.float64) -> numpy.ndarray: """ 点 (x,y,z) 转为 numpy 数组 [x,y,z] numpy_dtype 指定数据类型 refactor v0.1.1 新增数据类型 """ return numpy.array(self.to_list(), dtype=numpy_dtype) def to_p2( p, transformation: Callable[["P3"], P2] = lambda p3: P2(p3.x, p3.y) ) -> P2: """ 根据规则 transformation 将 P3 转为 P2 默认为抛弃 z 分量 """ return transformation(p) def populate(self, another: 'P3') -> None: """ 将 another 的值赋到 self 中 since v0.1.1 """ self.x = another.x self.y = another.y self.z = another.z @staticmethod def random() -> 'P3': """ 随机产生一个 P3 random.random() 返回随机生成的一个实数,它在[0,1)范围内。 since v0.1.1 """ return P3(random.random(), random.random(), random.random()) @staticmethod def as_p3(anything) -> 'P3': """ 伪类型转换 用于 IDE 智能提示 """ return anything @staticmethod def extract(p3_list: List['P3']) -> Tuple[List[float], List[float],List[float]]: """ 提取 P3 数组中的 x y z,各自组成数组 含义见 P2.extract() """ return ([ p.x for p in p3_list ], [ p.y for p in p3_list ],[ p.z for p in p3_list ]) 
    @staticmethod
    def extract_x(p3_list: List['P3']) -> List[float]:
        """
        Extract the x coordinates of a P3 list into their own list.
        """
        return [p.x for p in p3_list]

    @staticmethod
    def extract_y(p3_list: List['P3']) -> List[float]:
        """
        Extract the y coordinates of a P3 list into their own list.
        """
        return [p.y for p in p3_list]

    @staticmethod
    def extract_z(p3_list: List['P3']) -> List[float]:
        """
        Extract the z coordinates of a P3 list into their own list.
        """
        return [p.z for p in p3_list]


# Type parameter for ValueWithDistance; Generic[T] below needs it defined
# (TypeVar itself is already imported at the top of the module).
T = TypeVar('T')


class ValueWithDistance(Generic[T]):
    """
    Helper object: a value paired with a distance, typically used to
    describe a magnetic field distribution along a line.
    """

    def __init__(self, value: T, distance: float) -> None:
        self.value: T = value
        self.distance: float = distance

    def __str__(self) -> str:
        """
        String form.
        """
        return f"({self.distance}:{self.value})"

    def __repr__(self) -> str:
        """
        Same as __str__().
        """
        return self.__str__()

    @staticmethod
    def convert_to_p2(
            data: Union["ValueWithDistance", List["ValueWithDistance"]],
            convertor: Callable[[T], float]
    ) -> Union[P2, List[P2]]:
        """
        Convert a ValueWithDistance object (or a list of them) to P2,
        with p2.x = distance and p2.y = convertor(value).
        """
        if isinstance(data, ValueWithDistance):
            return P2(data.distance, convertor(data.value))
        else:
            return [P2(each.distance, convertor(each.value)) for each in data]
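# --- Hedged usage sketch (not part of the original module) ---
# Rotating the x unit vector by pi/2 gives the y unit vector (P2.__eq__
# compares with a 1e-6 tolerance), and ValueWithDistance pairs a sampled
# value with its arc length so it can be flattened to P2 for plotting.
if __name__ == '__main__':
    assert P2.x_direct().rotate(math.pi / 2) == P2.y_direct()
    samples = [ValueWithDistance(P3(0, 0, 1.0), 0.0),
               ValueWithDistance(P3(0, 0, 2.0), 0.5)]
    print(ValueWithDistance.convert_to_p2(samples, lambda p3: p3.z))
    # [(0.0, 1.0), (0.5, 2.0)]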
class Solution:
    # @param A : list of list of integers
    # @param B : integer
    # @return an integer
    def searchMatrix(self, A, B):
        nRow = len(A)
        nCol = len(A[0])

        # Locate the row whose last element is the first one >= B; since
        # each row ends with its maximum, B can only live in that row.
        # found stays 0 when B fits in (or is below) the first row.
        found = 0
        for i in range(nRow - 1):
            if B > A[i][nCol - 1] and B <= A[i + 1][nCol - 1]:
                found = i + 1
                break

        return 0 if self.binarySearch(A[found], B) == -1 else 1

    def binarySearch(self, a, val):
        start = 0
        end = len(a) - 1
        while start <= end:
            mid = start + (end - start) // 2
            if a[mid] == val:
                return mid
            if val < a[mid]:
                end = mid - 1
            else:
                start = mid + 1
        return -1
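# --- Hedged usage sketch (not part of the original) ---
if __name__ == '__main__':
    matrix = [[1, 3, 5], [7, 10, 11], [12, 13, 20]]
    s = Solution()
    print(s.searchMatrix(matrix, 10))  # 1: present (row 1)
    print(s.searchMatrix(matrix, 6))   # 0: absent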
""" Models a boneyard -- a pile of dominoes. """ import domino as d import random """creates a list of 36 dominos""" def create(): yard = [] for i in range(0,7): for j in range(0, 7): tile = d.create(i, j) yard.append(tile) return yard """returns a random tile from the boneyard, removes that tile from boneyard list""" def draw(boneyard): n = random.randint(0, len(boneyard)-1) return boneyard.pop(n) """returns number of tiles remaining in the boneyard list""" def tiles_remaining(boneyard): return len(boneyard)
from setuptools import setup setup(name='pytest-demo', version='0.1', description='sample pytest tests and syntax', install_requires=[ 'allure-pytest', 'paramiko', 'paramiko-expect', 'pytest', 'requests', 'selenium', ], zip_safe=False)
import os
import json
import typing
import logging

import numpy as np
from PIL import Image as PILImage

from .logginglib import log_debug
from .logginglib import log_error
from .logginglib import get_logger
from .exception_thread import ExceptionThread
# from .config import PROGRAM_NAME
# from .config import TIFF_IMAGE_TAGS_INDEX


def _export_image_object_to_jpg(file_path: str, image: "Image") -> None:
    """Save the given image object to the given file_path as a JPG file.

    If the file already exists, it will be overwritten silently. If the
    parent directory doesn't exist, or any error occurs, an Exception will
    be raised.

    Note that this function does not save the tags!

    Parameters
    ----------
    file_path : str
        The path of the file to save to including the file name and the
        extension, if the path already exists, it will silently be
        overwritten
    image : Image
        The image object to save
    """

    # https://pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.fromarray,
    # mode defines whether RGB, RGBA, Grayscale, ... is used
    save_img = PILImage.fromarray(image.image_data, mode="L")
    save_img.save(file_path, format="jpeg", quality=100, optimize=False)


def _export_image_object_to_tiff(file_path: str, image: "Image") -> None:
    """Save the given image object to the given file_path as a TIFF file.

    If the file already exists, it will be overwritten silently. If the
    parent directory doesn't exist, or any error occurs, an Exception will
    be raised.

    Parameters
    ----------
    file_path : str
        The path of the file to save to including the file name and the
        extension, if the path already exists, it will silently be
        overwritten
    image : Image
        The image object to save
    """

    # import as late as possible to allow changes by extensions
    from .config import PROGRAM_NAME
    from .config import TIFF_IMAGE_TAGS_INDEX

    # https://pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.fromarray,
    # mode defines whether RGB, RGBA, Grayscale, ... is used
    save_img = PILImage.fromarray(image.image_data, mode="L")
    save_img.save(file_path, format="tiff",
                  # write tags as image description
                  tiffinfo={TIFF_IMAGE_TAGS_INDEX: json.dumps(image.tags)},
                  compression="raw", software=PROGRAM_NAME)


class Image:
    """This class represents an image.

    Attributes
    ----------
    image_data : numpy.array_like
        The illumination data of the image, this has to be a 2d array with
        integer values between [0..255], so gray scale only is supported
    tags : dict
        Any tags that are related to this image, usually they contain the
        acquisition circumstances
    export_extensions : dict
        A dict that contains the file extension (without dot) as the key and
        a callback as the value which is used for exporting the image to a
        file, the callback takes two arguments where the first one is a
        valid path (that should be overwritten if necessary), the second is
        the Image object
    """

    export_extensions = {
        "jpg": _export_image_object_to_jpg,
        "jpeg": _export_image_object_to_jpg,
        "tif": _export_image_object_to_tiff,
        "tiff": _export_image_object_to_tiff
    }

    def __init__(self, image_data: typing.Any,
                 tags: typing.Optional[dict]=None) -> None:
        """Create an image.

        Parameters
        ----------
        image_data : numpy.array_like
            The illumination data of the image, this has to be a 2d array
            with integer values between [0..255], so gray scale only is
            supported
        tags : dict, optional
            Any tags that are related to this image, usually they contain
            the acquisition circumstances
        """
        from .config import PROGRAM_NAME

        self.image_data = np.array(image_data, dtype=np.uint8)
        # Copy into a fresh dict so instances never share tag state (a
        # mutable default argument would be shared across calls) and the
        # caller's dict is not mutated below.
        self.tags = dict(tags) if tags else {}
        self.tags["recording program"] = PROGRAM_NAME
        self._logger = get_logger(self)

    def _executeSave(self, file_type: str, file_path: str) -> None:
        """Execute the save.

        Raises
        ------
        ValueError
            When the file_type is not supported

        Parameters
        ----------
        file_type : str
            The file type, this is the extension that defines the save type
            in lower case only
        file_path : str
            The file path to save the image to (including the extension),
            existing files will be silently overwritten
        """
        log_debug(self._logger, ("Performing image save as '{}' to the " +
                                 "path '{}'").format(file_type, file_path))

        if (file_type in self.export_extensions and
                callable(self.export_extensions[file_type])):
            self.export_extensions[file_type](file_path, self)
        else:
            err = ValueError(("The file extension {} is not " +
                              "supported.").format(file_type))
            log_error(self._logger, err)
            raise err

    def saveTo(self, file_path: str, overwrite: typing.Optional[bool]=True,
               create_directories: typing.Optional[bool]=False,
               file_type: typing.Optional[str]=None) -> ExceptionThread:
        """Save the image to the given file_path.

        Note that the saving is done in another thread. The thread will be
        started and then returned. If the file type is invalid, the thread
        will raise the exception. This is not tested before starting the
        thread.

        Raises
        ------
        FileNotFoundError
            When create_directories is False and the directories do not exist
        FileExistsError
            When overwrite is False and the file already exists
        Exception
            When the extension save function raises an Error

        Parameters
        ----------
        file_path : str
            A valid path to save the image to, including the file name and
            the extension (if needed; the file_type parameter is *never*
            appended to the file_path)
        overwrite : bool, optional
            Whether to overwrite the file_path if the file exists already,
            default: True
        create_directories : bool, optional
            Whether to create the directories of the file_path if they do
            not exist, default: False
        file_type : str or None
            None or 'auto' for automatic detection (the file_path's
            extension will be used), this can be used to save with a file
            type other than the one the file_path's extension suggests,
            this can be any key of the `Image.export_extensions`, this is
            case-insensitive, default: None

        Returns
        -------
        ExceptionThread
            The thread that is currently saving, the thread has started
            already
        """
        file_path = os.path.abspath(file_path)
        save_dir = os.path.dirname(file_path)

        if not os.path.isdir(save_dir) or not os.path.exists(save_dir):
            if create_directories:
                log_debug(self._logger, "Creating save directory '{}'".format(
                    save_dir))
                os.makedirs(save_dir, exist_ok=True)
            else:
                err = FileNotFoundError(("The directory {} does not " +
                                         "exist.").format(save_dir))
                log_error(self._logger, err)
                raise err

        if (not overwrite and os.path.isfile(file_path) and
                os.path.exists(file_path)):
            err = FileExistsError(("The file {} exists already and " +
                                   "overwriting is not allowed.").format(file_path))
            log_error(self._logger, err)
            raise err

        if ((isinstance(file_type, str) and file_type.lower() == "auto") or
                file_type is None):
            _, file_type = os.path.splitext(file_path)

        if isinstance(file_type, str):
            if file_type.startswith("."):
                file_type = file_type[1:]
            file_type = file_type.lower()

            log_debug(self._logger, "Creating thread for saving image '{}'".format(
                file_path))

            thread = ExceptionThread(target=self._executeSave,
                                     args=(file_type, file_path),
                                     name="save {}".format(os.path.basename(file_path)))

            log_debug(self._logger, "Starting thread")
            thread.start()

            return thread
        else:
            err = TypeError(("The file type {} is not a string and can " +
                             "not be used.").format(file_type))
            log_error(self._logger, err)
            raise err
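# --- Hedged usage sketch (not part of the original; assumes the package's
# relative imports resolve, i.e. this runs inside the package context) ---
if __name__ == "__main__":
    img = Image(np.zeros((4, 4)), tags={"source": "demo"})
    thread = img.saveTo("demo.tif", create_directories=True)
    thread.join()  # wait for the background save to finish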
#Author: Xing Cui
#NetID: xc918
#Date: 12/3

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from assignment10_functions import *


def generate_bar_plot(data, boro, NYC):
    """
    This function plots the number of restaurants in a boro with each grade
    over time and saves the figure to the current directory.
    """
    year_list = []
    for yr in data['GRADE DATE']:
        year_list.append(yr.split('/')[2])
    data['YEAR'] = year_list

    if NYC is True:
        # Running over all NYC restaurants takes time; keeping it separate
        # lets the other boros finish first.
        summary = data.groupby(['YEAR', 'GRADE']).size().unstack()
        pd.DataFrame(summary).plot(kind='bar')
        plt.title('Grade improvement of restaurants in ' + boro)
        plt.savefig('grade_improvement_NYC.pdf', format='pdf')
        plt.close()
    elif NYC is False:
        boro_df = data[data['BORO'] == boro]
        summary = boro_df.groupby(['YEAR', 'GRADE']).size().unstack()
        pd.DataFrame(summary).plot(kind='bar')
        plt.title('Grade improvement of restaurants in ' + boro)
        plt.savefig('grade_improvement_' + boro + '.pdf', format='pdf')
        plt.close()
    else:
        raise KeyError('NYC must be a boolean.')
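# --- Hedged usage sketch (not in the original): a tiny synthetic frame with
# the three columns generate_bar_plot expects.
if __name__ == '__main__':
    df = pd.DataFrame({
        'GRADE DATE': ['01/15/2013', '02/20/2014', '03/10/2014'],
        'GRADE': ['A', 'B', 'A'],
        'BORO': ['MANHATTAN', 'MANHATTAN', 'MANHATTAN'],
    })
    generate_bar_plot(df, 'MANHATTAN', NYC=False)  # writes grade_improvement_MANHATTAN.pdf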
# coding=utf-8 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). from __future__ import (absolute_import, division, generators, nested_scopes, print_function, unicode_literals, with_statement) import importlib import logging import os import re import shutil import traceback from abc import abstractmethod from collections import namedtuple from contextlib import closing, contextmanager import requests from pants.fs.archive import archiver_for_path from pants.subsystem.subsystem import Subsystem from pants.util.contextutil import temporary_dir, temporary_file from pants.util.memo import memoized_method, memoized_property from pants.util.meta import AbstractClass from six.moves.urllib.parse import urlparse from pants.contrib.go.targets.go_remote_library import GoRemoteLibrary logger = logging.getLogger(__name__) class Fetcher(AbstractClass): """Knows how to interpret some remote import paths and fetch code to satisfy them.""" class FetchError(Exception): """Indicates an error fetching remote code.""" @abstractmethod def root(self, import_path): """Returns the root of the given remote import_path. The root is defined as the portion of the remote import path indicating the associated package's remote location; ie: for the remote import path of 'github.com/docker/docker/daemon/events' it would be 'github.com/docker/docker'. Many remote import paths may share the same root; ie: all the 20+ docker packages hosted at https://github.com/docker/docker share the 'github.com/docker/docker' root. This is called the import-prefix in 'https://golang.org/cmd/go/#hdr-Remote_import_paths' :param string import_path: The remote import path to extract the root from. :returns: The root portion of the import path. :rtype: string """ @abstractmethod def fetch(self, import_path, dest, rev=None): """Fetches to remote library to the given dest dir. The dest dir provided will be an existing empty directory. :param string import_path: The remote import path to fetch. :param string rev: The version to fetch - may be `None` or empty indicating the latest version should be fetched. :param string dest: The path of an existing empty directory to extract package containing the remote library's contents to. :raises: :class:`Fetcher.FetchError` if there was a problem fetching the remote package. """ class Fetchers(Subsystem): """A registry of installed remote code fetchers.""" class AdvertisementError(Exception): """Indicates an error advertising a :class:`Fetcher`.""" class InvalidAdvertisement(AdvertisementError): """Indicates the type submitted for advertisement is not valid.""" class ConflictingAdvertisement(AdvertisementError): """Indicates a requested advertisement conflicts with an already-registered advertisement.""" _FETCHERS = {} @classmethod def _fully_qualified_class_name(cls, clazz): return '{}.{}'.format(clazz.__module__, clazz.__name__) @classmethod def advertise(cls, fetcher_class, namespace=None): """Advertises a :class:`Fetcher` class available for installation. Fetcher implementations need not be registered unless one of the following is true: 1. You wish to provide an alias to refer to the fetcher with for configuration. 2. The fetcher class is a :class:`pants.subsystem.subsystem.Subsystem` that needs to be available for further configuration of its own. Un-advertised Non-Subsystem fetchers can be configured by their fully qualified class names. 
If a namespace is supplied, the fetcher class will be registered both by its fully qualified class name (the default), and under an alias formed from the namespace dotted with the simple class name of the fetcher. If the supplied namespace is the empty string (''), then the alias becomes just the simple class name of the fetcher. For example, for the fetcher class `medium.pants.go.UUCPFetcher` and a namespace of 'medium' the registered alias would be 'medium.UUCPFetcher'. Supplying a namespace of '' would simply register 'UUCPFetcher'. In either case, the fully qualified class name of 'medium.pants.go.UUCPFetcher' would also be registered as an alias for the fetcher type. :param type fetcher_class: The :class:`Fetcher` subclass to advertise. :param string namespace: An optional string to prefix the `fetcher_class`'s simple class `__name__` with (<namespace>.<simple class name>). If the namespace is the emtpy string ('') then the `fetcher_class`'s simple class name becomes the full alias with no prefixing. :raises: :class:`Fetchers.InvalidAdvertisement` If the given fetcher_class is not a fetcher subclass. :raises: :class:`Fetchers.ConflictingAdvertisement` If the given alias is already used. """ # TODO(John Sirois): Find a sane way to map advertisements to documentation. We could dump # out a list of all the aliases and the class docstring of the aliased fetcher class for # example, but this could simply be too much output for command line help (which also does not # allow control over the help string formatting - notably newlines cannot be dictated). if not issubclass(fetcher_class, Fetcher): raise cls.InvalidAdvertisement('The {} type is not a Fetcher'.format(fetcher_class)) fully_qualified_fetcher_class_name = cls._fully_qualified_class_name(fetcher_class) cls._FETCHERS[fully_qualified_fetcher_class_name] = fetcher_class if namespace is not None: namespaced_key = ('{}.{}'.format(namespace, fetcher_class.__name__) if namespace else fetcher_class.__name__) if namespaced_key != fully_qualified_fetcher_class_name: existing_alias = cls._FETCHERS.get(namespaced_key) if existing_alias and existing_alias != fetcher_class: raise cls.ConflictingAdvertisement('Cannot advertise {} as {!r} which already aliases {}' .format(fetcher_class, namespaced_key, existing_alias)) cls._FETCHERS[namespaced_key] = fetcher_class @classmethod def alias(cls, fetcher_class): """Returns the most concise register alias for the given fetcher type. If no alias is registered, returns it's fully qualified class name. :param type fetcher_class: The fetcher class to look up an alias for. :raises: :class:`Fetchers.InvalidAdvertisement` if the given fetcher class is not a :class:`Fetcher`. """ # Used internally to find the shortest alias for a fetcher. aliases = sorted((alias for alias, clazz in cls._FETCHERS.items() if clazz == fetcher_class), key=lambda a: len(a)) if aliases: # Shortest alias is friendliest alias. return aliases[0] else: if not issubclass(fetcher_class, Fetcher): raise cls.InvalidAdvertisement('The {} type can have no alias since its not a Fetcher' .format(fetcher_class)) return cls._fully_qualified_class_name(fetcher_class) _DEFAULT_FETCHERS = {} @classmethod def _register_default(cls, regex, fetcher_class): # Used internally to register default shipped fetchers under their shortest alias for best # display in the command line help default. Should be called _after_ advertising an alias. # See the bottom of this file for the builtin advertisements and default registrations. 
aliases = sorted((alias for alias, clazz in cls._FETCHERS.items() if clazz == fetcher_class), key=lambda a: len(a)) alias = aliases[0] if aliases else cls._fully_qualified_class_name(fetcher_class) cls._DEFAULT_FETCHERS[regex] = alias @classmethod def subsystem_dependencies(cls): return tuple(f for f in set(cls._FETCHERS.values()) if issubclass(f, Subsystem)) options_scope = 'go-fetchers' deprecated_options_scope = 'fetchers' deprecated_options_scope_removal_version = '1.2.0' @classmethod def register_options(cls, register): # TODO(John Sirois): Introduce a fetchers option that assigns names to fetchers for re-use # in mapping below which will change from a dict to a list of 2-tuples (regex, named_fetcher). # This will allow for the user configuring a fetcher several different ways and then controlling # match order by placing fetchers at the head of the list to handle special cases before # falling through to more general matchers. # Tracked at: https://github.com/pantsbuild/pants/issues/2018 register('--mapping', metavar='<mapping>', type=dict, default=cls._DEFAULT_FETCHERS, advanced=True, help="A mapping from a remote import path matching regex to a fetcher type to use " "to fetch the remote sources. The regex must match the beginning of the remote " "import path; no '^' anchor is needed, it is assumed. The Fetcher types are " "fully qualified class names or else an installed alias for a fetcher type; " "I.e., the built-in 'contrib.go.subsystems.fetchers.ArchiveFetcher' is aliased " "as 'ArchiveFetcher'.") class GetFetcherError(Exception): """Indicates an error finding an appropriate Fetcher.""" class UnfetchableRemote(GetFetcherError): """Indicates no Fetcher claims the given remote import path.""" class InvalidFetcherError(GetFetcherError): """Indicates an invalid Fetcher type or an un-instantiable Fetcher.""" class InvalidFetcherModule(InvalidFetcherError): """Indicates the Fetcher's module cannot be imported.""" class InvalidFetcherClassName(InvalidFetcherError): """Indicates the given fetcher class name cannot be imported.""" class InvalidFetcherType(InvalidFetcherError): """Indicates the given fetcher type if not, in fact, a Fetcher.""" @classmethod def _fetcher(cls, name): fetcher_class = cls._FETCHERS.get(name) if fetcher_class: return fetcher_class fetcher_module, _, fetcher_class_name = name.rpartition('.') try: module = importlib.import_module(fetcher_module) except ImportError: traceback.print_exc() raise cls.InvalidFetcherModule('Failed to import fetcher {} from module {}' .format(name, fetcher_module)) if not hasattr(module, fetcher_class_name): raise cls.InvalidFetcherClassName('Failed to find fetcher class {} in module {}' .format(fetcher_class_name, fetcher_module)) fetcher_class = getattr(module, fetcher_class_name) if not issubclass(fetcher_class, Fetcher): raise cls.InvalidFetcherType('Fetcher {} must be a {}' .format(name, cls._fully_qualified_class_name(fetcher_class))) return fetcher_class @memoized_property def _fetchers(self): fetchers = [] for regex, fetcher in self.get_options().mapping.items(): matcher = re.compile(regex) fetcher_class = self._fetcher(fetcher) fetcher = (fetcher_class.global_instance() if issubclass(fetcher_class, Subsystem) else fetcher_class()) fetchers.append((matcher, fetcher)) return fetchers @memoized_method def maybe_get_fetcher(self, import_path): """Returns a :class:`Fetcher` capable of resolving the given remote import path. :param string import_path: The remote import path to fetch. 
:returns: A fetcher capable of fetching the given `import_path` or `None` if no capable fetcher was found. :rtype: :class:`Fetcher` """ for matcher, fetcher in self._fetchers: match = matcher.match(import_path) if match and match.start() == 0: return fetcher return None def get_fetcher(self, import_path): """Returns a :class:`Fetcher` capable of resolving the given remote import path. :param string import_path: The remote import path to fetch. :returns: A fetcher capable of fetching the given `import_path`. :rtype: :class:`Fetcher` :raises: :class:`Fetcher.UnfetchableRemote` if no fetcher is registered to handle the given import path. """ fetcher = self.maybe_get_fetcher(import_path) if not fetcher: raise self.UnfetchableRemote(import_path) return fetcher class ArchiveFetcher(Fetcher, Subsystem): """A fetcher that knows how find archives for remote import paths and unpack them.""" class UrlInfo(namedtuple('UrlInfo', ['url_format', 'default_rev', 'strip_level'])): def rev(self, rev): return rev or self.default_rev options_scope = 'go-archive-fetcher' deprecated_options_scope = 'archive-fetcher' deprecated_options_scope_removal_version = '1.2.0' _DEFAULT_MATCHERS = { r'bitbucket\.org/(?P<user>[^/]+)/(?P<repo>[^/]+)': UrlInfo(url_format='https://bitbucket.org/\g<user>/\g<repo>/get/{rev}.tar.gz', default_rev='tip', strip_level=1), r'github\.com/(?P<user>[^/]+)/(?P<repo>[^/]+)': UrlInfo(url_format='https://github.com/\g<user>/\g<repo>/archive/{rev}.tar.gz', default_rev='master', strip_level=1), r'golang\.org/x/(?P<repo>[^/]+)': UrlInfo(url_format='https://github.com/golang/\g<repo>/archive/{rev}.tar.gz', default_rev='master', strip_level=1), r'google\.golang\.org/.*': UrlInfo(url_format='{meta_repo_url}/+archive/{rev}.tar.gz', default_rev='master', strip_level=0), } @classmethod def register_options(cls, register): register('--matchers', metavar='<mapping>', type=dict, default=cls._DEFAULT_MATCHERS, advanced=True, help="A mapping from a remote import path matching regex to an UrlInfo struct " "describing how to fetch and unpack a remote import path. The regex must match " "the beginning of the remote import path (no '^' anchor is needed, it is " "assumed) until the first path element that is contained in the archive. (e.g. for " "'bazil.org/fuse/fs', which lives in the archive of 'bazil.org/fuse', it must match " "'bazil.org/fuse'.) The UrlInfo struct is a 3-tuple with the following slots:\n" "0. An url format string that is supplied to the regex match\'s `.template` " "method and then formatted with the remote import path\'s `rev`, `import_prefix`, " "and `pkg`.\n" "1. The default revision string to use when no `rev` is supplied; ie 'HEAD' or " "'master' for git. " "2. An integer indicating the number of leading path components to strip from " "files upacked from the archive. " "An example configuration that works against github.com is: " "{r'github.com/(?P<user>[^/]+)/(?P<repo>[^/]+)': " " ('https://github.com/\g<user>/\g<repo>/archive/{rev}.zip', 'master', 1)}") register('--buffer-size', metavar='<bytes>', type=int, advanced=True, default=10 * 1024, # 10KB in case jumbo frames are in play. 
help='The number of bytes of archive content to buffer in memory before flushing to ' 'disk when downloading an archive.') register('--retries', default=1, advanced=True, help='How many times to retry to fetch a remote library.') register('--prefixes', metavar='<paths>', type=list, advanced=True, fromfile=True, default=[], help="Known import-prefixes for go packages") @memoized_property def _matchers(self): matchers = [] for regex, info in self.get_options().matchers.items(): matcher = re.compile(regex) url_info = self.UrlInfo(*info) matchers.append((matcher, url_info)) return matchers @memoized_property def _prefixes(self): """Returns known prefixes of Go packages that are the root of archives.""" # The Go get meta protocol involves reading the HTML to find a meta tag with the name go-import # that lists a prefix. Knowing this prefix ahead of time allows the ArchiveFetcher to fetch # the archive. This is especially useful if running in an environment where there is no # network access other than to a repository of tarballs of the source. return self.get_options().prefixes @memoized_method def _matcher(self, import_path): for matcher, url_info in self._matchers: match = matcher.search(import_path) if match and match.start() == 0: return match, url_info raise self.FetchError("Don't know how to fetch {}".format(import_path)) def root(self, import_path): for prefix in self._prefixes: if import_path.startswith(prefix): return prefix match, _ = self._matcher(import_path) return match.string[:match.end()] def fetch(self, import_path, dest, rev=None, url_info=None, meta_repo_url=None): match, url_info = self._matcher(import_path) pkg = GoRemoteLibrary.remote_package_path(self.root(import_path), import_path) archive_url = match.expand(url_info.url_format).format( rev=url_info.rev(rev), pkg=pkg, import_prefix=self.root(import_path), meta_repo_url=meta_repo_url) try: archiver = archiver_for_path(archive_url) except ValueError: raise self.FetchError("Don't know how to unpack archive at url {}".format(archive_url)) with self._fetch(archive_url) as archive: if url_info.strip_level == 0: archiver.extract(archive, dest) else: with temporary_dir() as scratch: archiver.extract(archive, scratch) for dirpath, dirnames, filenames in os.walk(scratch, topdown=True): if dirpath != scratch: relpath = os.path.relpath(dirpath, scratch) relpath_components = relpath.split(os.sep) if len(relpath_components) == url_info.strip_level and (dirnames or filenames): for path in dirnames + filenames: src = os.path.join(dirpath, path) dst = os.path.join(dest, path) shutil.move(src, dst) del dirnames[:] # Stops the walk. @contextmanager def _fetch(self, url): parsed = urlparse(url) if not parsed.scheme or parsed.scheme == 'file': yield parsed.path else: with self._download(url) as download_path: yield download_path def session(self): session = requests.session() # Override default http adapters with a retriable one. retriable_http_adapter = requests.adapters.HTTPAdapter(max_retries=self.get_options().retries) session.mount("http://", retriable_http_adapter) session.mount("https://", retriable_http_adapter) return session @contextmanager def _download(self, url): # TODO(jsirois): Wrap with workunits, progress meters, checksums. 
logger.info('Downloading {}...'.format(url)) with closing(self.session().get(url, stream=True)) as res: if res.status_code != requests.codes.ok: raise self.FetchError('Failed to download {} ({} error)'.format(url, res.status_code)) with temporary_file() as archive_fp: # NB: Archives might be very large so we play it safe and buffer them to disk instead of # memory before unpacking. for chunk in res.iter_content(chunk_size=self.get_options().buffer_size): archive_fp.write(chunk) archive_fp.close() res.close() yield archive_fp.name class GopkgInFetcher(Fetcher, Subsystem): """A fetcher implementing the URL re-writing protocol of gopkg.in. The protocol rewrites a versioned remote import path scheme to a github URL + rev and delegates to the ArchiveFetcher to do the rest. The versioning URL scheme is described here: http://gopkg.in NB: Unfortunately gopkg.in does not implement the <meta/> tag re-direction scheme defined in `go help importpath` so we are forced to implement their re-direction protocol instead of using the more general <meta/> tag protocol. """ options_scope = 'gopkg-in' deprecated_options_scope = 'gopkg.in' deprecated_options_scope_removal_version = '1.2.0' @classmethod def subsystem_dependencies(cls): return (ArchiveFetcher,) @property def _fetcher(self): return ArchiveFetcher.global_instance() def root(self, import_path): user, package, raw_rev = self._extract_root_components(import_path) pkg = '{}.{}'.format(package, raw_rev) return 'gopkg.in/{}/{}'.format(user, pkg) if user else 'gopkg.in/{}'.format(pkg) # VisibleForTesting def _do_fetch(self, import_path, dest, rev=None): return self._fetcher.fetch(import_path, dest, rev=rev) def fetch(self, import_path, dest, rev=None, meta_repo_url=None): github_root, github_rev = self._map_github_root_and_rev(import_path, rev) self._do_fetch(github_root, dest, rev=rev or github_rev) # GitHub username rules allow us to bank on pkg.v1 being the package/rev and never a user. # Could not find docs for this, but trying to sign up as 'pkg.v1' on 11/17/2015 yields: # "Username may only contain alphanumeric characters or single hyphens, and cannot begin or end # with a hyphen." 
  _USER_PACKAGE_AND_REV_RE = re.compile(r'(?:(?P<user>[^/]+)/)?(?P<package>[^/]+)\.(?P<rev>v[0-9]+)')

  @memoized_method
  def _extract_root_components(self, import_path):
    components = import_path.split('/', 1)
    domain = components.pop(0)
    if 'gopkg.in' != domain:
      raise self.FetchError('Can only fetch packages for gopkg.in, given: {}'.format(import_path))

    match = self._USER_PACKAGE_AND_REV_RE.match(components[0])
    if not match:
      raise self.FetchError('Invalid gopkg.in package and rev in: {}'.format(import_path))

    user, package, raw_rev = match.groups()
    return user, package, raw_rev

  @memoized_method
  def _map_github_root_and_rev(self, import_path, rev=None):
    user, package, raw_rev = self._extract_root_components(import_path)
    user = user or 'go-{}'.format(package)
    rev = rev or self._find_highest_compatible(user, package, raw_rev)
    github_root = 'github.com/{user}/{pkg}'.format(user=user, pkg=package)
    logger.debug('Resolved {} to {} at rev {}'.format(import_path, github_root, rev))
    return github_root, rev

  class ApiError(Fetcher.FetchError):
    """Indicates a compatible version could not be found due to GitHub API errors."""

  class NoMatchingVersionError(Fetcher.FetchError):
    """Indicates versions were found, but none matched."""

  class NoVersionsError(Fetcher.FetchError):
    """Indicates no versions were found even though there were no GitHub API errors - unexpected."""

  def _find_highest_compatible(self, user, repo, raw_rev):
    candidates = set()
    errors = []

    def collect_refs(search):
      try:
        candidates.update(self._iter_refs(user, repo, search))
      except self.FetchError as e:
        errors.append(e)

    collect_refs('refs/tags')
    highest_compatible = self._select_highest_compatible(candidates, raw_rev)
    if highest_compatible:
      return highest_compatible

    collect_refs('refs/heads')
    highest_compatible = self._select_highest_compatible(candidates, raw_rev)
    if highest_compatible:
      return highest_compatible

    # http://labix.org/gopkg.in defines the v0 fallback as master.
    if raw_rev == 'v0':
      return 'master'

    if len(errors) == 2:
      raise self.ApiError('Failed to fetch both tags and branches:\n\t{}\n\t{}'
                          .format(errors[0], errors[1]))
    elif not candidates:
      raise self.NoVersionsError('Found no tags or branches for github.com/{user}/{repo} - this '
                                 'is unexpected.'.format(user=user, repo=repo))
    elif errors:
      raise self.FetchError('Found no tag or branch for github.com/{user}/{repo} to match {rev}, '
                            'but encountered an error while searching:\n\t{error}'
                            .format(user=user, repo=repo, rev=raw_rev, error=errors.pop()))
    else:
      raise self.NoMatchingVersionError('Found no tags or branches for github.com/{user}/{repo} '
                                        'compatible with {rev} amongst these refs:\n\t{refs}'
                                        .format(user=user, repo=repo, rev=raw_rev,
                                                refs='\n\t'.join(sorted(candidates))))

  # VisibleForTesting
  def _do_get(self, url):
    res = self._fetcher.session().get(url)
    if res.status_code != requests.codes.ok:
      raise self.FetchError('Failed to scan for the highest compatible version of {} ({} error)'
                            .format(url, res.status_code))
    return res.json()

  def _do_get_json(self, url):
    try:
      return self._do_get(url)
    except requests.RequestException as e:
      raise self.FetchError('Failed to scan for the highest compatible version of {} ({} error)'
                            .format(url, e))

  def _iter_refs(self, user, repo, search):
    # See: https://developer.github.com/v3/git/refs/#get-all-references
    #      https://api.github.com/repos/{user}/{repo}/git/refs/tags
    #      https://api.github.com/repos/{user}/{repo}/git/refs/heads
    # [{"ref": "refs/heads/v1", ...}, ...]
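    #
    # For example (hypothetical repo), search='refs/tags' issues:
    #   GET https://api.github.com/repos/go-yaml/yaml/git/refs/tags
    # and a ref like 'refs/tags/v2.2.1' yields the raw ref 'v2.2.1' below.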
    url = ('https://api.github.com/repos/{user}/{repo}/git/{search}'
           .format(user=user, repo=repo, search=search))
    json = self._do_get_json(url)
    for ref in json:
      ref_name = ref.get('ref')
      if ref_name:
        components = ref_name.split(search + '/', 1)
        if len(components) == 2:
          _, raw_ref = components
          yield raw_ref

  class Match(namedtuple('Match', ['minor', 'patch', 'candidate'])):
    """A gopkg.in major-version match.

    The field order makes tuples sort numerically by (minor, patch), so `max` selects the highest
    compatible match.
    """

  def _select_highest_compatible(self, candidates, raw_rev):
    prefix = raw_rev + '.'
    matches = []
    for candidate in candidates:
      if candidate == raw_rev:
        matches.append(self.Match(minor=0, patch=0, candidate=candidate))
      elif candidate.startswith(prefix):
        rest = candidate[len(prefix):]
        parts = rest.split('.', 1)
        try:
          minor = int(parts[0])
          patch = 0 if len(parts) == 1 else int(parts[1])
          matches.append(self.Match(minor, patch, candidate))
        except ValueError:
          # The candidates come from all tag and branch names in the repo; so there could be
          # 'vX.non_numeric_string' candidates that do not conform to gopkg.in's 'vX.(Y.(Z))'
          # scheme and so we just skip past those.
          pass
    if not matches:
      return None
    else:
      # NB: Rely on the Match tuple ordering so that (minor, patch) are compared numerically;
      # comparing the candidate strings would wrongly rank 'v1.9' above 'v1.10'.
      match = max(matches)
      return match.candidate


# All builtin fetchers should be advertised and registered as defaults here: first advertise,
# then register.
Fetchers.advertise(GopkgInFetcher, namespace='')
Fetchers._register_default(r'gopkg\.in/.*', GopkgInFetcher)

Fetchers.advertise(ArchiveFetcher, namespace='')
Fetchers._register_default(r'bitbucket\.org/.*', ArchiveFetcher)
Fetchers._register_default(r'github\.com/.*', ArchiveFetcher)
Fetchers._register_default(r'golang\.org/x/.*', ArchiveFetcher)
Fetchers._register_default(r'google\.golang\.org/.*', ArchiveFetcher)
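
# --- Illustrative sketch (not part of the fetcher implementations above) ---
# A minimal, self-contained demonstration of the gopkg.in version-selection rule implemented by
# GopkgInFetcher._select_highest_compatible: for a major rev like 'v1', candidates are ranked by
# their numeric (minor, patch) components, so 'v1.10' outranks 'v1.9' where a plain string
# comparison would not. The helper and inputs below are hypothetical and exist only to illustrate
# that ordering; the real fetcher gathers its candidates from the GitHub refs API.
if __name__ == '__main__':
  from collections import namedtuple

  _DemoMatch = namedtuple('_DemoMatch', ['minor', 'patch', 'candidate'])

  def _demo_select(candidates, raw_rev):
    prefix = raw_rev + '.'
    matches = []
    for candidate in candidates:
      if candidate == raw_rev:
        matches.append(_DemoMatch(0, 0, candidate))
      elif candidate.startswith(prefix):
        parts = candidate[len(prefix):].split('.', 1)
        try:
          minor = int(parts[0])
          patch = 0 if len(parts) == 1 else int(parts[1])
        except ValueError:
          continue  # Skip refs like 'v1.beta' that do not fit the vX(.Y(.Z)) scheme.
        matches.append(_DemoMatch(minor, patch, candidate))
    # Tuple ordering compares minor, then patch, numerically - never the candidate string first.
    return max(matches).candidate if matches else None

  assert _demo_select({'v1', 'v1.2', 'v1.9', 'v1.10', 'v2.0'}, 'v1') == 'v1.10'
  assert _demo_select({'v2.0', 'master'}, 'v1') is None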