blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 257 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2ba398639002ee6e73bb66baf9147410fb204a72 | d03c99cf4e11775ea7c0391623ebf4bba1e9963f | /week4/mostFrequentVisitors.py | c672781de863a6967cc6b81b2ef0ba3d1f8dc11e | [
"MIT"
] | permissive | avinashsc/w261 | ce939155bc7662594a5d17129ed96840d251f287 | fb0cf538015093496f87f0c07ecdb63db93d0094 | refs/heads/master | 2020-04-21T19:45:07.023063 | 2016-12-08T01:48:38 | 2016-12-08T01:48:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,756 | py | #!/usr/bin/python
from mrjob.job import MRJob
from mrjob.step import MRStep
from mrjob.protocol import RawValueProtocol
import re
import operator
class mostFrequentVisitors(MRJob):
    """MRJob: for every page, find the customer who visited it most often."""
    # Emit raw (unquoted) values so the final output is plain CSV text.
    OUTPUT_PROTOCOL = RawValueProtocol
    # pageID -> URL lookup table, filled in reducer_init from a sidecar file.
    URLs = {}
    def steps(self):
        """Single MapReduce step: mapper -> combiner -> reducer_init -> reducer."""
        return [MRStep(
            mapper = self.mapper,
            combiner = self.combiner,
            reducer_init = self.reducer_init,
            reducer = self.reducer
        )]
    def mapper(self, _, line):
        # CSV record layout: field 1 = page id, field 4 = customer id
        # (assumed layout of the msweb visit log -- TODO confirm).
        data = re.split(",",line)
        pageID = data[1]
        custID = data[4]
        # Emit a one-visit count for this customer, keyed by page.
        yield pageID,{custID:1}
    def combiner(self,pageID,visits):
        # Pre-aggregate per-customer visit counts for one page on the map side.
        allVisits = {}
        for visit in visits:
            for custID in visit.keys():
                allVisits.setdefault(custID,0)
                allVisits[custID] += visit[custID]
        yield pageID,allVisits
    def reducer_init(self):
        # Build the pageID -> URL map; rows with too few fields (records that
        # are not page-attribute rows) raise IndexError and are skipped.
        with open("anonymous-msweb.data", "r") as IF:
            for line in IF:
                try:
                    line = line.strip()
                    data = re.split(",",line)
                    URL = data[4]
                    pageID = data[1]
                    self.URLs[pageID] = URL
                except IndexError:
                    pass
    def reducer(self,pageID,visits):
        # Merge all partial counts, then pick the customer with the largest
        # visit count for this page (ties resolved arbitrarily by max()).
        allVisits = {}
        for visit in visits:
            for custID in visit.keys():
                allVisits.setdefault(custID,0)
                allVisits[custID] += visit[custID]
        custID = max(allVisits.items(), key=operator.itemgetter(1))[0]
        yield None,self.URLs[pageID]+","+pageID+","+custID+","+str(allVisits[custID])
if __name__ == '__main__':
mostFrequentVisitors.run() | [
"jason.sanchez@blueowl.xyz"
] | jason.sanchez@blueowl.xyz |
4ba29b5b0658607d45ec4cddc703e57f647803a2 | ee81efa621f8a18569d8ac00e5176aff1a736d86 | /kornislav.py | 37758a2ea9cf0c8c91688ee2dd1fcffcb9de4eeb | [] | no_license | renaldyresa/Kattis | c8b29f40a84f4161f49c6247abf10ec2ecc14810 | e504f54602b054eeffaac48b43e70beb976ca94c | refs/heads/master | 2021-12-01T14:57:57.614911 | 2021-11-29T07:44:43 | 2021-11-29T07:44:43 | 182,920,692 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 74 | py | data = list(map(int, input().split()))
# `data` holds four rod lengths (read on the previous line). After sorting,
# the answer is the product of the 1st- and 3rd-smallest values
# (presumably the Kattis "kornislav" rectangle problem -- inferred from filename).
data.sort()
print(data[0]*data[2])
| [
"noreply@github.com"
] | renaldyresa.noreply@github.com |
45bb8fa9ff7cac2a617fde9225c823719d4b209b | 467c8b99507ccabd0d89042fb0510af88d387ec3 | /test/1.createShortcut/src/createShortcut.py | 21d3f05d98922c6071fe048b7a2fc3c39182bbeb | [] | no_license | Kosuke-Tomita/python | 5f5741a59ec097922a3d17decee4012578628dff | 8ffad74c912a2a08fe94947e29dee6434d6a1c03 | refs/heads/master | 2020-07-21T15:47:41.166210 | 2019-09-08T01:47:38 | 2019-09-08T01:47:38 | 206,912,586 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,717 | py | import tkinter
from tkinter import messagebox as tkMessageBox
from tkinter import filedialog as tkFileDialog
import os
import sys
from pathlib import Path
import os.path
import win32com.client
import datetime
class FolderDialog:
    """Thin wrapper around tkinter's folder-selection dialog."""

    def __init__(self):
        # No state to initialise; the dialog is created on demand in showFD().
        pass

    def showFD(self):
        """Open a folder picker rooted at C:/ and return the chosen path.

        Returns an empty value when the user cancels the dialog.
        """
        hidden_root = tkinter.Tk()
        # Hide the empty main window so only the dialog is visible.
        hidden_root.withdraw()
        return tkFileDialog.askdirectory(initialdir='c:/')
class ShortCut:
    """Creates Windows .lnk shortcuts inside one fixed parent folder."""

    def __init__(self, parentFolderPath):
        # Folder in which every shortcut produced by this instance is saved.
        self.folderPath = parentFolderPath

    def createShortCut(self, saveFilePath):
        """Create '<basename>.lnk' in self.folderPath pointing at saveFilePath."""
        link_name = os.path.basename(saveFilePath) + ".lnk"
        wsh = win32com.client.Dispatch('WScript.shell')
        link = wsh.CreateShortcut(os.path.join(self.folderPath, link_name))
        link.TargetPath = saveFilePath
        link.WindowStyle = 1  # open target in a normal window
        link.IconLocation = saveFilePath
        link.WorkingDirectory = self.folderPath
        link.Save()
# Ask the user for the parent folder whose entries will get shortcuts.
fd = FolderDialog()
fileFolderPath = fd.showFD()
if fileFolderPath in (None,''):
    sys.exit()  # user cancelled the dialog -- nothing to do
# Create the output folder for the shortcuts (timestamped to the millisecond).
dt = datetime.datetime.now()
dtStr = dt.strftime('%Y%m%d_%H%M%S_%f')[:-3]
saveFolderPath = os.path.join(os.getcwd(), 'shortcut' + '_' + dtStr)
os.mkdir(saveFolderPath)
# Create one shortcut per entry (files and sub-folders) of the chosen folder.
shortCut = ShortCut(saveFolderPath)
p = Path(fileFolderPath)
for filePath in list(p.iterdir()):
    shortCut.createShortCut(str(filePath))
# for path in list(p.glob('*')):
# message += str(path) + '\n'
# if message != '':
# tkMessageBox.showinfo('FolderInfo', message)
# else:
# tkMessageBox.showinfo('FolderInfo', 'NoFile')
| [
"54474920+Kosuke-Tomita@users.noreply.github.com"
] | 54474920+Kosuke-Tomita@users.noreply.github.com |
5e53ab4f41caab7253868ca8b0b46d897ca04b95 | a6588f6e38e90286549851bf38c76339d98d5d1e | /PythonOOP/basic07.py | 248f5c9337e90b6db4f73aaac06cbaa3a4734f62 | [] | no_license | Kamonphet/BasicPython | 6c5213d7ec265e000370bb6b13eea8c2a84825ee | aa99bcbf0cf69dd61d7495a6e5f9ba608814a8cb | refs/heads/main | 2023-07-24T00:35:01.916593 | 2021-09-09T13:23:06 | 2021-09-09T13:23:06 | 403,143,794 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,479 | py | # Inheritance การสืบทอดคุณสมบัติ => การสร้างสิ่งใหม่ขึ้นด้วยการสืบทอดหรือรับเอา
# some attributes are inherited/taken over from an existing class, so the new
# class is built on top of what already exists.
# Split into a superclass and subclasses.
# superclass
# super() => invokes the inherited members of the superclass
# class Employee:
class Employee:
    """Base employee class; subclasses supply a fixed department name."""
    # class variables: salary bounds shared by every employee
    _minSalary = 12000
    _maxSalary = 50000
    def __init__(self,name,salary,department):
        # instance variables
        self._name = name #protected (single underscore: visible to subclasses)
        self.__salary = salary        # name-mangled / private
        self.__department = department
    # protected method: print every attribute of this employee
    def _showdata(self):
        print("complete attribute")
        print("name = {}".format(self._name))
        print("salary = ",self.__salary)
        print("Department = ",self.__department)
    def _getIncome(self) :
        # yearly income = 12 monthly salaries
        return self.__salary*12
    # convert the object to a human-readable str
    def __str__(self) :
        return ("EmployeeName = {} , Department = {} , SalaryPerYear = {}".format(self._name,self.__department,self._getIncome()))
# subclass
# class name(Employee):
class Accounting(Employee):
    # department name (Thai: "Accounting department"); fixed for this subclass
    __departmentName = "แผนกบัญชี"
    def __init__(self,name,salary):
        # delegate to Employee, filling in this subclass's department name
        super().__init__(name,salary,self.__departmentName)
class Programmer(Employee):
    # department name (Thai: "Systems-development department")
    __departmentName = "แผนกพัฒนาระบบ"
    def __init__(self,name,salary):
        super().__init__(name,salary,self.__departmentName)
        # super()._showdata()
class sale(Employee):
    # department name (Thai: "Sales department")
    __departmentName = "แผนกขาย"
    def __init__(self,name,salary):
        super().__init__(name,salary,self.__departmentName)
# Create objects
obj1 = Employee("phet",50000,"Teacher")
obj2 = Employee("Flim",100000,"Bussines")
obj3 = Employee("Family",150000,"House")
account = Accounting('phet',40000)
programmer = Programmer('flim',60000)
Sale = sale('love',1000)
# Usage examples (kept commented out)
# print(Employee._maxSalary)
# print(account._minSalary)
# account._showdata()
# print("Income = {}".format(account._getIncome()))
# print(account.__str__())
"noreply@github.com"
] | Kamonphet.noreply@github.com |
2504fde740ea6ec4311efc8b5ea73256cae6680b | 0961b605531fa73cb88640b5978572217bdb6554 | /combinationSum.py | 178da555738c081ee937891c539841ec56526b69 | [] | no_license | LYoung-Hub/Algorithm-Data-Structure | e01d8b72c4026d9d4b9788016ca54c4e359e80ba | e42ec45d98f990d446bbf4f1a568b70855af5380 | refs/heads/master | 2020-07-13T17:17:42.897244 | 2019-11-11T06:15:59 | 2019-11-11T06:15:59 | 205,121,630 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 786 | py | class Solution(object):
ans = []
def combinationSum(self, candidates, target):
"""
:type candidates: List[int]
:type target: int
:rtype: List[List[int]]
"""
length = len(candidates)
if length == 0:
return []
self.ans = []
comb = []
self.backTracking(candidates, target, comb)
return self.ans
def backTracking(self, nums, target, comb):
if target == 0:
self.ans.append(comb)
return
if target < 0:
return
for i in range(0, len(nums)):
self.backTracking(nums[i:], target - nums[i], comb + [nums[i]])
# NOTE: this file is Python 2 -- `print` below is used as a statement.
if __name__ == '__main__':
    solu = Solution()
    print solu.combinationSum([2, 3, 6, 7], 7)
| [
"yangliu2@caltech.edu"
] | yangliu2@caltech.edu |
33c45d1bea515ce8e7487f1aaae7632a3290ea23 | 7266cf77381267869aa39b6345666d148793f153 | /THETA/run_0.8tZ_0.2bW_0.0tH.py | 783200ff41e1e95fad152beac8cafdd97d339b2f | [] | no_license | justinrpilot/VLQAnalysis | 8a0ff20f96be58d5613dc1eec308e43d0a931f96 | 99d6295ef985ebdd8a66820b024750b890ab4aff | refs/heads/master | 2020-05-03T06:17:42.717548 | 2019-06-04T20:16:16 | 2019-06-04T20:16:16 | 178,469,791 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,000 | py | import json
def allhadfilter(hname):
    """Filter theta histogram names of the form '<channel>__<process>[__...]'.

    A signal-region histogram is kept only when the channel's mass point
    matches the tprime signal process, and diboson templates are dropped in
    'SF' categories.  Rejected names get their '__' separators mangled to
    'DONOTUSE' so theta's naming convention no longer recognises them.
    """
    parts = hname.split("__")
    channel = parts[0]
    process = parts[1]
    keep = True
    # Each sigM channel may only contain the matching tprimeM signal sample.
    for mass in (800, 1000, 1200, 1400, 1600, 1800):
        if ("sig%d" % mass) in channel and ("tprime%d" % mass) not in process:
            keep = False
    # Diboson templates are excluded from the 'SF' categories.
    if 'diboson' in process and 'SF' in hname:
        keep = False
    if not keep:
        # Mangle every separator so the histogram is effectively ignored.
        hname = hname.replace("__", "DONOTUSE")
    return hname
def build_model__ttbar_allhad(allhadfile, mcstat):
    """Build the theta statistical model from the all-hadronic template file.

    allhadfile: path to the ROOT file holding the channel__process histograms.
    mcstat: currently unused -- MC statistical uncertainties are always on
            via include_mc_uncertainties=True (NOTE(review): confirm intent).
    """
    print "Using All-Had File: ", allhadfile
    mod = build_model_from_rootfile(allhadfile, root_hname_to_convention=allhadfilter, include_mc_uncertainties=True)
    # Avoid empty template bins and declare every tprime* process as signal.
    mod.fill_histogram_zerobins()
    mod.set_signal_processes("tprime*")
    # Per-object tagging scale-factor uncertainties; only referenced by the
    # commented-out per-category block below.
    sf_t = 0.02
    sf_W = 0.02
    sf_Z = 0.02
    sf_H = 0.02
    sf_b = 0.02
    sf_q = 0.02
    for chan in mod.get_observables():
        print chan
        for p in mod.processes:
            # QCD is data-driven here, so no rate uncertainties are attached.
            if 'qcd' in p: continue
            tags = 0
            # 2.7% luminosity uncertainty on every MC process in every channel.
            mod.add_lognormal_uncertainty('lumi', 0.027, p, chan)
            # Per-category tag scale-factor uncertainties, disabled for now:
            #if '1W' in chan:
            #    tags += 1
            #    mod.add_lognormal_uncertainty('W_SF', sf_W, p, chan)
            #if '2W' in chan:
            #    tags += 2
            #    mod.add_lognormal_uncertainty('W_SF', 2*sf_W, p, chan)
            #if '3W' in chan:
            #    tags += 3
            #    mod.add_lognormal_uncertainty('W_SF', 3*sf_W, p, chan)
            #if '4W' in chan:
            #    tags += 4
            #    mod.add_lognormal_uncertainty('W_SF', 4*sf_W, p, chan)
            #if '1t' in chan:
            #    tags += 1
            #    mod.add_lognormal_uncertainty('t_SF', sf_t, p, chan)
            #if '2t' in chan:
            #    tags += 2
            #    mod.add_lognormal_uncertainty('t_SF', 2*sf_t, p, chan)
            #if '3t' in chan:
            #    tags += 3
            #    mod.add_lognormal_uncertainty('t_SF', 3*sf_t, p, chan)
            #if '4t' in chan:
            #    tags += 4
            #    mod.add_lognormal_uncertainty('t_SF', 4*sf_t, p, chan)
            #if '1Z' in chan:
            #    tags += 1
            #    mod.add_lognormal_uncertainty('Z_SF', sf_Z, p, chan)
            #if '2Z' in chan:
            #    tags += 2
            #    mod.add_lognormal_uncertainty('Z_SF', 2*sf_Z, p, chan)
            #if '3Z' in chan:
            #    tags += 3
            #    mod.add_lognormal_uncertainty('Z_SF', 3*sf_Z, p, chan)
            #if '4Z' in chan:
            #    tags += 4
            #    mod.add_lognormal_uncertainty('Z_SF', 4*sf_Z, p, chan)
            #if '1H' in chan:
            #    tags += 1
            #    mod.add_lognormal_uncertainty('H_SF', sf_H, p, chan)
            #if '2H' in chan:
            #    tags += 2
            #    mod.add_lognormal_uncertainty('H_SF', 2*sf_H, p, chan)
            #if '3H' in chan:
            #    tags += 3
            #    mod.add_lognormal_uncertainty('H_SF', 3*sf_H, p, chan)
            #if '4H' in chan:
            #    tags += 4
            #    mod.add_lognormal_uncertainty('H_SF', 4*sf_H, p, chan)
            #if '1b' in chan:
            #    tags += 1
            #    mod.add_lognormal_uncertainty('b_SF', sf_b, p, chan)
            #if '2b' in chan:
            #    tags += 2
            #    mod.add_lognormal_uncertainty('b_SF', 2*sf_b, p, chan)
            #if '3b' in chan:
            #    tags += 3
            #    mod.add_lognormal_uncertainty('b_SF', 3*sf_b, p, chan)
            #if '4b' in chan:
            #    tags += 4
            #    mod.add_lognormal_uncertainty('b_SF', 4*sf_b, p, chan)
            #mod.add_lognormal_uncertainty('q_SF', (4-tags)*sf_q, p, chan)
        # Cross-section rate uncertainties for the individual backgrounds.
        #mod.add_lognormal_uncertainty('xsec_ttbar', 0.05, 'ttbar', chan)
        mod.add_lognormal_uncertainty('xsec_wjets', 0.10, 'wjets', chan)
        mod.add_lognormal_uncertainty('xsec_zjets', 0.10, 'zjets', chan)
        mod.add_lognormal_uncertainty('xsec_diboson', 0.50, 'diboson', chan)
        mod.add_lognormal_uncertainty('xsec_ttV', 0.50, 'ttV', chan)
        mod.add_lognormal_uncertainty('xsec_higgs', 0.50, 'higgs', chan)
    return mod
# Templates for the 80% tZ / 20% bW / 0% tH branching-fraction point.
infile = "templates/theta4jet_0.8tz_0.2bw_0.0th.root"
model = build_model__ttbar_allhad(infile, True)
#model_summary(model, create_plots=True, all_nominal_templates=False, shape_templates=True)
opts = Options()  # NOTE(review): `opts` appears unused; only `options` is configured
options = Options()
options.set('minimizer', 'strategy', 'robust')
options.set('minimizer', 'minuit_tolerance_factor', '10000000')
#runs = bayesian_quantiles(model, input='toys:0', n=1000, run_theta=False, hint_method='zero')
#runs_data = bayesian_quantiles(model, input='data', n=10, run_theta=False, hint_method='zero')
# results = bayesian_limits(model, input='toys:0', n=10, run_theta = True, **args)
#print results
#for sig in model.signal_process_groups:
#    print sig, runs[sig]
#    run = runs[sig]
#    run_data = runs_data[sig]
#    thisOptions = Options()
#    run.get_configfile(thisOptions)
#    run_data.get_configfile(thisOptions)
# Expected and observed Bayesian limits, written out as text tables.
expected, observed = bayesian_limits(model, 'all', n_toy = 500, run_theta = True)
expected.write_txt("limitsJAN_0.8tz_0.2bw_0.0th.txt")
observed.write_txt("obslimitsJAN_0.8tz_0.2bw_0.0th.txt")
#parameter_values_nom = {}
#for p in model.get_parameters([]):
# parameter_values_nom['beta_signal'] = 1.0
# parameter_values_nom[p] = 0.0
#histos = evaluate_prediction(model, parameter_values_nom, include_signal = True)
#out_histos_dict = dict()
#for channel in histos:
# out_histos_dict[channel] = dict()
# for hist in histos[channel]:
# out_histos_dict[channel].update( {hist : list(histos[channel][hist].get_values())} )
#with open('histos.json', 'w') as file:
# file.write(json.dumps(out_histos_dict))
#discovery(model, use_data=False, maxit=50, n = 100, n_expected = 100 )
# pVals = pvalue(model, input='toys:1.0', n = 50, options=options, bkgtoys_n = 100, bkgtoys_n_runs = 3)
# print pVal
# options = Options()
# options.set('minimizer', 'strategy', 'robust')
# options.set('minimizer', 'minuit_tolerance_factor', '10000000')
# parVals = mle(model, input='toys:0', n=10, options = options)
# print parVals
# parameter_values = {}
# parameter_values_nom = {}
# for p in model.get_parameters([]):
# parameter_values[p] = parVals['zpn3000'][p][0][0]
# parameter_values_nom[p] = 0.0#parVals['zpn3000'][p][0][0]
# histos = evaluate_prediction(model, parameter_values, include_signal = False)
# write_histograms_to_rootfile(histos, 'histos-mle.root')
# histos = evaluate_prediction(model, parameter_values_nom, include_signal = False)
# write_histograms_to_rootfile(histos, 'histos-nom.root')
# exp.write_txt("limits_combo_test.txt")
# obs.write_txt("limits_obs_combo_test.txt")
# o_file = open('limits.txt', 'w')
# for i in range(len(exp.x)):
# o_file.write( '%.2f %.5f' % (exp.x[i], exp.y[i]))
# o_file.write(' %.5f %.5f' % (exp.bands[1][1][i], exp.bands[1][0][i]))
# o_file.write(' %.5f %.5f' % (exp.bands[0][1][i], exp.bands[0][0][i]))
# o_file.write(' %.5f' % (obs.y[i] if obs else -1.))
# o_file.write('\n')
# o_file.close()
| [
"pilot@Justins-MacBook-Air-4.local"
] | pilot@Justins-MacBook-Air-4.local |
55e3891a2fa5e9360b2ff65099e45176087373ca | d8ec14c780f7536a099b6c4f03461ac546d54d6c | /helga_excuses.py | 6f6e408b60364668e4888aac2a13ec87064d9e81 | [] | no_license | alfredodeza/helga-excuses | bdcc0621a505fc9abd2cdc79c020bd7aa694c5a9 | 251c155affaf7d3412a65fe1cb572e6e565d8864 | refs/heads/master | 2021-01-10T19:11:16.352300 | 2013-12-10T23:48:43 | 2013-12-10T23:48:43 | 14,873,521 | 1 | 0 | null | 2013-12-10T23:48:44 | 2013-12-02T20:15:25 | Python | UTF-8 | Python | false | false | 416 | py | import requests
from BeautifulSoup import BeautifulSoup
from helga.plugins import command
@command('excuses', aliases=['excuse'],
         help='Show something from developer excuses. Usage: helga (excuses|excuse)')
def excuses(client, channel, nick, message, cmd, args):
    """Fetch developerexcuses.com and return the excuse text (ASCII only)."""
    page = requests.get('http://developerexcuses.com/')
    soup = BeautifulSoup(page.text)
    # The excuse is the page's only anchor; strip any non-ASCII characters.
    return soup.find('a').text.encode('ascii', 'ignore')
| [
"shaun.duncan@gmail.com"
] | shaun.duncan@gmail.com |
366b74b3e20dae7c6b4d55ed70cd676b8ed2c615 | 7a3ac2f27b2afb16d7e872d07d019b81be597417 | /NonAdditivePartition.py | f564c2f374087a55d26293fe03c95accefa7333b | [] | no_license | TropicalMaster/Master | 8519cc555a87ef5a9d1076ae3cd987e791215c6e | eb682eff7e13e967ad7a37f5c145895a96da4396 | refs/heads/main | 2023-04-13T14:35:55.910513 | 2021-04-18T20:23:14 | 2021-04-18T20:23:14 | 359,234,494 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,038 | py | from sympy import *
from math import *
import copy
from finiteField import *
from random import randint
class PartitionCodes():
    """Encoder/decoder for a partition-based rank-metric code.

    Field elements are handled in exponent ("degree") representation of a
    primitive element of the extension field (convention of `finiteField.FF`):
    an element is a one-entry list [d] and the zero element is the empty
    list [].  NOTE(review): semantics of FF.add/composite/addComp/invext are
    assumed from usage here -- confirm against finiteField.py.
    """
    def __init__(self, n, k, s, FF):
        # n: code length, k: dimension, s: q-power step, FF: finite-field helper
        self.n = n
        self.k = k
        self.s = s
        self.FF = FF
        self.q = FF.q
        self.p = FF.p
        # Define I to be any subspace of F_q expressed in exponent representation
        # of F_q^m
        element = [0]
        element.extend([0 for i in range(n-1)])
        element = self.FF.invext(element,True)
        self.I = [element]
        self.basis = [[i] for i in range(n)]
        # Create q-Vandermonde matrix of basis (Transposed Moore matrix)
        # Shifted because [i] = q^i in this case, but still holds
        self.M = self.qvan(self.basis,self.n)
        # Matrix inversion
        self.M_inv = self.FF.inv(copy.deepcopy(self.M))
    # Create a q-vandermonde matrix of input elements
    # a: vector containing alpha element degrees of length n
    # s: the number of rows in sxn matrix, the range of q-degrees to raise the a elements to
    def qvan(self,a,s):
        """Return the s x len(a) q-Vandermonde matrix of `a` in degree form."""
        # Initializing the matrix
        matrix = []
        # Going through the range s
        for i in range(s):
            row = []
            # Going through each element in the "a" vector
            for elem in a:
                if len(elem) != 0:
                    elem = elem[0]
                    # Raising the existing element degrees to q^i (* because "a" elements are given in degrees)
                    elem = (elem * (self.q**(self.s*i))) % self.p
                    row.append([elem])
                else:
                    row.append([])
            # Append the row to the matrix
            matrix.append(row)
        return matrix
    # Calculate the norm
    def norm(self,a):
        """Field norm of `a` (degree representation); [] maps to []."""
        deg = int((self.q**(2*self.n) - 1)/(self.q-1)) % self.p
        if len(a):
            norm = [(a[0]*deg) % self.p]
            return norm
        else:
            return []
    def modifiedBM(self,g):
        """Modified Berlekamp-Massey on the syndrome-like sequence `g`.

        Returns [L, Lambda, B]: the LFSR length and the (coefficients,
        q-degrees) pairs of the error-locator and auxiliary polynomials.
        """
        r = 0
        L = 0
        # Initializing polynomials with q-degree 0
        Lambda = [[0],[0]]
        B = [[0],[0]]
        while r <= len(g)-1:
            # Find delta_r as g_r + sum
            delta_r = g[r].copy()
            # Sum of Lambda coefficients multiplied with g coefficients
            for i in range(L):
                # Check whether g[r-i] is zero
                if len(g[r-(i+1)]):
                    coeff = [(Lambda[0][i+1] + g[r-(i+1)][0]*self.q**(self.s*(i+1))) % self.p]
                    delta_r = self.FF.add(delta_r,coeff,"+")
            # Condition
            if len(delta_r) == 0:
                B = self.FF.composite([0],B[0],[self.s],B[1])
            else:
                # Copy current Lambda
                Lambda_temp = copy.deepcopy(Lambda)
                # Find lambda - delta*x^[1]*B
                composite = self.FF.composite(delta_r,B[0],[self.s],B[1])
                Lambda = self.FF.addComp(Lambda[0],composite[0],Lambda[1],composite[1],"-")
                if 2*L > r:
                    B = self.FF.composite([0],B[0],[self.s],B[1])
                else:
                    # Multiply Lambda_temp with the inverse of delta_r
                    for i in range(len(Lambda_temp[0])):
                        Lambda_temp[0][i] = (Lambda_temp[0][i] - delta_r[0]) % self.p
                    # Define new B polynomial as Lambda_temp
                    B = copy.deepcopy(Lambda_temp)
                    # Increase L
                    L = r + 1 - L
            r = r + 1
        # Negate coefficients because of relation
        for i in range(len(Lambda[0])):
            Lambda[0][i] = self.FF.add([],[Lambda[0][i]],"-")[0]
        return [L, Lambda, B]
    def partialBM(self,L,Lambda,B,r,g):
        """One Berlekamp-Massey iteration at index r (no L update performed).

        NOTE(review): unlike modifiedBM, L is returned unchanged here --
        confirm this is intentional for the partial update.
        """
        # Find delta_r as g_r + sum
        delta_r = [g[r][0]]
        # Sum of Lambda coefficients multiplied with g coefficients
        for i in range(L):
            # Check whether g[r-i] is zero
            if len(g[r-(i+1)]):
                coeff = [(Lambda[0][i+1] + g[r-(i+1)][0]*self.q**(self.s*(i+1))) % self.p]
                delta_r = self.FF.add(delta_r,coeff,"+")
        # Condition
        if len(delta_r) == 0:
            B = self.FF.composite([0],B[0],[self.s],B[1])
        else:
            # Copy current Lambda
            Lambda_temp = copy.deepcopy(Lambda)
            # Find lambda - delta*x^[1]*B
            composite = self.FF.composite(delta_r,B[0],[self.s],B[1])
            Lambda = self.FF.addComp(Lambda[0],composite[0],Lambda[1],composite[1],"-")
        return L,Lambda,B
    def PartitionEncoding(self,r):
        """Encode message vector `r` (list of degree-form elements) into a codeword."""
        # Change received word according to condition
        if self.norm(r[0]) not in self.I:
            if (self.k+1) % 2 == 0:
                r.append(r[0].copy())
            else:
                r.append(self.FF.add([],r[0],"-"))
            r[0] = []
        codeword = self.FF.Codeword(r,self.M)
        return codeword
    def PartitionDecoding(self,f):
        """Decode received word `f`; returns the codeword/message list or
        the string "Decoding Failure".
        """
        beta = self.FF.Codeword(f,self.M_inv)
        # Error-free case: all high-order coefficients vanish.
        if beta[self.k+1:self.n] == [[] for i in range(self.k+1,self.n)]:
            print("Decoded word", beta)
            return beta
        # Berlekamp-Massey algorithm on coefficients from k+1 to 2n from beta
        t0, Lambda, B = self.modifiedBM(beta[self.k+1:self.n])
        g = copy.deepcopy(beta)
        g_sols = []
        lambda_vectors = []
        if t0 == int((self.n-self.k)/2):
            # Ambiguous case: try both candidate solutions.
            t,Lambda,B = self.modifiedBM(g[self.k:self.n])
            # Find g_0
            g_0 = []
            for i in range(1,t+1):
                coeff = [(Lambda[0][i] + g[self.n-i][0]*self.q**(self.s*i)) % self.p]
                g_0 = self.FF.add(g_0,coeff,"+")
            # Check if norm(g[0]-g_0) in I
            if self.norm(self.FF.add(g[0],g_0,"-")) in self.I:
                # Add to solution set
                lambda_vectors.append(Lambda[0].copy())
                g_sols.append(copy.deepcopy(beta))
            # Create a copy of g, and add g_0 at the end to find BM(g_k+1,g_n+1)
            g_temp = copy.deepcopy(g)
            g_temp.append(g[0].copy())
            t,Lambda,B = self.modifiedBM(g_temp[self.k+1:self.n+1])
            # Find g_k
            g_k_temp = []
            for i in range(1,t):
                coeff = [(Lambda[0][i] + g[self.k+t-i][0]*self.q**(self.s*i)) % self.p]
                g_k_temp = self.FF.add(g_k_temp,coeff,"+")
            g_kt = [(g[self.k+t][0]*self.q**(self.s*(self.n+t))) % self.p]
            g_k = self.FF.add(g_kt,g_k_temp,"-")
            g_k = [(g_k[0]-Lambda[0][t]) % self.p]
            norm = self.norm(self.FF.add(g[self.k],g_k,"-"))
            # Negate norm element if nks is odd
            if (self.n*self.k*self.s) % 2 != 0:
                norm = self.FF.add([],norm,"-")
            # Check norm(g[k]-g_k) not in I
            if norm not in self.I:
                # Negate according to encoding
                if (self.k+1) % 2 != 0:
                    g_k = self.FF.add([],g_k,"-")
                # Add to solution set
                g[self.k] = g_k
                lambda_vectors.append(Lambda[0].copy())
                g_sols.append(g)
        else:
            lambda_vectors.append(Lambda[0].copy())
            g_sols.append(g)
        for l in range(len(g_sols)):
            g = g_sols[l]
            lambda_vector = lambda_vectors[l]
            # Check periodicity
            check = 0
            checklimit = 3
            for i in range(self.n+checklimit):
                g_i = []
                for j in range(1,t0+1):
                    # Subscript of g is i-j % 2n
                    k = (i-j) % (self.n)
                    if len(g[k]):
                        coeff = [(lambda_vector[j] + g[k][0]*self.q**(j*self.s)) % self.p]
                        g_i = self.FF.add(g_i, coeff,"+")
                if i < self.n:
                    g[i] = g_i
                else:
                    # Beyond n, verify the recursion reproduces the sequence.
                    if g_i == g[i % self.n]:
                        check += 1
                if check == checklimit:
                    # Recover the codeword elements
                    c = []
                    for i in range(self.n):
                        e_i = []
                        for j in range(self.n):
                            if len(g[j]):
                                coeff = [(g[j][0] + self.basis[i][0]*self.q**(j*self.s)) % self.p]
                                e_i = self.FF.add(e_i, coeff,"+")
                        c_i = self.FF.add(f[i],e_i,"-")
                        c.append(c_i)
                    print("Decoded codeword ",c,"\n")
                    return c
                else:
                    if l == 2:
                        print("Decoding Failure")
                        return "Decoding Failure"
| [
"noreply@github.com"
] | TropicalMaster.noreply@github.com |
7765cc67a607b9556d7c75470b892c02b3fe5707 | f208676788a901f4b66fa0a5809ef5563c1d5471 | /classy_vision/hooks/classy_hook.py | ad5c0a900f8643ca8ed1f247fd4a4e113ac37853 | [
"MIT"
] | permissive | cwb96/ClassyVision | 10e47703ec3989260840efe22db94720122f9e66 | 597a929b820efdd914cd21672d3947fa9c26d55e | refs/heads/master | 2021-02-18T03:35:51.520837 | 2020-03-05T05:41:24 | 2020-03-05T05:43:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,321 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from abc import ABC, abstractmethod
from typing import Any, Dict
from classy_vision import tasks
class ClassyHookState:
    """Serializable state container used by ClassyHook instances.

    Any serializable data can be stored directly as attributes on an
    instance; the two methods below expose the attribute dict so hooks
    can be checkpointed and restored.
    """

    def get_classy_state(self) -> Dict[str, Any]:
        # The instance attribute dict *is* the state.
        return vars(self)

    def set_classy_state(self, state_dict: Dict[str, Any]):
        # Re-bind the attribute dict wholesale to the provided mapping.
        self.__dict__ = state_dict
class ClassyHook(ABC):
    """Abstract base class for training-loop hooks.

    Hooks inject behavior at fixed points of the training loop, invoked in
    chronological order:

        on_start -> on_phase_start -> on_step -> on_phase_end -> on_end

    Subclasses should call ``super().__init__()`` and keep any state on
    ``self.state``; everything stored there must be serializable. E.g. -

    .. code-block:: python

        class MyHook(ClassyHook):
            def __init__(self, a, b):
                super().__init__()
                self.state.a = [1, 2, 3]
                self.state.b = "my_hook"
                # lambdas and other non-serializable objects are not allowed
    """

    def __init__(self):
        self.state = ClassyHookState()

    def _noop(self, *args, **kwargs) -> None:
        """Do nothing.

        Derived classes may assign this to hook slots they don't use.
        """

    @classmethod
    def name(cls) -> str:
        """Return the name of this hook class."""
        return cls.__name__

    @abstractmethod
    def on_start(self, task: "tasks.ClassyTask") -> None:
        """Called once before training begins."""

    @abstractmethod
    def on_phase_start(
        self, task: "tasks.ClassyTask", local_variables: Dict[str, Any]
    ) -> None:
        """Called at the beginning of every phase."""

    @abstractmethod
    def on_step(self, task: "tasks.ClassyTask") -> None:
        """Called after each optimizer parameter update."""

    @abstractmethod
    def on_phase_end(
        self, task: "tasks.ClassyTask", local_variables: Dict[str, Any]
    ) -> None:
        """Called at the end of every phase (epoch)."""

    @abstractmethod
    def on_end(self, task: "tasks.ClassyTask") -> None:
        """Called once after training finishes."""

    def get_classy_state(self) -> Dict[str, Any]:
        """Return the hook's state dict (used for checkpointing)."""
        return self.state.get_classy_state()

    def set_classy_state(self, state_dict: Dict[str, Any]) -> None:
        """Restore hook state from ``state_dict``.

        ``state_dict`` must be the output of a prior :func:`get_classy_state`.
        """
        self.state.set_classy_state(state_dict)
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
610f110f48d5d27f54a0bc5be7c5258c9bb94d12 | ddddc401695a23f595e42e25abfb16baa3da82ba | /educode/apps.py | bda8e94ca6e930c4de9057d5acc31779b253fc79 | [
"BSD-3-Clause"
] | permissive | harshavardhan26082001/MovieApp | bf6a60dd03b0101ec7d0104414270bec610fff04 | cc0e9b743b3f73322af93997901c3c24265ec870 | refs/heads/main | 2023-05-07T19:03:53.862739 | 2021-05-27T05:52:42 | 2021-05-27T05:52:42 | 370,947,037 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 146 | py | from django.apps import AppConfig
class EducodeConfig(AppConfig):
    """Django application configuration for the `educode` app."""
    # Use 64-bit auto-increment primary keys by default (Django 3.2+).
    default_auto_field = 'django.db.models.BigAutoField'
    name = 'educode'
| [
"harsha26082001@gmail.com"
] | harsha26082001@gmail.com |
bf020a5eba52f22d341b35e48d8b8451ab99b48b | 977724d5e811c5b54963908022752e3e3517d30e | /main.py | abc855110c8863f56aa8d3ef9792b6d1d220de75 | [] | no_license | naem1023/CNN_DS | 5f7ec702a3aebbf0866f73eb3995825a0ca7b528 | 5a686efa3858346339d5d6c5c0cb930a8fc498a3 | refs/heads/master | 2023-03-28T11:24:05.523790 | 2021-04-01T10:20:31 | 2021-04-01T10:20:31 | 322,156,987 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,585 | py | import read_train_file
import model
import numpy as np
import matplotlib.pyplot as plt
import os
import cv2
import pandas as pd
import time
from multiprocessing import Process, Lock, Queue, Pool
import multiprocessing
from tqdm import tqdm
from tqdm import trange
import tensorflow as tf
from tensorflow.keras.models import Sequential, load_model
from tensorflow.keras.layers import Input, Conv2D, MaxPooling2D, Dense, Flatten, Dropout, BatchNormalization, Activation, ZeroPadding2D, GlobalAveragePooling2D
from keras.utils import to_categorical
from tensorflow.keras import initializers
from sklearn.model_selection import StratifiedShuffleSplit
import platform
def plot_loss_curve(history):
    """Plot training and validation loss per epoch.

    history: dict-like with 'loss' and 'val_loss' sequences
             (e.g. ``keras.callbacks.History.history``).
    """
    import matplotlib.pyplot as plt

    plt.figure(figsize=(15, 10))
    # Train curve first, then validation, matching the legend order below.
    for series in ('loss', 'val_loss'):
        plt.plot(history[series])
    plt.title('model loss')
    plt.xlabel('epoch')
    plt.ylabel('loss')
    plt.legend(['train', 'test'], loc='upper right')
    plt.show()
def train_model(X_train, X_test, y_train, y_test, model):
    """Compile and fit `model` on 300x300 RGB images, plot the loss curve,
    save the fitted model and its training history, and return the model.
    """
    # Ensure NHWC layout: (samples, 300, 300, 3).
    X_train = X_train.reshape(X_train.shape[0], 300, 300, 3)
    X_test = X_test.reshape(X_test.shape[0], 300, 300, 3)

    print("X_train.shape=", X_train.shape)
    print("y_train.shape", y_train.shape)
    print("X_test.shape=", X_test.shape)
    print("y_test.shape", y_test.shape)
    # print(y_train[0])

    '''
    softmax layer -> output=10개의 노드. 각각이 0부터 9까지 숫자를 대표하는 클래스
    이를 위해서 y값을 one-hot encoding 표현법으로 변환
    0: 1,0,0,0,0,0,0,0,0,0
    1: 0,1,0,0,0,0,0,0,0,0
    ...
    5: 0,0,0,0,0,1,0,0,0,0
    '''
    # reformat labels via one-hot encoding for the softmax output layer
    y_train = to_categorical(y_train)
    y_test = to_categorical(y_test)
    # print(y_train[0])

    # categorical_crossentropy: standard loss for multi-class classification
    model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
    # batch_size: number of samples per gradient-descent update
    history = model.fit(X_train, y_train, validation_data=(X_test, y_test), batch_size=16, epochs=30, verbose=1)

    plot_loss_curve(history.history)
    # print(history.history)
    print("train loss=", history.history['loss'][-1])
    print("validation loss=", history.history['val_loss'][-1])

    # save the fitted model to disk (Keras built-in serialization)
    model.save('model-201611263.model')

    # persist the per-epoch metrics so runs can be compared later
    history_df = pd.DataFrame(history.history)
    with open("history_data.csv", mode='w') as file:
        history_df.to_csv(file)

    return model
def get_class_name(n):
    """Label for class index n: 0=food, 1=interior, 2=exterior (else None).

    Comparison is done with `==` (not a dict lookup) on purpose: callers
    pass length-1 numpy arrays as well as plain ints.
    """
    for index, label in ((0, "food"), (1, "interior"), (2, "exterior")):
        if n == index:
            return label
def predict_image_sample(model, X_test, y_test, n):
    """Display up to 2 correctly and 2 wrongly classified random test samples.

    Draws up to `n` random samples from X_test, predicts each with `model`,
    and shows the image with the actual/predicted class until two correct
    and two wrong predictions have been displayed.

    model: fitted Keras model accepting (1, 300, 300, 3) input
    X_test / y_test: test images and integer labels
    n: maximum number of random draws
    """
    from random import randrange
    correct_count = 0
    wrong_count = 0
    for idx in range(n):
        # Stop as soon as both display quotas are filled.
        if correct_count >= 2 and wrong_count >= 2:
            break
        test_sample_id = randrange(len(X_test))
        test_image = X_test[test_sample_id]
        test_image = test_image.reshape(1, 300, 300, 3)
        # get answer
        y_actual = y_test[test_sample_id]
        # get prediction list (class probabilities), then the predicted label
        y_pred = model.predict(test_image)
        y_pred = np.argmax(y_pred, axis=1)
        # BUG FIX: the guards were `<= 2` with an exact `== 2 and == 2` break,
        # which let a third sample of one kind be displayed (the counter could
        # reach 3, after which the break condition could never trigger).
        if y_pred == y_actual and correct_count < 2:
            plt.imshow(test_image[0])
            plt.show()
            print("==right prediction==")
            print("y_actual number=", y_actual)
            print("y_actual class=", get_class_name(y_actual))
            # y_pred is still a length-1 array here
            print("y_pred number=", y_pred)
            print("y_pred number=", get_class_name(y_pred))
            print()
            correct_count += 1
        elif y_pred != y_actual and wrong_count < 2:
            plt.imshow(test_image[0])
            plt.show()
            print("==wrong prediction==")
            print("y_actual number=", y_actual)
            print("y_actual class=", get_class_name(y_actual))
            print("y_pred number=", y_pred)
            print("y_pred number=", get_class_name(y_pred))
            print()
            wrong_count += 1
    '''
    if y_pred != y_actual:
        print("sample %d is wrong!" %test_sample_id)
        with open("wrong_samples.txt", "a") as errfile:
            print("%d"%test_sample_id, file=errfile)
    else:
        print("sample %d is correct!" %test_sample_id)
    '''
def shuffle_and_valdiate(X, y):
    """Stratified 70/30 shuffle split; returns X_train, X_test, y_train, y_test.

    A fixed random_state makes the split reproducible.  (The typo in the
    function name is kept for compatibility with existing callers.)
    """
    print("start split and shuffle!")
    splitter = StratifiedShuffleSplit(train_size=0.7, test_size=0.3, n_splits=1, random_state=0)
    # n_splits=1, so this loop body runs exactly once.
    for idx_train, idx_test in tqdm(splitter.split(X, y)):
        X_train, X_test = X[idx_train], X[idx_test]
        y_train, y_test = y[idx_train], y[idx_test]
    print(X_train.shape)
    print(X_test.shape)
    return X_train, X_test, y_train, y_test
def get_image():
    """Load labelled images from ./images and return a shuffled split.

    Labels come from the filename prefix: "food" -> 0, "interior" -> 1,
    "exterior" -> 2.  Returns (X_train, X_test, y_train, y_test) via
    shuffle_and_valdiate().
    """
    image_dir = 'images'
    file_number = len(os.listdir(os.path.join(image_dir)))
    print(file_number)
    X = list()
    y = list()
    for image_name in tqdm(os.listdir(os.path.join(image_dir))):
        image = cv2.imread(os.path.join(image_dir, image_name))
        if image is None:
            # Unreadable / non-image file: skip it entirely.
            continue
        if image_name[:4] == "food":
            label = 0
        elif image_name[:8] == 'interior':
            label = 1
        elif image_name[:8] == 'exterior':
            label = 2
        else:
            # BUG FIX: files with an unrecognized prefix used to be appended
            # to X without a matching label in y, silently misaligning every
            # later (image, label) pair.  Skip them instead.
            continue
        X.append(image)
        y.append(label)
    start_time = time.time()
    print("read complete")
    X = np.array(X)
    y = np.array(y)
    end_time = time.time()
    print("convert image to numpy time = ", end_time - start_time)
    print("converting complete")
    print(X.shape)
    print(y.shape)
    start_time = time.time()
    X_train, X_test, y_train, y_test = shuffle_and_valdiate(X, y)
    end_time = time.time()
    print("shuffle image time = ", end_time - start_time)
    # read_train_file.write_data(X_train, X_test, y_train, y_test)
    return X_train, X_test, y_train, y_test
def make_common_model():
    """Build the plain CNN classifier for 300x300 RGB images (3 classes).

    The body is three repetitions of a 1x1(64) / 3x3(32) convolution pair,
    each convolution followed by batch norm, ReLU and 2x2 max pooling,
    then a small dense head with dropout and a 3-way softmax output.
    """
    layers = [Input(shape=(300, 300, 3), name='input_layer')]
    # Three identical conv stages; keeping them in a loop makes the repeated
    # structure explicit without changing the layer sequence.
    for _ in range(3):
        layers += [
            Conv2D(64, kernel_size=(1, 1)),
            BatchNormalization(),
            Activation('relu'),
            MaxPooling2D(pool_size=(2, 2)),
            Conv2D(32, kernel_size=(3, 3)),
            BatchNormalization(),
            Activation('relu'),
            MaxPooling2D(pool_size=(2, 2)),
        ]
    # Classification head.
    layers += [
        Flatten(),
        Dense(24, activation='relu'),
        Dropout(0.5),
        Dense(3, activation='softmax', name='output_layer'),
    ]
    model = Sequential(layers)
    model.summary()
    return model
def make_resnet_model():
    """Build a deeper hand-rolled CNN for 300x300 RGB inputs (3 classes).

    Despite the name, no skip connections are present here: it is a stack
    of strided convolutions with batch norm/ReLU, finished with global
    average pooling and a 3-way softmax.
    """
    model = Sequential()
    model.add(Input(shape=(300, 300, 3), name='input_layer'),)
    model.add(ZeroPadding2D(padding=(3,3)))
    # Aggressive 10x10 stride-2 stem convolution.
    model.add(Conv2D(32, (10, 10), strides=2, kernel_initializer='he_normal'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(ZeroPadding2D(padding=(1, 1)))
    model.add(MaxPooling2D((2, 2), strides=1, padding='same'))
    model.add(Conv2D(32, (1, 1), strides=1, padding='valid',
                     kernel_initializer='he_normal'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Conv2D(32, (3, 3), strides=1, padding='same', kernel_initializer='he_normal'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    # model.add(MaxPooling2D((2, 2), strides=1, padding='same'))
    # Stride-2 1x1 convolution downsamples instead of pooling.
    model.add(Conv2D(32, (1, 1), strides=2, padding='valid', kernel_initializer='he_normal'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Conv2D(32, (3, 3), strides=1, padding='same',
                     kernel_initializer='he_normal'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Conv2D(32, (3, 3), strides=1, padding='valid', kernel_initializer='he_normal'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    # model.add(MaxPooling2D((2, 2), strides=1, padding='same'))
    # model.add(Conv2D(8, (1, 1), strides=1, padding='same', activation='relu', kernel_initializer='he_normal'))
    # model.add(Flatten())
    # model.add(Dense(8, activation='relu'))
    # model.add(Dropout(0.5))
    model.add(GlobalAveragePooling2D())
    model.add(Dense(3, activation='softmax', name='output_layer'))
    model.summary()
    return model
if __name__ == '__main__':
    print(platform.architecture()[0])
    # import mnist
    #
    # mnist.train_mnist()
    all_start_time = time.time()
    start_time = time.time()
    # Configure TensorFlow so GPU memory is allocated on demand instead of
    # grabbing the whole device up front.
    config = tf.compat.v1.ConfigProto()
    config.gpu_options.allow_growth = True
    session = tf.compat.v1.Session(config=config)
    tf.compat.v1.keras.backend.set_session(session)
    # model = make_resnet_model()
    # NOTE(review): `model` here presumably refers to an imported `model`
    # module (model.model_resnet()); this assignment shadows that module
    # name from here on -- confirm against the file's imports.
    model = model.model_resnet()
    # model = make_common_model()
    # get train and test data
    X_train, X_test, y_train, y_test = get_image()
    print("Get all image")
    end_time = time.time()
    print("read image time = ", end_time - start_time)
    # X_train, X_test, y_train, y_test = read_train_file.read_data()
    # print("Read all image")
    # start_time = time.time()
    model = train_model(X_train, X_test, y_train, y_test, model)
    # NOTE(review): the freshly trained model is immediately replaced by a
    # checkpoint loaded from disk, so the prediction below evaluates the
    # saved 'model-201611263.model' rather than the run above -- confirm.
    model = load_model('model-201611263.model')
    predict_image_sample(model, X_test, y_test, 500)
    end_time = time.time()
    all_end_time = time.time()
    print("train elapsed time = ", end_time - start_time)
    print("all elapsed time = ", all_end_time - all_start_time)
| [
"relilau00@gmail.com"
] | relilau00@gmail.com |
b83067167ec0f4fa218892d23cdf72fb3e4791d4 | bf92c350a1799ac463bad046e8eea3271aeb7b15 | /3ds/tests/screen-metrics.py | 2ffeb6e74a166976529130f118d5f1990d13fa5c | [
"MIT"
] | permissive | ObsidianX/3ds_monty | 72065dbdfe9890c1a02bb0988aa0a5dd591a8cc6 | 8dc10ca4874175dffcb9c95d1e294c74f3c47ee2 | refs/heads/master | 2021-09-24T18:12:10.556030 | 2021-09-18T10:32:51 | 2021-09-18T10:32:51 | 56,961,916 | 20 | 6 | MIT | 2021-09-18T10:32:52 | 2016-04-24T08:30:45 | C | UTF-8 | Python | false | false | 742 | py | from citrus import *
import sf2d
# Initialize the default graphics mode and open a text console on the 3DS
# top screen; window is (x, y, width, height) in character cells.
gfx.init_default()
top = console.PrintConsole(gfx.SCREEN_TOP, window=(2, 2, 46, 26))
# Pixel dimensions of both screens as reported by sf2d.
metrics = {
    "top_width": sf2d.screen['top']['width'],
    "top_height": sf2d.screen['top']['height'],
    "bot_width": sf2d.screen['bottom']['width'],
    "bot_height": sf2d.screen['bottom']['height']
}
print("""Screen metrics:
Top screen:
width = %(top_width)s
height = %(top_height)s
Bottom screen:
width = %(bot_width)s
height = %(bot_height)s""" % metrics)
# Move the console cursor and show the exit hint.
top.set_position(13, 25)
print('Press Start to exit')
# Main loop: poll input every frame until the Start button is pressed.
while apt.main_loop():
    hid.scan_input()
    if hid.keys_down() & hid.KEY_START:
        break
    # Present the frame, then wait for the next vertical blank.
    gfx.flush_buffers()
    gfx.swap_buffers()
    gsp.wait_for_vblank()
gfx.exit() | [
"obsidianx@gmail.com"
] | obsidianx@gmail.com |
fcf69ccc998f4adb76624b38832b6312065d0673 | d1160216bfbeb13a0f8356d5fcf70a6588be0d2b | /server.py | 3b1c24fb2388ddabcc729992f682dcea6bc679ac | [] | no_license | afabijan/hypflask | e32770d036385aec096c7567a48a551e3df45354 | d4a292361a98b83f78ee579a32dada34abd18dc7 | refs/heads/master | 2021-01-10T14:56:18.854402 | 2015-06-05T13:50:30 | 2015-06-05T13:50:30 | 36,935,283 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 645 | py | from flask import Flask, render_template, request
import mymodule
# Flask application object; debug mode enables the reloader and in-browser
# tracebacks (development only).
app = Flask(__name__)
app.debug = True
# Home page: renders index.html with a hard-coded name.
@app.route("/")
def index():
    name="Aleksander"
    return render_template('index.html',name=name)
# Returns all data produced by mymodule.
@app.route("/data/")
def alldata():
    return mymodule.process()
# An example of passing a single URL parameter.
@app.route("/data/date=<date>")
def data(date=0):
    # Echo the captured path segment back as a string.
    return str(date)
    #return mymodule.process()
# An example of passing arbitrary parameters via a JSON POST body.
@app.route("/datapost", methods=['GET','POST'])
def datapost():
    # NOTE(review): the parsed JSON is stored in a misspelled, unused
    # variable and never returned -- presumably a stub; confirm intent.
    arguemtns = request.get_json()
    return "DATA:"
if __name__ == "__main__":
    app.run()
| [
"aleksander.fabijan@me.com"
] | aleksander.fabijan@me.com |
3cfa851500fb84c304d21eae3159205368643a00 | d54afd55df19afffa98dd767b812f24d204e9d1b | /Querying SQLite from Python-256.py | 6d038843eb0505f231aed39453320b21ab9c8ce2 | [] | no_license | nemkothari/Sql-Fundamentals | b7666a8ff64525a8cb02afccede2aa47178c7eb3 | 99bc7be798bbe88eef2af12eea5a9851d9e38914 | refs/heads/master | 2020-04-10T15:39:40.572504 | 2018-12-10T04:39:59 | 2018-12-10T04:39:59 | 161,117,632 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 805 | py | ## 3. Connecting to the Database ##
# Dataquest "Querying SQLite from Python" exercise script; each section
# reconnects on its own, mirroring the course steps.
import sqlite3
conn = sqlite3.connect('jobs.db')
## 6. Creating a Cursor and Running a Query ##
import sqlite3
conn = sqlite3.connect("jobs.db")
cursor = conn.cursor()
query = "select Major from recent_grads;"
cursor.execute(query)
majors = cursor.fetchall()
print(majors[0:2])
## 8. Fetching a Specific Number of Results ##
import sqlite3
conn = sqlite3.connect("jobs.db")
# Connection.execute() creates an implicit cursor; fetchmany(5) limits rows.
qry = " select Major , Major_category from recent_grads"
five_results = conn.execute(qry).fetchmany(5)
## 9. Closing the Database Connection ##
conn = sqlite3.connect("jobs.db")
conn.close()
## 10. Practice ##
import sqlite3
con = sqlite3.connect("jobs2.db")
qrt = 'select Major from recent_grads order by Major desc'
# BUG FIX: this query must run on `con` (the open jobs2.db connection);
# `conn` was closed in section 9, so conn.execute() raised
# sqlite3.ProgrammingError("Cannot operate on a closed database.").
reverse_alphabetical = con.execute(qrt).fetchall()
con.close() | [
"noreply@github.com"
] | nemkothari.noreply@github.com |
d71cabbf96f5623e576964651f4821ca2a0f0d60 | 89ed212f9fc3554b70e8785d7230d0835f47e68d | /unorganized_code/kp_single_molecule.py | c8a3a7bd60c98d8a195232e346fecf043f5f6d16 | [
"MIT"
] | permissive | rganti/Channel_Capacity_T_Cell | b2499fe62631aae9e4fdd7cd2d0382fb1180ed33 | 62b9cba7a4248287598d06c010dcfcc4601a7006 | refs/heads/master | 2023-03-29T15:10:36.609260 | 2021-04-01T16:18:43 | 2021-04-01T16:18:43 | 275,271,929 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,800 | py | import argparse
from realistic_network import TcrCycleSelfWithForeign, KPRealistic, make_and_cd
from simulation_parameters import MembraneBindingParameters
from ssc_tcr_membrane import MembraneSharedCommands
class SingleMoleculeKprLs(TcrCycleSelfWithForeign):
    """Single-molecule KPR model with individually tracked self ligands (Ls).

    Replaces the parent's bulk 'Lf'/'Ls' species with one species per
    ligand molecule ("Ls1", "Ls2", ...), each with initial count 1.
    """
    def __init__(self, arguments=None):
        TcrCycleSelfWithForeign.__init__(self, arguments=arguments)
        self.rate_constants = MembraneBindingParameters()
        # Drop the parent's aggregate ligand pools; individual molecules are
        # added in change_ligand_concentration() instead.
        del self.n_initial['Lf']
        del self.n_initial['Ls']
        self.record = []
        self.output = []
        self.diffusion_flag = True
    def change_ligand_concentration(self, concentration):
        """Add *concentration* self-ligand molecules, one species each,
        all with the self off-rate k_self_off."""
        for i in range(concentration):
            name = "Ls{0}".format(i + 1)
            self.n_initial[name] = 1
            # self.record.append(name)
            self.output.append(name)
            self.k_L_off[name] = self.rate_constants.k_self_off
class SingleMoleculeKprLsLf(SingleMoleculeKprLs):
    """Single-molecule KPR model with self ligands plus a fixed number of
    foreign ligands ("Lf1"..."Lf3"), each with the foreign off-rate."""
    def __init__(self, arguments=None):
        SingleMoleculeKprLs.__init__(self, arguments=arguments)
        # Number of foreign ligand molecules added to the system.
        self.lf = 3
        for i in range(self.lf):
            name = "Lf{0}".format(i + 1)
            self.n_initial[name] = 1
            # self.record.append(name)
            self.output.append(name)
            self.k_L_off[name] = self.rate_constants.k_foreign_off
class KPRealisticSingleMolecule(KPRealistic):
    """KPRealistic simulation driver specialized for single-molecule ligands.

    Chooses the Ls-only or Ls+Lf ligand model based on self_foreign and
    writes SSC reaction scripts that include per-species diffusion commands.
    """
    def __init__(self, self_foreign=False, arguments=None):
        KPRealistic.__init__(self, self_foreign=self_foreign, arguments=arguments)
        if self.self_foreign_flag:
            self.ligand = SingleMoleculeKprLsLf(arguments=arguments)
        else:
            self.ligand = SingleMoleculeKprLs(arguments=arguments)
        self.single_molecule = True
    def define_diffusion(self, f):
        """Write one SSC 'diffusion' line per species to file object *f*.

        Plasma-membrane species diffuse within Plasma only; all others also
        exchange between Cytosol and Plasma.
        """
        for key in self.ligand.diffusion_rate_dict.keys():
            if self.ligand.diffusion_loc_dict[key] == "Plasma":
                f.write("diffusion {0} at {1} in {2}\n".format(key, self.ligand.diffusion_rate_dict[key],
                                                               self.ligand.diffusion_loc_dict[key]))
            else:
                f.write(
                    "diffusion {0} at {1} in {2}, Cytosol<->Plasma\n".format(key, self.ligand.diffusion_rate_dict[key],
                                                                             self.ligand.diffusion_loc_dict[key]))
    def generate_ssc_script(self, simulation_name):
        """Emit '<simulation_name>.rxn' (SSC script) and 'ordered_network'.

        The .rxn file gets the membrane region, forward/reverse reactions,
        optional diffusion commands, initial counts and recorded species;
        ordered_network mirrors the reaction lists for readability.
        """
        script_name = simulation_name + ".rxn"
        shared = MembraneSharedCommands(self.ligand.n_initial, self.ligand.record, self.ligand.diffusion_loc_dict)
        f = open(script_name, "w")
        n = open("ordered_network", "w")
        self.regions.define_membrane_region(f)
        f.write("-- Forward reactions \n")
        n.write("# Forward Reactions \n")
        self.define_reactions(f, self.ligand.forward_rxns, self.ligand.forward_rates, n)
        n.write("\n# Reverse Reactions \n")
        f.write("\n-- Reverse reactions \n")
        self.define_reactions(f, self.ligand.reverse_rxns, self.ligand.reverse_rates, n)
        f.write("\n")
        if self.ligand.diffusion_flag:
            f.write("\n-- Diffusion \n")
            self.define_diffusion(f)
            f.write("\n")
        shared.initialize(f)
        f.write("\n")
        shared.record_species(f)
        n.close()
        f.close()
if __name__ == "__main__":
    # Command-line driver: create a "<steps>_step" working directory and run
    # the single-molecule KPR simulation for self (Ls) or self+foreign
    # (Ls_Lf) ligands.
    parser = argparse.ArgumentParser(description="Submitting job for single step KPR",
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--run', action='store_true', default=False,
                        help='Flag for submitting simulations.')
    parser.add_argument('--ss', action='store_true', default=False,
                        help="flag for checking if sims approach steady-state.")
    parser.add_argument('--steps', dest='steps', action='store', type=int, default=0,
                        help="number of KP steps.")
    parser.add_argument('--ls', action='store_true', default=False,
                        help="flag for submitting Ls calculations.")
    parser.add_argument('--ls_lf', dest='ls_lf', action='store', type=int, default=3,
                        help="number of foreign ligands.")
    args = parser.parse_args()
    directory_name = "{0}_step".format(args.steps)
    make_and_cd(directory_name)
    if args.ls:
        sub_directory = "Ls"
        make_and_cd(sub_directory)
        kp = KPRealisticSingleMolecule(arguments=args)
    elif args.ls_lf:
        # NOTE(review): --ls_lf defaults to 3 (truthy), so this branch runs
        # whenever --ls is absent and the "Need to specify" error below is
        # effectively unreachable -- confirm intent.
        sub_directory = "Ls_Lf"
        # make_and_cd(sub_directory)
        kp = KPRealisticSingleMolecule(self_foreign=True, arguments=args)
    else:
        raise Exception("Need to specify Ls or Ls_Lf")
    kp.main_script(run=args.run)
| [
"rg468@cam.ac.uk"
] | rg468@cam.ac.uk |
b30f5537526f3f947a1bf63a0c422307f10c78e4 | 8b495f11fe76002342304092bd04338dcdee2378 | /tests/metasploit/test.py | 6e78227fc7040098fcb7e69447004d7b61e0a6b4 | [
"BSD-3-Clause"
] | permissive | cacalote/ptp | bd88c6ba253f4fb8d4a339730cda41f7b29ade39 | 720e9f8b33a15eb9ff88f858a68c99f30e37982e | refs/heads/master | 2020-09-15T13:29:49.671521 | 2015-05-20T15:41:02 | 2015-05-20T15:41:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,644 | py | from __future__ import print_function
import os
import traceback
from ptp import PTP
from ptp.libptp.constants import UNKNOWN, INFO, LOW, MEDIUM, HIGH
__testname__ = 'metasploit'
# Maps each Metasploit module name to its sample report files and the
# highest ranking constant PTP is expected to extract from each one.
# Consumed by run() below; commented entries are known gaps.
REPORTS = {
    # Scanner
    'auxiliary/scanner/ftp/anonymous': {
        'report_low.txt': LOW,
        'report_high.txt': HIGH,
    },
    'auxiliary/scanner/ftp/ftp_version': {
        'report_info.txt': INFO,
        'report_info2.txt': INFO,
    },
    'auxiliary/scanner/ftp/ftp_login': {
        'report_low.txt': LOW,
        'report_high.txt': HIGH,
    },
    'auxiliary/scanner/smtp/smtp_enum': {
        'report_low.txt': LOW,
        'report_low2.txt': LOW,
    },
    'auxiliary/scanner/vnc/vnc_login': {
        'report_high.txt': HIGH,
        'report_high2.txt': HIGH,
    },
    'auxiliary/scanner/vnc/vnc_none_auth': {
        'report_high.txt': HIGH,
    },
    'auxiliary/scanner/x11/open_x11': {
        'report_high.txt': HIGH,
        'report_high2.txt': HIGH,
    },
    # TODO: Add report examples for EMC AlphaStor.
    'auxiliary/scanner/mssql/mssql_ping': {
        'report_info.txt': INFO,
        'report_info2.txt': INFO,
        'report_info3.txt': INFO,
    },
    'auxiliary/scanner/mssql/mssql_login': {
        'report_high.txt': HIGH,
        'report_high2.txt': HIGH,
    },
    'auxiliary/scanner/mssql/mssql_hashdump': {
        'report_high.txt': HIGH,
    },
    # TODO: Add report examples for MSSQL Schema dump.
    # TODO: Add report examples for DCERPC endpoint mapper.
    # TODO: Add report examples for DCERPC hidden.
    'auxiliary/scanner/smb/smb_version': {
        'report_info.txt': INFO,
        'report_info2.txt': INFO,
        'report_info3.txt': INFO,
    },
    'auxiliary/scanner/smb/pipe_auditor': {
        'report_info.txt': INFO,
        'report_info2.txt': INFO,
    },
    'auxiliary/scanner/smb/smb_enumusers': {
        'report_info.txt': INFO,
        'report_info2.txt': INFO,
        'report_info3.txt': INFO,
    },
    'auxiliary/scanner/smb/smb_login': {
        'report_high.txt': HIGH,
        'report_high2.txt': HIGH,
        'report_unknown.txt': UNKNOWN,
    },
    'auxiliary/scanner/snmp/snmp_enumusers': {
        'report_low.txt': LOW,
    },
    # FIXME: Fix the snmp_enumshares signature.
    #'auxiliary/scanner/snmp/snmp_enumshares': {
    #    'report_low.txt': LOW,
    #},
    # TODO: Add report examples for SNMP enums.
    # TODO: Add report examples for SNMP AIX version.
    'auxiliary/scanner/snmp/snmp_login': {
        'report_low.txt': LOW,
        'report_high.txt': HIGH,
        'report_high2.txt': HIGH,
    }
}
def run():
    """Parse every sample Metasploit report and check its expected ranking.

    For each (plugin, reports) entry in REPORTS, parses each report file
    under tests/metasploit/<plugin>/ with PTP and verifies that
    get_highest_ranking() returns the expected constant.  Results are
    printed as OK/FAIL; failures never raise, they only print a traceback.
    """
    try:
        reports = REPORTS.iteritems()
    except AttributeError:  # Python3 dicts have items(), not iteritems()
        reports = REPORTS.items()
    for plugin, outputs in reports:
        print('\t> %s' % plugin)
        for output in outputs:
            ptp = PTP('metasploit')
            print('\t\ttest parse():', end=' ')
            res = 'OK'
            try:
                ptp.parse(
                    pathname=os.path.join(
                        os.getcwd(),
                        'tests/metasploit/',
                        plugin),
                    filename=output,
                    plugin=plugin)
            except Exception:
                print(traceback.format_exc())
                res = 'FAIL'
            print(res)
            print('\t\ttest get_highest_ranking():', end=' ')
            res = 'OK'
            try:
                assert ptp.get_highest_ranking() == outputs[output]
            except Exception:
                print(traceback.format_exc())
                res = 'FAIL'
            print(res)
| [
"sauvage.tao@gmail.com"
] | sauvage.tao@gmail.com |
c4116cf331ef22b2e9039e73422782228e1bf95c | e4f2374a50cfdc674ba8e97fa6616b2fb11f40d0 | /function_store.py | 6284927d14856e44faba396c172a1075b3202852 | [] | no_license | flying-pi/functionPatching | 9eabc7613dabc8537a8bc3386750014859ac29ff | b488b9ab9f43d6a2715dd24aec652bf582f0ffd3 | refs/heads/master | 2021-05-15T17:01:38.550572 | 2017-10-19T15:59:13 | 2017-10-19T15:59:13 | 107,568,028 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 86 | py | from peta_module import peta_function
def adder(a):
    # Forward to peta_function with a fixed second argument of 10.
    # NOTE(review): peta_function's semantics live in peta_module;
    # presumably this adds 10 to `a` -- confirm against peta_module.
    return peta_function(a, 10)
| [
"yura.braiko@raccoongang.com"
] | yura.braiko@raccoongang.com |
55f4a31aab12f7a1297431badf330562a268656e | 1d2e8da3aa4aa845dfd246adf1fb16568c6e21fe | /bakery/venv/bin/pip2 | b35d65ae05505ed7d6725acf4252ae497a18b037 | [] | no_license | stanislaw-rzewuski/REST-automation-demo | 732e5fb846c8ae1d0ba9b5ca0462f96fc33828ef | 00842d798c211f5eec11fd2dc9642d6cc0ac7a97 | refs/heads/master | 2020-07-02T13:32:17.956593 | 2019-09-01T23:45:26 | 2019-09-01T23:45:26 | 201,534,799 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 262 | #!/home/stan/repos/REST-automation-demo/bakery/venv/bin/python2.7
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal import main
# Auto-generated console-script shim for pip (created by the installer).
if __name__ == '__main__':
    # Strip a trailing "-script.py(w)" / ".exe" suffix from argv[0] so pip
    # reports a clean program name, then run pip and exit with its status.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"stanislaw.rzewuski@gmail.com"
] | stanislaw.rzewuski@gmail.com | |
6a12d1accfa480e4b12e2edf24372e30ecff77b8 | 7775d1f4db482114f734645f23a64fef1ef7e724 | /model/ESTRNN.py | 7ac59c92438ab761b5fd37a87b2257adbba998d8 | [
"MIT"
] | permissive | RunqiuBao/Event_ESTRNN | f325820413cfc938cec420a7c88123b605e9e1e7 | 6d156cc42a3a33bd0b4b7c4c4be98f943ff53acb | refs/heads/master | 2023-06-16T20:21:04.992942 | 2021-07-11T02:26:12 | 2021-07-11T02:26:12 | 360,787,970 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,918 | py | import torch
import torch.nn as nn
import torch.nn.functional as F
from thop import profile
from .arches import conv1x1, conv3x3, conv5x5, actFunc
# Dense layer
class dense_layer(nn.Module):
    """One dense (concatenating) layer: conv3x3 + activation whose output
    is concatenated with the input along the channel axis, so the channel
    count grows by `growthRate`."""
    def __init__(self, in_channels, growthRate, activation='relu'):
        super(dense_layer, self).__init__()
        self.conv = conv3x3(in_channels, growthRate)
        self.act = actFunc(activation)
    def forward(self, x):
        out = self.act(self.conv(x))
        # (n, in_channels + growthRate, h, w) after concatenation.
        out = torch.cat((x, out), 1)
        return out
# Residual dense block
class RDB(nn.Module):
    """Residual dense block: `num_layer` concatenating dense layers, a 1x1
    convolution that maps the grown channel count back to `in_channels`,
    and an identity (residual) connection."""
    def __init__(self, in_channels, growthRate, num_layer, activation='relu'):
        super(RDB, self).__init__()
        in_channels_ = in_channels
        modules = []
        for i in range(num_layer):
            modules.append(dense_layer(in_channels_, growthRate, activation))
            # Each dense layer adds growthRate channels to the running total.
            in_channels_ += growthRate
        self.dense_layers = nn.Sequential(*modules)
        self.conv1x1 = conv1x1(in_channels_, in_channels)
    def forward(self, x):
        out = self.dense_layers(x)
        out = self.conv1x1(out)
        # Residual connection back to the block input.
        out += x
        return out
# Middle network of residual dense blocks
class RDNet(nn.Module):
    """Chain of `num_blocks` RDBs whose intermediate outputs are all
    concatenated, then fused back to `in_channels` by 1x1 + 3x3 convs."""
    def __init__(self, in_channels, growthRate, num_layer, num_blocks, activation='relu'):
        super(RDNet, self).__init__()
        self.num_blocks = num_blocks
        self.RDBs = nn.ModuleList()
        for i in range(num_blocks):
            self.RDBs.append(RDB(in_channels, growthRate, num_layer, activation))
        self.conv1x1 = conv1x1(num_blocks * in_channels, in_channels)
        self.conv3x3 = conv3x3(in_channels, in_channels)
    def forward(self, x):
        # Collect every block's output for the dense (concatenated) fusion.
        out = []
        h = x
        for i in range(self.num_blocks):
            h = self.RDBs[i](h)
            out.append(h)
        out = torch.cat(out, dim=1)
        out = self.conv1x1(out)
        out = self.conv3x3(out)
        return out
# DownSampling module
class RDB_DS(nn.Module):
    """RDB followed by a stride-2 5x5 convolution: halves the spatial size
    and doubles the channel count."""
    def __init__(self, in_channels, growthRate, num_layer, activation='relu'):
        super(RDB_DS, self).__init__()
        self.rdb = RDB(in_channels, growthRate, num_layer, activation)
        self.down_sampling = conv5x5(in_channels, 2 * in_channels, stride=2)
    def forward(self, x):
        # x: n,c,h,w -> returns (n, 2c, h/2, w/2)
        x = self.rdb(x)
        out = self.down_sampling(x)
        return out
# Global spatio-temporal attention module
class GSA(nn.Module):
    """Global spatio-temporal attention: fuses the hidden features of
    neighboring frames with the center frame's features.

    For every non-center frame, the concatenated (center, frame) features
    are weighted channel-wise (global average pool -> MLP -> sigmoid),
    condensed back to the base width, and finally all results plus the
    center features are concatenated and mixed by a 1x1 convolution.
    """
    def __init__(self, para):
        super(GSA, self).__init__()
        self.n_feats = para.n_features
        self.center = para.past_frames
        self.num_ff = para.future_frames
        self.num_fb = para.past_frames
        self.related_f = self.num_ff + 1 + self.num_fb
        # Channel-attention MLP operating on pooled (center, frame) features.
        self.F_f = nn.Sequential(
            nn.Linear(2 * (5 * self.n_feats), 4 * (5 * self.n_feats)),
            actFunc(para.activation),
            nn.Linear(4 * (5 * self.n_feats), 2 * (5 * self.n_feats)),
            nn.Sigmoid()
        )
        # out channel: 160
        self.F_p = nn.Sequential(
            conv1x1(2 * (5 * self.n_feats), 4 * (5 * self.n_feats)),
            conv1x1(4 * (5 * self.n_feats), 2 * (5 * self.n_feats))
        )
        # condense layer
        self.condense = conv1x1(2 * (5 * self.n_feats), 5 * self.n_feats)
        # fusion layer
        self.fusion = conv1x1(self.related_f * (5 * self.n_feats), self.related_f * (5 * self.n_feats))
    def forward(self, hs):
        # hs: [(n=4,c=80,h=64,w=64), ..., (n,c,h,w)]
        self.nframes = len(hs)
        f_ref = hs[self.center]
        cor_l = []
        for i in range(self.nframes):
            if i != self.center:
                cor = torch.cat([f_ref, hs[i]], dim=1)
                w = F.adaptive_avg_pool2d(cor, (1, 1)).squeeze() # (n,c) : (4, 160)
                # squeeze() also drops a batch dimension of size 1; restore it.
                if len(w.shape) == 1:
                    w = w.unsqueeze(dim=0)
                w = self.F_f(w)
                w = w.reshape(*w.shape, 1, 1)
                cor = self.F_p(cor)
                cor = self.condense(w * cor)
                cor_l.append(cor)
        cor_l.append(f_ref)
        out = self.fusion(torch.cat(cor_l, dim=1))
        return out
# RDB-based RNN cell
class RDBCell(nn.Module):
    """RDB-based recurrent cell: encodes one frame, merges it with the
    previous hidden state, and produces (features, new hidden state).

    The input frame is downsampled twice (each F_B halves the spatial size),
    so the cell operates at 1/4 resolution of the input.
    """
    def __init__(self, para):
        super(RDBCell, self).__init__()
        self.activation = para.activation
        self.n_feats = para.n_features
        self.n_blocks = para.n_blocks
        self.F_B0 = conv5x5(3, self.n_feats, stride=1)
        self.F_B1 = RDB_DS(in_channels=self.n_feats, growthRate=self.n_feats, num_layer=3, activation=self.activation)
        self.F_B2 = RDB_DS(in_channels=2 * self.n_feats, growthRate=int(self.n_feats * 3 / 2), num_layer=3,
                           activation=self.activation)
        self.F_R = RDNet(in_channels=(1 + 4) * self.n_feats, growthRate=2 * self.n_feats, num_layer=3,
                         num_blocks=self.n_blocks, activation=self.activation) # in: 80
        # F_h: hidden state part
        self.F_h = nn.Sequential(
            conv3x3((1 + 4) * self.n_feats, self.n_feats),
            RDB(in_channels=self.n_feats, growthRate=self.n_feats, num_layer=3, activation=self.activation),
            conv3x3(self.n_feats, self.n_feats)
        )
    def forward(self, x, s_last):
        out = self.F_B0(x)
        out = self.F_B1(out)
        out = self.F_B2(out)
        # Concatenate the encoded frame with the previous hidden state.
        out = torch.cat([out, s_last], dim=1)
        out = self.F_R(out)
        s = self.F_h(out)
        return out, s
# Reconstructor
class Reconstructor(nn.Module):
    """Decoder: upsamples the fused multi-frame features back to a 3-channel
    image at input resolution via two stride-2 transposed convolutions."""
    def __init__(self, para):
        super(Reconstructor, self).__init__()
        self.para = para
        self.num_ff = para.future_frames
        self.num_fb = para.past_frames
        self.related_f = self.num_ff + 1 + self.num_fb
        self.n_feats = para.n_features
        self.model = nn.Sequential(
            nn.ConvTranspose2d((5 * self.n_feats) * (self.related_f), 2 * self.n_feats, kernel_size=3, stride=2,
                               padding=1, output_padding=1),
            nn.ConvTranspose2d(2 * self.n_feats, self.n_feats, kernel_size=3, stride=2, padding=1, output_padding=1),
            conv5x5(self.n_feats, 3, stride=1)
        )
    def forward(self, x):
        return self.model(x)
class Model(nn.Module):
    """
    Efficient spatio-temporal recurrent neural network (ESTRNN, ECCV2020).

    forward() expects x of shape (batch, frames, 3, H, W); each frame is
    processed by the recurrent RDBCell, windows of hidden states are fused
    by GSA and decoded by Reconstructor.  Because each output needs
    `past_frames` earlier and `future_frames` later hidden states, only
    frames - past_frames - future_frames outputs are produced.
    """
    def __init__(self, para):
        super(Model, self).__init__()
        self.para = para
        self.n_feats = para.n_features
        self.num_ff = para.future_frames
        self.num_fb = para.past_frames
        # Hidden state runs at 1/4 the input resolution (two downsamplings).
        self.ds_ratio = 4
        self.device = torch.device('cuda')
        self.cell = RDBCell(para)
        self.recons = Reconstructor(para)
        self.fusion = GSA(para)
    def forward(self, x, profile_flag=False):
        if profile_flag:
            return self.profile_forward(x)
        outputs, hs = [], []
        batch_size, frames, channels, height, width = x.shape
        s_height = int(height / self.ds_ratio)
        s_width = int(width / self.ds_ratio)
        # forward h structure: (batch_size, channel, height, width)
        s = torch.zeros(batch_size, self.n_feats, s_height, s_width).to(self.device)
        for i in range(frames):
            h, s = self.cell(x[:, i, :, :, :], s)
            hs.append(h)
        for i in range(self.num_fb, frames - self.num_ff):
            out = self.fusion(hs[i - self.num_fb:i + self.num_ff + 1])
            out = self.recons(out)
            outputs.append(out.unsqueeze(dim=1))
        return torch.cat(outputs, dim=1)
    # For calculating GMACs: pads the hidden-state list with random tensors
    # so every input frame yields an output, keeping the cost accounting
    # proportional to the sequence length.
    def profile_forward(self, x):
        outputs, hs = [], []
        batch_size, frames, channels, height, width = x.shape
        s_height = int(height / self.ds_ratio)
        s_width = int(width / self.ds_ratio)
        s = torch.zeros(batch_size, self.n_feats, s_height, s_width).to(self.device)
        for i in range(frames):
            h, s = self.cell(x[:, i, :, :, :], s)
            hs.append(h)
        for i in range(self.num_fb + self.num_ff):
            hs.append(torch.randn(*h.shape).to(self.device))
        for i in range(self.num_fb, frames + self.num_fb):
            out = self.fusion(hs[i - self.num_fb:i + self.num_ff + 1])
            out = self.recons(out)
            outputs.append(out.unsqueeze(dim=1))
        return torch.cat(outputs, dim=1)
def feed(model, iter_samples):
    """Run *model* on the first element of *iter_samples* and return the result."""
    return model(iter_samples[0])
def cost_profile(model, H, W, seq_length):
    """Measure model cost with thop on a random (1, seq_length, 3, H, W)
    CUDA input; returns (flops per frame, parameter count).

    Requires a CUDA device (the input tensor is created with .cuda()).
    """
    x = torch.randn(1, seq_length, 3, H, W).cuda()
    # profile_flag=True routes Model.forward to profile_forward.
    profile_flag = True
    flops, params = profile(model, inputs=(x, profile_flag), verbose=False)
    return flops / seq_length, params
| [
"zzh.tech@gmail.com"
] | zzh.tech@gmail.com |
52c940283704d2f43f630be09f5c5b68923fc333 | 7032fd0d1652cc1bec1bff053af4f486a5704cd5 | /old/OpenExrId_1.0-beta.17/conanfile.py | dcb74bafee3fb8349547a73fae2331e0d04f1a9f | [] | no_license | MercenariesEngineering/conan_recipes | c8f11ddb3bd3eee048dfd476cdba1ef84b85af5e | 514007facbd1777799d17d041fc34dffef61eff8 | refs/heads/master | 2023-07-09T08:10:35.941112 | 2023-04-19T13:36:38 | 2023-04-19T13:36:38 | 169,575,224 | 7 | 1 | null | 2023-04-19T14:11:35 | 2019-02-07T13:23:02 | C++ | UTF-8 | Python | false | false | 2,256 | py | from conans import ConanFile, CMake, tools
import os, shutil
#conan remote add conan-transit https://api.bintray.com/conan/conan/conan-transit
#conan remote add hulud https://api.bintray.com/conan/hulud/libs
#conan remote add pierousseau https://api.bintray.com/conan/pierousseau/libs
class OpenEXRIdConan(ConanFile):
    """Conan recipe for the openexrid library (static library only;
    plugins are disabled in the CMake configuration below)."""
    name = "OpenExrId"
    version = "1.0-beta.17"
    license = "MIT"
    url = "https://github.com/MercenariesEngineering/openexrid"
    description = "OpenEXR files able to isolate any object of a CG image with a perfect antialiazing "
    settings = "os", "compiler", "build_type", "arch"
    options = {"shared": [True, False], "fPIC": [True, False] }
    default_options = "shared=False","*:shared=False","fPIC=True"
    generators = "cmake"
    def requirements(self):
        # Dependencies taken from our own recipes:
        self.requires("zlib/1.2.11@pierousseau/stable")
        self.requires("IlmBase/2.2.0@pierousseau/stable")
        self.requires("OpenEXR/2.2.0@pierousseau/stable")
        self.requires("re2/2019-06-01@pierousseau/stable")
        self.requires("OpenImageIO/1.6.18@pierousseau/stable")
    def configure(self):
        if self.settings.os == "Linux":
            # The fPIC option only exists on Linux; force it on for every
            # static dependency so they can be linked into shared objects.
            self.options["boost"].fPIC=True
            self.options["IlmBase"].fPIC=True
            self.options["OpenEXR"].fPIC=True
            self.options["OpenImageIO"].fPIC=True
            self.options["re2"].fPIC=True
            self.options["zlib"].fPIC=True
    def source(self):
        # Fetch the matching release branch of the upstream repository.
        self.run("git clone http://github.com/MercenariesEngineering/openexrid.git --branch %s" % self.version)
    def build(self):
        cmake = CMake(self)
        #cmake.verbose = True
        cmake.definitions["USE_CONAN"] = True
        cmake.definitions["BUILD_LIB"] = True
        cmake.definitions["BUILD_PLUGINS"] = False
        cmake.configure(source_dir="%s/openexrid" % self.source_folder)
        cmake.build()
    def package(self):
        # Ship headers plus the static library for both MSVC and gcc/clang.
        self.copy("*.h", dst="include/openexrid", src="openexrid/openexrid")
        self.copy("*.lib", dst="lib", keep_path=False)
        self.copy("*.a", dst="lib", keep_path=False)
    def package_info(self):
        self.cpp_info.libs = tools.collect_libs(self)
| [
"rousseau@mercenaries-engineering.com"
] | rousseau@mercenaries-engineering.com |
74d21d6d9cafd0126d88ab224385192c6153a0aa | 15e820be4636b1f95c962ed5d63d52d4abacc8dc | /Tutorial/Flask/4_CameraStreaming/camera.py | 4aba9a540cb41e882da9d45986107c480c032966 | [] | no_license | hashimotodaisuke/PlantFactoryTutorial | c2accab8c9fc6e618b2b10d475f5867db3a4996d | cb1dfb99929a44d97c251fa3682890e815372b3d | refs/heads/master | 2023-05-31T01:28:31.963952 | 2021-07-05T00:19:13 | 2021-07-05T00:19:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,191 | py | import cv2
class VideoCamera(object):
    """Wraps an OpenCV capture device and serves single frames as in-memory
    JPEG bytes (used by the Flask camera-streaming tutorial)."""
    def __init__(self):
        # Open the OpenCV camera; device index (0) is the built-in camera
        # on a laptop.
        self.video = cv2.VideoCapture(0)
    def __del__(self):
        # Release the capture device when the wrapper is destroyed.
        self.video.release()
    def get_frame(self):
        # read() returns two values, unpacked into success and image.
        # OpenCV frames are raw images, so convert to JPEG: imwrite would
        # save to a file, imencode keeps the result in memory.
        # cv2.imencode() returns a numpy.ndarray, turned into bytes below
        # with .tobytes().
        success, image = self.video.read()
        if success == True:
            ret, jpeg = cv2.imencode('.jpg', image)
        # If read() fails, return a fallback still image instead of hanging
        # (failures happen e.g. when several browsers view the stream at once).
        else:
            # NOTE(review): hard-coded fallback path; if the PNG is missing,
            # imread returns None and imencode will raise -- confirm.
            image = cv2.imread('/home/pi/Picture/raspi.png', cv2.IMREAD_GRAYSCALE)
            ret, jpeg = cv2.imencode('.jpg', image)
        return jpeg.tobytes()
| [
"hashi.uniden@gmail.com"
] | hashi.uniden@gmail.com |
587c4dfea6b066d51ee614baaa03ef70152816ab | 6fdb4a1a7ecb68dcddce8b03cb325578aaef3b33 | /2013-02-02/home/atm.py | d7b8635cc00d58365fe6db7df244a1ab52bd9edc | [] | no_license | lowrey/checkio | 19e62bf9383bb21a069bc782a99bbe0e2dcf09d2 | be0090a4d444648b2295ee088bce0c0e2b4e190d | refs/heads/master | 2021-01-23T12:05:17.214951 | 2014-06-09T03:34:50 | 2014-06-09T03:34:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,183 | py | # Withdraw without any incident
# 120 - 10 - 0.5 - 1% = floor(109.4) = 109
# 109 - 20 - 0.5 - 1% = floor(88.3) = 88
# 88 - 30 - 0.5 - 1% = floor(57.2) = 57
def withdraw(balance, amount):
    """Apply a withdrawal of *amount* to *balance*.

    The account is charged the amount itself plus a fee of 0.5 plus 1% of
    the amount; the result is truncated to a whole number via int().
    """
    fee = .5 + .01 * amount
    return int(balance - amount - fee)
def check_amount(amount):
    """An amount is valid when it is non-negative and a multiple of 5."""
    return amount >= 0 and amount % 5 == 0
def checkio(data):
    """Simulate a series of ATM withdrawals.

    *data* is (initial_balance, requested_amounts).  Invalid amounts
    (negative or not a multiple of 5) and withdrawals the balance cannot
    cover are skipped; every successful withdrawal echoes the new balance
    to stdout.  Returns the final balance.
    """
    balance, withdrawal = data
    for amount in withdrawal:
        if not check_amount(amount):
            continue
        remaining = withdraw(balance, amount)
        if remaining >= 0:
            balance = remaining
            print(balance)
    return balance
# Self-tests for the ATM kata (Python 2 script: note the print statements).
if __name__ == '__main__':
    assert checkio([120, [10 , 20, 30]]) == 57, 'First'
    # With one Insufficient Funds, and then withdraw 10 $
    assert checkio([120, [200 , 10]]) == 109, 'Second'
    # with one incorrect (non-multiple-of-5) amount
    assert checkio([120, [3, 10]]) == 109, 'Third'
    assert checkio([120, [200, 119]]) == 120 , 'Fourth'
    assert checkio([120, [120, 10, 122, 2, 10, 10, 30, 1]]) == 56, "It's mixed all base tests"
    print 'All Ok'
| [
"lowrey@server.fake"
] | lowrey@server.fake |
73720836ac4d19c75a01a8faa535d62cefe97bf7 | 58399ec14cef82b023fb9eb83188cd3f4f5f8c20 | /timeexample.py | 8247868e85c8c924d2d8e0e61bab5da117fab710 | [] | no_license | Venky9791/Venky_Geekexample | 0baf4262c05cfa1db4e2b2dfa57f05a2297b11da | 02d0389949d7add55a115ee9c02e064688706a9e | refs/heads/master | 2020-06-26T19:08:01.335717 | 2019-08-04T22:22:45 | 2019-08-04T22:22:45 | 199,725,955 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 665 | py | import time
from time import perf_counter as mytimer
import random
import tkinter
print (tkinter.TkVersion)
print (tkinter.TclVersion)
# Build and run the demo window.
mainwindow = tkinter.Tk()
# BUG FIX: Tk.title is a method; assigning to it replaced the method with a
# string and never set the window caption.  Call it instead.
mainwindow.title("My First Example GUI")
# BUG FIX: Tk geometry strings are "WxH+X+Y"; '640*640+8+400' is rejected
# by Tk with a TclError ("bad geometry specifier").
mainwindow.geometry('640x640+8+400')
mainwindow.mainloop()
# input("Press enter to Start the timeer")
# waittime= random.randint(1,6)
# time.sleep(waittime)
# starttime = mytimer()
# endtime= input ("Press Enter to Stop the timer")
# endtime=mytimer()
#
#
# print("Started at"+time.strftime("%X",time.localtime(starttime)))
# print("Ended at"+time.strftime("%X",time.localtime(endtime)))
# print ("Your Reaction time is {} seconds" .format(endtime-starttime))
| [
"bharthivenky76@gmail.com"
] | bharthivenky76@gmail.com |
987986c93691efd140bd8faaf1832769a486b00e | 0116525d908fd3a604dfe6a4da4ffc455d5f7d40 | /arraymin.py | 963aa15c0df1484c57d06914d65f53f3e30a5e6d | [] | no_license | Krithikasri/set3beginner | c9a18cf20087b72a5aab27c6ba7427a4e05bfb64 | 2d6269605a48905923d428e1717575759dd5a6ad | refs/heads/master | 2020-04-22T03:27:12.009941 | 2019-02-14T05:48:26 | 2019-02-14T05:48:26 | 170,087,158 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 62 | py | N=int(input())
# Read the space-separated integers and print the smallest one.
k=list(map(int,input().split()))
print(min(k))
| [
"noreply@github.com"
] | Krithikasri.noreply@github.com |
942d5f383fb074463bde66060a1faedb97568626 | 1033c93917117f462771571c29dd046954582bd8 | /revscores/features/proportion_of_symbolic_added.py | 2eeae56295eca238e2c206c786853e46201b8d7b | [
"MIT"
] | permissive | jonasagx/Revision-Scoring | d4e3e892ac5de3a7f3032ef2b4fcc7b6efb20330 | dfacba014e30d49577aa1a56aab13393ecede9d5 | refs/heads/master | 2021-01-17T11:57:39.393734 | 2015-01-10T19:13:02 | 2015-01-10T19:13:02 | 29,064,762 | 0 | 1 | null | 2015-01-10T19:13:03 | 2015-01-10T17:25:22 | Python | UTF-8 | Python | false | false | 501 | py |
from .chars_added import chars_added
from .feature import Feature
from .symbolic_chars_added import symbolic_chars_added
def process(chars_added, symbolic_chars_added):
    """Return the fraction of added characters that are symbolic.

    A zero chars_added count is replaced by 1 to avoid division by zero.
    """
    denominator = chars_added or 1
    return symbolic_chars_added / denominator
# Feature wiring: computes the proportion of symbolic characters among all
# characters added, from the two upstream character-count features.
proportion_of_symbolic_added = Feature("proportion_of_symbolic_added", process,
                                       returns=float,
                                       depends_on=[chars_added,
                                                   symbolic_chars_added])
| [
"aaron.halfaker@gmail.com"
] | aaron.halfaker@gmail.com |
c86e24d69cca136581dc75c9b380027f59bfa8d9 | 9b82603adcd8f5ffa9cc9a89b1cc5626ae0c671d | /pan5.py | be25fcb069f2b94db60e56db9513304f6515f444 | [] | no_license | googlelxhgithub/testgit | 101c2511610d5db9c1e83536c4098984b5624d3a | d371d0e7e03c4b6276e5af245f9b0ffbdb0f4d90 | refs/heads/master | 2023-08-17T08:14:08.780942 | 2021-09-27T23:04:38 | 2021-09-27T23:04:38 | 283,491,195 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,072 | py | # “倒计时”在我们日常生活中随处可见,
# 比如:交通标志、开工仪式、庆祝活动、 火箭升空。
# 但最戏剧化的还是电影 007 中定时炸弹的倒计时,还有《三体》中的倒计时信号。
# 今天的问题是:输入一个目标时间(包括年、月、日、时、分、秒),
# 如何写出从当前时间开始到目标时间的倒计时?
from datetime import datetime
import os
import time

# Ask for the target moment as a string, echo it, then parse it.
DDay = input("目标日期(格式:YY/MM/DD HH:MM:SS) ")
print(DDay)
DDay = datetime.strptime(DDay, "%Y/%m/%d %H:%M:%S")

# Refresh a countdown to the target once per second, forever.
while True:
    os.system('cls')
    NDay = datetime.now()
    remaining = DDay - NDay
    DD = remaining.days
    # Split the sub-day remainder into hours / minutes / seconds.
    HH, leftover = divmod(remaining.seconds, 3600)
    MM, secs = divmod(leftover, 60)
    print(f"{DD}天{HH}时{MM}分{secs}秒")
    time.sleep(1)
# cday = datetime.strptime("2017-8-1 18:20:20", "%Y-%m-%d %H:%M:%S")
# print(cday)
# print(cday.day)
| [
"googlelxh@foxmail.com"
] | googlelxh@foxmail.com |
55095ee0ea77fe40bd4ed68f53cd486d3d782b2d | fb235cccecab5368074bc43ed8677025f925dceb | /notebooks/westgrid/cffi_practice/__init__.py | 6a5ba61abdb1177997fc7a77bffbd803fbab65cb | [] | no_license | sbowman-mitre/parallel_python_course | 88a5f767de2f0f630d48faf94983fad51ecbe50f | 85b03809c9725c38df85b0ac1e9b34cc50c0dc54 | refs/heads/master | 2022-01-04T18:29:12.443568 | 2019-11-29T16:08:06 | 2019-11-29T16:08:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,313 | py | # import version for use by setup.py
from ._version import version_info, __version__ # noqa: F401 imported but unused
from pathlib import Path
import pdb
import os
import pdb
def get_paths(*args, **kwargs):
    """Locate the installed cffi_funs shared library and headers.

    Returns a dict with string values under the keys 'libfile', 'libdir'
    and 'includedir', or None (after printing a warning) when any of the
    paths does not exist.
    """
    # FIX: the original body contained unresolved git merge-conflict
    # markers (<<<<<<< HEAD / ======= / >>>>>>> checkpoint), which is a
    # SyntaxError, plus a leftover pdb.set_trace(). Resolved in favour of
    # the library-globbing branch.
    binpath = Path(os.environ['CONDA_PREFIX'])
    libdir = binpath / Path('lib')
    # NOTE(review): headers taken as $CONDA_PREFIX/include (the conda
    # convention); the other conflict branch used binpath.parent — confirm
    # against the actual package layout.
    includedir = binpath / Path('include')
    #
    # find either libcffi_funs.so or libcffi_funs.dll
    #
    library = list(libdir.glob('libcffi_funs.*'))
    if len(library) > 1:
        raise ImportError('found more than one libcffi_funs library')
    try:
        libfile = library[0]
    except IndexError:
        # Nothing found: fall back to a bare name so the existence check
        # below reports the problem.
        libfile = Path('libcffi_funs')
    for the_path in [libfile, libdir, includedir]:
        if not the_path.exists():
            print(f"couldn't find {str(the_path)}. Did you install cffi_funs?")
            out_dict = None
            break
    else:
        out_dict = dict(libfile=str(libfile), libdir=str(libdir),
                        includedir=str(includedir))
    return out_dict
| [
"paustin@eos.ubc.ca"
] | paustin@eos.ubc.ca |
067f9f37e818e5bb7b065854d145ba1f603166aa | 9680c83911441f9e796a8c87a38f060756911552 | /git_trojan.py | 4eb5f066268b109f3909c0c1962f4a578339fd7d | [] | no_license | qk13warcraft/chapter7 | bdf69b5ecd47ff7888e87a3ec9026fa6291a41f4 | e1300162725b1b61b530bfaf7bafa534da04f564 | refs/heads/master | 2021-01-21T21:04:58.298208 | 2017-05-25T14:54:55 | 2017-05-25T14:54:55 | 92,299,812 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,610 | py | # -*- coding: utf-8 -*-
import json
import base64
import sys
import time
import imp
import random
import threading
import Queue
import os
from github3 import login
"""
木马主体框架,从GitHub上下载配置选项和运行的模块代码
ttp://github3py.readthedocs.io/en/master/repos.html#github3.repos.branch.Branch
"""
trojan_id = "abc" #唯一标识了木马文件
trojan_config = "%s.json" %trojan_id
data_path = "data/%s/" %trojan_id
trojan_modules = []
configured = False
task_queue = Queue.Queue()
def connect_to_github():
"""
连接github,对用户进行认证,获取当前的repo和branch的对象提供给其他函数使用
"""
gh = login(username="qk13warcraft",password = "qk14warcraft")
repo = gh.repository("qk13warcraft","chapter7")
branch = repo.branch("master")
return gh,repo,branch
def get_file_contents(filepath):
"""
从远程的repo中抓取文件,将文件内容读取到本地变量中
"""
gh,repo,branch = connect_to_github()
tree = branch.commit.commit.tree.recurse()
for filename in tree.tree:
if filepath in filename.path:
print "[*] Found file %s" %filepath
blob = repo.blob(filename._json_data['sha'])
return blob.content
return None
def get_trojan_config():
"""
获得repo中的远程配置文件,木马解析其中的内容获得需要运行的模块名称
"""
global configured
config_json = get_file_contents(trojan_config)
config = json.loads(base64.b64decode(config_json))
configured = True
for task in config:
if task['module'] not in sys.modules:
exec("import %s" %task['module'])
return config
def store_module_result(data):
"""
将从目标机器上手机的数据推送到repo中
"""
gh,repo,branch = connect_to_github()
remote_path = "data/%s/%d.data" %(trojan_id,random.randint(1000,100000))
repo.create_file(remote_path,"Commit message",base64.b64encode(data))
return
class GitImporter(object):
"""
当python解释器加载不存在的模块时,该类就会被调用
"""
def __init__(self):
self.current_module_code = ""
def find_module(self,fullname,path=None):
"""
尝试获取模块所在的位置
"""
if configured:
print "[*] Attempting to retrieve %s" %fullname
new_library = get_file_contents("modules/%s" %fullname)
#如果能定位到所需的模块文件,则对其中的内容进行解密并将结果保存到该类中
#通过返回self变量,告诉python解释器找到了所需的模块
if new_library is not None:
self.current_module_code = base64.b64decode(new_library)
return self
return None
def load_module(self,name):
"""
完成模块的实际加载过程,先利用本地的imp模块创建一个空的模块对象,然后将GitHub中获得的代码导入到
这个对象中,最后,将这个新建的模块添加到sys.modules列表中,这样在之后的代码中就可以 import 方法
调用这个模块了
"""
module = imp.new_module(name)
exec self.current_module_code in module.__dict__
sys.modules[name] = module
return module
def module_runner(module):
task_queue.put(1)
result = sysy.modules[module].run()
task_queue.get()
#保存结果到我们的repo中
store_module_result(result)
return
#木马的主循环
sys.meta_path = [GitImporter()]
while True:
if task_queue.empty():
config = get_trojan_config()
for task in config:
t = threading.Thread(target=module_runner,args=(task['module'],))
t.start()
time.sleep(random.randint(1,10))
time.sleep(random.randint(1000,10000)) | [
"qk13warcraft@163.com"
] | qk13warcraft@163.com |
4668b524700dbf55e3711938e6cfd959affaa864 | 57ddfddd1e11db649536a8ed6e19bf5312d82d71 | /AtCoder/ABC1/ABC123/ABC123-A.py | 04402036b76e6ab088ca47d8dcc146c57c639e4d | [] | no_license | pgDora56/ProgrammingContest | f9e7f4bb77714dc5088c2287e641c0aa760d0f04 | fdf1ac5d1ad655c73208d98712110a3896b1683d | refs/heads/master | 2023-08-11T12:10:40.750151 | 2021-09-23T11:13:27 | 2021-09-23T11:13:27 | 139,927,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 191 | py | sm = float('inf')
bi = - float('inf')
for _ in range(5):
v = int(input())
if v < sm: sm = v
if v > bi: bi = v
if bi - sm > int(input()): print(':(')
else: print('Yay!')
| [
"doradora.prog@gmail.com"
] | doradora.prog@gmail.com |
8c36fc26a272f071d2585e8f26ae41f860d794bf | 85381529f7a09d11b2e2491671c2d5e965467ac6 | /OJ/Leetcode/Algorithm/54. Spiral Matrix.py | 877d512e72cd9a17631f7f49ff7225fae0269c52 | [] | no_license | Mr-Phoebe/ACM-ICPC | 862a06666d9db622a8eded7607be5eec1b1a4055 | baf6b1b7ce3ad1592208377a13f8153a8b942e91 | refs/heads/master | 2023-04-07T03:46:03.631407 | 2023-03-19T03:41:05 | 2023-03-19T03:41:05 | 46,262,661 | 19 | 3 | null | null | null | null | UTF-8 | Python | false | false | 688 | py | # -*- coding: utf-8 -*-
# @Author: HaonanWu
# @Date: 2017-03-03 10:57:26
# @Last Modified by: HaonanWu
# @Last Modified time: 2017-03-03 11:01:34
class Solution(object):
    def spiralOrder(self, matrix):
        """Return the elements of matrix in clockwise spiral order.

        Consumes the matrix layer by layer (the input list is mutated,
        exactly like the original implementation).
        """
        order = []
        while matrix:
            # Top row, left to right.
            order.extend(matrix.pop(0))
            # Right column, top to bottom.
            if matrix and matrix[0]:
                for row in matrix:
                    order.append(row.pop())
            # Bottom row, right to left.
            if matrix:
                order.extend(reversed(matrix.pop()))
            # Left column, bottom to top.
            if matrix and matrix[0]:
                for row in reversed(matrix):
                    order.append(row.pop(0))
        return order
"whn289467822@outlook.com"
] | whn289467822@outlook.com |
c7741bf2134b8580c0fa764a5a86ef149790da35 | 1e39e416db368a47c2cfe5eadf797abe2bf3ad4a | /model.py | e70d5dcc2dc4fda4425423d78f294e093224f0f4 | [] | no_license | r00t4/dog | 5846ddf52e0fda1ab30c4c7a027f87f1339f8d48 | 1c99dcd08d9610609efa4e558d4b2e4737e32de2 | refs/heads/master | 2020-09-09T15:19:35.323134 | 2019-11-13T13:56:28 | 2019-11-13T13:56:28 | 221,471,170 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,198 | py | import torch
import torch.nn as nn
import torch.nn.functional as F
class NeuralNetwork(nn.Module):
    """Five-conv CNN with a three-layer classifier head (120 outputs).

    The flatten size 256*12*12 implies 3x224x224 inputs
    (224 -> 112 -> 56 -> 28 -> 14 through the pools, then 14 -> 12 at conv5).
    """
    def __init__(self):
        super(NeuralNetwork, self).__init__()
        self.conv1 = nn.Conv2d(3, 16, 3, padding=1)
        self.conv2 = nn.Conv2d(16, 32, 3, padding=1)
        self.conv3 = nn.Conv2d(32, 64, 3, padding=1)
        self.conv4 = nn.Conv2d(64, 128, 3, padding=1)
        self.conv5 = nn.Conv2d(128, 256, 3)
        self.fc1 = nn.Linear(256*12*12, 4096)
        self.fc2 = nn.Linear(4096, 1024)
        self.fc3 = nn.Linear(1024, 120)

    def forward(self, input):
        """Conv stack -> flatten -> fully-connected head; returns logits."""
        feat = input
        # Four padded conv blocks, each halving the spatial size.
        for conv in (self.conv1, self.conv2, self.conv3, self.conv4):
            feat = F.max_pool2d(F.relu(conv(feat)), 2)
        # Final unpadded 3x3 conv (no pooling).
        feat = F.relu(self.conv5(feat))
        # Flatten to (batch, 256*12*12) for the classifier.
        feat = feat.view(-1, 256*12*12)
        feat = F.relu(self.fc1(feat))
        feat = F.relu(self.fc2(feat))
        return self.fc3(feat)
| [
"kborash@dar.kz"
] | kborash@dar.kz |
aea3a266f96e0e510e291772736803d003de39c4 | 08c4415606e2a06593c8d837c746d79f6e2645a9 | /heart-disease.py | efc62dbf34d98220f689e6ce4b503bd804b3bd26 | [] | no_license | vanphuoc9/heart-disease | 2f018074f8f7f177f0a125f79af03f70d61d1c6c | 713efcfbd1e41ec29364d5e5889da98d76fd5101 | refs/heads/master | 2020-03-26T11:39:01.224150 | 2018-09-03T04:26:52 | 2018-09-03T04:26:52 | 144,852,615 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,988 | py | import pandas as pd
import numpy as np
import sklearn
from sklearn.preprocessing import Imputer
#Tao ra mo hinh xac suat Bayes thong qua thu vien
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
# #Tao ra mo hinh xac suat Bayes thong qua thu vien
from sklearn.naive_bayes import GaussianNB
from sklearn.preprocessing import StandardScaler
# Gaussian Naive Bayes classifier for continuous features.
model = GaussianNB()
# Read the dataset from file; '?' marks missing values.
dataset = pd.read_csv("Heart_Disease_Data.csv",na_values="?", low_memory = False)
# Collapse disease severity grades 1, 2, 3, 4 into a single positive class 1.
dataset["pred_attribute"].replace(inplace=True, value=[1, 1, 1, 1], to_replace=[1, 2, 3, 4])
# # 13 dataset features
# feature13 = ['age','sex','cp','trestbps','chol','fbs','restecg','thalach','exang','oldpeak','slop','ca','thal']
# print dataset.isnull().sum()
# Load data: every column but the last is a feature; the last is the label.
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:, -1].values # = dataset.iloc[:, 13].values
# Fill missing feature values with the per-column mean.
my_imputer = Imputer(missing_values='NaN', strategy='mean', axis=0)
my_imputer = my_imputer.fit(X[:,0:13])
X[:, 0:13] = my_imputer.transform(X[:, 0:13])
# Standardize features to zero mean / unit variance.
scaler = StandardScaler()
X = scaler.fit_transform(X)
# Split the data using the hold-out protocol.
X_train,X_test,y_train,y_test= train_test_split(X,y)
# # Build the Bayes model from the X_train and y_train sets
print(X)
#
# # Predict labels for the remaining X and keep them for comparison
# dubao = model.predict(np.array([56,0,2,140,294,0,2,153,0,1.3,2,0,3]).reshape(1,13))
# thucte = y_test
# print (dubao)
def ReadData(age,sex,cp,trestbps,chol,fbs,restecg,thalach,exang,oldpeak,slop,ca,thal):
    """Fit the Naive Bayes model on the training split and classify one record.

    Takes the 13 patient features as positional arguments and returns the
    predicted class label (presumably 1 = disease, 0 = none — see the
    pred_attribute remapping above).
    """
    # Re-fit on every call, as the original did (cheap for GaussianNB).
    model.fit(X_train, y_train)
    record = np.array([age, sex, cp, trestbps, chol, fbs, restecg, thalach,
                       exang, oldpeak, slop, ca, thal]).reshape(1, 13)
    return model.predict(record)[0]
# Example: classify a single hand-entered patient record.
print (ReadData(65,0,4,150,225,0,2,114,0,1,2,3,7))
# print ("Do chinh xac tong the: ",accuracy_score(thucte,dubao))
#### KNN
##to choose the right K we build a loop witch examen all the posible values for K.
| [
"thaiphuoc1997@gmail.com"
] | thaiphuoc1997@gmail.com |
143a773bbbec049d6b12a6406b50a9fce3cdd585 | 26dec2f8f87a187119336b09d90182d532e9add8 | /mcod/resources/documents.py | da3e92fb9c8f6d9a843336fb6541b7e1b3f9d460 | [] | no_license | olekstomek/mcod-backend-dane.gov.pl | 7008bcd2dbd0dbada7fe535536b02cf27f3fe4fd | 090dbf82c57633de9d53530f0c93dddf6b43a23b | refs/heads/source-with-hitory-from-gitlab | 2022-09-14T08:09:45.213971 | 2019-05-31T06:22:11 | 2019-05-31T06:22:11 | 242,246,709 | 0 | 1 | null | 2020-02-24T22:39:26 | 2020-02-21T23:11:50 | Python | UTF-8 | Python | false | false | 2,197 | py | from django.apps import apps
from django_elasticsearch_dsl import DocType, Index, fields
from mcod import settings
from mcod.lib.search.fields import TranslatedTextField
# Resolve model classes through the Django app registry.
Resource = apps.get_model('resources', 'Resource')
Dataset = apps.get_model('datasets', 'Dataset')
# NOTE(review): TaskResult is resolved but not referenced in this module —
# confirm it is still needed.
TaskResult = apps.get_model('django_celery_results', "TaskResult")
# Elasticsearch index for resources, configured from project settings.
INDEX = Index(settings.ELASTICSEARCH_INDEX_NAMES['resources'])
INDEX.settings(**settings.ELASTICSEARCH_DSL_INDEX_SETTINGS)
# Nested mapping for a tabular resource schema: per-column
# name/type/format plus the missing-value marker.
# NOTE(review): data_schema is not attached to ResourceDoc in this file —
# confirm it is used elsewhere.
data_schema = fields.NestedField(attr='schema', properties={
    'fields': fields.NestedField(properties={
        'name': fields.KeywordField(attr='name'),
        'type': fields.KeywordField(attr='type'),
        'format': fields.KeywordField(attr='format')
    }),
    'missingValue': fields.KeywordField(attr='missingValue')
})
@INDEX.doc_type
class ResourceDoc(DocType):
    """Elasticsearch document mapping for Resource objects.

    Only published resources are indexed (see get_queryset); documents
    are re-indexed when a related Dataset changes.
    """
    id = fields.IntegerField()
    slug = fields.TextField()
    uuid = fields.TextField()
    # Multilingual title with a completion suggester for autocomplete.
    title = TranslatedTextField('title', common_params={'suggest': fields.CompletionField()})
    description = TranslatedTextField('description')
    file_url = fields.TextField(
        attr='file_url'
    )
    download_url = fields.TextField(
        attr='download_url'
    )
    link = fields.TextField()
    format = fields.KeywordField()
    file_size = fields.LongField()
    type = fields.KeywordField()
    openness_score = fields.IntegerField()
    # Denormalized parent dataset: id plus translated title/slug.
    dataset = fields.NestedField(properties={
        'id': fields.IntegerField(),
        'title': TranslatedTextField('title'),
        'slug': TranslatedTextField('slug')
    })
    views_count = fields.IntegerField()
    downloads_count = fields.IntegerField()
    status = fields.TextField()
    modified = fields.DateField()
    created = fields.DateField()
    verified = fields.DateField()
    data_date = fields.DateField()
    class Meta:
        # Binds this document to the Resource model; a change to a related
        # Dataset also triggers re-indexing.
        doc_type = 'resource'
        model = Resource
        related_models = [Dataset, ]
    def get_instances_from_related(self, related_instance):
        """Return the resources to re-index when a related Dataset is saved."""
        if isinstance(related_instance, Dataset):
            return related_instance.resources.all()
    def get_queryset(self):
        """Restrict indexing to published resources only."""
        return self._doc_type.model.objects.filter(status='published')
| [
"piotr.zientarski@britenet.com.pl"
] | piotr.zientarski@britenet.com.pl |
b4f4e6f565e7d55f59c7f5d9117c3a4e0ea4a4ae | 53f733c092e24610d864fa66a2741311b3b31209 | /google/cloud/security/common/data_access/forseti_system_dao.py | 56807d33a930a37da8a06da90dcb1256c46fbd86 | [
"Apache-2.0"
] | permissive | shimizu19691210/forseti-security | a4860e30aa8a097b23d262d7a82fe2bca951a955 | a6a1aa7464cda2ad5948e3e8876eb8dded5e2514 | refs/heads/master | 2021-04-15T06:07:42.110629 | 2018-03-21T21:40:05 | 2018-03-21T21:40:05 | 126,314,876 | 1 | 0 | Apache-2.0 | 2018-03-22T09:57:54 | 2018-03-22T09:57:53 | null | UTF-8 | Python | false | false | 2,279 | py | # Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides the data access object (DAO) for Forseti system management."""
from google.cloud.security.common.data_access import dao
# pylint: disable=line-too-long
from google.cloud.security.common.data_access.sql_queries import cleanup_tables_sql
from google.cloud.security.common.util import log_util
LOGGER = log_util.get_logger(__name__)
class ForsetiSystemDao(dao.Dao):
"""Data access object (DAO) for Forseti system management.
Args:
global_configs (dict): Global config - used to lookup db_name
"""
def __init__(self, global_configs=None):
dao.Dao.__init__(self, global_configs)
self.db_name = global_configs['db_name']
def cleanup_inventory_tables(self, retention_days):
"""Clean up old inventory tables based on their age
Will detect tables based on snapshot start time in snapshot table,
and drop tables older than retention_days specified.
Args:
retention_days (int): Days of inventory tables to retain.
"""
sql = cleanup_tables_sql.SELECT_SNAPSHOT_TABLES_OLDER_THAN
result = self.execute_sql_with_fetch(
cleanup_tables_sql.RESOURCE_NAME,
sql,
[retention_days, self.db_name])
LOGGER.info(
'Found %s tables to clean up that are older than %s days',
len(result),
retention_days)
for row in result:
LOGGER.debug('Dropping table: %s', row['table'])
self.execute_sql_with_commit(
cleanup_tables_sql.RESOURCE_NAME,
cleanup_tables_sql.DROP_TABLE.format(row['table']),
None)
| [
"henryc@google.com"
] | henryc@google.com |
220274ef4a9b4c4918eadc9760519ac1b39963d8 | 3cd18a3e789d3a0739768f1ae848d9f74b9dbbe7 | /mounth001/day21/exercise03.py | fe9a7a38bb1bfcf3fe7454d21909dc564595ee5d | [] | no_license | Molly-l/66 | 4bfe2f93e726d3cc059222c93a2bb3460b21ad78 | fae24a968f590060522d30f1b278fcfcdab8b36f | refs/heads/master | 2020-09-28T12:50:18.590794 | 2019-11-27T04:42:28 | 2019-11-27T04:42:28 | 226,782,243 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 773 | py | """
lstack.py — linked-list implementation of a stack
Key ideas:
    1. Nodes store the payload and link to one another
    2. Wrap the operations: push, pop, is-empty, top element
    3. The head of the list serves as the stack top (no traversal needed)
"""
# Custom exception raised on invalid stack operations (e.g. pop when empty).
class StackError(Exception):
    pass


# Singly linked node: one payload value plus a link to the node below.
class Node:
    def __init__(self, val, next=None):
        self.val = val    # payload
        self.next = next  # link to the next node down the stack


# Linked stack: the head of the list is the stack top, so push/pop are O(1).
class LStack:
    def __init__(self):
        # _top references the top node; None means the stack is empty.
        self._top = None

    def is_empty(self):
        """Return True when the stack holds no elements."""
        return self._top is None

    def push(self, val):
        """Push val onto the top of the stack."""
        self._top = Node(val, self._top)

    def pop(self):
        """Remove and return the top value; raise StackError when empty."""
        if self._top is None:
            raise StackError("pop from empty stack")
        val = self._top.val
        # BUG FIX: the original advanced 'self.top' (a nonexistent
        # attribute) instead of 'self._top', so every pop raised
        # AttributeError and the node was never unlinked.
        self._top = self._top.next
        return val

    def top(self):
        """Return the top value without removing it; raise StackError when empty."""
        if self._top is None:
            raise StackError("top of empty stack")
        return self._top.val
| [
"769358744@qq.com"
] | 769358744@qq.com |
b2d1398b1871c9a27671f1b06ceffc99159ba998 | 21a5a58e19a989a7301c3d658c707608071725b2 | /train.py | 42396ce06a89820b1dfd94fcc756b4ecc3f91125 | [] | no_license | Meneville/fast-weights-test | 0787c40de1144166dfab84aecc4f289a1acb984a | 1a3e04d504b379263235a8a805dd7049b0a0406a | refs/heads/main | 2023-05-31T02:30:37.208511 | 2021-07-09T12:57:06 | 2021-07-09T12:57:06 | 384,435,172 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,189 | py | # ---------------------------------------------------------------------------
# 0. import
# ---------------------------------------------------------------------------
import tensorflow as tf
import numpy as np
import _pickle as pickle
from dataset import DataGenerator
from model import fw_rnn_model
from utils import *
# ---------------------------------------------------------------------------
# 1. parameter
# ---------------------------------------------------------------------------
# Sequence length and symbol count (26 + 10 + 1) per sample.
STEP_NUM = 9
ELEM_NUM = 26 + 10 + 1
BATCH_SZ = 128
HID_NUM = 50
SEED = 7777
# Name of the model-builder to use; also keys the checkpoint/log paths.
MODEL = 'fw_rnn_model'
model_path = './checkpoint/' + MODEL
log_path = './log/' + MODEL
learning_rate = 1e-4
epochs = 1000
# Seed all RNGs for reproducibility (helper from utils).
reset_seed(SEED)
# ---------------------------------------------------------------------------
# 2. Create Dataset
# ---------------------------------------------------------------------------
# NOTE(review): 'os' is not imported explicitly — presumably it arrives via
# 'from utils import *'; confirm, or add an explicit 'import os'.
with open(os.path.join('data', 'train.p'), 'rb') as f:
    x_train, y_train = pickle.load(f)
with open(os.path.join('data', 'valid.p'), 'rb') as f:
    x_val, y_val = pickle.load(f)
train_gen = DataGenerator(x_train, y_train, BATCH_SZ, shuffle=False)
val_gen = DataGenerator(x_val, y_val, BATCH_SZ, shuffle=False)
# test_gen = DataGenerator(x_test, y_test, BATCH_SZ, shuffle=False)
# ---------------------------------------------------------------------------
# 3. Train
# ---------------------------------------------------------------------------
opt = tf.keras.optimizers.Adam(lr=learning_rate, beta_1=0.9, beta_2=0.999, clipnorm=0.1)
# NOTE(review): eval(MODEL) executes the string as code — acceptable for
# this local constant, but never use eval on untrusted input.
model = eval(MODEL)(BATCH_SZ, STEP_NUM, ELEM_NUM, HID_NUM)
model.summary()
model.compile(loss={'output': loss_fn},
              optimizer=opt,
              metrics=['accuracy'])
# Keep only the best weights by validation accuracy; append metrics to CSV.
checkpoint = tf.keras.callbacks.ModelCheckpoint(model_path, monitor='val_accuracy',
                                                verbose=1, save_best_only=True,
                                                mode='max', save_weights_only=True)
csv_logger = tf.keras.callbacks.CSVLogger(log_path, append=True, separator=',')
callbacks_list = [checkpoint, csv_logger]
model.fit(train_gen, epochs=epochs, verbose=1, callbacks=callbacks_list,
          validation_data=val_gen)
| [
"chen_qh@zju.edu.cn"
] | chen_qh@zju.edu.cn |
d8cd32918e0332ff185300fa7e171a9a68f0cdd3 | 7ce076dd764fe4b5c7881734f157bc6f77a99ead | /tests/providers/exasol/operators/test_exasol.py | 68e3d121b48bccf3971c3dd9c3a0247ac1f8a694 | [
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"Python-2.0"
] | permissive | kaxil/airflow | db31c98e23f2e0d869d857484e56a7c58acef231 | 42f1da179db00491610946a0b089dd82269adc74 | refs/heads/master | 2023-04-28T04:46:38.478352 | 2020-09-28T20:51:16 | 2020-09-28T20:51:16 | 112,322,392 | 1 | 1 | Apache-2.0 | 2020-08-27T20:15:22 | 2017-11-28T10:42:19 | Python | UTF-8 | Python | false | false | 1,922 | py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
import mock
from airflow.providers.exasol.operators.exasol import ExasolOperator
class TestExasol(unittest.TestCase):
    """Unit tests for ExasolOperator argument forwarding."""
    @mock.patch('airflow.providers.exasol.hooks.exasol.ExasolHook.run')
    def test_overwrite_autocommit(self, mock_run):
        """autocommit=True must be forwarded to ExasolHook.run."""
        op = ExasolOperator(task_id='TEST', sql='SELECT 1', autocommit=True)
        op.execute({})
        mock_run.assert_called_once_with('SELECT 1', autocommit=True, parameters=None)
    @mock.patch('airflow.providers.exasol.hooks.exasol.ExasolHook.run')
    def test_pass_parameters(self, mock_run):
        """The parameters mapping must reach ExasolHook.run unchanged."""
        op = ExasolOperator(task_id='TEST', sql='SELECT {value!s}', parameters={'value': 1})
        op.execute({})
        mock_run.assert_called_once_with('SELECT {value!s}', autocommit=False, parameters={'value': 1})
    @mock.patch('airflow.providers.exasol.operators.exasol.ExasolHook')
    def test_overwrite_schema(self, mock_hook):
        """A schema override must be passed when constructing the hook."""
        op = ExasolOperator(task_id='TEST', sql='SELECT 1', schema='dummy')
        op.execute({})
        mock_hook.assert_called_once_with(exasol_conn_id='exasol_default', schema='dummy')
| [
"noreply@github.com"
] | kaxil.noreply@github.com |
6f233f3437f6dad2837d92f9c1bdd17ab312e768 | b32ab366f637cf28c7235905affb10ef1831472e | /Project_Part2/train.py | 067ad4dd1aef1dafd55ccbb244b50318b9be1928 | [] | no_license | miloooooz/Information_Retrieval_and_Web_Search | 523eaadddb40d060a2384cce2a8819bff9985a2d | a8070d3ac84d095a2c3f61fcc6b204aba3add1a3 | refs/heads/master | 2021-03-07T02:20:41.758503 | 2020-03-10T10:46:06 | 2020-03-10T10:46:06 | 246,239,799 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,451 | py | ## Import Necessary Modules...
import pickle
from pprint import pprint
import project_part2_redo as project_part2
## Read the data sets...
### Read the Training Data
train_file = './Data/train.pickle'
train_mentions = pickle.load(open(train_file, 'rb'))
### Read the Training Labels...
train_label_file = './Data/train_labels.pickle'
train_labels = pickle.load(open(train_label_file, 'rb'))
### Read the Dev Data... (For Final Evaluation, we will replace it with the Test Data)
dev_file = './Data/dev.pickle'
dev_mentions = pickle.load(open(dev_file, 'rb'))
### Read the Parsed Entity Candidate Pages...
fname = './Data/parsed_candidate_entities.pickle'
parsed_entity_pages = pickle.load(open(fname, 'rb'))
### Read the Mention docs...
mens_docs_file = "./Data/men_docs.pickle"
men_docs = pickle.load(open(mens_docs_file, 'rb'))
## Result of the model...
# NOTE(review): dev_mentions is loaded above but never used — the third
# argument below re-passes train_mentions, so the model is scored on its
# own training mentions. Confirm this is intentional.
result = project_part2.disambiguate_mentions(train_mentions, train_labels, train_mentions, men_docs, parsed_entity_pages)
# result
## We will be using the following function to compute the accuracy...
def compute_accuracy(result, data_labels):
    """Fraction of mention ids whose predicted label equals the gold label."""
    # Every predicted id must also appear in the gold labels.
    assert set(result) - set(data_labels) == set()
    correct = sum(1.0 for key in result if result[key] == data_labels[key]['label'])
    # Both mappings must cover exactly the same ids.
    assert len(result) == len(data_labels)
    return correct / len(result)
# Report overall accuracy of the disambiguation result.
accuracy = compute_accuracy(result, train_labels)
print("Accuracy = ", accuracy)
| [
"zhouruijun0510@hotmail.com"
] | zhouruijun0510@hotmail.com |
76f4330d8dd21990e81c8fffc080ff973d8ca274 | fbb2ff6d6734e02b24d9eef2a16ebba58c755a1b | /wx_event_propagate.py | bb82371c56ce8e78e08839ab3bdfb4977fd6db15 | [] | no_license | brainliubo/wxpython_project | a29fe18eb14c20bc1c348c724460d17d7967dee9 | 98edc15be6c16b7e413027231b4a512bd37343ee | refs/heads/master | 2020-03-24T21:06:09.994644 | 2019-01-02T11:47:08 | 2019-01-02T11:47:08 | 143,013,605 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,261 | py | '''
event.Skip() is the key call that lets an event continue propagating to the
next handler up the chain; if it is not called, the event stops after being
caught once.
'''
import wx
ID_BUTTON1 = wx.NewId() # generate two unique ids so handlers can tell the buttons apart
ID_BUTTON2 = wx.NewId()
class MyApp(wx.App):
    """Application object — the last stop for events that keep propagating."""
    def OnInit(self):
        # Build and show the main frame, then listen for button events that
        # bubble all the way up to the App object.
        self.frame = MyFrame(None, title="Event Propagation")
        self.SetTopWindow(self.frame)
        self.frame.Show()
        self.Bind(wx.EVT_BUTTON, self.OnButtonApp)
        return True
    def OnButtonApp(self, event):
        """App-level handler: only BUTTON ONE is reported at this level."""
        event_id = event.GetId()
        if event_id == ID_BUTTON1 :
            print ("BUTTON ONE Event reached the App Object")
class MyFrame(wx.Frame):
    """Main frame: hosts the panel and the two demo buttons."""
    def __init__(self, parent, id=wx.ID_ANY, title="event propagate frame",pos=wx.DefaultPosition,
                 size=wx.DefaultSize,style=wx.DEFAULT_FRAME_STYLE,name="MyFrame"):
        super(MyFrame, self).__init__(parent, id, title,pos, size, style, name)
        # Panel with two buttons laid out horizontally.
        self.panel = MyPanel(self)
        self.btn1 = wx.Button(self.panel, ID_BUTTON1,"Propagates")
        self.btn2 = wx.Button(self.panel, ID_BUTTON2, "Doesn't Propagate")
        sizer = wx.BoxSizer(wx.HORIZONTAL)
        sizer.Add(self.btn1, 0, wx.ALL, 10)
        sizer.Add(self.btn2, 0, wx.ALL, 10)
        self.panel.SetSizer(sizer)
        self.Bind(wx.EVT_BUTTON, self.OnButtonFrame)
    def OnButtonFrame(self, event):
        """Frame-level handler; Skip() lets the event continue upward."""
        event_id = event.GetId()
        if event_id == ID_BUTTON1:
            print("BUTTON ONE event reached the Frame")
            event.Skip()
        elif event_id == ID_BUTTON2:
            print ("BUTTON TWO event reached the Frame")
            event.Skip()
class MyPanel(wx.Panel):
    """First handler in the chain; forwards both buttons' events upward."""
    def __init__(self, parent):
        super(MyPanel, self).__init__(parent)
        self.Bind(wx.EVT_BUTTON, self.OnPanelButton)
    def OnPanelButton(self, event):
        """Panel-level handler; always calls Skip() so propagation continues."""
        event_id = event.GetId()
        if event_id == ID_BUTTON1:
            print ("BUTTON ONE event reached the Panel")
            event.Skip()
        elif event_id == ID_BUTTON2:
            print ("BUTTON TWO event reached the Panel")
            event.Skip()
            # Not skipping the event will cause its
            # propagation to end here
# Script entry point; False = do not redirect stdout/stderr to a wx window.
if __name__ == "__main__":
    app = MyApp(False)
    app.MainLoop()
"clairlb@163.com"
] | clairlb@163.com |
a3c0fbf2706cd04d397ed4f4e27f4e86f007625e | d302dc1cdb3f514d08cc812529b542da4ec7c2ae | /Prime.py | c48ea98bcbac66c0ea23679b9a4fadf13cb4320c | [] | no_license | Rahan13/CODE_KATA_PLAYER | 8f5bcdb429a484e951a11a257139709b8926ae10 | 7f1e40214a344cc4416f092959f6a7fa514d23fb | refs/heads/master | 2020-06-05T10:28:51.237104 | 2019-07-11T03:23:10 | 2019-07-11T03:23:10 | 192,409,548 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 144 | py | num = int(input())
# Scan for a divisor of num: print "yes" when one exists (composite),
# otherwise print "no" (matches the original script's output convention).
if any(num % cand == 0 for cand in range(2, num)):
    print("yes")
else:
    print("no")
| [
"noreply@github.com"
] | Rahan13.noreply@github.com |
34bfcc6b015aa99b68b4193b7ac4abc2d22eca43 | 46304762aa4dea478008545fcecff88dd56df13e | /build/lib/A22DSE/Parameters/Par_Class_All.py | 1dea22730931f0c10b07e4f0c9252d0577681645 | [] | no_license | ThomasRV/A22CERES | 12551a00be887a68e744523a4c7b548405346b3a | 21c1d1889ef402bd23668493a1a4c6acec344f58 | refs/heads/master | 2020-05-31T11:44:38.858255 | 2019-06-04T15:00:50 | 2019-06-04T15:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,375 | py | # -*- coding: utf-8 -*-
"""
Created on Mon May 13 11:37:54 2019
@author: hksam
"""
#import sys
#sys.path.append('../../')
#from A22DSE.Models.AnFP.Current.InitialSizing.AnFP_Exec_initsizing import WSandTW
#from A22DSE.Models.POPS.Current.cruisecalculations import CruiseRange, CruiseTime
#from A22DSE.Models.POPS.Current.cruisecalculations import CruiseRange, CruiseTime
class ParAnFPLst(object):
    """Aerodynamic & flight-performance parameters (wing, cruise, take-off
    and landing groups). Values marked DUMMY are placeholders that the
    sizing/performance tools are expected to overwrite at run time."""
    def __init__(self):
        self.A = None #[-] DUMMY VALUE
        self.e = None #[-] DUMMY VALUE
        self.CD0 = None #[-] DUMMY VALUE
        self.S = None #[m_squared] DUMMY VALUE
        #cruise parameters
        self.TtoW = None #[-] air to fuel ratio by POPS
        self.Mdd = None #[-]
        self.h_cruise = 20000.
        self.M_cruise = None #[-] DUMMY VALUE cruise mach number by POPS
        self.s_cruise = None
        self.V_cruise = None
        self.t_cruise = None
        self.CL_cruise = 1.2 #[-] DUMMY VALUE
        self.CL_max_cruise = 1.5
        self.c_j = 0.6/3600 #[s^-1] DUMMY VALUE
        self.SFC = 16*10e-6 #[kg/N/s] DUMMY VALUE
        self.LD = 16 #[-] DUMMY VALUE
        #takeoff parameters
        self.CL_to = 1.8 #[-] DUMMY VALUE cruise mach number by POPS
        self.CD_to = 0.1 #[-] DUMMY VALUE cruise mach number by POPS
        self.fieldlen_to = 2500 #m
        self.rho_SL = 1.225 #[kg/m3]
        self.T_to = None #[N] DUMMY
        self.Vr = 50 #[m/s] DUMMY
        #landing parameters
        self.CL_land = 2.8 #[-] DUMMY VALUE
        self.CD_land = 0.3 #[-] DUMMY VALUE cruise mach number by POPS
        self.fieldlen_land = 2500 #m
    def Get_V_cruise(self):
        # NOTE(review): s_cruise and t_cruise default to None, so calling
        # this before they are assigned raises TypeError; a separate
        # V_cruise attribute also exists — confirm which one callers use.
        return self.s_cruise/self.t_cruise
class ParCntrlLst(object):
    """Control & stability parameter container — placeholder, none defined yet."""
    def __init__(self):
        self.placeholder = None
class ParStrucLst(object):
    """Structures & weights parameters (masses, engine count, weight ratios)."""
    def __init__(self):
        self.MTOW = None #[kg]
        self.FW = None #[kg] #Fuel weight
        self.N_engines = 2 #[-]
        #ratios
        self.OEWratio = 1/2.47 #[-]
        self.wfratioclimb = 0.8
class ParPayloadLst(object):
    """Payload parameters for the sulphur-dispersion mission."""
    def __init__(self):
        self.disperRatePerTime = None
        self.airtofuel = 6 #[-] air to fuel ratio by POPS
        self.m_sulphur = 10000. #[kg] sulphur mass per flight by POPS
        self.rho_sulphur = 1121 #[kg/m^3] density of solid sulphur by POPS
        self.rho_alu = 2700 #[kg/m^3] density of aluminium by POPS
        self.dispersionrate = 8e-3 #[kg/m]
class ParCostLst(object):
    """Cost-estimation parameters (rates, factors and programme counts).

    CEF* are cost-escalation factors; r* are hourly rates; F* are
    adjustment factors; N* are aircraft/programme counts.
    """
    def __init__(self):
        #Cost parameters
        self.CEF8919 = 284.5/112.5 #[USD/hr]
        self.CEF7019 = 284.5/112.5+3.02 #[USD/hr]
        self.Fmat= 2.25
        self.rer = 62 #[USD/hr] CEF00/CEF89
        self.rmr = 34 #[USD/hr] CEF00/CEF89
        self.rtr = 43 #[USD/hr] CEF00/CEF89
        self.Fdiff = 1.5 #[-]
        self.Fcad = 0.8 #[-]
        self.Nrdte= 6 #[-] nr of test ac, between 2-8
        self.Nst = 2 #[-] nr of static test ac
        self.Fobs = 1 #[-]
        self.Fpror = 0.1 #[-]
        self.Ffinr = 0.05 #[-]
        self.Ftsf = 0.2 #CHECK VALUE!!!!!
        self.Cavionics = 30000000 #CHECK VALUE
        self.Nrr = 0.33 #[-]
        self.Nprogram = 150 #[-]
        self.Nrm = 11/12 #[-]
        self.tpft = 10 #[hrs]
        self.Fftoh = 4.0 #[-]
        self.FfinM = 0.10 #[-]
class ParConvLst(object):
    """Unit-conversion factors (multiply the imperial value to get SI)."""
    def __init__(self):
        self.ft2m = 0.3048 #[m per ft]
        self.lbs2kg = 0.453592 #[kg per lb]
        self.mile2m = 1609.34 #[m per mile]
        self.gallon2L = 3.78541 #[L per US gallon]
        self.kts2ms = 0.514444444 #[(m/s) per knot]
class ParSensitivityAnalysis(object):
    """Resolution settings for the sensitivity analysis (contour lines,
    colour levels and grid steps along each axis)."""
    def __init__(self):
        self.N_cont_lines = 10
        self.N_colours = 10
        self.X_steps = 10
        self.Y_steps = 10
class Aircraft(object):
    """Top-level aggregate: one instance of every parameter group as an attribute."""
    def __init__(self):
        # LOAD ALL classes
        self.ParPayload = ParPayloadLst()
        self.ParAnFP = ParAnFPLst()
        self.ParCntrl = ParCntrlLst()
        self.ParCostLst = ParCostLst()
        self.ParStruc = ParStrucLst()
        self.ConversTool = ParConvLst()
        self.ParSens = ParSensitivityAnalysis()
| [
"noutvdbos@gmail.com"
] | noutvdbos@gmail.com |
c1a8889953ba8a96272a8c3b6ed202d72807d980 | 53365da025bf6a2b0b9dda4554836836409f65fc | /django_fullstack/semi_restful_tv_shows/main/views.py | 6a2ecdecc361fa3dee8684b594a136391e8c51fb | [] | no_license | jdinthetrees/pythonstack | d837d2cc173cd3f3a3bf6daf984ae0672d7f86e4 | e41ac65ea34a0296260b338d3928f1d81b3725a7 | refs/heads/master | 2023-02-12T06:45:18.486565 | 2021-01-12T21:24:41 | 2021-01-12T21:24:41 | 329,112,787 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,307 | py | from django.shortcuts import render, HttpResponse, redirect
from django.contrib import messages
from .models import Show
def index(request):
print(Show.objects.get(id=6).__dict__)
for show in Show.objects.all():
context = {
'all_the_shows': Show.objects.all(),
}
return render(request, "index.html", context)
def showdescription(request, show_id):
context = {
'this_show': Show.objects.get(id=show_id),
}
return render(request, "showdescription.html", context)
def showedit(request, show_id):
context = {
'this_show': Show.objects.get(id=show_id),
}
return render(request, "showedit.html", context)
def showupdate(request, show_id):
errs = Show.objects.show_validator(request.POST)
if len(errs) > 0:
for msg in errs.values():
messages.error(request, msg)
# for key, value in errs.items():
# messages.error(request, msg)
return redirect(f"/shows/{request.POST['this_show.id']}/edit")
else:
one_show = Show.objects.get(id=request.POST['this_show.id'])
one_show.title = request.POST['title']
one_show.network = request.POST['network']
one_show.description = request.POST['description']
one_show.save()
return redirect(f"/shows/{request.POST['this_show.id']}")
def showdelete(request, show_id):
context = {
'this_show': Show.objects.get(id=show_id),
}
one_show = Show.objects.get(id=request.POST['this_show.id'])
one_show.delete()
return redirect("/shows")
def shownew(request):
return render(request, "showadd.html")
def showadd(request):
print(request.POST)
errs = Show.objects.show_validator(request.POST)
if len(errs) > 0:
for msg in errs.values():
messages.error(request, msg)
# for key, value in errs.items():
# messages.error(request, msg)
return redirect(f"/shows/new")
else:
Show.objects.create(
title=request.POST['title'],
network = request.POST['network'],
release_date = request.POST['release_date'],
description = request.POST['description'],
)
last_show = Show.objects.last().id
return redirect(f"/shows/{last_show}")
# Create your views here.
| [
"jollyjohndang@gmail.com"
] | jollyjohndang@gmail.com |
fe3c331699a0e001fa186a6177c1df7612b048b4 | 0b64d153144478bc87c8e187c54de2faeb660641 | /env/Lib/site-packages/autobahn/websocket/compress_snappy.py | cee3d60ccdb0ca60fe4d33a7ec7db60622e4bcb3 | [
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-other-copyleft",
"GPL-1.0-or-later",
"bzip2-1.0.6",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-newlib-historical",
"OpenSSL",
"Python-2.0",
"TCL",
"LicenseRef-scancode-python-cwi",
"MIT"
] | permissive | YUND4/smartlights | db4d102fd983db355941431553818f243ffd682f | a86d5e68b4e3c72b133a6853ebd4a1ed0f2623d4 | refs/heads/master | 2022-12-24T11:38:06.669241 | 2019-07-12T16:55:01 | 2019-07-12T16:55:01 | 196,614,236 | 0 | 1 | MIT | 2022-12-11T22:42:03 | 2019-07-12T16:52:16 | HTML | UTF-8 | Python | false | false | 16,978 | py | ###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Crossbar.io Technologies GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
from __future__ import absolute_import
import snappy
from autobahn.websocket.compress_base import PerMessageCompressOffer, \
PerMessageCompressOfferAccept, \
PerMessageCompressResponse, \
PerMessageCompressResponseAccept, \
PerMessageCompress
__all__ = (
'PerMessageSnappyMixin',
'PerMessageSnappyOffer',
'PerMessageSnappyOfferAccept',
'PerMessageSnappyResponse',
'PerMessageSnappyResponseAccept',
'PerMessageSnappy',
)
class PerMessageSnappyMixin(object):
"""
Mixin class for this extension.
"""
EXTENSION_NAME = "permessage-snappy"
"""
Name of this WebSocket extension.
"""
class PerMessageSnappyOffer(PerMessageCompressOffer, PerMessageSnappyMixin):
"""
Set of extension parameters for `permessage-snappy` WebSocket extension
offered by a client to a server.
"""
@classmethod
def parse(cls, params):
"""
Parses a WebSocket extension offer for `permessage-snappy` provided by a client to a server.
:param params: Output from :func:`autobahn.websocket.WebSocketProtocol._parseExtensionsHeader`.
:type params: list
:returns: A new instance of :class:`autobahn.compress.PerMessageSnappyOffer`.
:rtype: obj
"""
# extension parameter defaults
accept_no_context_takeover = False
request_no_context_takeover = False
# verify/parse client ("client-to-server direction") parameters of permessage-snappy offer
for p in params:
if len(params[p]) > 1:
raise Exception("multiple occurrence of extension parameter '%s' for extension '%s'" % (p, cls.EXTENSION_NAME))
val = params[p][0]
if p == 'client_no_context_takeover':
# noinspection PySimplifyBooleanCheck
if val is not True:
raise Exception("illegal extension parameter value '%s' for parameter '%s' of extension '%s'" % (val, p, cls.EXTENSION_NAME))
else:
accept_no_context_takeover = True
elif p == 'server_no_context_takeover':
# noinspection PySimplifyBooleanCheck
if val is not True:
raise Exception("illegal extension parameter value '%s' for parameter '%s' of extension '%s'" % (val, p, cls.EXTENSION_NAME))
else:
request_no_context_takeover = True
else:
raise Exception("illegal extension parameter '%s' for extension '%s'" % (p, cls.EXTENSION_NAME))
offer = cls(accept_no_context_takeover,
request_no_context_takeover)
return offer
def __init__(self,
accept_no_context_takeover=True,
request_no_context_takeover=False):
"""
:param accept_no_context_takeover: Iff true, client accepts "no context takeover" feature.
:type accept_no_context_takeover: bool
:param request_no_context_takeover: Iff true, client request "no context takeover" feature.
:type request_no_context_takeover: bool
"""
if type(accept_no_context_takeover) != bool:
raise Exception("invalid type %s for accept_no_context_takeover" % type(accept_no_context_takeover))
self.accept_no_context_takeover = accept_no_context_takeover
if type(request_no_context_takeover) != bool:
raise Exception("invalid type %s for request_no_context_takeover" % type(request_no_context_takeover))
self.request_no_context_takeover = request_no_context_takeover
def get_extension_string(self):
"""
Returns the WebSocket extension configuration string as sent to the server.
:returns: PMCE configuration string.
:rtype: str
"""
pmce_string = self.EXTENSION_NAME
if self.accept_no_context_takeover:
pmce_string += "; client_no_context_takeover"
if self.request_no_context_takeover:
pmce_string += "; server_no_context_takeover"
return pmce_string
def __json__(self):
"""
Returns a JSON serializable object representation.
:returns: JSON serializable representation.
:rtype: dict
"""
return {'extension': self.EXTENSION_NAME,
'accept_no_context_takeover': self.accept_no_context_takeover,
'request_no_context_takeover': self.request_no_context_takeover}
def __repr__(self):
"""
Returns Python object representation that can be eval'ed to reconstruct the object.
:returns: Python string representation.
:rtype: str
"""
return "PerMessageSnappyOffer(accept_no_context_takeover = %s, request_no_context_takeover = %s)" % (self.accept_no_context_takeover, self.request_no_context_takeover)
class PerMessageSnappyOfferAccept(PerMessageCompressOfferAccept, PerMessageSnappyMixin):
"""
Set of parameters with which to accept an `permessage-snappy` offer
from a client by a server.
"""
def __init__(self,
offer,
request_no_context_takeover=False,
no_context_takeover=None):
"""
:param offer: The offer being accepted.
:type offer: Instance of :class:`autobahn.compress.PerMessageSnappyOffer`.
:param request_no_context_takeover: Iff true, server request "no context takeover" feature.
:type request_no_context_takeover: bool
:param no_context_takeover: Override server ("server-to-client direction") context takeover (this must be compatible with offer).
:type no_context_takeover: bool
"""
if not isinstance(offer, PerMessageSnappyOffer):
raise Exception("invalid type %s for offer" % type(offer))
self.offer = offer
if type(request_no_context_takeover) != bool:
raise Exception("invalid type %s for request_no_context_takeover" % type(request_no_context_takeover))
if request_no_context_takeover and not offer.accept_no_context_takeover:
raise Exception("invalid value %s for request_no_context_takeover - feature unsupported by client" % request_no_context_takeover)
self.request_no_context_takeover = request_no_context_takeover
if no_context_takeover is not None:
if type(no_context_takeover) != bool:
raise Exception("invalid type %s for no_context_takeover" % type(no_context_takeover))
if offer.request_no_context_takeover and not no_context_takeover:
raise Exception("invalid value %s for no_context_takeover - client requested feature" % no_context_takeover)
self.no_context_takeover = no_context_takeover
def get_extension_string(self):
"""
Returns the WebSocket extension configuration string as sent to the server.
:returns: PMCE configuration string.
:rtype: str
"""
pmce_string = self.EXTENSION_NAME
if self.offer.request_no_context_takeover:
pmce_string += "; server_no_context_takeover"
if self.request_no_context_takeover:
pmce_string += "; client_no_context_takeover"
return pmce_string
def __json__(self):
"""
Returns a JSON serializable object representation.
:returns: JSON serializable representation.
:rtype: dict
"""
return {'extension': self.EXTENSION_NAME,
'offer': self.offer.__json__(),
'request_no_context_takeover': self.request_no_context_takeover,
'no_context_takeover': self.no_context_takeover}
def __repr__(self):
"""
Returns Python object representation that can be eval'ed to reconstruct the object.
:returns: Python string representation.
:rtype: str
"""
return "PerMessageSnappyAccept(offer = %s, request_no_context_takeover = %s, no_context_takeover = %s)" % (self.offer.__repr__(), self.request_no_context_takeover, self.no_context_takeover)
class PerMessageSnappyResponse(PerMessageCompressResponse, PerMessageSnappyMixin):
"""
Set of parameters for `permessage-snappy` responded by server.
"""
@classmethod
def parse(cls, params):
"""
Parses a WebSocket extension response for `permessage-snappy` provided by a server to a client.
:param params: Output from :func:`autobahn.websocket.WebSocketProtocol._parseExtensionsHeader`.
:type params: list
:returns: A new instance of :class:`autobahn.compress.PerMessageSnappyResponse`.
:rtype: obj
"""
client_no_context_takeover = False
server_no_context_takeover = False
for p in params:
if len(params[p]) > 1:
raise Exception("multiple occurrence of extension parameter '%s' for extension '%s'" % (p, cls.EXTENSION_NAME))
val = params[p][0]
if p == 'client_no_context_takeover':
# noinspection PySimplifyBooleanCheck
if val is not True:
raise Exception("illegal extension parameter value '%s' for parameter '%s' of extension '%s'" % (val, p, cls.EXTENSION_NAME))
else:
client_no_context_takeover = True
elif p == 'server_no_context_takeover':
# noinspection PySimplifyBooleanCheck
if val is not True:
raise Exception("illegal extension parameter value '%s' for parameter '%s' of extension '%s'" % (val, p, cls.EXTENSION_NAME))
else:
server_no_context_takeover = True
else:
raise Exception("illegal extension parameter '%s' for extension '%s'" % (p, cls.EXTENSION_NAME))
response = cls(client_no_context_takeover,
server_no_context_takeover)
return response
def __init__(self,
client_no_context_takeover,
server_no_context_takeover):
self.client_no_context_takeover = client_no_context_takeover
self.server_no_context_takeover = server_no_context_takeover
def __json__(self):
"""
Returns a JSON serializable object representation.
:returns: JSON serializable representation.
:rtype: dict
"""
return {'extension': self.EXTENSION_NAME,
'client_no_context_takeover': self.client_no_context_takeover,
'server_no_context_takeover': self.server_no_context_takeover}
def __repr__(self):
"""
Returns Python object representation that can be eval'ed to reconstruct the object.
:returns: Python string representation.
:rtype: str
"""
return "PerMessageSnappyResponse(client_no_context_takeover = %s, server_no_context_takeover = %s)" % (self.client_no_context_takeover, self.server_no_context_takeover)
class PerMessageSnappyResponseAccept(PerMessageCompressResponseAccept, PerMessageSnappyMixin):
"""
Set of parameters with which to accept an `permessage-snappy` response
from a server by a client.
"""
def __init__(self,
response,
no_context_takeover=None):
"""
:param response: The response being accepted.
:type response: Instance of :class:`autobahn.compress.PerMessageSnappyResponse`.
:param no_context_takeover: Override client ("client-to-server direction") context takeover (this must be compatible with response).
:type no_context_takeover: bool
"""
if not isinstance(response, PerMessageSnappyResponse):
raise Exception("invalid type %s for response" % type(response))
self.response = response
if no_context_takeover is not None:
if type(no_context_takeover) != bool:
raise Exception("invalid type %s for no_context_takeover" % type(no_context_takeover))
if response.client_no_context_takeover and not no_context_takeover:
raise Exception("invalid value %s for no_context_takeover - server requested feature" % no_context_takeover)
self.no_context_takeover = no_context_takeover
def __json__(self):
"""
Returns a JSON serializable object representation.
:returns: JSON serializable representation.
:rtype: dict
"""
return {'extension': self.EXTENSION_NAME,
'response': self.response.__json__(),
'no_context_takeover': self.no_context_takeover}
def __repr__(self):
"""
Returns Python object representation that can be eval'ed to reconstruct the object.
:returns: Python string representation.
:rtype: str
"""
return "PerMessageSnappyResponseAccept(response = %s, no_context_takeover = %s)" % (self.response.__repr__(), self.no_context_takeover)
class PerMessageSnappy(PerMessageCompress, PerMessageSnappyMixin):
"""
`permessage-snappy` WebSocket extension processor.
"""
@classmethod
def create_from_response_accept(cls, is_server, accept):
pmce = cls(is_server,
accept.response.server_no_context_takeover,
accept.no_context_takeover if accept.no_context_takeover is not None else accept.response.client_no_context_takeover)
return pmce
@classmethod
def create_from_offer_accept(cls, is_server, accept):
pmce = cls(is_server,
accept.no_context_takeover if accept.no_context_takeover is not None else accept.offer.request_no_context_takeover,
accept.request_no_context_takeover)
return pmce
def __init__(self,
is_server,
server_no_context_takeover,
client_no_context_takeover):
self._is_server = is_server
self.server_no_context_takeover = server_no_context_takeover
self.client_no_context_takeover = client_no_context_takeover
self._compressor = None
self._decompressor = None
def __json__(self):
return {'extension': self.EXTENSION_NAME,
'server_no_context_takeover': self.server_no_context_takeover,
'client_no_context_takeover': self.client_no_context_takeover}
def __repr__(self):
return "PerMessageSnappy(is_server = %s, server_no_context_takeover = %s, client_no_context_takeover = %s)" % (self._is_server, self.server_no_context_takeover, self.client_no_context_takeover)
def start_compress_message(self):
if self._is_server:
if self._compressor is None or self.server_no_context_takeover:
self._compressor = snappy.StreamCompressor()
else:
if self._compressor is None or self.client_no_context_takeover:
self._compressor = snappy.StreamCompressor()
def compress_message_data(self, data):
return self._compressor.add_chunk(data)
def end_compress_message(self):
return ""
def start_decompress_message(self):
if self._is_server:
if self._decompressor is None or self.client_no_context_takeover:
self._decompressor = snappy.StreamDecompressor()
else:
if self._decompressor is None or self.server_no_context_takeover:
self._decompressor = snappy.StreamDecompressor()
def decompress_message_data(self, data):
return self._decompressor.decompress(data)
def end_decompress_message(self):
pass
| [
"syundarivera@gmail.com"
] | syundarivera@gmail.com |
8e324a32f92a18daf3d929c2bada111d0e6ec1de | 89d7bd51638bb3e8ca588062af1a3ec4870efd55 | /Tasks/DmitryKozhemyachenok/classwork1/sam.py | 67d9756d85b736503f30c388de6595853f0e1a1b | [] | no_license | Kori3a/M-PT1-38-21 | 9aae9a0dba9c3d1e218ade99f7e969239f33fbd4 | 2a08cc4ca6166540dc282ffc6103fb7144b1a0cb | refs/heads/main | 2023-07-09T20:21:32.146456 | 2021-08-19T17:49:12 | 2021-08-19T17:49:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 785 | py | import itertools
r={"zero": 0, "one": 1, "two": 2, "three": 3, "four": 4, "five": 5, "six": 6,
"seven": 7, "eight": 8, "nine": 9, "ten": 10, "eleven": 11, "twelve": 12, "thirteen": 13,
"fourteen": 14, "fifteen": 15, "sixteen": 16, "seventeen": 17, "eighteen":18, "nineteen": 19, "twenty": 20}
r=[r[i] for i in input().split()]
print(r)
#избавляемся от повторений и сотрировка
r=[r[0]for r in itertools.groupby(sorted(r))]
print("без повторений",r)
#произведение и сумма
for i in range(len(r)-1):
if i % 2 == 0:
print("проивзедение",r[i]*r[i+1])
else:
print("сумма",r[i]+r[i+1])
#сумма всех нечетных
print("суммавсех",sum([x for x in r if x%2==1]))
| [
"dmitriikozhemyachenok@mail.ru"
] | dmitriikozhemyachenok@mail.ru |
7046f96277b3a24fa4c120d9e42ebb229ccaad4a | fe7763e194be94c402482619c0111fcaca1ef7f6 | /tutorial/snippets/permissions.py | a42b29204436ae53823a6a8aff8bf895527515ec | [
"MIT"
] | permissive | antoniocarlosortiz/django-rest-framework-sample | 1fc8b11af2aa1cacfbbc2c3363e097262eec7aee | 45ff0213b4a74566c8571c498c67adf66b420d3e | refs/heads/master | 2021-01-01T05:18:51.457373 | 2016-04-23T18:28:12 | 2016-04-23T18:28:12 | 56,934,397 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 474 | py | from rest_framework import permissions
class IsOwnerOrReadOnly(permissions.BasePermission):
"""
Custom permission to only allow owners of an object to edit it.
"""
def has_object_permission(self, request, view, obj):
# Read permissions are allowed to any request.
# so we'll always allow GET, HEAD or OPTIONS requests.
if request.method in permissions.SAFE_METHODS:
return True
return obj.owner == request.user
| [
"ortizantoniocarlos@gmail.com"
] | ortizantoniocarlos@gmail.com |
a3441c62140e4ceb7659e27c0851434d9ba88215 | 2272c4d8b34807da78ed419c53bf4325ad8ca289 | /Bariera.py | 93dd573c989711a63be80a2867949c7d4b793f58 | [] | no_license | Konradox/Barrier | c5a26745f909763d364f0edec2cb329ba55e38ff | 015f0bef411a73eb561475960e8100751fbc6c30 | refs/heads/master | 2020-03-27T19:31:16.561492 | 2015-02-10T21:24:01 | 2015-02-10T21:24:01 | 30,615,254 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,624 | py | # -*- coding: utf-8 -*-
__author__ = 'Konrad'
import threading
import time
class myThread(threading.Thread):
threadCounter = 0
barrierCounter = 0
exitCounter = 0
lock = threading.Lock()
cv = threading.Condition(lock)
def __enter__(self):
return self
def __init__(self, threadID, name):
threading.Thread.__init__(self)
self.threadID = threadID
self.name = name
with myThread.lock:
myThread.threadCounter += 1
def run(self):
print(self.name + " is starting.")
time.sleep(self.threadID * 2)
print(self.name + " is ending.")
self.barrier()
print(self.name + " - ended")
@staticmethod
def barrier():
myThread.cv.acquire()
myThread.barrierCounter += 1
while myThread.barrierCounter < myThread.threadCounter:
myThread.cv.wait()
myThread.exitCounter += 1
if myThread.exitCounter >= myThread.threadCounter:
myThread.exitCounter = 0
myThread.barrierCounter = 0
myThread.cv.notify_all()
myThread.cv.release()
def __exit__(self, exc_type, exc_val, exc_tb):
with myThread.lock:
myThread.threadCounter -= 1
with myThread(1, "Thread 1") as t1, myThread(2, "Thread 2") as t2, myThread(3, "Thread 3") as t3:
t1.start()
t2.start()
t3.start()
t1.join()
t2.join()
t3.join()
with myThread(1, "Thread 4") as t1, myThread(2, "Thread 5") as t2, myThread(3, "Thread 6") as t3:
t1.start()
t2.start()
t3.start()
t1.join()
t2.join()
t3.join() | [
"xkonradox@gmail.com"
] | xkonradox@gmail.com |
d30b2d899932c4a3c83284b2c6de91a631b995de | 7f52cfb2fb4f09a14ada450862bab25af19cb151 | /merge_data.py | 5b5b6fcc24e023f854acf30ff74bba31ba11acc2 | [] | no_license | shahumang19/Periocular-Recognition | e63d33e3c3393ba42f9342f6ed0729b1595aedfd | 640b82d31dc79e7bbb46c88433c6561c0f011fa5 | refs/heads/master | 2022-12-17T23:04:33.959009 | 2020-08-11T06:59:50 | 2020-08-11T06:59:50 | 285,555,399 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 735 | py | import os, pickle
import numpy as np
F1 = "data\\leye_features1.pkl"
F2 = "data\\leye_features2.pkl"
F3 = "data\\reye_features1.pkl"
F4 = "data\\reye_features2.pkl"
FILES = [F1, F2, F3, F4]
features, labels = None, None
for file1 in FILES:
with open(file1, "rb") as f1:
data = pickle.load(f1)
if features is None:
features = data["features"]
labels = data["labels"]
else:
features = np.append(features, data["features"], axis=0)
labels = labels + data["labels"]
print(features.shape)
print(len(labels))
fn = "data\\merged_features.pkl"
with open(fn, "wb") as fl:
pickle.dump({"features": features, "labels": labels}, fl)
print(f"{fn} saved...")
| [
"shahumang19@gmail.com"
] | shahumang19@gmail.com |
7d7e17f1be39a1bce373f6aa4892368c83bdc96a | 693ae5945a34ac9487e40c478a1cabb6f4ef7eb6 | /quantum/tests/unit/test_routerserviceinsertion.py | 633629f27ecdfca8ce29f30623a22f723f39806d | [
"Apache-2.0"
] | permissive | yy2008/quantum | 292f7a5cc1c78ce97ba8b5e6211f7bd6dad2a46a | b590f9dd560978ab8cee2da7bee96f29e2f307f7 | refs/heads/master | 2021-01-24T00:54:11.916086 | 2013-02-22T06:16:42 | 2013-02-22T06:16:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,397 | py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest2 as unittest
import webob.exc as webexc
import quantum
from quantum.api import extensions
from quantum.api.v2 import router
from quantum.common import config
from quantum.db.loadbalancer import loadbalancer_db as lb_db
from quantum.db import db_base_plugin_v2
from quantum.db import l3_db
from quantum.db import routedserviceinsertion_db as rsi_db
from quantum.db import routerservicetype_db as rst_db
from quantum.db import servicetype_db as st_db
from quantum.extensions import routedserviceinsertion as rsi
from quantum.extensions import routerservicetype as rst
from quantum.openstack.common import cfg
from quantum.plugins.common import constants
from quantum.tests.unit import test_api_v2
from quantum.tests.unit import testlib_api
from quantum import wsgi
# Short-hands borrowed from the v2 API test helpers.
_uuid = test_api_v2._uuid
_get_path = test_api_v2._get_path
# Colon-separated search path where the quantum extension modules live.
extensions_path = ':'.join(quantum.extensions.__path__)
class RouterServiceInsertionTestPlugin(
    rst_db.RouterServiceTypeDbMixin,
    rsi_db.RoutedServiceInsertionDbMixin,
    st_db.ServiceTypeManager,
    lb_db.LoadBalancerPluginDb,
    l3_db.L3_NAT_db_mixin,
    db_base_plugin_v2.QuantumDbPluginV2):
    """Test plugin combining L3, service-type and loadbalancer support.

    Routers may carry a ``service_type_id`` binding; loadbalancer
    resources (pools, vips, health monitors) may carry a ``router_id``
    binding.  The generic create/get/delete_resource helpers persist
    and expose those bindings on top of the inherited db logic.
    """

    supported_extension_aliases = [
        "router", "router-service-type", "routed-service-insertion",
        "service-type", "lbaas"
    ]

    def create_router(self, context, router):
        """Create a router, persisting its service-type binding if given."""
        with context.session.begin(subtransactions=True):
            router_res = super(RouterServiceInsertionTestPlugin,
                               self).create_router(context, router)
            service_type_id = router['router'].get(rst.SERVICE_TYPE_ID)
            if service_type_id is not None:
                router_res[rst.SERVICE_TYPE_ID] = service_type_id
                self._process_create_router_service_type_id(
                    context, router_res)
        return router_res

    def get_router(self, context, id, fields=None):
        """Return a router dict, augmented with its service_type_id."""
        with context.session.begin(subtransactions=True):
            router_res = super(RouterServiceInsertionTestPlugin,
                               self).get_router(context, id, fields)
            binding = self._get_router_service_type_id_binding(context, id)
            if binding:
                router_res[rst.SERVICE_TYPE_ID] = binding['service_type_id']
        return router_res

    def delete_router(self, context, id):
        """Delete a router and verify its binding went away with it."""
        with context.session.begin(subtransactions=True):
            super(RouterServiceInsertionTestPlugin, self).delete_router(
                context, id)
            # Sanity check for the test: cascading delete must have
            # removed the service-type binding as well.
            binding = self._get_router_service_type_id_binding(context, id)
            if binding:
                raise Exception('Router service-type binding is not deleted')

    def create_resource(self, res, context, resource, model):
        """Create an lb resource ``res`` and record its router binding."""
        with context.session.begin(subtransactions=True):
            parent_create = getattr(
                super(RouterServiceInsertionTestPlugin, self),
                "create_" + res)
            result = parent_create(context, resource)
            router_id = resource[res].get(rsi.ROUTER_ID)
            if router_id is not None:
                result[rsi.ROUTER_ID] = router_id
                self._process_create_resource_router_id(
                    context, result, model)
        return result

    def get_resource(self, res, context, id, fields, model):
        """Return an lb resource dict, augmented with its router_id."""
        parent_get = getattr(super(RouterServiceInsertionTestPlugin, self),
                             "get_" + res)
        result = parent_get(context, id, fields)
        if fields is None or rsi.ROUTER_ID in fields:
            binding = self._get_resource_router_id_binding(
                context, id, model)
            if binding:
                result[rsi.ROUTER_ID] = binding['router_id']
        return result

    def delete_resource(self, res, context, id, model):
        """Delete an lb resource together with its router binding."""
        with context.session.begin(subtransactions=True):
            parent_delete = getattr(
                super(RouterServiceInsertionTestPlugin, self),
                "delete_" + res)
            parent_delete(context, id)
            self._delete_resource_router_id_binding(context, id, model)
        # Sanity check for the test: binding must be gone after delete.
        if self._get_resource_router_id_binding(context, id, model):
            raise Exception(res + "-router binding is not deleted")

    def create_pool(self, context, pool):
        return self.create_resource('pool', context, pool, lb_db.Pool)

    def get_pool(self, context, id, fields=None):
        return self.get_resource('pool', context, id, fields, lb_db.Pool)

    def delete_pool(self, context, id):
        return self.delete_resource('pool', context, id, lb_db.Pool)

    def create_health_monitor(self, context, health_monitor):
        return self.create_resource('health_monitor', context, health_monitor,
                                    lb_db.HealthMonitor)

    def get_health_monitor(self, context, id, fields=None):
        return self.get_resource('health_monitor', context, id, fields,
                                 lb_db.HealthMonitor)

    def delete_health_monitor(self, context, id):
        return self.delete_resource('health_monitor', context, id,
                                    lb_db.HealthMonitor)

    def create_vip(self, context, vip):
        return self.create_resource('vip', context, vip, lb_db.Vip)

    def get_vip(self, context, id, fields=None):
        return self.get_resource(
            'vip', context, id, fields, lb_db.Vip)

    def delete_vip(self, context, id):
        return self.delete_resource('vip', context, id, lb_db.Vip)

    def stats(self, context, pool_id):
        # Statistics are irrelevant for these tests.
        pass
class RouterServiceInsertionTestCase(unittest.TestCase):
    def setUp(self):
        """Wire the test plugin into a fresh API stack for each test."""
        # Dotted path of the test plugin above; it is installed both as
        # core plugin and as service plugin so every extension resolves
        # to the same implementation.
        plugin = (
            "quantum.tests.unit.test_routerserviceinsertion."
            "RouterServiceInsertionTestPlugin"
        )
        # point config file to: quantum/tests/etc/quantum.conf.test
        args = ['--config-file', test_api_v2.etcdir('quantum.conf.test')]
        config.parse(args=args)
        # just stubbing core plugin with LoadBalancer plugin
        cfg.CONF.set_override('core_plugin', plugin)
        cfg.CONF.set_override('service_plugins', [plugin])
        # -1 disables the router quota so tests may create routers freely
        cfg.CONF.set_override('quota_router', -1, group='QUOTAS')
        # Ensure 'stale' patched copies of the plugin are never returned
        quantum.manager.QuantumManager._instance = None
        # Ensure existing ExtensionManager is not used
        ext_mgr = extensions.PluginAwareExtensionManager(
            extensions_path,
            {constants.LOADBALANCER: RouterServiceInsertionTestPlugin()}
        )
        extensions.PluginAwareExtensionManager._instance = ext_mgr
        # Build the WSGI stack fake requests are pushed through.
        router.APIRouter()
        app = config.load_paste_app('extensions_test_app')
        self._api = extensions.ExtensionMiddleware(app, ext_mgr=ext_mgr)
        self._tenant_id = "8c70909f-b081-452d-872b-df48e6c355d1"
        # Look up a pre-loaded service type for the router tests to use.
        res = self._do_request('GET', _get_path('service-types'))
        self._service_type_id = res['service_types'][0]['id']
def tearDown(self):
self._api = None
cfg.CONF.reset()
def _do_request(self, method, path, data=None, params=None, action=None):
content_type = 'application/json'
body = None
if data is not None: # empty dict is valid
body = wsgi.Serializer().serialize(data, content_type)
req = testlib_api.create_request(
path, body, content_type,
method, query_string=params)
res = req.get_response(self._api)
if res.status_code >= 400:
raise webexc.HTTPClientError(detail=res.body, code=res.status_code)
if res.status_code != webexc.HTTPNoContent.code:
return res.json
def _router_create(self, service_type_id=None):
data = {
"router": {
"tenant_id": self._tenant_id,
"name": "test",
"admin_state_up": True,
"service_type_id": service_type_id,
}
}
res = self._do_request('POST', _get_path('routers'), data)
return res['router']
def test_router_create_no_service_type_id(self):
router = self._router_create()
self.assertEqual(router.get('service_type_id'), None)
def test_router_create_with_service_type_id(self):
router = self._router_create(self._service_type_id)
self.assertEqual(router['service_type_id'], self._service_type_id)
def test_router_get(self):
router = self._router_create(self._service_type_id)
res = self._do_request('GET',
_get_path('routers/{0}'.format(router['id'])))
self.assertEqual(res['router']['service_type_id'],
self._service_type_id)
def _test_router_update(self, update_service_type_id):
router = self._router_create(self._service_type_id)
router_id = router['id']
new_name = _uuid()
data = {
"router": {
"name": new_name,
"admin_state_up": router['admin_state_up'],
}
}
if update_service_type_id:
data["router"]["service_type_id"] = _uuid()
with self.assertRaises(webexc.HTTPClientError) as ctx_manager:
res = self._do_request(
'PUT', _get_path('routers/{0}'.format(router_id)), data)
self.assertEqual(ctx_manager.exception.code, 400)
else:
res = self._do_request(
'PUT', _get_path('routers/{0}'.format(router_id)), data)
res = self._do_request(
'GET', _get_path('routers/{0}'.format(router['id'])))
self.assertEqual(res['router']['name'], new_name)
    # Thin wrappers over the shared update driver above.
    def test_router_update_with_service_type_id(self):
        self._test_router_update(True)
    def test_router_update_without_service_type_id(self):
        self._test_router_update(False)
    # Deleting a freshly created router should succeed without error.
    def test_router_delete(self):
        router = self._router_create(self._service_type_id)
        self._do_request(
            'DELETE', _get_path('routers/{0}'.format(router['id'])))
    # Fixture helpers: each _test_<res>_setup prepares the ids the matching
    # _<res>_create helper reads from the test instance.
    def _test_lb_setup(self):
        self._subnet_id = _uuid()
        router = self._router_create(self._service_type_id)
        self._router_id = router['id']
    def _test_pool_setup(self):
        self._test_lb_setup()
    def _test_health_monitor_setup(self):
        self._test_lb_setup()
    def _test_vip_setup(self):
        # VIPs additionally require an existing pool to attach to.
        self._test_pool_setup()
        pool = self._pool_create(self._router_id)
        self._pool_id = pool['id']
def _create_resource(self, res, data):
resp = self._do_request('POST', _get_path('lb/{0}s'.format(res)), data)
return resp[res]
def _pool_create(self, router_id=None):
data = {
"pool": {
"tenant_id": self._tenant_id,
"name": "test",
"protocol": "HTTP",
"subnet_id": self._subnet_id,
"lb_method": "ROUND_ROBIN",
"router_id": router_id
}
}
return self._create_resource('pool', data)
def _pool_update_attrs(self, pool):
uattr = {}
fields = [
'name', 'description', 'lb_method',
'health_monitors', 'admin_state_up'
]
for field in fields:
uattr[field] = pool[field]
return uattr
def _health_monitor_create(self, router_id=None):
data = {
"health_monitor": {
"tenant_id": self._tenant_id,
"type": "HTTP",
"delay": 1,
"timeout": 1,
"max_retries": 1,
"router_id": router_id
}
}
return self._create_resource('health_monitor', data)
def _health_monitor_update_attrs(self, hm):
uattr = {}
fields = ['delay', 'timeout', 'max_retries']
for field in fields:
uattr[field] = hm[field]
return uattr
def _vip_create(self, router_id=None):
data = {
"vip": {
"tenant_id": self._tenant_id,
"name": "test",
"protocol": "HTTP",
"port": 80,
"subnet_id": self._subnet_id,
"pool_id": self._pool_id,
"address": "192.168.1.101",
"connection_limit": 100,
"admin_state_up": True,
"router_id": router_id
}
}
return self._create_resource('vip', data)
def _vip_update_attrs(self, vip):
uattr = {}
fields = [
'name', 'description', 'pool_id', 'connection_limit',
'admin_state_up'
]
for field in fields:
uattr[field] = vip[field]
return uattr
    def _test_resource_create(self, res):
        """Create <res> twice: once without a router (result discarded, just
        proving it succeeds) and once bound to the fixture router."""
        getattr(self, "_test_{0}_setup".format(res))()
        obj = getattr(self, "_{0}_create".format(res))()
        # NOTE(review): the first created object is intentionally overwritten;
        # only the router-bound creation is asserted.
        obj = getattr(self, "_{0}_create".format(res))(self._router_id)
        self.assertEqual(obj['router_id'], self._router_id)
    def _test_resource_update(self, res, update_router_id,
                              update_attr, update_value):
        """Shared driver for lb resource PUT tests.

        When *update_router_id* is True the PUT also tries to change the
        (immutable) router binding and must fail with HTTP 400; otherwise the
        single attribute change is performed and verified via a GET.
        """
        getattr(self, "_test_{0}_setup".format(res))()
        obj = getattr(self, "_{0}_create".format(res))(self._router_id)
        uattrs = getattr(self, "_{0}_update_attrs".format(res))(obj)
        uattrs[update_attr] = update_value
        data = {res: uattrs}
        if update_router_id:
            # uattrs is referenced by data, so this mutates the payload too.
            uattrs['router_id'] = self._router_id
            with self.assertRaises(webexc.HTTPClientError) as ctx_manager:
                newobj = self._do_request(
                    'PUT',
                    _get_path('lb/{0}s/{1}'.format(res, obj['id'])), data)
            self.assertEqual(ctx_manager.exception.code, 400)
        else:
            newobj = self._do_request(
                'PUT',
                _get_path('lb/{0}s/{1}'.format(res, obj['id'])), data)
            # Re-read the resource to confirm persistence of the change.
            updated = self._do_request(
                'GET',
                _get_path('lb/{0}s/{1}'.format(res, obj['id'])))
            self.assertEqual(updated[res][update_attr], update_value)
    def _test_resource_delete(self, res):
        """Delete <res> created both without and with a router binding."""
        getattr(self, "_test_{0}_setup".format(res))()
        obj = getattr(self, "_{0}_create".format(res))()
        self._do_request(
            'DELETE', _get_path('lb/{0}s/{1}'.format(res, obj['id'])))
        # Repeat for a router-bound instance of the same resource type.
        obj = getattr(self, "_{0}_create".format(res))(self._router_id)
        self._do_request(
            'DELETE', _get_path('lb/{0}s/{1}'.format(res, obj['id'])))
    # The tests below exercise the shared _test_resource_* drivers for each
    # load-balancer resource type (pool, health monitor, VIP).
    def test_pool_create(self):
        self._test_resource_create('pool')
    def test_pool_update_with_router_id(self):
        self._test_resource_update('pool', True, 'name', _uuid())
    def test_pool_update_without_router_id(self):
        self._test_resource_update('pool', False, 'name', _uuid())
    def test_pool_delete(self):
        self._test_resource_delete('pool')
    def test_health_monitor_create(self):
        self._test_resource_create('health_monitor')
    def test_health_monitor_update_with_router_id(self):
        self._test_resource_update('health_monitor', True, 'timeout', 2)
    def test_health_monitor_update_without_router_id(self):
        self._test_resource_update('health_monitor', False, 'timeout', 2)
    def test_health_monitor_delete(self):
        self._test_resource_delete('health_monitor')
    def test_vip_create(self):
        self._test_resource_create('vip')
    def test_vip_update_with_router_id(self):
        self._test_resource_update('vip', True, 'name', _uuid())
    def test_vip_update_without_router_id(self):
        self._test_resource_update('vip', False, 'name', _uuid())
    def test_vip_delete(self):
        self._test_resource_delete('vip')
| [
"fank@vmware.com"
] | fank@vmware.com |
8986dc358b507dacb7c4a4416d0697035d394d40 | 9c51b666e9c6db491af0fe67b58dd1335f77088e | /tools/markdownlint-fixer.py | 05d0e8bcb4f9d491480e15c28ff52a5a5084255f | [
"CC-BY-4.0"
] | permissive | undergroundwires/Azure-in-bullet-points | aa15ee480d6fe5a7f057fda03e454e70e5118154 | 9743b1d81a49fa7c28d93a4ced21db30cef28765 | refs/heads/master | 2023-08-31T23:38:30.500859 | 2023-08-18T13:41:37 | 2023-08-18T13:41:37 | 175,283,225 | 1,052 | 470 | CC-BY-4.0 | 2023-08-18T13:41:39 | 2019-03-12T19:31:20 | Python | UTF-8 | Python | false | false | 2,781 | py | '''
Not tested for generic usage. It fixes following lint issues in md files:
MD007 - Unordered list indentation
MD009 - No trailing whitespaces
MD004 - Unordered list style
MD002 - Headings should be surrounded by blank lines
'''
import math, os.path, sys, argparse
# Command-line interface: the only argument is the markdown file to fix.
parser = argparse.ArgumentParser(description="markdownlint fixer")
parser.add_argument('-i',
                    help='File to fix',
                    dest='filename',
                    type=argparse.FileType('r', encoding='UTF-8'),
                    required=True)
args = parser.parse_args()
path = str(args.filename.name)
# Accumulates the fixed lines; written out at the end of the script.
new_lines = []
def count_leading_whitespaces(text):
    """Count how many consecutive space characters *text* starts with."""
    count = 0
    for character in text:
        if character != ' ':
            break
        count += 1
    return count
with open(path, 'r', encoding = 'UTF-8') as file:
    lines = file.readlines()
    for line_index, line in enumerate(lines):
        # Ensure 2 whitespaces are used instead of tabs (MD007 - Unordered list indentation)
        if line.startswith(' '):
            total_white_spaces = count_leading_whitespaces(line)
            line = line.lstrip(' ')
            # Halve the indent: each list level becomes one 2-space unit.
            total_white_spaces = total_white_spaces / 2
            if int(total_white_spaces) != total_white_spaces:
                # Odd indent width: round up and warn so it can be reviewed.
                normalized = math.ceil(total_white_spaces)
                print(f'Bad total white spaces: {str(total_white_spaces)} normalized to {str(normalized)}. Line: "{line}"')
                total_white_spaces = normalized
            total_white_spaces = int(total_white_spaces)
            for i in range(total_white_spaces):
                line = '  ' + line
        # Fix MD009 - No trailing whitespaces
        text_part = line.split('\n')[0].rstrip(' ')
        if line.endswith('\n'):
            line = f'{text_part}\n'
        else:
            line = f'{text_part}'
        # MD004 - Unordered list style: convert '-' bullets to '*',
        # preserving the leading indentation character-for-character.
        if line.lstrip().startswith('-'):
            total_white_spaces = 0
            while line.startswith(' '):
                total_white_spaces += 1
                line = line[1:len(line)]
            line = "*" + line[1:len(line)]
            while total_white_spaces != 0:
                line = ' ' + line
                total_white_spaces -= 1
        # MD002 - Headings should be surrounded by blank lines
        if line_index < len(lines) - 1:
            next_line = lines[line_index + 1].lstrip(' ')
            # Insert a blank line before a following heading, or after the
            # current heading when the next line is not already blank.
            if next_line.startswith('#') and line != '\n':
                line = f'{line}\n'
            else:
                if line.lstrip().startswith("#") and next_line != '\n':
                    line = f'{line}\n'
        new_lines.append(line)
# Write the fixed content next to the input file as "<name>_fixed<ext>".
filename, file_extension = os.path.splitext(path)
# Bug fix: the output path previously hardcoded a "(unknown)" prefix; use the
# input file's base name computed above (which was otherwise unused).
output_path = f'{filename}_fixed{file_extension}'
with open(output_path, 'w', encoding='UTF-8') as fixed_file:
    fixed_file.writelines(new_lines)
"undergroundwires@users.noreply.github.com"
] | undergroundwires@users.noreply.github.com |
c412835e863548366c31fa22434e45e614059113 | 56278a6e508ce1a282270f90f1cd9984edd14965 | /tests/test_validation/_test_utils.py | ae430d81167f643c218fc773e99d0fc4cf3c2974 | [
"MIT"
] | permissive | gc-ss/py-gql | 3d5707938e503dc26addc6340be330c1aeb2aa76 | 5a2d180537218e1c30c65b2a933fb4fe197785ae | refs/heads/master | 2023-04-10T05:21:24.086980 | 2020-04-01T14:18:20 | 2020-04-01T14:18:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,463 | py | # -*- coding: utf-8 -*-
from py_gql._string_utils import dedent
from py_gql.lang import parse
from py_gql.validation import validate_ast
from py_gql.validation.validate import SPECIFIED_RULES, default_validator
def _ensure_list(value):
if isinstance(value, list):
return value
else:
return [value]
def assert_validation_result(
    schema, source, expected_msgs=None, expected_locs=None, checkers=None
):
    """Validate *source* against *schema* and assert the resulting error
    messages (and optionally their node locations) match expectations.

    :param checkers: validation rules to apply; defaults to SPECIFIED_RULES.
    """
    # Prints are here so we can more easily debug when running pytest with -v
    expected_msgs = expected_msgs or []
    expected_locs = expected_locs or []
    print(source)
    result = validate_ast(
        schema,
        parse(dedent(source), allow_type_system=True),
        validators=[
            lambda s, d, v: default_validator(
                s, d, v, validators=(checkers or SPECIFIED_RULES)
            )
        ],
    )
    errors = result.errors
    msgs = [str(err) for err in errors]
    # One location list per error; each error may span several AST nodes.
    locs = [[node.loc for node in err.nodes] for err in errors]
    print(" [msgs] ", msgs)
    print(" [locs] ", locs)
    assert msgs == expected_msgs
    if expected_locs:
        # Expected locations may be given as bare values or lists.
        assert locs == [_ensure_list(l) for l in expected_locs]
def assert_checker_validation_result(
    checker, schema, source, expected_msgs=None, expected_locs=None
):
    """Convenience wrapper: validate *source* with a single *checker* rule."""
    assert_validation_result(
        schema,
        source,
        expected_msgs=expected_msgs,
        expected_locs=expected_locs,
        checkers=[checker],
    )
| [
"c.lirsac@gmail.com"
] | c.lirsac@gmail.com |
c2191030e2543c62287b31ad7e253f8767252f1c | 9ae6ce54bf9a2a86201961fdbd5e7b0ec913ff56 | /google/ads/googleads/v9/enums/types/feed_item_quality_approval_status.py | 2b7fc3c81f16e8f0168b1a99e3484c10977c937b | [
"Apache-2.0"
] | permissive | GerhardusM/google-ads-python | 73b275a06e5401e6b951a6cd99af98c247e34aa3 | 676ac5fcb5bec0d9b5897f4c950049dac5647555 | refs/heads/master | 2022-07-06T19:05:50.932553 | 2022-06-17T20:41:17 | 2022-06-17T20:41:17 | 207,535,443 | 0 | 0 | Apache-2.0 | 2019-09-10T10:58:55 | 2019-09-10T10:58:55 | null | UTF-8 | Python | false | false | 1,260 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
# Proto-plus module descriptor: registers this module's messages under the
# v9 enums package so they serialize with the right fully-qualified names.
__protobuf__ = proto.module(
    package="google.ads.googleads.v9.enums",
    marshal="google.ads.googleads.v9",
    manifest={"FeedItemQualityApprovalStatusEnum",},
)
class FeedItemQualityApprovalStatusEnum(proto.Message):
    r"""Container for enum describing possible quality evaluation
    approval statuses of a feed item.
    """
    class FeedItemQualityApprovalStatus(proto.Enum):
        r"""The possible quality evaluation approval statuses of a feed
        item.
        """
        UNSPECIFIED = 0  # No value specified.
        UNKNOWN = 1  # Value not known in this API version.
        APPROVED = 2  # Feed item approved by quality evaluation.
        DISAPPROVED = 3  # Feed item disapproved by quality evaluation.
__all__ = tuple(sorted(__protobuf__.manifest))
| [
"noreply@github.com"
] | GerhardusM.noreply@github.com |
26c80160665467234f7e9f9bac615e1f9a40f3ec | 4cc2b3ba7d7b87a57ba1d4c5532426d5c6676bf1 | /product/migrations/0002_auto_20210609_1621.py | c9e0653bfda3267fe361ec7c2eeed7d81838c27c | [] | no_license | Bhavesh852/Order | 5cba5bb4a02db9fd9d7d2f681bcaa2126c82a3ac | 92383e555f5b3fac4549e45ba1510c8355d6e621 | refs/heads/master | 2023-05-15T05:16:47.297902 | 2021-06-09T14:11:05 | 2021-06-09T14:11:05 | 375,373,577 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 981 | py | # Generated by Django 3.0.8 on 2021-06-09 10:51
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated migration: tightens the numeric field definitions on
    Order/Product and makes Order.created_date a non-editable timestamp."""
    dependencies = [
        ('product', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='order',
            name='created_date',
            field=models.DateTimeField(default=django.utils.timezone.now, editable=False),
        ),
        migrations.AlterField(
            model_name='order',
            name='total_price',
            field=models.DecimalField(decimal_places=2, max_digits=10),
        ),
        migrations.AlterField(
            model_name='order',
            name='unit_price',
            field=models.DecimalField(decimal_places=2, max_digits=6),
        ),
        migrations.AlterField(
            model_name='product',
            name='unit_price',
            field=models.DecimalField(decimal_places=2, max_digits=6),
        ),
    ]
| [
"bchandora60@gmail.com"
] | bchandora60@gmail.com |
4c9659fd162014d48f2f652a5dcc598705d45fbb | 3c56f54ec7e6cade93b5c988cba531e8a2edd453 | /Day5/SF21-Flask-Advanced/wtforms/forms.py | 51a054063629ee6fc5e406bb584f9f4a84aca901 | [
"MIT"
] | permissive | JuJu2181/Learning_Flask | ae6addeedea29df0d19ed272d68f37d945bda6e4 | f9f46cac743323a1821ed214dd512ef337a804f1 | refs/heads/master | 2023-08-25T21:44:41.931317 | 2021-07-15T07:55:07 | 2021-07-15T07:55:07 | 383,996,071 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 646 | py | from flask_wtf import FlaskForm
from wtforms import IntegerField, StringField, SubmitField
from wtforms.validators import InputRequired, NumberRange
class NameForm(FlaskForm):
    """Single-field form: a required full name plus a submit button."""
    name = StringField("Full Name",validators=[InputRequired("Please Add a Name")]) # add validation here
    submit = SubmitField("Submit")
class NameFormSecond(FlaskForm):
    """Form collecting a required full name and an age restricted to 13-60."""
    name = StringField("Full Name", validators=[InputRequired()])
    age = IntegerField(
        "Age",
        validators=[
            InputRequired(),
            NumberRange(min=13, max=60, message="Age must be between 13 and 60."),
        ],
    )
    submit = SubmitField("Submit")
| [
"anishshilpakar8@gmail.com"
] | anishshilpakar8@gmail.com |
b81fbe136d30d1cc561a5d652413f0cc5ab24af0 | 48828cdb69093b261f134c664cad7bdb1bf01b10 | /_config/hash.py | 324c90cd5ea71c75011d82a92978cef837f32323 | [] | no_license | 404neko/PageCat | cb2178efee7f09d5f8d412f42810b4d4a006f559 | 1deda59694624b3048754fdc76aeae286cd8a679 | refs/heads/master | 2021-01-17T17:35:26.372506 | 2016-05-02T10:56:13 | 2016-05-02T10:56:13 | 52,928,350 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 203 | py | import hashlib
# Flask secret key value — presumably consumed by the app setup; confirm usage.
flask_secret_key = '233'
# Module-level salt; note that uhash() takes its salt as an explicit argument.
salt = '2333'
def uhash(password, salt):
    """Return the MD5 hex digest of *password* with *salt* spliced in after
    the first character (i.e. ``password[0] + salt + password[1:]``).

    *password* must be non-empty. The *salt* parameter shadows the
    module-level default of the same name.
    """
    pre_hash = password[0] + salt + password[1:]
    # Encode to bytes first: hashlib rejects str input on Python 3.
    return hashlib.md5(pre_hash.encode('utf-8')).hexdigest()
"404neko@gmail.com"
] | 404neko@gmail.com |
cfdbb2f58717e16d81d08e87d8d9d40cf2a66e9c | a8e5e85cd1d3210f3e404faa35654e02ba52a7af | /py作业/serv/course_actions.py | a7d8fdd99701f1c1546574c2baed2d51e36addc4 | [] | no_license | Lizhupu-0802/gradesystem | 6b56299642d819139bc3a3c9415adbc764cd89d5 | 9c57d112fb840379a91236a278fcbe829609ec61 | refs/heads/main | 2023-02-01T21:44:59.597086 | 2020-12-19T05:47:44 | 2020-12-19T05:47:44 | 321,072,394 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,308 | py | from aiohttp import web
import psycopg2.errors
from urllib.parse import urlencode
from .config import db_block, web_routes
@web_routes.post('/action/course/add')
async def action_course_add(request):
    """Handle the "add course" form POST: insert a new course row.

    Returns 400 when any required field is missing, redirects to /error on a
    duplicate course number (unique violation), and to /course on success.
    """
    params = await request.post()
    param_fields = ['no', 'name', 'score', 'attr', ]
    param_values = {field: params.get(field) for field in param_fields}
    for field, value in param_values.items():
        if value is None:
            return web.HTTPBadRequest(text=f"{field} must be required")
    try:
        with db_block() as db:
            # Parameterized insert: values are bound, not string-formatted.
            db.execute("""
            INSERT INTO course (no, name, score, attr)
            VALUES ( %(no)s, %(name)s, %(score)s, %(attr)s)
            """, param_values)
    except psycopg2.errors.UniqueViolation:
        query = urlencode({
            "message": "已经添加该课程号课程",
            "return": "/course"
        })
        return web.HTTPFound(location=f"/error?{query}")
    return web.HTTPFound(location="/course")
@web_routes.post('/action/course/edit/{sn}')
async def edit_course_action(request):
    """Handle the "edit course" form POST for the course with serial *sn*.

    Returns 400 when the serial or any field is missing, or when sn/score are
    not integers; redirects to /course after a successful update.
    """
    sn = request.match_info.get("sn")
    if sn is None:
        return web.HTTPBadRequest(text="sn, must be required")
    params = await request.post()
    param_fields = ['no', 'name', 'score', 'attr', ]
    param_values = {field: params.get(field) for field in param_fields}
    for field, value in param_values.items():
        if value is None:
            return web.HTTPBadRequest(text=f"{field} must be required")
    try:
        # Coerce numeric inputs; rejects non-integer sn or score.
        param_values['sn'] = int(sn)
        param_values['score'] = int(param_values['score'])
    except ValueError:
        return web.HTTPBadRequest(text="invalid value")
    with db_block() as db:
        db.execute("""
        UPDATE course SET no=%(no)s, name=%(name)s, score=%(score)s, attr=%(attr)s
        WHERE sn = %(sn)s
        """, param_values)
    return web.HTTPFound(location="/course")
@web_routes.post('/action/course/delete/{sn}')
async def delete_course_action(request):
    """Delete the course identified by the ``sn`` path parameter.

    Converted to a coroutine for consistency with the other handlers in this
    module — aiohttp request handlers are expected to be coroutines.
    Redirects to /course afterwards.
    """
    sn = request.match_info.get("sn")
    if sn is None:
        return web.HTTPBadRequest(text="sn must be required")
    with db_block() as db:
        db.execute("""
        DELETE FROM course
        WHERE sn = %(sn)s
        """, dict(sn=sn))
    return web.HTTPFound(location="/course")
| [
"noreply@github.com"
] | Lizhupu-0802.noreply@github.com |
7824a3129a4b0602e416b5f8eb4533f577abb87f | 74b2f9658f3fd47aaf255febf2852263c6ec19e0 | /takerest/src/helpers/test-data-gen/src/lib/vendors/pairs/main/pairwisepy/__init__.py | 388cb2859d0ac5d57e2ce7a71e3661e25c431ed3 | [] | no_license | upworka0/restio | bf47db136e884c72b061962e3973546a4bdd78c7 | 9f22fc6513fefd6c98738b0ea016abfec7c437cb | refs/heads/master | 2023-03-05T00:23:12.243382 | 2021-10-01T01:49:01 | 2021-10-01T01:49:01 | 203,429,128 | 1 | 0 | null | 2023-03-01T19:12:22 | 2019-08-20T18:07:32 | JavaScript | UTF-8 | Python | false | false | 65 | py | # Author: Nagaraj
# Date: 6/5/18
from .pairwise import AllPairs
| [
"upworka0@gmail.com"
] | upworka0@gmail.com |
5cace677c0248096c96561d057f04dc8d7c24177 | c96f37e005b5dc6ef96ef871222a36c6920102e6 | /MotionFunctions.py | d720bf1a8499891d8b35362de3a874f4f0573848 | [] | no_license | shrutisub/robot | eef31ba3c1d396e699ce37fecd45fa1a74859739 | a70e246a078c02d8085b9eed7d8d45acb5a5351f | refs/heads/master | 2021-08-30T07:23:14.971463 | 2017-12-16T18:15:29 | 2017-12-16T18:15:29 | 114,481,019 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,045 | py | import numpy as np
import time
def PTPtoConfiguration(start_cfg, target_cfg, motiontype):
    """PTP path planning
    :param start_cfg: Current axis angle of the robot
    :type start_cfg: array of floats
    :param target_cfg: Target angle of the robot
    :type target_cfg: array of floats
    :param motiontype: Type of motion (asynchronous, synchronous, fully synchronous)
    :type motiontype: int
    :returns: Array containing the axis angles of the interpolated path
    :rtype: matrix of floats
    """
    #TODO: Implement PTP (Replace pseudo implementation with your own code)! Consider the max. velocity and acceleration of each axis
    start_cfg = np.asarray(start_cfg, dtype=float)
    target_cfg = np.asarray(target_cfg, dtype=float)
    steps = 100
    delta = (target_cfg - start_cfg) / float(steps)
    # Vectorized linear interpolation. Fixes the Python-2-only xrange loop and
    # generalizes the previously hard-coded 6 axes to any DOF count.
    trajectory = start_cfg + np.arange(steps)[:, np.newaxis] * delta
    # delta = diff/steps, so step 99 lands short of the target; pin the final
    # waypoint exactly on the target configuration (as the original did).
    trajectory[-1] = target_cfg
    return trajectory
def Move(robot, trajectory):
    """Play back *trajectory* on *robot*, one configuration every 10 ms."""
    for configuration in trajectory:
        robot.SetDOFValues(configuration)
        time.sleep(0.01)
| [
"noreply@github.com"
] | shrutisub.noreply@github.com |
a30f1f5184e240fdb168d288874791f7260c7029 | cdbb11473dc8d34767a5916f9f85cb68eb2ca3f2 | /core/helpers.py | a9cf1b2ad8c669f8aac1b940187d7a46adde3660 | [] | no_license | skyride/evestats | fb2a1a248952771731dcfecadab7d02b1f08cd4b | 4bd2153f65c084b478272513733dcc78f9a0ef98 | refs/heads/master | 2020-03-23T13:50:19.216870 | 2018-08-05T19:19:47 | 2018-08-05T19:19:47 | 141,640,834 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 578 | py | from sde.models import Type
def generate_breadcrumb_trail(marketgroup):
    """Build the breadcrumb trail for a market group or Type.

    The trail always begins with the synthetic "Market" root entry, followed
    by the chain of ancestors ending at *marketgroup* itself.
    """
    def path_to(node):
        # A dict is the synthetic root marker and contributes nothing.
        if isinstance(node, dict):
            return []
        # A Type hangs off its market group.
        if isinstance(node, Type):
            return path_to(node.market_group) + [node]
        # Market groups walk up via .parent until the top level.
        if node.parent is None:
            return [node]
        return path_to(node.parent) + [node]

    root_crumb = {
        "name": "Market",
        "root": True
    }
    return [root_crumb] + path_to(marketgroup)
"adam.findlay@mercurytide.co.uk"
] | adam.findlay@mercurytide.co.uk |
913f443dfbaa96309422d8d52d60c558de4b9c33 | 2f1ac001742fa3e117e1b900a26ac0afb97f8967 | /app.py | 6654b10d9a5d022143a5903a8806571608b92145 | [] | no_license | debugDoug/carrier_limit_dash | f67411df48c24bdb609211542146dfd936800138 | 348830bbb4c4e7f9043008f8b946f97084501830 | refs/heads/master | 2022-11-11T01:11:24.206897 | 2020-06-25T21:45:59 | 2020-06-25T21:45:59 | 275,017,556 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,081 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Jun 21 08:11:43 2020
@author: 1197058
"""
import dash
from dash.dependencies import Input, Output
import dash_core_components as dcc
import dash_html_components as html
import pandas as pd
import numpy as np
import dash_table
import plotly.express as px
#hey
def generate_table(dataframe, max_rows=100):
    """Render up to *max_rows* rows of *dataframe* as a Dash html.Table."""
    header = html.Thead(html.Tr([html.Th(col) for col in dataframe.columns]))
    visible_rows = min(len(dataframe), max_rows)
    body_rows = []
    for row_idx in range(visible_rows):
        cells = [html.Td(dataframe.iloc[row_idx][col]) for col in dataframe.columns]
        body_rows.append(html.Tr(cells))
    return html.Table([header, html.Tbody(body_rows)])
#%% Load in IFCL
# Load the in-force carrier limit extract from the shared drive.
df = pd.read_excel(r'M:\Workspace\Doug\Carrier Limit Tracking\Data\DailyWeekly\062220_Inforced Carrier Limit.xlsx')
# drop Centauri
df = df[df['Carrier Display Name'] != 'Centauri Specialty Insurance Company']
# drop Channel and Lloyds - DBD Slip (100% Channel) (as per Craig) and remove Harco (counted in iPartners report)
#df = df[df['Carrier Display Name'] != 'Argo Re']
#df = df[df['Carrier Display Name'] != 'Ariel']
df = df[df['Carrier Display Name'] != 'Channel-DBD']
df = df[df['Carrier Display Name'] != 'Lloyds - DBD Slip (100% Channel)']
df = df[df['Carrier Display Name'] != 'Harco National Insurance Company']
df = df[df['Carrier Display Name'] != 'Exclude']
# change S4242 Re to Syndicate 4242
df['Carrier Display Name'].replace("S4242 Re", "Syndicate 4242", inplace=True)
#change Exclude to QBE
#df['Carrier Display Name'].replace("Exclude","QBE", inplace=True)
# change Crum & Forster_PBU to Crum and Forster
df['Carrier Display Name'].replace("Crum & Forster_PBU","Crum and Forster", inplace=True)
# change RenRe_PBU to RenRe
df['Carrier Display Name'].replace("RenRe_PBU","RenRe", inplace=True)
# change NF&M and BHSIC to Berkshire Hathaway
df['Carrier Display Name'].replace("NF&M","Berkshire Hathaway", inplace=True)
df['Carrier Display Name'].replace("BHSIC","Berkshire Hathaway", inplace=True)
# change Ariel and Argo Re
df['Carrier Display Name'].replace("Ariel", "Other", inplace=True)
df['Carrier Display Name'].replace("Argo Re", "Other", inplace=True)
# create macrozone column
df['Microzone'].value_counts()
df['Microzone'].isna().sum()
df['Microzone'].fillna('UNKNOWN', inplace=True)
# Map each microzone to its macrozone via the lookup spreadsheet.
microMacro = pd.read_excel(r'M:\Workspace\Doug\Carrier Limit Tracking\Data\DailyWeekly\MicroToMacro.xlsx')
microMacro = dict(zip(microMacro.Microzone, microMacro.Macrozone))
df['Macrozone'] = df['Microzone'].map(microMacro)
df.Macrozone.value_counts()
# Diagnostic: rows whose microzone had no macrozone mapping.
check = df[df['Macrozone'].isna()]
# create segment column
def segment(row):
    """Map a policy row to a business segment (HBU / PBU / MMBU)."""
    harco_carriers = ('Harco National Insurance Company',
                      'Syndicate 2288_Harco Auth Participant')
    if row['Carrier Display Name'] in harco_carriers:
        return 'HBU'
    if row['Source System'] == 'Epicenter':
        return 'PBU'
    return 'MMBU'
df['Segment'] = df.apply(lambda row: segment(row), axis=1)
# print(df['Segment'].value_counts())
# change 2288 HBU name
df['Carrier Display Name'].replace("Syndicate 2288_Harco Auth Participant", "Syndicate 2288", inplace=True)
# create state col
# State code is taken from the first two characters of the microzone string.
df['State'] = df.Microzone.str[:2]
df['State'] = df['State'].astype(str)
# create peril col
# States whose exposure is treated as hurricane (wind) vs earthquake peril.
wind_zones = ['AL','FL','GA','HI','LA','MA','MS','NC','NJ','NY','TX']
quake_zones = ['CA','OR','WA']
def peril(row):
    """Classify a policy row into a peril bucket from its policy number.

    Guards run in sequence, mirroring the legacy numbering scheme; rows that
    match no rule fall through and yield None (left as NaN in the dataframe).
    """
    policy_number = row['Policy Number']
    if policy_number[0] == 'A':  # doesn't appear to exist anymore
        return 'App'
    if policy_number[0] == 'E':  # EQX policies: peril follows the state
        state = row['State']
        if state in wind_zones:
            return 'HU'
        if state in quake_zones:
            return 'EQ'
        # 'E' policies outside both zone lists fall through to the
        # digit-based rules below, matching the original behaviour.
    if policy_number[:2] in ('IQ', 'IC'):  # was QBE Excess / FL-Admitted, now HU
        return 'HU'
    sixth_char = policy_number[5]
    if sixth_char == '0':
        return 'EQ'
    if sixth_char in ('6', '8', '9'):  # 8 was CGL, 9 was AOP; both now HU
        return 'HU'
    return None
df['Peril'] = df.apply(lambda row: peril(row), axis=1)
df['Peril'].value_counts()
# create Month column
# Month-Year is the policy inception month, formatted like "Jun-2020".
df['Month-Year'] = pd.to_datetime(df['Policy Effective Date']).dt.to_period('M')
df['Month-Year'] = df['Month-Year'].dt.strftime('%b-%Y')
# create New/Renewal column
def new_renew(row):
    """Label a policy 'New' when its number ends in '0', else 'Renewal'."""
    return 'New' if row['Policy Number'][-1] == '0' else 'Renewal'
df['New/Renewal'] = df.apply(lambda row: new_renew(row), axis=1)
# print(df['New/Renewal'].value_counts())
### check for duplicates ###
# Check_pol: first 16 characters — presumably the policy number without its
# term suffix, so expiring and renewing terms share one key. TODO confirm.
df['Check_pol'] = df['Policy Number'].str[:16]
# print(df['Check_pol'].nunique())
# print(df['Policy Number'].nunique())
# Count distinct effective dates per base policy; 2 means both terms present.
unique = pd.DataFrame(df.groupby('Check_pol')["Policy Effective Date"].nunique()).reset_index()
# merge grouped back to main df
df = pd.merge(df, unique, how='left', on="Check_pol")
# get df of duplicates (unique policy dates = 2)
duplicates = df[df['Policy Effective Date_y']==2]
# difference b/w expiring and renewing True=2018
duplicates.groupby([(duplicates["Policy Effective Date_x"] >= '2018-01-01') & (duplicates["Policy Effective Date_x"] <= '2019-12-31')])["Carrier Limit"].sum()
# check values and drop duplicates from the df by finding rows with PED_y ==2 AND PED <= '2018-12-31'
duplicates.groupby(duplicates['Policy Effective Date_x'].dt.year)['Policy Effective Date_y'].value_counts()
# Keep only the renewing term: drop the older of each duplicate pair.
df = df.drop(df[(df["Policy Effective Date_y"] == 2) & (df["Policy Effective Date_x"] <= '2019-12-31')].index)
# get expiring/old pol number
def getExpNumber(row):
    """Return the prior-term (expiring) policy number for a renewal row.

    The last two digits of the policy number are the term counter; the
    expiring number is the same base with the counter decremented and
    zero-padded back to two digits. New-business rows return the string
    'None'.
    """
    if row['New/Renewal'] != 'Renewal':
        return 'None'
    policy_number = row['Policy Number']
    # zfill keeps the counter two digits wide; the previous "'0' + str(n)"
    # concatenation produced a three-digit counter for terms >= 11 ('011').
    prior_term = str(int(policy_number[-2:]) - 1).zfill(2)
    return policy_number[:-2] + prior_term
df['Old Policy Number'] = df.apply(getExpNumber, axis=1)
#%% generate some charts as tests
# df2: commercial (non-HBU) inforce limit by carrier/segment for the bar chart.
df2 = df.groupby(['Carrier Display Name','Segment'])[['Carrier Limit']].sum()
df2.reset_index(inplace=True)
df2 = df2[df2['Segment'] != 'HBU']
# graph
fig = px.bar(df2, x='Carrier Display Name', y='Carrier Limit', hover_data=['Segment'],color='Carrier Limit')
# df3: Syndicate 4242 limit over inception date for the line chart.
df3 = df[df['Carrier Display Name'] == 'Syndicate 4242']
df3 = df3.groupby('Policy Effective Date_x')[['Carrier Limit']].sum()
df3.reset_index(inplace=True)
fig2 = px.line(df3, x='Policy Effective Date_x',
               y='Carrier Limit')
# df4: same aggregation as df2 but with thousands-formatted limit strings.
df4 = df.groupby(['Carrier Display Name','Segment'])[['Carrier Limit']].sum()
df4.reset_index(inplace=True)
df4 = df4[df4['Segment'] != 'HBU']
df4['Carrier Limit'] = df4['Carrier Limit'].apply(lambda x : "{:,}".format(x))
df5 = df[df['Segment'] != 'HBU']
df5['Month-Year'] = pd.to_datetime(df5['Month-Year'])
# Carrier x month pivot for the summary DataTable (margins adds an "All" row).
summary_pivot = pd.pivot_table(data=df5, values='Carrier Limit', index=['Carrier Display Name','Segment'],
                               columns='Month-Year', aggfunc=np.sum, fill_value=0, margins=True)
summary_pivot.reset_index(inplace=True)
old_colNames = list(summary_pivot.columns[2:-1])
new_colNames = []
for i in summary_pivot.columns[2:-1]:
    i = i.strftime("%m-%Y")
    new_colNames.append(i)
# Rename the datetime columns to "MM-YYYY" display strings.
col_rename_dict = {i:j for i,j in zip(old_colNames,new_colNames)}
summary_pivot.rename(columns=col_rename_dict, inplace=True)
summary_pivot.columns = summary_pivot.columns.astype(str)
#%%
# CARRIER TAB
df_carrier = df
# NOTE(review): "df_carrier = df" is a reference, not a copy, so the astype
# assignment below also mutates df — confirm this is intended.
df_carrier['Carrier Limit'] = df_carrier['Carrier Limit'].astype(float)
df_carrier = df.pivot_table(values='Carrier Limit', index=['Month-Year','Carrier Display Name'],
                            aggfunc = sum, fill_value=0)
df_carrier.reset_index(inplace=True)
df_carrier['Month-Year'] = pd.to_datetime(df_carrier['Month-Year'])
df_carrier = df_carrier.sort_values(by='Month-Year')
# One line per carrier over inception month, with a range slider + presets.
fig_Carrier = px.line(df_carrier, x='Month-Year', y='Carrier Limit', color='Carrier Display Name')
fig_Carrier.update_xaxes(rangeslider_visible=True,
                         rangeselector=dict(
                             buttons=list([
                                 dict(count=1, label="1m", step="month", stepmode="backward"),
                                 dict(count=6, label="6m", step="month", stepmode="backward"),
                                 dict(count=1, label="YTD", step="year", stepmode="todate"),
                                 dict(count=1, label="1y", step="year", stepmode="backward"),
                                 dict(step="all")
                             ])))
# get sum of limit for each carrier by segment and macrozone
df_agg = df.groupby(['Carrier Display Name','Segment','Macrozone','Peril'])[['Carrier Limit']].sum()
df_agg.reset_index(inplace=True)
df_agg['Carrier Limit'] = df_agg['Carrier Limit'].apply(lambda x : "{:,}".format(x))
# create watch zones column
watch_zones = ['CA Gtr Los Angeles','CA Gtr San Francisco','CA N Central Coast','CA N Coast','WA Washington',
               'OR Oregon','FL Tri County','FL Panhandle','FL Southwest','FL Inland','FL West',
               'FL East Coast','TX N Texas']
df_agg['Watch Zone'] = df_agg['Macrozone'].apply(lambda x: 'Yes' if x in watch_zones else 'No')
# Option lists for the dropdown filters on the Carriers tab.
available_carriers = df_agg['Carrier Display Name'].unique()
available_segs = df_agg['Segment'].unique()
available_mz = df_agg['Macrozone'].unique()
available_wz = df_agg['Watch Zone'].unique()
#%%
# MACROZONE TAB
watch_zones = ['CA Gtr Los Angeles','CA Gtr San Francisco','CA N Central Coast','CA N Coast','WA Washington',
               'OR Oregon','FL Tri County','FL Panhandle','FL Southwest','FL Inland','FL West',
               'FL East Coast','TX N Texas']
# Commercial (non-HBU) limit in the watch-zone macrozones, stacked by carrier.
df_macrozone = df[(df['Macrozone'].isin(watch_zones)) & (df['Segment'] != 'HBU')]
df_macrozone = df_macrozone.groupby(['Macrozone','Microzone','Carrier Display Name','Segment'])[['Carrier Limit']].sum()
df_macrozone.reset_index(inplace=True)
fig_WZ = px.bar(df_macrozone, x='Macrozone', y='Carrier Limit', color='Carrier Display Name', hover_data=['Segment','Microzone'])
#%%
# APP LAYOUT & STRUCTURE
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
app.layout = html.Div(children=[
dcc.Tabs([
dcc.Tab(label='All ICAT', children=[
html.H1(children='Carrier Limit Tracking!'),
html.Div(children='''
Dash: A web application framework for Python.
'''),
html.H2(children='Graph of Commercial Inforce Limit'),
dcc.Graph(
id='example-graph',
figure=fig
),
html.H3(children='Graph of s4242 Limit'),
dcc.Graph(figure=fig2),
html.H4(children='Commercial Carriers Limit'),
#generate_table(df4),
dash_table.DataTable(
id='practice_carrier_table',
columns=[{"name": i, "id": i} for i in summary_pivot.columns],
data = summary_pivot.to_dict('records'),
)
#generate_table(df_carrier)
]),
## CARRIER TAB
dcc.Tab(label='Carriers', children=[
html.H1(children='Carrier Limit'),
# dash_table.DataTable(
# id='practice_carrier_table',
# columns=[{"name": i, "id": i} for i in df_carrier.columns],
# data = df_carrier.to_dict('records'),
# )
#generate_table(df_carrier)
html.H2(children='Carrier Limit by Policy Inception Date'),
html.Div(children='''Shows When Limit was Bound ''' ),
# graph of all carriers and their limit by Pol Inception date
dcc.Graph(figure=fig_Carrier),
# dropdown for carrier name
html.Div(children='''Select Carrier(s)'''),
dcc.Dropdown(
id='carrier_aggs_dd',
options=[{'label':i, 'value':i} for i in available_carriers],
value=[],
multi=True
),
# dropdown for segment
html.Div(children='''Select Segment(s)'''),
dcc.Dropdown(
id='carrier_aggs_dd_segment',
options=[{'label':i, 'value':i} for i in available_segs],
value=[],
multi=True
),
# dropdown for Watch Zones
html.Div(children='''Filter to Watch Zones or Rest'''),
dcc.Dropdown(
id='carrier_aggs_dd_wz',
options=[{'label':i, 'value':i} for i in available_wz],
value=[],
multi=True
),
# dropdown for macrozone
html.Div(children='''Select Macrozone(s)'''),
dcc.Dropdown(
id='carrier_aggs_dd_mz',
options=[{'label':i, 'value':i} for i in available_mz],
value=[],#'CA Gtr Los Angeles','FL Tri County', 'TX N Texas'],
multi=True
),
# dash_table.DataTable(
# id='carrier_aggs',
# columns=[{"name": i, "id": i} for i in df_agg.columns],
# data=df_agg.to_dict('records'),
# )
html.Div(children=''' '''),
dash_table.DataTable(
id='carrier_aggs',
columns = [{"name": i, "id": i,} for i in (df_agg.columns)])
]),
dcc.Tab(label='Macrozones', children=[
html.H1(children='Limit by Macrozone'),
html.Div(children=''' Limit in Watch Zones (PML Drivers) '''),
dcc.Graph(figure=fig_WZ)
])
])
])
@app.callback(Output('carrier_aggs', 'data'), [Input('carrier_aggs_dd', 'value'),
                                               Input('carrier_aggs_dd_segment', 'value'),
                                               Input('carrier_aggs_dd_mz', 'value'),
                                               Input('carrier_aggs_dd_wz', 'value')])
def update_rows(selected_carriers, selected_segs, selected_mz, selected_wz):
    """Filter the carrier aggregation table to the current dropdown selections.

    Each argument is the ``value`` of one multi-select dropdown. An empty (or
    missing) selection means "no filter" for that dimension.

    Returns the filtered ``df_agg`` rows as a list of dicts, the format the
    DataTable's ``data`` property expects.
    """
    # (column, selected values) pairs, applied in the same order as before.
    # Replaces the near-identical dff_agg/dfF_agg/dFF_agg/dFFF_agg chain,
    # which was easy to mis-type, with one loop.
    filters = [
        ('Carrier Display Name', selected_carriers),
        ('Segment', selected_segs),
        ('Macrozone', selected_mz),
        ('Watch Zone', selected_wz),
    ]
    filtered = df_agg
    for column, selected in filters:
        if selected:  # empty list / None -> leave this dimension unfiltered
            filtered = filtered[filtered[column].isin(list(selected))]
    return filtered.to_dict('records')
if __name__ == '__main__':
    # debug=True enables the Dash dev tools and hot reloading; turn it off
    # (and use a production WSGI server) before deploying.
    app.run_server(debug=True)
# colnames='Syndicate 2288'
# test = df[df['Carrier Display Name'].isin(colnames)]
# df4 = df[df['Segment'] != 'HBU']
# df4['Month-Year'] = pd.to_datetime(df4['Month-Year'])
# summary_pivot = pd.pivot_table(data=df4, values='Carrier Limit', index=['Carrier Display Name','Segment'],
# columns='Month-Year', aggfunc=np.sum, fill_value=0)
# print(summary_pivot.style.format('{0:,.2f}').hide_index())
# summary_pivot.info()
# df4.info()
| [
"noreply@github.com"
] | debugDoug.noreply@github.com |
9fa46ad09d0359bf5046afbe894f2fc153a4e49b | a9de0373f6275a9d6c74701f43306842c1cab60e | /singleInstance.py | 325394d63a31d04aa4713cf79e8e8993ed9f4c31 | [] | no_license | carrotshub/ExDemo | 3bf0c2f82192186a81643232457e4b9776c1ed9a | 00a12dac0175726078e78f9868f98391eaf7fb6c | refs/heads/master | 2020-04-13T01:22:08.152531 | 2019-01-09T13:10:39 | 2019-01-09T13:10:39 | 162,871,886 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 751 | py | # -*- coding: utf8 -*-
# Implementing the singleton pattern with decorators.
# Approach 1: a class used as the decorator.
class Single(object):
    """Class-based decorator that turns the decorated class into a singleton.

    The first call constructs the instance; every later call returns that
    same instance and ignores its arguments.
    """

    def __init__(self, cls):
        self._cls = cls        # the wrapped class
        self._instances = None  # the one shared instance, once created

    def __call__(self, *args, **kwargs):
        # Compare against None explicitly: a merely *falsy* instance
        # (e.g. an empty-container subclass) must not be rebuilt.
        if self._instances is None:
            # Forward keyword arguments too (they were previously dropped).
            self._instances = self._cls(*args, **kwargs)
        return self._instances
@Single
class A(object):
    # Decorated with @Single: the name ``A`` is bound to a Single object, so
    # calling A(...) always yields one shared instance of this class.
    def __init__(self,name):
        self.name = name

a = A('zhangsan')
b = A('lisi')   # second call returns the already-created object
print(a is b)   # True: both names refer to the same singleton
# Approach 2: a function used as the decorator.
def single1(cls):
    """Function-based singleton decorator.

    Wraps *cls* so that the first call creates the instance and every later
    call returns that same instance (later arguments are ignored).
    """
    cache = []  # holds the single instance once created (closure state)

    def wrapper(*args, **kwargs):
        if not cache:
            cache.append(cls(*args, **kwargs))
        # Return the instance itself; the original mistakenly returned the
        # cache list (and left a debug print(args) behind).
        return cache[0]

    return wrapper
@single1
class B(object):
    # Decorated with single1: ``B`` is now the wrapper function, and repeated
    # calls all yield the same object.
    def __init__(self, name):
        self.name = name

a1 = B('zhangsam')
b1 = B('BOb')    # second call reuses the object from the first call
print(a1 is b1)  # True: both calls returned the same object
| [
"1398141580@qq.com"
] | 1398141580@qq.com |
a583ce9389fd5048cc2a217b72beb330eb02d48b | c6e042536814dadf338fb9e68246f6c17f1c4cfa | /FourierMy.py | cbf2bbeaadf8ff8e07f0a2d327d3b265dcb58f48 | [] | no_license | NazarovDevelopment/Laba | 0576ed2945ce486e32843f7b25d8afe2683248a8 | 036f2d473673edb6ad5754dc6408b4112d1b045e | refs/heads/master | 2020-04-06T03:35:03.188331 | 2015-04-21T19:42:31 | 2015-04-21T19:42:31 | 33,953,662 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 138 | py | __author__ = 'Alexey'
import numpy as np
def forwardfourier(data):
newfftdata = np.fft.fft(data)
print(data)
return newfftdata | [
"anazarov94@gmail.com"
] | anazarov94@gmail.com |
48dee7176bb8171d5e34ce3b814a3824745949bb | 974c5a4f101d0e6f4dfa5fc2f7c641c9d2bd8184 | /sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2021_03_01/operations/_dedicated_hosts_operations.py | 51cb4faf00fcd17afa1aa62853dffed3a1b72cf3 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | gaoyp830/azure-sdk-for-python | 4816f04c554dcffb7510a6b7044b0c86a2dd32e1 | 1c66defa502b754abcc9e5afa444ca03c609342f | refs/heads/master | 2022-10-20T21:33:44.281041 | 2022-09-29T17:03:13 | 2022-09-29T17:03:13 | 250,355,505 | 0 | 0 | MIT | 2020-03-26T19:42:13 | 2020-03-26T19:42:12 | null | UTF-8 | Python | false | false | 44,268 | py | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, cast, overload
from urllib.parse import parse_qs, urljoin, urlparse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
from ..._serialization import Serializer
from .._vendor import _convert_request, _format_url_section
T = TypeVar("T")
# Signature of the optional ``cls`` response hook accepted by every operation.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]

_SERIALIZER = Serializer()
# Request builders only serialize values codegen already validated; skip the
# extra client-side validation pass.
_SERIALIZER.client_side_validation = False
def build_create_or_update_request(
    resource_group_name: str, host_group_name: str, host_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
    """Assemble the HTTP PUT request for creating or updating a dedicated host."""
    headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    # Caller-supplied overrides win; otherwise fall back to the defaults.
    api_version = kwargs.pop("api_version", params.pop("api-version", "2021-03-01"))  # type: str
    content_type = kwargs.pop("content_type", headers.pop("Content-Type", None))  # type: Optional[str]
    accept = headers.pop("Accept", "application/json")

    # Expand the URL template with percent-encoded path segments.
    template = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts/{hostName}",
    )  # pylint: disable=line-too-long
    url = _format_url_section(
        template,
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, "str"),
        hostGroupName=_SERIALIZER.url("host_group_name", host_group_name, "str"),
        hostName=_SERIALIZER.url("host_name", host_name, "str"),
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, "str"),
    )

    params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")

    if content_type is not None:
        headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
    headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="PUT", url=url, params=params, headers=headers, **kwargs)
def build_update_request(
    resource_group_name: str, host_group_name: str, host_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
    """Assemble the HTTP PATCH request for updating a dedicated host."""
    headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    # Caller-supplied overrides win; otherwise fall back to the defaults.
    api_version = kwargs.pop("api_version", params.pop("api-version", "2021-03-01"))  # type: str
    content_type = kwargs.pop("content_type", headers.pop("Content-Type", None))  # type: Optional[str]
    accept = headers.pop("Accept", "application/json")

    # Expand the URL template with percent-encoded path segments.
    template = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts/{hostName}",
    )  # pylint: disable=line-too-long
    url = _format_url_section(
        template,
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, "str"),
        hostGroupName=_SERIALIZER.url("host_group_name", host_group_name, "str"),
        hostName=_SERIALIZER.url("host_name", host_name, "str"),
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, "str"),
    )

    params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")

    if content_type is not None:
        headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
    headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="PATCH", url=url, params=params, headers=headers, **kwargs)
def build_delete_request(
    resource_group_name: str, host_group_name: str, host_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
    """Assemble the HTTP DELETE request for removing a dedicated host."""
    params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version = kwargs.pop("api_version", params.pop("api-version", "2021-03-01"))  # type: str

    # Expand the URL template with percent-encoded path segments.
    template = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts/{hostName}",
    )  # pylint: disable=line-too-long
    url = _format_url_section(
        template,
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, "str"),
        hostGroupName=_SERIALIZER.url("host_group_name", host_group_name, "str"),
        hostName=_SERIALIZER.url("host_name", host_name, "str"),
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, "str"),
    )

    params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")

    # DELETE carries no body and no Accept header customization.
    return HttpRequest(method="DELETE", url=url, params=params, **kwargs)
def build_get_request(
    resource_group_name: str,
    host_group_name: str,
    host_name: str,
    subscription_id: str,
    *,
    expand: Optional[Union[str, "_models.InstanceViewTypes"]] = None,
    **kwargs: Any
) -> HttpRequest:
    """Assemble the HTTP GET request for retrieving a single dedicated host."""
    headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", params.pop("api-version", "2021-03-01"))  # type: str
    accept = headers.pop("Accept", "application/json")

    # Expand the URL template with percent-encoded path segments.
    template = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts/{hostName}",
    )  # pylint: disable=line-too-long
    url = _format_url_section(
        template,
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, "str"),
        hostGroupName=_SERIALIZER.url("host_group_name", host_group_name, "str"),
        hostName=_SERIALIZER.url("host_name", host_name, "str"),
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, "str"),
    )

    # $expand is optional; inserted before api-version to preserve the
    # original query-parameter ordering.
    if expand is not None:
        params["$expand"] = _SERIALIZER.query("expand", expand, "str")
    params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")

    headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=url, params=params, headers=headers, **kwargs)
def build_list_by_host_group_request(
    resource_group_name: str, host_group_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
    """Assemble the HTTP GET request that lists all hosts in a host group."""
    headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", params.pop("api-version", "2021-03-01"))  # type: str
    accept = headers.pop("Accept", "application/json")

    # Expand the URL template with percent-encoded path segments.
    template = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts",
    )  # pylint: disable=line-too-long
    url = _format_url_section(
        template,
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, "str"),
        hostGroupName=_SERIALIZER.url("host_group_name", host_group_name, "str"),
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, "str"),
    )

    params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=url, params=params, headers=headers, **kwargs)
class DedicatedHostsOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.compute.v2021_03_01.ComputeManagementClient`'s
:attr:`dedicated_hosts` attribute.
"""
models = _models
    def __init__(self, *args, **kwargs):
        """Wire up the pipeline client, configuration and (de)serializers.

        The four collaborators may be passed positionally (client, config,
        serializer, deserializer — in that order) or as keyword arguments.
        """
        input_args = list(args)
        # Consume positional arguments first; fall back to keywords once they run out.
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
    def _create_or_update_initial(
        self,
        resource_group_name: str,
        host_group_name: str,
        host_name: str,
        parameters: Union[_models.DedicatedHost, IO],
        **kwargs: Any
    ) -> _models.DedicatedHost:
        """Issue the initial PUT request backing :meth:`begin_create_or_update`.

        Serializes *parameters* (model object or raw IO/bytes payload), runs
        the request through the client pipeline and deserializes a
        DedicatedHost from a 200 or 201 response; any other status raises
        ``HttpResponseError``.
        """
        # Map well-known statuses to specific exception types; callers may
        # extend/override the mapping via the ``error_map`` keyword.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-03-01"))  # type: str
        content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None))  # type: Optional[str]
        cls = kwargs.pop("cls", None)  # type: ClsType[_models.DedicatedHost]

        content_type = content_type or "application/json"
        _json = None
        _content = None
        # Raw IO/bytes payloads are sent as-is; model objects are serialized to JSON.
        if isinstance(parameters, (IO, bytes)):
            _content = parameters
        else:
            _json = self._serialize.body(parameters, "DedicatedHost")

        request = build_create_or_update_request(
            resource_group_name=resource_group_name,
            host_group_name=host_group_name,
            host_name=host_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            content_type=content_type,
            json=_json,
            content=_content,
            template_url=self._create_or_update_initial.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)  # type: ignore

        pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        # Both 200 (updated) and 201 (created) carry the resource body.
        if response.status_code == 200:
            deserialized = self._deserialize("DedicatedHost", pipeline_response)

        if response.status_code == 201:
            deserialized = self._deserialize("DedicatedHost", pipeline_response)

        if cls:
            # Caller-supplied response hook (``cls``) replaces the default return.
            return cls(pipeline_response, deserialized, {})

        return deserialized

    _create_or_update_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts/{hostName}"}  # type: ignore
    @overload
    def begin_create_or_update(
        self,
        resource_group_name: str,
        host_group_name: str,
        host_name: str,
        parameters: _models.DedicatedHost,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> LROPoller[_models.DedicatedHost]:
        """Create or update a dedicated host .

        :param resource_group_name: The name of the resource group. Required.
        :type resource_group_name: str
        :param host_group_name: The name of the dedicated host group. Required.
        :type host_group_name: str
        :param host_name: The name of the dedicated host . Required.
        :type host_name: str
        :param parameters: Parameters supplied to the Create Dedicated Host. Required.
        :type parameters: ~azure.mgmt.compute.v2021_03_01.models.DedicatedHost
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either DedicatedHost or the result of
         cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2021_03_01.models.DedicatedHost]
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @overload
    def begin_create_or_update(
        self,
        resource_group_name: str,
        host_group_name: str,
        host_name: str,
        parameters: IO,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> LROPoller[_models.DedicatedHost]:
        """Create or update a dedicated host .

        :param resource_group_name: The name of the resource group. Required.
        :type resource_group_name: str
        :param host_group_name: The name of the dedicated host group. Required.
        :type host_group_name: str
        :param host_name: The name of the dedicated host . Required.
        :type host_name: str
        :param parameters: Parameters supplied to the Create Dedicated Host. Required.
        :type parameters: IO
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either DedicatedHost or the result of
         cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2021_03_01.models.DedicatedHost]
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @distributed_trace
    def begin_create_or_update(
        self,
        resource_group_name: str,
        host_group_name: str,
        host_name: str,
        parameters: Union[_models.DedicatedHost, IO],
        **kwargs: Any
    ) -> LROPoller[_models.DedicatedHost]:
        """Create or update a dedicated host .

        :param resource_group_name: The name of the resource group. Required.
        :type resource_group_name: str
        :param host_group_name: The name of the dedicated host group. Required.
        :type host_group_name: str
        :param host_name: The name of the dedicated host . Required.
        :type host_name: str
        :param parameters: Parameters supplied to the Create Dedicated Host. Is either a model type or
         a IO type. Required.
        :type parameters: ~azure.mgmt.compute.v2021_03_01.models.DedicatedHost or IO
        :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
         Default value is None.
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either DedicatedHost or the result of
         cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2021_03_01.models.DedicatedHost]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-03-01"))  # type: str
        content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None))  # type: Optional[str]
        cls = kwargs.pop("cls", None)  # type: ClsType[_models.DedicatedHost]
        polling = kwargs.pop("polling", True)  # type: Union[bool, PollingMethod]
        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
        cont_token = kwargs.pop("continuation_token", None)  # type: Optional[str]
        # Only issue the initial PUT when not resuming from a saved poller state.
        if cont_token is None:
            raw_result = self._create_or_update_initial(  # type: ignore
                resource_group_name=resource_group_name,
                host_group_name=host_group_name,
                host_name=host_name,
                parameters=parameters,
                api_version=api_version,
                content_type=content_type,
                cls=lambda x, y, z: x,
                headers=_headers,
                params=_params,
                **kwargs
            )
        kwargs.pop("error_map", None)

        def get_long_running_output(pipeline_response):
            # Deserialize the terminal LRO response into the model (or the
            # caller's ``cls`` hook result).
            deserialized = self._deserialize("DedicatedHost", pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        # polling=True -> default ARM polling; False -> no polling; anything
        # else is treated as a caller-supplied polling strategy.
        if polling is True:
            polling_method = cast(PollingMethod, ARMPolling(lro_delay, **kwargs))  # type: PollingMethod
        elif polling is False:
            polling_method = cast(PollingMethod, NoPolling())
        else:
            polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output,
            )
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)

    begin_create_or_update.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts/{hostName}"}  # type: ignore
    def _update_initial(
        self,
        resource_group_name: str,
        host_group_name: str,
        host_name: str,
        parameters: Union[_models.DedicatedHostUpdate, IO],
        **kwargs: Any
    ) -> _models.DedicatedHost:
        """Issue the initial PATCH request backing :meth:`begin_update`.

        Serializes *parameters* (update model or raw IO/bytes payload), runs
        the request through the client pipeline and deserializes a
        DedicatedHost from a 200 response; any other status raises
        ``HttpResponseError``.
        """
        # Map well-known statuses to specific exception types; callers may
        # extend/override the mapping via the ``error_map`` keyword.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-03-01"))  # type: str
        content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None))  # type: Optional[str]
        cls = kwargs.pop("cls", None)  # type: ClsType[_models.DedicatedHost]

        content_type = content_type or "application/json"
        _json = None
        _content = None
        # Raw IO/bytes payloads are sent as-is; model objects are serialized to JSON.
        if isinstance(parameters, (IO, bytes)):
            _content = parameters
        else:
            _json = self._serialize.body(parameters, "DedicatedHostUpdate")

        request = build_update_request(
            resource_group_name=resource_group_name,
            host_group_name=host_group_name,
            host_name=host_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            content_type=content_type,
            json=_json,
            content=_content,
            template_url=self._update_initial.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)  # type: ignore

        pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize("DedicatedHost", pipeline_response)

        if cls:
            # Caller-supplied response hook (``cls``) replaces the default return.
            return cls(pipeline_response, deserialized, {})

        return deserialized

    _update_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts/{hostName}"}  # type: ignore
    @overload
    def begin_update(
        self,
        resource_group_name: str,
        host_group_name: str,
        host_name: str,
        parameters: _models.DedicatedHostUpdate,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> LROPoller[_models.DedicatedHost]:
        """Update an dedicated host .

        :param resource_group_name: The name of the resource group. Required.
        :type resource_group_name: str
        :param host_group_name: The name of the dedicated host group. Required.
        :type host_group_name: str
        :param host_name: The name of the dedicated host . Required.
        :type host_name: str
        :param parameters: Parameters supplied to the Update Dedicated Host operation. Required.
        :type parameters: ~azure.mgmt.compute.v2021_03_01.models.DedicatedHostUpdate
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either DedicatedHost or the result of
         cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2021_03_01.models.DedicatedHost]
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @overload
    def begin_update(
        self,
        resource_group_name: str,
        host_group_name: str,
        host_name: str,
        parameters: IO,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> LROPoller[_models.DedicatedHost]:
        """Update an dedicated host .

        :param resource_group_name: The name of the resource group. Required.
        :type resource_group_name: str
        :param host_group_name: The name of the dedicated host group. Required.
        :type host_group_name: str
        :param host_name: The name of the dedicated host . Required.
        :type host_name: str
        :param parameters: Parameters supplied to the Update Dedicated Host operation. Required.
        :type parameters: IO
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either DedicatedHost or the result of
         cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2021_03_01.models.DedicatedHost]
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @distributed_trace
    def begin_update(
        self,
        resource_group_name: str,
        host_group_name: str,
        host_name: str,
        parameters: Union[_models.DedicatedHostUpdate, IO],
        **kwargs: Any
    ) -> LROPoller[_models.DedicatedHost]:
        """Update an dedicated host .

        :param resource_group_name: The name of the resource group. Required.
        :type resource_group_name: str
        :param host_group_name: The name of the dedicated host group. Required.
        :type host_group_name: str
        :param host_name: The name of the dedicated host . Required.
        :type host_name: str
        :param parameters: Parameters supplied to the Update Dedicated Host operation. Is either a
         model type or a IO type. Required.
        :type parameters: ~azure.mgmt.compute.v2021_03_01.models.DedicatedHostUpdate or IO
        :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
         Default value is None.
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either DedicatedHost or the result of
         cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2021_03_01.models.DedicatedHost]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-03-01"))  # type: str
        content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None))  # type: Optional[str]
        cls = kwargs.pop("cls", None)  # type: ClsType[_models.DedicatedHost]
        polling = kwargs.pop("polling", True)  # type: Union[bool, PollingMethod]
        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
        cont_token = kwargs.pop("continuation_token", None)  # type: Optional[str]
        # Only issue the initial PATCH when not resuming from a saved poller state.
        if cont_token is None:
            raw_result = self._update_initial(  # type: ignore
                resource_group_name=resource_group_name,
                host_group_name=host_group_name,
                host_name=host_name,
                parameters=parameters,
                api_version=api_version,
                content_type=content_type,
                cls=lambda x, y, z: x,
                headers=_headers,
                params=_params,
                **kwargs
            )
        kwargs.pop("error_map", None)

        def get_long_running_output(pipeline_response):
            # Deserialize the terminal LRO response into the model (or the
            # caller's ``cls`` hook result).
            deserialized = self._deserialize("DedicatedHost", pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        # polling=True -> default ARM polling; False -> no polling; anything
        # else is treated as a caller-supplied polling strategy.
        if polling is True:
            polling_method = cast(PollingMethod, ARMPolling(lro_delay, **kwargs))  # type: PollingMethod
        elif polling is False:
            polling_method = cast(PollingMethod, NoPolling())
        else:
            polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output,
            )
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)

    begin_update.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts/{hostName}"}  # type: ignore
    def _delete_initial(  # pylint: disable=inconsistent-return-statements
        self, resource_group_name: str, host_group_name: str, host_name: str, **kwargs: Any
    ) -> None:
        """Issue the initial DELETE request backing :meth:`begin_delete`.

        Accepts 200/202/204; any other status raises ``HttpResponseError``.
        Returns ``None`` (or the caller's ``cls`` hook result).
        """
        # Map well-known statuses to specific exception types; callers may
        # extend/override the mapping via the ``error_map`` keyword.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-03-01"))  # type: str
        cls = kwargs.pop("cls", None)  # type: ClsType[None]

        request = build_delete_request(
            resource_group_name=resource_group_name,
            host_group_name=host_group_name,
            host_name=host_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            template_url=self._delete_initial.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)  # type: ignore

        pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )

        response = pipeline_response.http_response

        # 200/202/204 are all valid delete outcomes (sync, accepted, no content).
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})

    _delete_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts/{hostName}"}  # type: ignore
@distributed_trace
def begin_delete(
    self, resource_group_name: str, host_group_name: str, host_name: str, **kwargs: Any
) -> LROPoller[None]:
    """Delete a dedicated host.

    :param resource_group_name: The name of the resource group. Required.
    :type resource_group_name: str
    :param host_group_name: The name of the dedicated host group. Required.
    :type host_group_name: str
    :param host_name: The name of the dedicated host. Required.
    :type host_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
     operation to not poll, or pass in your own initialized polling object for a personal polling
     strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    :return: An instance of LROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[None]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-03-01"))  # type: str
    cls = kwargs.pop("cls", None)  # type: ClsType[None]
    polling = kwargs.pop("polling", True)  # type: Union[bool, PollingMethod]
    lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
    cont_token = kwargs.pop("continuation_token", None)  # type: Optional[str]

    # No saved state: fire the initial DELETE request now.
    if cont_token is None:
        raw_result = self._delete_initial(  # type: ignore
            resource_group_name=resource_group_name,
            host_group_name=host_group_name,
            host_name=host_name,
            api_version=api_version,
            cls=lambda x, y, z: x,  # keep the raw pipeline response for the poller
            headers=_headers,
            params=_params,
            **kwargs
        )
    kwargs.pop("error_map", None)

    def get_long_running_output(pipeline_response):  # pylint: disable=inconsistent-return-statements
        # DELETE produces no body; only call the custom deserializer if given.
        if cls:
            return cls(pipeline_response, None, {})

    # Resolve polling strategy: default ARM polling, no-op, or caller-supplied.
    if polling is True:
        polling_method = cast(PollingMethod, ARMPolling(lro_delay, **kwargs))  # type: PollingMethod
    elif polling is False:
        polling_method = cast(PollingMethod, NoPolling())
    else:
        polling_method = polling
    if cont_token:
        # Rehydrate a poller from a previously saved continuation token.
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output,
        )
    return LROPoller(self._client, raw_result, get_long_running_output, polling_method)

begin_delete.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts/{hostName}"}  # type: ignore
@distributed_trace
def get(
    self,
    resource_group_name: str,
    host_group_name: str,
    host_name: str,
    expand: Optional[Union[str, "_models.InstanceViewTypes"]] = None,
    **kwargs: Any
) -> _models.DedicatedHost:
    """Retrieves information about a dedicated host.

    :param resource_group_name: The name of the resource group. Required.
    :type resource_group_name: str
    :param host_group_name: The name of the dedicated host group. Required.
    :type host_group_name: str
    :param host_name: The name of the dedicated host. Required.
    :type host_name: str
    :param expand: The expand expression to apply on the operation. 'InstanceView' will retrieve
     the list of instance views of the dedicated host. 'UserData' is not supported for dedicated
     host. Known values are: "instanceView" and "userData". Default value is None.
    :type expand: str or ~azure.mgmt.compute.v2021_03_01.models.InstanceViewTypes
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: DedicatedHost or the result of cls(response)
    :rtype: ~azure.mgmt.compute.v2021_03_01.models.DedicatedHost
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    # Map HTTP error statuses to the exception types callers expect.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        304: ResourceNotModifiedError,
    }
    error_map.update(kwargs.pop("error_map", {}) or {})

    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-03-01"))  # type: str
    cls = kwargs.pop("cls", None)  # type: ClsType[_models.DedicatedHost]

    # Build and normalize the GET request for this host resource.
    request = build_get_request(
        resource_group_name=resource_group_name,
        host_group_name=host_group_name,
        host_name=host_name,
        subscription_id=self._config.subscription_id,
        expand=expand,
        api_version=api_version,
        template_url=self.get.metadata["url"],
        headers=_headers,
        params=_params,
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)  # type: ignore

    pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
        request, stream=False, **kwargs
    )

    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    # Deserialize the response body into the model type (or hand it to ``cls``).
    deserialized = self._deserialize("DedicatedHost", pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized

get.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts/{hostName}"}  # type: ignore
@distributed_trace
def list_by_host_group(
    self, resource_group_name: str, host_group_name: str, **kwargs: Any
) -> Iterable["_models.DedicatedHost"]:
    """Lists all of the dedicated hosts in the specified dedicated host group. Use the nextLink
    property in the response to get the next page of dedicated hosts.

    :param resource_group_name: The name of the resource group. Required.
    :type resource_group_name: str
    :param host_group_name: The name of the dedicated host group. Required.
    :type host_group_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either DedicatedHost or the result of cls(response)
    :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2021_03_01.models.DedicatedHost]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-03-01"))  # type: str
    cls = kwargs.pop("cls", None)  # type: ClsType[_models.DedicatedHostListResult]

    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        304: ResourceNotModifiedError,
    }
    error_map.update(kwargs.pop("error_map", {}) or {})

    def prepare_request(next_link=None):
        # First page: build the request from the operation's URL template.
        if not next_link:
            request = build_list_by_host_group_request(
                resource_group_name=resource_group_name,
                host_group_name=host_group_name,
                subscription_id=self._config.subscription_id,
                api_version=api_version,
                template_url=self.list_by_host_group.metadata["url"],
                headers=_headers,
                params=_params,
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)  # type: ignore

        else:
            # make call to next link with the client's api-version
            _parsed_next_link = urlparse(next_link)
            _next_request_params = case_insensitive_dict(parse_qs(_parsed_next_link.query))
            _next_request_params["api-version"] = self._config.api_version
            request = HttpRequest("GET", urljoin(next_link, _parsed_next_link.path), params=_next_request_params)
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)  # type: ignore
            request.method = "GET"
        return request

    def extract_data(pipeline_response):
        # Deserialize one page; return (link to next page or None, items iterator).
        deserialized = self._deserialize("DedicatedHostListResult", pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, iter(list_of_elem)

    def get_next(next_link=None):
        # Fetch a single page, mapping error statuses to typed exceptions.
        request = prepare_request(next_link)

        pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        return pipeline_response

    return ItemPaged(get_next, extract_data)

list_by_host_group.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts"}  # type: ignore
| [
"noreply@github.com"
] | gaoyp830.noreply@github.com |
c45249ab30242a6a3642d0f3e42f0ce53576fc53 | 22d8b1250e0b5178e03ff4843e1f79243d35821e | /areaTools.py | 0f984d5166bd8f3ce473a566711fdfec02894819 | [] | no_license | kmcquighan/Calc-II-Numerical-Methods | 98350b76973cf44d481353cfc981efc8b6875a9c | d22a4eabd327f516308093ffa93701de2bdf8042 | refs/heads/master | 2020-05-27T21:10:54.887481 | 2017-03-24T15:42:07 | 2017-03-24T15:42:07 | 83,606,852 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 12,176 | py | # -*- coding: utf-8 -*-
"""
by Kelly McQuighan 2017
These tools can be used to visualize different numerical integration schemes,
and to compute the associated error. They can also be used to find the order of
various numerical schemes.
"""
from matplotlib import pyplot as plt
from numpy import *
import numpy as np
import scipy.integrate as inte
import scipy.interpolate as interp
import matplotlib as mpl
mpl.rcParams['font.size'] = 17
colors = ['#0058AF','#FF8000','#D682FF','#00693C','#E02102']
styles = ['-',':','-',':','-']
"""
This function is used to make the plot of what the approximation of the integral
looks like for five different numerical methods: Left Riemann sum, Right Riemann sum,
Midpoint rule, Trapezoid rule, and Simpson's rule.
"""
def plots(func, a,b,n,method,ax):
    """Draw ``func`` on axes ``ax`` together with the area pieces used by one
    numerical integration rule on [a, b] with n subintervals.

    ``method`` is one of 'left', 'right', 'midpoint', 'trapezoid', 'simpson'
    (case-insensitive).  The 'simpson' branch assumes n is even -- TODO confirm
    callers enforce that.
    """
    # Axes through the origin, in gray.
    ax.axvline(0.,color='#666666',linewidth=1)
    ax.axhline(0.,color='#666666',linewidth=1)

    # Sample the function on a slightly widened window that always contains 0.
    if (a>0):
        xlarge = np.linspace(0.,1.1*b,1000)
    elif (b<0):
        xlarge = np.linspace(1.1*a,0.,1000)
    else:
        xlarge = np.linspace(1.1*a,1.1*b,1000)
    flarge = func(xlarge)

    ax.plot(xlarge,flarge,'b', linewidth=5)
    ax.set_xlim([xlarge[0], xlarge[999]])

    # Extremes of the sampled values, used below to pick the y-limits.
    smallest = np.min(flarge)
    largest = np.max(flarge)

    dx = (b-a)/n                  # subinterval width
    xs = np.linspace(a,b,n+1)     # partition endpoints
    fxs = func(xs)

    if method.lower()=='left':
        # One rectangle per subinterval, height = f(left endpoint).
        for i in range(n):
            points = [[xs[i], 0], [xs[i], fxs[i]], [xs[i+1], fxs[i]], [xs[i+1],0]]
            poly = plt.Polygon(points, fc='g', edgecolor='g', alpha=0.3, linewidth=3)
            ax.add_patch(poly)
    elif method.lower()=='right':
        # One rectangle per subinterval, height = f(right endpoint).
        for i in range(n):
            points = [[xs[i], 0], [xs[i], fxs[i+1]], [xs[i+1], fxs[i+1]], [xs[i+1],0]]
            poly = plt.Polygon(points, fc='g', edgecolor='g', alpha=0.3, linewidth=3)
            ax.add_patch(poly)
    elif method.lower()=='midpoint':
        # One rectangle per subinterval, height = f(midpoint).
        x = np.linspace(a+dx/2.,b-dx/2.,n)
        fx = func(x)
        for i in range(n):
            points = [[xs[i], 0], [xs[i], fx[i]], [xs[i+1], fx[i]], [xs[i+1],0]]
            poly = plt.Polygon(points, fc='g', edgecolor='g', alpha=0.3, linewidth=3)
            ax.add_patch(poly)
    elif method.lower()=='trapezoid':
        # One trapezoid per subinterval, joining f at both endpoints.
        for i in range(n):
            points = [[xs[i], 0], [xs[i], fxs[i]], [xs[i+1], fxs[i+1]], [xs[i+1],0]]
            poly = plt.Polygon(points, fc='g', edgecolor='g', alpha=0.3, linewidth=3)
            ax.add_patch(poly)
    elif method.lower()=='simpson':
        # note: this implementation keeps the number of grid points the same
        # Shade the parabola through each consecutive triple of nodes.
        for i in range(0,n,2):
            lag = interp.lagrange([xs[i], xs[i+1], xs[i+2]], [fxs[i], fxs[i+1], fxs[i+2]])
            section = np.linspace(xs[i], xs[i+2], 100)
            fsec = lag(section)
            ax.fill_between(section,fsec, facecolor='g', edgecolor='g', alpha=0.3, linewidth=3)
        # NOTE(review): n/2 is a float under Python 3 and np.linspace expects an
        # integer count -- this likely should be n//2.
        x_mid = np.linspace(a+dx,b-dx,n/2)
        vert = np.ones(100)
        # Dashed vertical guides at the interior odd-indexed nodes.
        for the_x in x_mid:
            ax.plot(the_x*vert,np.linspace(0,func(the_x),100),'g--', linewidth=3, alpha=0.5)
    else:
        print ('ERROR: You have not specified a valid method. Please check for typos.')

    # Pick y-limits so that the x-axis is always visible.
    if smallest>0:
        ax.set_ylim([0,1.1*largest])
    elif largest<0:
        ax.set_ylim([1.1*smallest,0])
    else:
        ax.set_ylim([1.1*smallest, 1.1*largest])
    ax.set_xlabel('x')
    ax.set_ylabel('f(x)')
"""
This function is used to make the plot of what the approximation of the integral
looks like for all five different numerical methods: Left Riemann sum, Right Riemann sum,
Midpoint rule, Trapezoid rule, and Simpson's rule.
It also makes a bar chart showing the size of the error for each method so that
the user can quickly see which method is better for a specific fixed value of n.
"""
def plotArea(f,a,b,n):
    """Draw the n-subinterval approximation of the integral of f over [a, b]
    for all five methods side by side, plus a bar chart of their absolute
    errors.

    ``f``, ``a`` and ``b`` arrive as strings and are evaluated here.
    """
    a = eval(a)
    b = eval(b)
    if n<1:
        n=1
        print ('ERROR: n must be greater than zero. setting n=1.')
    func = eval("lambda x: " + f)
    # Reference value from adaptive quadrature.
    I = inte.quad(func, a, b)[0]

    fig = plt.figure(figsize=(15, 6))
    ax1 = fig.add_subplot(2,3,1)
    ax2 = fig.add_subplot(2,3,2)
    ax3 = fig.add_subplot(2,3,3)
    ax4 = fig.add_subplot(2,3,4)
    ax5 = fig.add_subplot(2,3,5)
    ax6 = fig.add_subplot(2,3,6)

    plots(func,a,b,n,"left",ax1)
    plots(func,a,b,n,"right",ax2)
    plots(func,a,b,n,"midpoint",ax3)
    plots(func,a,b,n,"trapezoid",ax4)
    plots(func,a,b,n,"simpson",ax5)

    area1 = evalArea(func,a,b,n,"left")
    area2 = evalArea(func,a,b,n,"right")
    area3 = evalArea(func,a,b,n,"midpoint")
    area4 = evalArea(func,a,b,n,"trapezoid")
    area5 = evalArea(func,a,b,n,"simpson")

    err1 = np.abs(area1-I)
    err2 = np.abs(area2-I)
    err3 = np.abs(area3-I)
    err4 = np.abs(area4-I)
    err5 = np.abs(area5-I)

    # Errors at machine precision are shown as exactly zero in the bar chart.
    if (not check_error(err1)): err1=0;
    if (not check_error(err2)): err2=0;
    if (not check_error(err3)): err3=0;
    if (not check_error(err4)): err4=0;
    if (not check_error(err5)): err5=0;

    ax6.bar(range(5),[err1,err2,err3,err4,err5])
    ax6.set_xticks(range(5))
    ax6.set_xticklabels(['left','right','mid','trap','Simp'],rotation=70)
    ax6.axhline(0,color='k',linewidth=1)

    plt.tight_layout(pad=0.4, w_pad=0.5, h_pad=5.5)
    plt.suptitle('f(x) = '+f+', Area = %.3f, n=%d' %(I,n), fontsize=20, y=1.2)
    ax1.set_title('Method "Left"\n Approximate area:%.5f \n Absolute error: %.2e' %(area1, err1))
    ax2.set_title('Method "Right"\n Approximate area:%.5f \n Absolute error: %.2e' %(area2, err2))
    ax3.set_title('Method "Midpoint"\n Approximate area:%.5f \n Absolute error: %.2e' %(area3, err3))
    ax4.set_title('Method "Trapezoid"\n Approximate area:%.5f \n Absolute error: %.2e' %(area4, err4))
    ax5.set_title('Method "Simpson"\n Approximate area:%.5f \n Absolute error: %.2e' %(area5, err5))
    ax6.set_title('Absolute error for each method\n')
    plt.show()
"""
This method plots the approximation three times, each time doubling the number of
gridpoints used in the approximation. It also computes and outputs how the error
decreases.
"""
def plot3Areas(f, a, b, n, method):
    """Plot one method's approximation with n, 2n and 4n subintervals and
    report how the absolute error shrinks as the grid is refined.

    :param f: function body as a string, e.g. ``"x**2"``.
    :param a, b: integration limits as strings (evaluated here).
    :param n: base number of subintervals.
    :param method: a method name understood by ``plots``/``evalArea``.
    """
    a = eval(a)
    b = eval(b)
    func = eval("lambda x: " + f)
    # Reference value from adaptive quadrature.
    I = inte.quad(func, a, b)[0]

    plt.figure(figsize=(15, 4))
    ax1 = plt.subplot2grid((4, 3), (0, 0), rowspan=3)
    ax2 = plt.subplot2grid((4, 3), (0, 1), rowspan=3)
    ax3 = plt.subplot2grid((4, 3), (0, 2), rowspan=3)
    ax0 = plt.subplot2grid((4, 3), (3, 0), colspan=3)  # text-only strip below
    ax0.axis('off')

    plots(func, a, b, n, method, ax1)
    plots(func, a, b, 2*n, method, ax2)
    plots(func, a, b, 4*n, method, ax3)

    area1 = evalArea(func, a, b, n, method)
    area2 = evalArea(func, a, b, 2*n, method)
    area3 = evalArea(func, a, b, 4*n, method)

    err1 = np.abs(area1 - I)
    err2 = np.abs(area2 - I)
    err3 = np.abs(area3 - I)

    plt.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.0)
    plt.suptitle('f(x) = '+f+', Method: '+method+', Area = %.3f' %I, fontsize=20, y=1.4)
    ax1.set_title('n=%d \n Approximate area:%.5f \n Absolute error: %.2e' %(n,area1, err1))
    ax2.set_title('n=%d \n Approximate area:%.5f \n Absolute error: %.2e' %(2*n,area2, err2))
    ax3.set_title('n=%d \n Approximate area:%.5f \n Absolute error: %.2e' %(4*n,area3, err3))

    if (not check_error(err1)):
        # Bug fix: the original omitted the (x, y) position arguments, so
        # Axes.text() raised a TypeError whenever this branch ran.
        ax0.text(0.0, 1., 'Using method '+method+' to find the area under f(x) = '+f+' returns no errors, so it does not make sense to compare the errors for different numbers of sub-intervals.',
                 ha='left', va='top', fontsize=20, transform=ax0.transAxes)
    else:
        ax0.text(0.0, 1., 'When using method '+method+' to compute the area under f(x) = '+f+':\n'+
                 '- In doubling the number of subintervals from n=%d to n=%d the error was decreased by a factor of %.2f\n' %(n, 2*n,err1/err2)+
                 '- In doubling the number of subintervals from n=%d to n=%d the error was decreased by a factor of %.2f' %(2*n, 4*n,err2/err3),
                 ha='left', va='top', fontsize=20, transform=ax0.transAxes)
    plt.show()
"""
This method is currently unused by the Notebook because it refreshes in an awkward way.
Instead plot one method at a time using plot3Areas and a Dropdown box for the method type.
"""
def plotAllMethods(f, a, b, n):
    """Run the grid-refinement study (``plot3Areas``) for all five methods.

    ``f``, ``a`` and ``b`` are passed through *as strings* because
    ``plot3Areas`` evaluates them itself.  The original converted ``a`` and
    ``b`` to numbers here, so ``plot3Areas`` then called ``eval()`` on a
    float and crashed with a TypeError.
    """
    if n < 1:
        n = 1
        print ('ERROR: n must be greater than zero. setting n=1.')
    plot3Areas(f, a, b, n, "left")
    plot3Areas(f, a, b, n, "right")
    plot3Areas(f, a, b, n, "midpoint")
    plot3Areas(f, a, b, n, "trapezoid")
    plot3Areas(f, a, b, n, "simpson")
    plt.show()
"""
This function approximates the integral using one of five possible numerical methods:
Left Riemann sum, Right Riemann sum, Midpoint Rule, Trapezoid Rule, and Simpson's Rule.
"""
def evalArea(func, a, b, n, method):
    """Approximate the integral of ``func`` over [a, b] with n subintervals.

    :param func: vectorized function of one variable (accepts numpy arrays).
    :param a: lower integration limit.
    :param b: upper integration limit.
    :param n: number of subintervals (Simpson's rule requires n even).
    :param method: one of 'left', 'right', 'midpoint', 'trapezoid',
        'simpson' (case-insensitive).
    :returns: the approximate area.
    :raises ValueError: for an unrecognized method name.
    """
    dx = (b - a) / n
    method = method.lower()
    if method == 'left':
        # Left Riemann sum: sample at the left endpoint of each subinterval.
        x = np.linspace(a, b - dx, n)
        area = np.sum(func(x)) * dx
    elif method == 'right':
        # Right Riemann sum: sample at the right endpoint.
        x = np.linspace(a + dx, b, n)
        area = np.sum(func(x)) * dx
    elif method == 'midpoint':
        # Midpoint rule: sample at subinterval centers.
        x = np.linspace(a + dx/2., b - dx/2., n)
        area = np.sum(func(x)) * dx
    elif method == 'trapezoid':
        # Trapezoid rule: interior nodes weighted 1, endpoints weighted 1/2.
        x = np.linspace(a + dx, b - dx, n - 1)
        area = dx * (0.5*func(a) + 0.5*func(b) + np.sum(func(x)))
    elif method == 'simpson':
        # Simpson's rule: weight 4 on odd interior nodes, 2 on even interior
        # nodes, 1 on the endpoints.  Integer division (n//2) fixes the float
        # count the original passed to np.linspace (a TypeError on Python 3).
        x_mid = np.linspace(a + dx, b - dx, n//2)
        x_trap = np.linspace(a + 2*dx, b - 2*dx, n//2 - 1)
        area = dx/3. * (4*np.sum(func(x_mid)) + func(a) + func(b) + 2*np.sum(func(x_trap)))
    else:
        # The original printed a message and then crashed with an
        # UnboundLocalError on ``return area``; fail fast instead.
        raise ValueError('You have not specified a valid method. Please check for typos.')
    return area
"""
Checks if the error is near machine precision. If so it does not make sense to
compare how the error decreases as teh gridsizes increase. For example, evaluating
a constant function using any of the methods will be exact, so the error should be machine
precision.
"""
def check_error(err):
    """Return True when ``err`` is meaningfully above machine precision.

    The classic 7/3 - 4/3 - 1 trick yields the double-precision machine
    epsilon; anything within 100 epsilon of zero is treated as "no error".
    """
    machine_eps = 7. / 3 - 4. / 3 - 1
    return err > 100 * machine_eps
"""
This function compares all five methods by making a log-log plot of the error.
The slope of each curve is computed and can be used to determine the order of each
numerical method.
"""
def compareMethods(f,a,b):
    """Estimate each method's order of accuracy from a log-log error plot.

    Errors are computed for n = 8, 16, ..., 256 subintervals; for each method
    the slope of log2(error_i / error_1) against the doubling index is fitted
    and printed.  ``f`` and ``a`` arrive as strings and are evaluated here.
    """
    n = 8
    a = eval(a)
    b = float(b)
    # NOTE(review): n is fixed at 8 above, so both n<1 guards below are dead code.
    if n<1:
        n=1
        print ('ERROR: n must be greater than zero. setting n=1.')
    func = eval("lambda x: " + f)
    # Reference value from adaptive quadrature.
    I = inte.quad(func, a, b)[0]

    n = int(n)
    if n<1:
        n=1

    errors = np.ones((5,6)) # methods in rows, errors in columns
    ns = np.zeros((6))
    # Column i holds the errors with 2**i * n subintervals.
    for i in range(6):
        errors[0,i] = np.abs(I-evalArea(func, a,b,2**i*n, 'left'))
        errors[1,i] = np.abs(I-evalArea(func, a,b,2**i*n, 'right'))
        errors[2,i] = np.abs(I-evalArea(func, a,b,2**i*n, 'midpoint'))
        errors[3,i] = np.abs(I-evalArea(func, a,b,2**i*n, 'trapezoid'))
        errors[4,i] = np.abs(I-evalArea(func, a,b,2**i*n, 'simpson'))
        ns[i] = i

    fig = plt.figure(figsize=(10,5))
    ax = fig.gca()
    ax.set_xlabel(r'$\log_2(n/4)$')
    ax.set_ylabel(r'$\log_2(error_i/error_1)$')
    ax.set_title('log-log plot of the error versus the number of subintervals.\n f(x) = ' + f)

    labels = ['Left Riemann', 'Right Riemann', 'Midpoint', 'Trapezoid','Simpson']
    for i in range(5):
        # Skip methods whose coarsest error is already at machine precision;
        # their convergence slope would be meaningless noise.
        if check_error(errors[i,0]):
            log_errors = np.log(errors[i,:] / errors[i,0])/np.log(2)
            plt.plot(ns, log_errors,linewidth=6,color=colors[i],linestyle=styles[i],label=labels[i])
            # The fitted slope approximates -(order of accuracy) of the method.
            poly1 = np.polyfit(ns, log_errors, 1)
            print ('Using method '+labels[i]+' the slope of the log-log plot is %.2f' %poly1[0])
        else:
            print (r'Using method '+labels[i]+' the errors are less than machine precision for n=8 already!')
    plt.legend(loc=3)
    plt.axhline(0.0,0,5,color='k',linewidth=1)
    ax.set_xlim([0,5])
    plt.show()
| [
"noreply@github.com"
] | kmcquighan.noreply@github.com |
5b625b525d193a50347763599fcc98a42ce55dcc | 732c63fef3f138d22eb3c39dff6bdbd797a9bf75 | /insertion-del-traversing.py | 15ad98a768c0f436b7c666c69d05efbe0005dcce | [] | no_license | yagavardhini/class-and-objects | 8bb2c69f35553635c05a74c8b1ab5057bd7c8ee7 | 4235a45c985272cbdc1456c033551e0c77b46a2a | refs/heads/master | 2020-07-02T03:49:23.553965 | 2019-08-15T08:53:37 | 2019-08-15T08:53:37 | 201,407,418 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 837 | py | class Node:
def __init__(self,data):
self.data=data
self.nextt=None
class SLL:
    """Singly linked list supporting insertion and deletion at the head."""

    def __init__(self):
        self.head = None  # first node, or None when the list is empty

    def insertAtBeg(self, data):
        """Insert a new node holding ``data`` at the front of the list."""
        temp = Node(data)
        temp.nextt = self.head
        self.head = temp

    def delAtBeg(self):
        """Remove the first node.

        Bug fix: the original dereferenced ``self.head.nextt`` without
        checking for an empty list, raising AttributeError; deleting from an
        empty list is now a harmless no-op.
        """
        if self.head is None:
            return
        temp = self.head
        self.head = self.head.nextt
        temp.nextt = None  # detach the removed node

    def printList(self):
        """Print the list as 'a ==>b ==>...==>None'."""
        temp = self.head
        while temp != None:
            print(temp.data, "==>", end='')
            temp = temp.nextt
        print("None")
# Interactive menu-driven driver for the singly linked list above.
# Bug fix: the original ended with a stray copyright-sign character, which is
# a syntax error in Python; it has been removed.
obj = SLL()
ch = 0
while ch != 4:
    print("Linked list implementation\n", "1.Insertion at begining 2. Deletion 3. Print Llist 4. Exit")
    ch = int(input())
    if ch == 1:
        print("enter value of the node")
        data = input()
        obj.insertAtBeg(data)
        obj.printList()
    elif ch == 2:
        obj.delAtBeg()
        obj.printList()
    elif ch == 3:
        obj.printList()
| [
"noreply@github.com"
] | yagavardhini.noreply@github.com |
1024bc3664adf719b87b2553b88c5466abfc7d84 | a2311e330da598bca3a38a543f0dc7e1a3656edd | /genetic_algorithm/hw2/run_once.py | 1e5c307bddcc06b217c6658072550eb6fd27f435 | [] | no_license | hw5773/study | 9c980390e814110bef4f4760d40ef0a491758f61 | 973b5a9d99538ee03c8fcaaa3448e761b13fbc99 | refs/heads/master | 2020-04-12T02:30:36.379631 | 2019-11-02T04:01:21 | 2019-11-02T04:01:21 | 55,757,394 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,747 | py | import os
import sys
# Batch driver (Python 2): compiles one GA configuration for the MAX-CUT
# problem and runs the resulting binary several times on the chosen graph.
#
#   argv[1] = output directory for result files
#   argv[2] = graph index into graph_type (0..2)
#   argv[3] = id of the first run (five runs: argv[3] .. argv[3]+4)

graph_type = ["maxcut50.txt", "maxcut100.txt", "maxcut500.txt"]
g = graph_type[int(sys.argv[2])]
graph = "../graph/" + g

# One object file per GA component; each list currently holds a single choice.
representation = ["chromosome.o"]
selection = ["tornament_selection.o"]
crossover = ["reverse_crossover.o"]
mutation = ["general_mutation.o"]
replacement = ["general_replacement.o"]
stop = ["rate_stop_condition.o"]
common = "cost.o maxcut_once.o common.o"
new_dir = sys.argv[1]

# Rebuild all object files from scratch and create the output directory.
os.system("rm -rf *.o")
os.system("mkdir " + sys.argv[1])
os.system("gcc -c *.c")

# Disabled: auto-discovery of additional component object files on disk.
#for root, dirs, files in os.walk("./"):
#    for f in files:
#        if "fixedonepoint_crossover.o" in f:
#            crossover.append(f)
#        elif "decrease_mutation.o" in f:
#            mutation.append(f)
#        elif "general_replacement.o" in f:
#            replacement.append(f)
#        elif "chromosome.o" in f:
#            representation.append(f)
#        elif "stop_condition.o" in f:
#            stop.append(f)
#        elif "roulette_selection.o" in f:
#            selection.append(f)
#        else:
#            continue

print crossover
print mutation
print replacement
print representation
print stop
print selection

# Cartesian product over all component choices (each list has one entry, so a
# single binary is linked), then five runs writing distinct result files.
for b in representation:
    for c in crossover:
        for m in mutation:
            for r in replacement:
                for st in stop:
                    for se in selection:
                        os.system("gcc -o maxcut " + common + " " + b + " " + " " + c + " " + m + " " + r + " " + st + " local_optimization.o " + se)
                        for num in range(int(sys.argv[3]), int(sys.argv[3])+5):
                            # Result-file prefix encodes the component names and run parameters.
                            file_prefix = new_dir + "/" + g.split(".")[0] + "_binary_" + c.split("_")[0] + "_" + m.split("_")[0] + "_" + r.split("_")[0] + "_" + st.split("_")[0] + "_" + se.split("_")[0] + "_" + "ONCE_S0.7_M0.3_N1000_K1_P00.51_P4_KF4_T0.7_C0.5"
                            os.system("./maxcut " + graph + " " + file_prefix + "_" + str(num) + ".res " + file_prefix + "_" + str(num) + ".csv")
"hw5773@gmail.com"
] | hw5773@gmail.com |
fa3e65432481dc50669a709c3740fc9753628e14 | 8f0524fc0171e27a15f4cf5fb3fe48ef2053b40e | /leetcode/DP/edit_distance_formula.py | e9141de529dbc4bde7fdefe5cc4713fae1837147 | [] | no_license | MohammedAlewi/competitive-programming | 51514fa04ba03d14f8e00031ee413d6d74df971f | 960da78bfa956cb1cf79a0cd19553af97a2aa0f3 | refs/heads/master | 2023-02-08T20:25:58.279241 | 2023-02-02T00:11:23 | 2023-02-02T00:11:23 | 222,710,225 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 275 | py | def edit_str(s1,s2,n,m):
if n<0 or m<0:
return max(m,n)+1
elif s1[n]==s2[m]:
return edit_str(s1,s2,n-1,m-1)
else:
return min(edit_str(s1,s2,n-1,m-1),edit_str(s1,s2,n,m-1),edit_str(s1,s2,n-1,m)) +1
print(edit_str("kitten","sitting",5,6)) | [
"rofyalewi@gmail.com"
] | rofyalewi@gmail.com |
0ea60583881a8cf87ab67946e182928fa337e2f7 | e2a465c3fd63519a68d2515e6460e8e7179365ca | /models/Generator/modules.py | cf3282ce178d5f58948d96a16d8b7ac55810324a | [] | no_license | dongyyyyy/Contrast_enhanced_GAN | 4c621aaec5faf280fcd4cd7900e1780892e26f77 | bfa4906441b0799ae3e2490ff763260b191de794 | refs/heads/master | 2023-08-03T13:03:30.997329 | 2021-09-13T10:53:23 | 2021-09-13T10:53:23 | 403,494,868 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,145 | py | import torch.nn as nn
class ResidualBlock(nn.Module):
    """Two pad/conv/norm stages with a ReLU (and optional dropout) in between.

    NOTE(review): despite the name, ``forward`` returns ``conv(x)`` without
    adding the input back in -- there is no skip connection.  Preserved as-is
    to keep behavior identical; confirm against the training code before
    changing it.
    """

    def __init__(self, in_features, norm_layer='instance', kernel_size=3, dropout_p=0., use_bias=False, padding_type='reflect'):
        """
        :param in_features: number of input (and output) channels.
        :param norm_layer: 'instance', 'batch', or anything else for no norm.
        :param kernel_size: convolution kernel size.
        :param dropout_p: dropout probability after the first stage (0 = off).
        :param use_bias: whether the convolutions carry a bias term.
        :param padding_type: 'reflect', 'replicate', or zero padding otherwise.
        """
        super(ResidualBlock, self).__init__()
        self.conv = self.make_blocks(kernel_size=kernel_size, in_features=in_features, padding_type=padding_type, norm_layer=norm_layer, dropout_p=dropout_p, use_bias=use_bias)

    def make_blocks(self, kernel_size=3, in_features=256, padding_type='reflect', norm_layer='instance', dropout_p=0., use_bias=False):
        """Assemble the two-stage block; the duplicated pad/conv/norm sequence
        of the original is factored into ``_pad_conv_norm``."""
        layers = []
        # First stage: pad -> conv -> norm -> ReLU (-> dropout).
        layers += self._pad_conv_norm(kernel_size, in_features, padding_type, norm_layer, use_bias)
        layers += [nn.ReLU(inplace=True)]
        if dropout_p > 0.:
            layers += [nn.Dropout(dropout_p)]
        # Second stage: pad -> conv -> norm (no activation).
        layers += self._pad_conv_norm(kernel_size, in_features, padding_type, norm_layer, use_bias)
        return nn.Sequential(*layers)

    def _pad_conv_norm(self, kernel_size, in_features, padding_type, norm_layer, use_bias):
        """Return one [padding?, conv, norm?] layer list (channel-preserving)."""
        block = []
        conv_p = 0
        if padding_type == 'reflect':
            block += [nn.ReflectionPad2d(1)]
        elif padding_type == 'replicate':
            block += [nn.ReplicationPad2d(1)]
        else:
            # No explicit padding layer: let the convolution zero-pad instead.
            conv_p = 1
        block += [nn.Conv2d(in_channels=in_features, out_channels=in_features, kernel_size=kernel_size, padding=conv_p, bias=use_bias)]
        if norm_layer == 'instance':
            block += [nn.InstanceNorm2d(num_features=in_features, affine=False, track_running_stats=False)]
        elif norm_layer == 'batch':
            block += [nn.BatchNorm2d(num_features=in_features, affine=True, track_running_stats=True)]
        return block

    def forward(self, x):
        # Intentionally no ``+ x`` -- mirrors the original implementation.
        return self.conv(x)
"dongyoung0218@gmail.com"
] | dongyoung0218@gmail.com |
aae84273d14923a5fb83bf35b9b0e6a31ea3d1af | a6270537b5c6d924fa6353a8f0328e07c71a0366 | /numbasltiprovider/urls.py | c12994c32a9c81f0df352e00b8c9d1aa5310f5c7 | [
"Apache-2.0"
] | permissive | oscarsiles/numbas-lti-provider | 9b993175a6b6463a974373c7bdb2c9f38b057b89 | ef7080a2593a800a1b9630c746e4f8667e2ec42d | refs/heads/master | 2020-08-20T03:47:54.399198 | 2020-08-05T13:44:16 | 2020-08-05T13:44:16 | 215,979,486 | 0 | 0 | NOASSERTION | 2019-10-18T08:39:09 | 2019-10-18T08:39:09 | null | UTF-8 | Python | false | false | 519 | py | from django.conf import settings
from django.conf.urls import url, include
from django.conf.urls.static import static
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^', include('numbas_lti.urls')),
]+ static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
try:
import debug_toolbar
urlpatterns = [
url(r'^__debug__/', include(debug_toolbar.urls)),
] + urlpatterns
except ImportError:
pass
| [
"christianperfect@gmail.com"
] | christianperfect@gmail.com |
bb6643f96de8854e77e2affffa552d014fb97e44 | 0d0d63e25b9afbf5b45880c747d758763829d01a | /ramseymodel.py | 51d66cf8b5baed520aeea68bfcd78191a038d54a | [] | no_license | ecotyper/Ramseymodel | 4056867ed0b887d0a434d20a6e35b29f7acff391 | cf4e212df70dd36eb3d31187df3442ef3173c48d | refs/heads/master | 2023-04-27T02:40:35.289041 | 2018-06-18T22:55:59 | 2018-06-18T22:55:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,400 | py | import numpy as np
import matplotlib.pyplot as plt
#パラメータ設定
a = 0.3
b = 0.99
d = 0.25
At = 1.0
numer = eval("(1/b+d-1)") #分子
denom = eval("a*At") #分母
exp = eval("1/(a-1)") #指数
#Δk=0軌跡の式
K = np.arange(0, 4, 0.01)
def Ct(K):
Ct = At * K**a - d * K
return Ct
#Δc=0軌跡の式
Kt = (numer/denom)**exp
#消費と資本の軌跡グラフ作図
plt.plot(K,Ct(K)) #Δc=0軌跡のグラフ
plt.vlines([Kt], 0, 1.0, "blue", linestyle='solid') #Δk=0軌跡のグラフ
plt.ylim(0, 1)
plt.plot(Kt,Ct(Kt),"ro") #双方の軌跡の交点を赤く表示
plt.xlabel("Kt")
plt.ylabel("Ct", rotation=0)
plt.text(2.5, 0.8, "ΔK=0")
plt.text(1.5, 0.6, "ΔC=0")
plt.show()
#パラメータを追加
maxT = 30 #期間
#経路の計算
sC = np.empty(maxT)
sC[0] = 0.65 #消費の初期値
sK = np.empty(maxT)
sK[0] = Kt * 0.5 #資本の初期値
for t in range(maxT-1):
sC[t+1] = b*sC[t] * (a*(sK[t]**a - sC[t]+(1-d)*sK[t])**(a-1)+1-d)
sK[t+1] = sK[t]**a - sC[t] + (1-d)*sK[t]
#資本と消費の経路グラフ作図
t = np.arange(0, 4, 1.0)
plt.plot(sK, sC)
plt.plot(K,Ct(K))
plt.vlines([Kt], 0, 1.5, "blue", linestyle='solid')
plt.plot(Kt,Ct(Kt),"ro")
plt.xlabel("Kt")
plt.ylabel("Ct", rotation=0)
plt.text(2.5, 0.8, "ΔK=0")
plt.text(1.4, 0.55, "ΔC=0")
plt.xlim(xmax=4)
plt.ylim(ymax=1.5)
plt.show() | [
"noreply@github.com"
] | ecotyper.noreply@github.com |
68f4d4b54fede867afc44c57daef7f694fc1ae4f | 6777c78344998500252845572da51a7ddaaf40da | /script/minist_SVM_self.py | 082490901d04fd4bd71ca124f6bc0d1f103d8cbe | [] | no_license | Chokurei/Kajima | 2e1bfd74d997aba5d5218f10bce4655ccec7b5eb | 4d5dd3bd56fe41473b4edb8836cb93d19a02bd9c | refs/heads/master | 2021-04-06T20:18:52.624283 | 2018-03-15T17:47:31 | 2018-03-15T17:47:31 | 125,337,676 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,403 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 2 11:39:49 2018
@author: kaku
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn import datasets, svm, metrics
# Load the 8x8 handwritten-digit images bundled with scikit-learn.
digits = datasets.load_digits()

# Show the first four training images with their true labels.
images_and_labels = list(zip(digits.images, digits.target))
for index, (image, label) in enumerate(images_and_labels[:4]):
    plt.subplot(2, 4, index + 1)
    plt.axis('off')
    plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
    plt.title('Training: %i'%label)

n_samples = len(digits.images)
# Change into m x n (flatten each 8x8 image to a 64-feature row).
data = digits.images.reshape((n_samples, -1))

classifier = svm.SVC(gamma = 0.001)

# We learn the digits on the first half of the digits
classifier.fit(data[:n_samples//2], digits.target[:n_samples//2])

# Evaluate on the held-out second half.
expected = digits.target[n_samples//2:]
predicted = classifier.predict(data[n_samples//2:])

print('Classification report for classifier %s:\n%s\n'
      %(classifier, metrics.classification_report(expected, predicted)))
print('Confusion matrix:\n%s'% metrics.confusion_matrix(expected, predicted))

# Show the first four test images with their predicted labels.
images_and_predictions = list(zip(digits.images[n_samples//2:], predicted))
for index, (image, prediction) in enumerate(images_and_predictions[:4]):
    plt.subplot(2,4,index + 5)
    plt.axis('off')
    plt.imshow(image, cmap = plt.cm.gray_r, interpolation='nearest')
    plt.title('Prediction: %i' %prediction)

plt.show()
| [
"guozhilingty@gmail.com"
] | guozhilingty@gmail.com |
7e0089c1234ae3609da6e14a0889584df5f17339 | 0f1b763baa14a13f91c4d1a56f0b9bea27320aeb | /venv/Lib/site-packages/sqlpharmacy/column_types.py | 0ef7bd3abb5454f6e126aeb1ee0bde14410d3680 | [] | no_license | tangleibest/untitled | 9c775ddf35e34815f0b3d305c42915804e10470c | 6672699da1ad897a660098d07f7ea4c9a70126bd | refs/heads/master | 2020-04-19T02:04:04.740292 | 2019-01-28T03:29:07 | 2019-01-28T03:29:07 | 167,891,262 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 885 | py | # encoding=utf-8
"""
sqlpharmacy.column_types
~~~~~~~~~~~~~~~~~~~~~~
More database column types
"""
import json
from sqlalchemy.types import TypeDecorator, String
class JsonType(TypeDecorator):
    """Column type that stores simple Python data structures as JSON text.

    Values are serialized with :func:`json.dumps` on the way into the
    database and deserialized with :func:`json.loads` on the way out.
    Unlike SQLAlchemy's ``PickleType`` (Python-only), the stored JSON can
    be read back from other languages (Java, PHP, C#, ...).
    """

    impl = String

    def process_bind_param(self, value, dialect):
        """Encode *value* as a JSON string before writing; pass None through."""
        return None if value is None else json.dumps(value)

    def process_result_value(self, value, dialect):
        """Decode the stored JSON string back to Python; pass None through."""
        return None if value is None else json.loads(value)
| [
"1220149242@qq.com"
] | 1220149242@qq.com |
ec45d45f973e36dc0e484153ced1934b3170084f | c672f9ba29546468beccbda2aaf892af41dd453a | /tcp.py | b3e26c6d05b61cc89f9c44ee33749872165b9fb8 | [] | no_license | ArthonKorea/artchatbot | 70773c9064f414f24a4cf34ac5270c60dee03fc8 | e2dcb82b18b202ad33cda8fe49b1079eafee74ef | refs/heads/master | 2021-01-01T06:01:37.182985 | 2017-07-18T13:19:40 | 2017-07-18T13:19:40 | 97,331,990 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 901 | py | import socket
import chatbot

def Main():
    """Run a blocking, single-client TCP chat server on localhost:8000.

    Protocol (one message per recv, '/'-separated command prefix):
    * a message containing "init" gets a fixed Korean greeting reply;
    * any other message is run through the chatbot pipeline
      (Conversation -> Translating_Word -> Answering) and the answer is
      sent back prefixed with "response/".
    The loop ends when the client closes the connection (empty recv).
    """
    host = "localhost"
    port = 8000

    mySocket = socket.socket()
    mySocket.bind((host, port))
    mySocket.listen(1)
    # Blocks until exactly one client connects; no further clients served.
    conn, addr = mySocket.accept()
    print("Connection from: " + str(addr))

    while True:
        # NOTE(review): assumes each recv() returns one complete message;
        # TCP does not guarantee message boundaries — confirm client behaviour.
        data = conn.recv(1024).decode()
        if not data:
            break
        if "init" in str(data):
            # Handshake: reply with the canned greeting and wait for more input.
            conn.send("init/안녕하세요 저는 **라는 작품이에요".encode())
            continue
        print("from connected user: " + str(data))
        Words = bot.Conversation(str(data))
        Translated_Words = bot.Translating_Word(Words)
        Output = bot.Answering(Translated_Words)
        #Output ="안녕"
        data = "response/"+str(Output)
        print("respon: " + str(Output))
        conn.send(data.encode())
    conn.close()

if __name__ == '__main__':
    # `bot` is created here and read as a module-level global inside Main().
    bot = chatbot.bot()
    Main()
| [
"skawls5028@gmail.com"
] | skawls5028@gmail.com |
e66a3429fba0505d6bd22ffa8a883b2d373757e7 | e06bd7bc83b9990702afb2bac9e1b8df4f7cc578 | /record.py | 8da7a9551aa1b98213dec677c74cc9941c0f0d83 | [] | no_license | sohamlanke/Automation | 7a4c649ad05edcae6afd0be9c33a2c1a52b296b1 | 2b00342ada47fdd61c334f9a2ca2ffcb7f7ac768 | refs/heads/main | 2023-07-17T16:30:15.872150 | 2021-08-16T16:44:49 | 2021-08-16T16:44:49 | 396,882,076 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,298 | py |
from pynput import mouse
from pynput import keyboard
from pynput.keyboard import Key
import json
import sys
import time

# Output file for the recorded events; flushed as JSON when Esc is pressed.
f = open("mouselogs.txt", "w")
clicks = []      # recorded mouse/keyboard events, in chronological order
pressTime = 0    # timestamp of the most recent mouse-button press
releaseTime = 0  # timestamp of the most recent mouse-button release


def on_click(x, y, button, ispressed):
    """Mouse handler: on release, log a click event with its position.

    A press/release pair shorter than 0.1 s is recorded with duration 0
    (treated as a fast/"double" click), otherwise with duration 1.
    """
    global pressTime, releaseTime
    isdoublepress = False
    if ispressed:
        pressTime = time.time()
    if not ispressed:
        releaseTime = time.time()
    if ispressed == False:
        diff = abs(pressTime - releaseTime)
        print(diff)
        if diff <= 0.1:
            isdoublepress = True
            print("double clicked")
    if ispressed == False:
        dict = {"mouse": True, "x": x, "y": y, "duration": 0 if isdoublepress else 1}
        clicks.append(dict)
    # print(clicks)


def on_release(keys):
    """Keyboard handler: log printable keys; Esc saves the log and exits."""
    if keys == Key.esc:
        print("Escape pressed, end string in file")
        f.write(json.dumps(clicks))
        f.close()
        sys.exit()
    # FIX: special keys (Shift, Ctrl, arrows, ...) have no `.char` attribute;
    # the original accessed `keys.char` unconditionally, raising
    # AttributeError and killing the listener thread on the first such key.
    char = getattr(keys, "char", None)
    if char is not None:
        print('{0} release'.format(char))
        print(type(char))
        clicks.append({"mouse": False, "keypressed": char})
        print(clicks)


with keyboard.Listener(on_release=on_release) as k_listener, mouse.Listener(on_click=on_click) as m_listener:
    k_listener.join()
    m_listener.join()
| [
"sohamlanke@gmail.com"
] | sohamlanke@gmail.com |
2f85952fcbe3b65f4c744f4e3bb7f9549a012652 | cb4cfcece4bc14f591b038adbc7fadccaf447a1d | /ELEVSTRS.py | d84b11ce6e30ca754fe1115b5248d18d884db818 | [] | no_license | psycho-pomp/CodeChef | ba88cc8e15b3e87d39ad0c4665c6892620c09d22 | 881edddded0bc8820d22f42b94b9959fd6912c88 | refs/heads/master | 2023-03-21T06:46:14.455055 | 2021-03-11T12:07:48 | 2021-03-11T12:07:48 | 275,214,989 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 224 | py | # cook your dish here
from math import sqrt
t=int(input())
for _ in range(t):
n,v1,v2=map(int,input().split())
t1=n/v1
t2=(sqrt(2)*n)/v2
if t2>=t1:
print("Stairs")
else:
print("Elevator")
| [
"noreply@github.com"
] | psycho-pomp.noreply@github.com |
2fb33dbd42a0f97323aa597b2052a24849bb55e3 | 9eb4ce67a7b7be515972be3149de488e6e78a4a7 | /Protótipos/python/Movement (fluid)/game.py | 6e61723e7a2420395fce5b4f3f0837f474af9372 | [] | no_license | wellingtonraam/projetos-jogos | 987f83da0ac82dc7ce6e4575feb7abba141aa939 | 3f76a8fb40d6bc9ca0954e80f225a83fa7c734dd | refs/heads/master | 2020-04-18T10:11:55.729549 | 2020-03-21T16:00:47 | 2020-03-21T16:00:47 | 167,459,892 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,331 | py | import sys, pygame
### author: Wellington Ramos || email: wellingtonraam@gmail.com
### Inicialize the game ###
pygame.init()
### screen size and name ###
size = width, height = 640, 480
screen = pygame.display.set_mode(size)
pygame.display.set_caption("Character movement")
### fps ###
clock = pygame.time.Clock()
### test if playing ###
playing = True
### colors ###
white = 255, 255, 255
purple = 142, 68, 173
### Point x and y of the screen ###
py = 0
px = 0
### functions to draw player ###
def player(x, y):
pygame.draw.rect(screen, purple, pygame.Rect(x, y, 40, 40))
### Listener Keyboard Events ###
pressed = True
controlkey = [False, False, False, False]
key = pygame.K_UP
### Checking which key was pressed and changing the character's x or y point ###
def check_keys_py(py):
if controlkey[0]:
py += 1
if controlkey[1]:
py -= 1
return py
def check_keys_px(px):
if controlkey[2]:
px += 1
if controlkey[3]:
px -= 1
return px
### Game loop ###
while playing:
### Keyboard Event Listener ###
for event in pygame.event.get():
if event.type == pygame.QUIT:
playing = False
sys.exit()
if event.type == pygame.KEYUP:
pressed = False
if event.key == pygame.K_UP:
controlkey[0] = pressed
if event.key == pygame.K_DOWN:
controlkey[1] = pressed
if event.key == pygame.K_LEFT:
controlkey[2] = pressed
if event.key == pygame.K_RIGHT:
controlkey[3] = pressed
if event.type == pygame.KEYDOWN:
pressed = True
if event.key == pygame.K_UP:
controlkey[0] = pressed
if event.key == pygame.K_DOWN:
controlkey[1] = pressed
if event.key == pygame.K_LEFT:
controlkey[2] = pressed
if event.key == pygame.K_RIGHT:
controlkey[3] = pressed
py = check_keys_py(py)
px = check_keys_px(px)
### clean screen ###
screen.fill(white)
### Create player (rectangle for collision) ###
y = (240 - (20 + py))
x = (320 - (20 + px))
### draw player ###
player(x, y)
### Refresh screen ###
pygame.display.flip()
clock.tick(60) | [
"wellingtonraam@gmail.com"
] | wellingtonraam@gmail.com |
8c7ec1217dd7bc22b88439c1f406972e4f2a9006 | 3bae1ed6460064f997264091aca0f37ac31c1a77 | /apps/cloud_api_generator/generatedServer/tasklets/rack/create/rack_create.py | 3e407f24ace515e0974c5621850b08fc380425ff | [] | no_license | racktivity/ext-pylabs-core | 04d96b80ac1942754257d59e91460c3a141f0a32 | 53d349fa6bee0ccead29afd6676979b44c109a61 | refs/heads/master | 2021-01-22T10:33:18.523799 | 2017-06-08T09:09:28 | 2017-06-08T09:09:28 | 54,314,984 | 0 | 0 | null | 2017-06-08T09:09:29 | 2016-03-20T11:55:01 | Python | UTF-8 | Python | false | false | 174 | py | __author__ = 'aserver'
__tags__ = 'rack', 'create'
__priority__ = 3


def main(q, i, params, tags):
    """Tasklet entry point: initialise the call result to an empty string."""
    params.update(result='')


def match(q, i, params, tags):
    """This tasklet applies to every invocation."""
    return True
| [
"devnull@localhost"
] | devnull@localhost |
45e7ad2daa08b47300fc90982729d3862f4652cb | b806560b367d420bf413d7bac002199741e151c9 | /24pro.py | b14d92381fb1bef0388380f391eb1bc9dc2d64e7 | [] | no_license | umadevic/pro.py | dca7beadc62aa042779b165bc73dabb8647dde19 | 5e521aa8591593e56315500b224d2c15c8794fad | refs/heads/master | 2020-06-23T16:08:36.430856 | 2019-07-24T17:42:30 | 2019-07-24T17:42:30 | 198,673,904 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 281 | py | #a
mi=int(input())
n9=2**mi
list1=[]
for i in range(0,n9):
l=bin(i)[2:].zfill(mi)
if(len(l)<len(bin(2**mi-1)[2:])):
list1.append([l.count("1"),l])
else:
list1.append([l.count("1"),l])
list1.sort()
for i in range(len(list1)):
print(list1[i][1])
| [
"noreply@github.com"
] | umadevic.noreply@github.com |
7bde798354f8a27458cd2bd430e193c4242bee55 | 554635c28263a16ae538887187e447e476e09aea | /Node.py | ce1933a2bedb237200b118369ba13e6a1b343484 | [] | no_license | Horofic/AI | 1264fae7401392b2deb885f74dbba8bbb31fbe6c | ecc812956aed4c1bfaa2d876a23bde446b2dbc5e | refs/heads/master | 2021-08-19T21:46:14.047244 | 2017-11-27T13:40:25 | 2017-11-27T13:40:25 | 112,200,500 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 351 | py | from Link import Link
class Node:
    """A summing node in a simple feed-forward network.

    On construction, one incoming ``Link`` with weight 1.0 is created per
    upstream node; ``calculate`` caches the weighted sum of upstream
    activations in ``value``.
    """

    def __init__(self, linkedNodes):
        self.links = [Link(1.0, upstream, self) for upstream in linkedNodes]
        self.value = 0

    def calculate(self):
        """Recompute ``value`` as the weighted sum over incoming links."""
        self.value = sum(link.start.value * link.weightFactor
                         for link in self.links)
| [
"ikweetnie123@hotmail.com"
] | ikweetnie123@hotmail.com |
7be5aa773f2e343fd4b8b491a4269fdf9fff5719 | ca609a94fd8ab33cc6606b7b93f3b3ef201813fb | /2017-feb/1.python/5.data-frames.py | 959658216b9ad8cb6baf46f1063d69277bcff50f | [] | no_license | rajesh2win/datascience | fbc87def2a031f83ffceb4b8d7bbc31e8b2397b2 | 27aca9a6c6dcae3800fabdca4e3d76bd47d933e6 | refs/heads/master | 2021-01-20T21:06:12.488996 | 2017-08-01T04:39:07 | 2017-08-01T04:39:07 | 101,746,310 | 1 | 0 | null | 2017-08-29T09:53:49 | 2017-08-29T09:53:49 | null | UTF-8 | Python | false | false | 726 | py | import pandas as pd
col1 = [10,20,30,40]
col2 = ['abc','def','xyz','pqr']
col3 = [0,0,0,0]
#creating data frame
df1 = pd.DataFrame({'pid':col1,
'pname':col2,'survived':col3})
df1.shape
df1.info()
df1.describe()
df1.head(2)
df1.tail()
df1['col4'] = 0
#access frame content by column/columns
df1.pid
df1['pid']
df1[['pid','pname']]
df1[[0,1]]
#dropping a column
df2 = df1.drop('survived',1)
#slicing rows of frame
df1[0:2]
df1[0:4]
df1[0:]
df1[:2]
df1[-2:]
#filtering rows of dataframe by condition
type(df1.pid > 20)
df1[df1.pid>20]
#selecting subsets of rows and columns
df1.iloc[0:2,]
df1.iloc[[0,2],]
df1.iloc[0:2,0]
df1.iloc[0:2,[0,2]]
df1.loc[0:2,['pname']]
#grouping data in data frames
df1.groupby('id').size()
| [
"info@algorithmica.co.in"
] | info@algorithmica.co.in |
c9c28327fb58a3aa98aeddba5412a1cc8b720423 | 6aa85c2b0cd13c5f1f7bd94a7901baa5c074565e | /PycharmProjects/TCGA_w_bed/1create_mutation_bed.py | 328109e7dea956cc1c73602c8a22049e3d0f3790 | [] | no_license | rrawat/TCGA_w_bed | 204da76115acb6229dfc1dc39a1ceb847a06943c | d09043895a2092611a2d3cee68b6b83910999d7e | refs/heads/master | 2021-01-09T20:57:35.884731 | 2016-07-07T15:43:46 | 2016-07-07T15:43:46 | 62,512,727 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,213 | py | #ene name Accession Number Gene CDS length HGNC ID Sample name ID_sample ID_tumour Primary site Site subtype 1 Site subtype 2 Site subtype 3 Primary histology Histology subtype 1 Histology subtype 2 Histology subtype 3 Genome-wide screen Mutation ID Mutation CDS Mutation AA Mutation Description Mutation zygosity LOH GRCh Mutation genome position Mutation strand SNP Resistance Mutation FATHMM prediction FATHMM score Mutation somatic status Pubmed_PMID ID_STUDY Sample source Tumour origin Age
# Convert COSMIC mutation-export rows (build GRCh38, breast samples) into a
# 6-column BED-like file: chrom, start, end, accession, gene name, sample.
# NOTE(review): file mode "U" was removed in Python 3.11 — this script only
# runs on older interpreters; newline=None is the modern equivalent.
with open("/Users/radhikarawat/PycharmProjects/CosmicMutantExport.tsv","U") as mutation_data_file:
    with open("mutation_bed.bed","w") as f:
        for row in mutation_data_file:
            row=row.split("\t")
            exonCount=row[8]  # read but never used below
            # Keep only rows mapped against genome build GRCh38 (column 22).
            if row[22]=="38":
                # NOTE(review): this is list membership — it matches only if
                # some whole field equals exactly "breast", not a substring;
                # confirm the primary-site column holds the bare value.
                if "breast" in row:
                    # Column 23 holds "chrom:start-end"; prefix with "chr"
                    # for UCSC-style chromosome names.
                    mutchrom,bps = row[23].split(":")
                    mutchrom="chr"+mutchrom
                    mutstart,mutend = bps.split("-")
                    name=row[1]
                    gene_name=row[0]
                    TCGA_info=row[4]
                    #score=row[11]
                    f.write ("%s\t%s\t%s\t%s\t%s\t%s\n" % (mutchrom, mutstart, mutend,name,gene_name,TCGA_info))
"rawat.radhika@gmail.com"
] | rawat.radhika@gmail.com |
08b41d7233c27bcbd3ff46bae419f401ad35f9e4 | a67263ccde6de5d18de409a8924b64c38c8a71c1 | /djangosample/main/middlewares.py | 99578eeab540c0ba43bedc6888aeeac7c53cecb0 | [] | no_license | annshress/Demo | 8613437e7d626f7df4786e4d8b3297edb5e75286 | a73d064d9092d2764e873a208f61ceec1e93f62a | refs/heads/master | 2022-12-07T08:33:09.621364 | 2020-07-02T09:49:26 | 2020-07-02T09:49:26 | 239,692,452 | 0 | 0 | null | 2022-11-22T03:17:32 | 2020-02-11T06:28:28 | JavaScript | UTF-8 | Python | false | false | 3,316 | py | import jwt
from channels.auth import AuthMiddlewareStack
from django.contrib.auth import get_user_model
from django.contrib.auth.middleware import get_user
from django.contrib.auth.models import AnonymousUser
from django.db import close_old_connections
from django.utils.functional import SimpleLazyObject
from rest_framework import exceptions
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
from rest_framework.request import Request
from rest_framework_jwt.settings import api_settings
# Handlers resolved once from DRF-JWT settings (overridable per project).
jwt_decode_handler = api_settings.JWT_DECODE_HANDLER
jwt_get_username_from_payload = api_settings.JWT_PAYLOAD_GET_USERNAME_HANDLER


def get_user_jwt(request):
    """Resolve the request's user, falling back to JWT authentication.

    Returns the session-authenticated user when available; otherwise tries
    the JWT carried by the request. On any JWT failure the (likely
    anonymous) session user is returned unchanged.
    """
    user = get_user(request)
    if user.is_authenticated:
        return user
    try:
        user_jwt = JSONWebTokenAuthentication().authenticate(Request(request))
        if user_jwt is not None:
            # authenticate() returns a (user, token) tuple.
            return user_jwt[0]
    except Exception:
        # FIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt. Any authentication error simply falls through
        # to returning the session user below.
        pass
    return user
class JWTTokenAuthenticationMiddleware:
    """Django middleware that lazily attaches the JWT-resolved user.

    ``request.user`` is wrapped in a lazy object so the (potentially
    expensive) JWT lookup in ``get_user_jwt`` only runs when the user is
    actually accessed.
    """

    def __init__(self, get_response):
        self.get_response = get_response

    def __call__(self, request):
        assert hasattr(request, 'session'), "The Django authentication middleware requires session middleware to be installed. Edit your MIDDLEWARE_CLASSES setting to insert 'django.contrib.sessions.middleware.SessionMiddleware'."

        def resolve_user():
            return get_user_jwt(request)

        request.user = SimpleLazyObject(resolve_user)
        return self.get_response(request)
class JWTTokenAuthMiddleware:
    """
    I'mlazyasF
    inspiration: https://gist.github.com/rluts/22e05ed8f53f97bdd02eafdf38f3d60a
    JWT Token authorization middleware for Django Channels 2

    The client is expected to pass its JWT in the WebSocket subprotocol
    header (`sec-websocket-protocol`); on success the decoded user is put
    into ``scope['user']``, on any JWT/auth failure an AnonymousUser is.
    """
    def __init__(self, inner):
        self.inner = inner

    def __call__(self, scope):
        headers = dict(scope['headers'])
        if b'sec-websocket-protocol' in headers:
            jwt_value = headers[b'sec-websocket-protocol'].decode()
            try:
                payload = jwt_decode_handler(jwt_value)
                user = self.authenticate_credentials(payload)
                # Channels consumers run long-lived; drop stale DB handles.
                close_old_connections()
                scope['user'] = user
            except exceptions.AuthenticationFailed:
                scope['user'] = AnonymousUser()
            except (jwt.ExpiredSignature, jwt.DecodeError, jwt.InvalidTokenError):
                scope['user'] = AnonymousUser()
        # NOTE(review): when the subprotocol header is absent, scope['user']
        # is left unset here — confirm a downstream middleware provides it.
        return self.inner(scope)

    def authenticate_credentials(self, payload):
        """
        Returns an active user that matches the payload's user id and email.

        Raises AuthenticationFailed when the payload has no username, no
        matching user exists, or the account is inactive.
        """
        user_model = get_user_model()
        username = jwt_get_username_from_payload(payload)
        if not username:
            msg = "User not found."
            raise exceptions.AuthenticationFailed(msg)
        try:
            user = user_model.objects.get_by_natural_key(username)
        except user_model.DoesNotExist:
            msg = "Invalid signature."
            raise exceptions.AuthenticationFailed(msg)
        if not user.is_active:
            msg = "User account is disabled."
            raise exceptions.AuthenticationFailed(msg)
        return user

# Convenience wrapper: JWT auth layered over the standard Channels auth stack.
JWTTokenAuthMiddlewareStack = lambda inner: JWTTokenAuthMiddleware(AuthMiddlewareStack(inner))
| [
"ann.shress@gmail.com"
] | ann.shress@gmail.com |
c421acb76a5856f072bae09257a60d9442f0928d | b7d435bbd9780eaaeb559c3969eb010506a10bdf | /dhis2_core/src/dhis2/core/metadata/models/system_info.py | f3d03bcca46fcd5203cafe4e403e1346bc5e650f | [
"BSD-3-Clause"
] | permissive | dhis2/dhis2-python | 29e3377e5bef52789b937a2337a69aa4619dd1d8 | d5ec976a5c04e6897756e3be14924ec74a4456fd | refs/heads/main | 2021-12-16T09:15:35.727187 | 2021-11-29T06:27:52 | 2021-11-29T06:27:52 | 308,135,669 | 7 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,295 | py | from typing import Optional
from pydantic import BaseModel
class DatabaseInfo(BaseModel):
    """Backing-database details of the DHIS2 `/api/system/info` payload."""
    name: Optional[str]
    user: Optional[str]
    url: Optional[str]
    databaseVersion: Optional[str]
    spatialSupport: Optional[bool]
class SystemInfo(BaseModel):
    """DHIS2 `/api/system/info` response payload.

    Field names mirror the DHIS2 JSON keys verbatim (hence camelCase).
    """
    contextPath: str
    userAgent: str
    calendar: str
    dateFormat: str
    serverDate: str
    lastAnalyticsTableSuccess: str
    intervalSinceLastAnalyticsTableSuccess: str
    lastAnalyticsTableRuntime: str
    lastSystemMonitoringSuccess: str
    version: str
    revision: str
    buildTime: str
    jasperReportsVersion: str
    # FIX: `environmentVariable` was declared twice; the duplicate
    # annotation was redundant and has been removed.
    environmentVariable: str
    readOnlyMode: Optional[str]
    nodeId: Optional[str]
    javaVersion: Optional[str]
    javaVendor: Optional[str]
    javaOpts: Optional[str]
    osName: Optional[str]
    osArchitecture: Optional[str]
    osVersion: Optional[str]
    externalDirectory: Optional[str]
    databaseInfo: Optional[DatabaseInfo]
    readReplicaCount: Optional[int]
    memoryInfo: Optional[str]
    cpuCores: Optional[int]
    encryption: bool
    emailConfigured: bool
    redisEnabled: bool
    systemId: str
    systemName: str
    instanceBaseUrl: str
    clusterHostname: str
    # FIX: `isMetadataVersionEnabled` was likewise declared twice.
    isMetadataVersionEnabled: bool
| [
"mortenoh@gmail.com"
] | mortenoh@gmail.com |
57ed8dfd72a02f6d165108493f9b836bd6aaa42f | e019891f24aa7ad9494a74b58cd6ab3ec04e9053 | /archive/migrations/0010_auto_20170609_1109.py | 7e6c06a873009d62e7125b560bee0a2a1e6b2464 | [] | no_license | MicrobesNG/StrainArchive | b34ede9779148ea9e87efcfb2025a9088ed81fcc | 789ea7a13ad94c80f79c1c203a1a27945fccb57e | refs/heads/master | 2020-07-15T04:58:23.400489 | 2017-07-17T14:22:00 | 2017-07-17T14:22:00 | 94,303,287 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 615 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-09 11:09
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('archive', '0009_auto_20170609_1045'),
]
operations = [
migrations.AlterField(
model_name='strain',
name='uploader',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='uploader', to=settings.AUTH_USER_MODEL),
),
]
| [
"microbesng@bio1187.bham.ac.uk"
] | microbesng@bio1187.bham.ac.uk |
7d083c582e9faa973f538fd894def8a7c80648f6 | 29754007ad2fe1d8b5f029bc5fa1f9462170bb1a | /Day1to10/Day10/dayten.py | b240bb54fdef107bba341677f2d42b52ddf161c4 | [] | no_license | ivymorenomt/100daysofPython | 97a934a8789fcd9209cc0e2a9a3283ab66b99ced | 93517ff10bbac50441c9c18ed09e39da2157da02 | refs/heads/master | 2023-03-20T11:03:35.604992 | 2021-03-01T03:50:21 | 2021-03-01T03:50:21 | 339,278,715 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,168 | py | ##### For Loop
# Build the doubled list with an explicit loop first...
numbers = [1, 2, 3, 4]
doubled_numbers = []
for n in numbers:
    doubled_numbers.append(n * 2)
print(doubled_numbers)  # [2, 4, 6, 8]

##### List Comprehension
# ...then the same result as a one-line comprehension.
numbers = [1, 2, 3, 4]
doubled_numbers = [n * 2 for n in numbers]
print(doubled_numbers)  # [2, 4, 6, 8]

name = 'colt'
new_list = [ch.upper() for ch in name]
print(new_list)

friends = ['ashley', 'matt', 'michael']
new_friend = [buddy.capitalize() for buddy in friends]
print(new_friend)

print([n * 10 for n in range(1, 6)])
print([bool(item) for item in [1, [], '']])  # truthiness of each value

numbers = [1, 2, 3, 4, 5]
string_list = [str(n) for n in numbers]
print(string_list)

with_vowels = 'This is so much fun!'
print(''.join(ch for ch in with_vowels if ch not in 'aeiou'))

# Using list comprehensions:
answer = [person[0] for person in ["Elie", "Tim", "Matt"]]
answer2 = [n for n in [1, 2, 3, 4, 5, 6] if n % 2 == 0]
print(answer)
print(answer2)

# Using good old manual loops:
answer = []
for person in ["Elie", "Tim", "Matt"]:
    answer.append(person[0])
answer2 = []
for n in [1, 2, 3, 4, 5, 6]:
    if n % 2 == 0:
        answer2.append(n)
| [
"morenomt27@gmail.com"
] | morenomt27@gmail.com |
2ce930a77f53d08bd7633bac3cdee86c6e5cdd88 | f7327136419a3b895fb185bdc0af7a08256f8aed | /python/paddle/nn/layer/fused_transformer.py | 0084f7ff339df3e185dbe727d4632f758e7e9255 | [
"Apache-2.0"
] | permissive | paddlelaw/Paddle | 45a7598535d6a4b9dd0cfb9bbc61540ff9c1c21e | 12865234fe1e28fe5df50a43901845ceaea42c2d | refs/heads/develop | 2023-08-28T01:19:16.786973 | 2021-10-09T14:39:35 | 2021-10-09T14:39:35 | 331,300,511 | 0 | 0 | Apache-2.0 | 2021-10-09T14:39:36 | 2021-01-20T12:29:27 | Python | UTF-8 | Python | false | false | 19,928 | py | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class FusedMultiHeadAttention(Layer):
    """
    Attention maps queries and a set of key-value pairs to outputs, and
    Multi-Head Attention performs multiple parallel attention to jointly attend
    to information from different representation subspaces.
    Please refer to `Attention Is All You Need <https://arxiv.org/pdf/1706.03762.pdf>`_
    for more details.
    Parameters:
        embed_dim (int): The expected feature size in the input and output.
        num_heads (int): The number of heads in multi-head attention.
        dropout (float, optional): The dropout probability used on attention
            weights to drop some attention targets. 0 for no dropout. Default 0
        kdim (int, optional): The feature size in key. If None, assumed equal to
            `embed_dim`. Default None.
        vdim (int, optional): The feature size in value. If None, assumed equal to
            `embed_dim`. Default None.
        need_weights (bool, optional): Indicate whether to return the attention
            weights. Default False.
        weight_attr(ParamAttr, optional): To specify the weight parameter property.
            Default: None, which means the default weight parameter property is used.
            See usage for details in :code:`ParamAttr` .
        bias_attr (ParamAttr|bool, optional): To specify the bias parameter property.
            Default: None, which means the default bias parameter property is used.
            If it is set to False, this layer will not have trainable bias parameter.
            See usage for details in :code:`ParamAttr` .
    Examples:
        .. code-block:: python
            import paddle
            # encoder input: [batch_size, sequence_length, d_model]
            query = paddle.rand((2, 4, 128))
            # self attention mask: [batch_size, num_heads, query_len, query_len]
            attn_mask = paddle.rand((2, 2, 4, 4))
            multi_head_attn = paddle.nn.MultiHeadAttention(128, 2)
            output = multi_head_attn(query, None, None, attn_mask=attn_mask)  # [2, 4, 128]
    """

    # Cache types used at inference time (see `forward`'s `cache` argument).
    Cache = collections.namedtuple("Cache", ["k", "v"])
    StaticCache = collections.namedtuple("StaticCache", ["k", "v"])

    def __init__(self,
                 embed_dim,
                 num_heads,
                 dropout=0.,
                 kdim=None,
                 vdim=None,
                 need_weights=False,
                 weight_attr=None,
                 bias_attr=None):
        super(FusedMultiHeadAttention, self).__init__()
        # Fused implementation is not available yet; this class is a stub.
        raise NotImplementedError()

    def forward(self, query, key=None, value=None, attn_mask=None, cache=None):
        """
        Applies multi-head attention to map queries and a set of key-value pairs
        to outputs.
        Parameters:
            query (Tensor): The queries for multi-head attention. It is a
                tensor with shape `[batch_size, query_length, embed_dim]`. The
                data type should be float32 or float64.
            key (Tensor, optional): The keys for multi-head attention. It is
                a tensor with shape `[batch_size, key_length, kdim]`. The
                data type should be float32 or float64. If None, use `query` as
                `key`. Default None.
            value (Tensor, optional): The values for multi-head attention. It
                is a tensor with shape `[batch_size, value_length, vdim]`.
                The data type should be float32 or float64. If None, use `query` as
                `value`. Default None.
            attn_mask (Tensor, optional): A tensor used in multi-head attention
                to prevent attention to some unwanted positions, usually the
                paddings or the subsequent positions. It is a tensor with shape
                broadcasted to `[batch_size, n_head, sequence_length, sequence_length]`.
                When the data type is bool, the unwanted positions have `False`
                values and the others have `True` values. When the data type is
                int, the unwanted positions have 0 values and the others have 1
                values. When the data type is float, the unwanted positions have
                `-INF` values and the others have 0 values. It can be None when
                nothing wanted or needed to be prevented attention to. Default None.
            cache (MultiHeadAttention.Cache|MultiHeadAttention.StaticCache, optional):
                It is a namedtuple with `k` and `v` as fields, and stores tensors
                shaped `[batch_size, num_heads, length, embed_dim]` which are results
                of linear projection, reshape and transpose calculations in
                MultiHeadAttention. If it is an instance of `Cache`, `k` and `v`
                fields reserve intermediate results of previous positions, which
                mostly used for decoder self attention. If it is an instance of
                `StaticCache`, `key` and `value` args would be ignored, `k` and
                `v` fields would be used as calculated results on `key` and
                `value`, which mostly used for decoder-encoder cross attention.
                It is only used for inference and should be None for training.
                Default None.
        Returns:
            Tensor|tuple: It is a tensor that has the same shape and data type \
                as `query`, representing attention output. Or a tuple if \
                `need_weights` is True or `cache` is not None. If `need_weights` \
                is True, except for attention output, the tuple also includes \
                the attention weights tensor shaped `[batch_size, num_heads, query_length, key_length]`. \
                If `cache` is not None, the tuple then includes the new cache \
                having the same type as `cache`, and if it is `StaticCache`, it \
                is same as the input `cache`, if it is `Cache`, the new cache \
                reserves tensors concatenating raw tensors with intermediate \
                results of current query.
        """
        # Fused implementation is not available yet; this method is a stub.
        raise NotImplementedError()
class FusedFeedForward(Layer):
    """Fused position-wise feed-forward (FFN) layer stub.

    Mirrors the FFN sub-layer interface of a transformer encoder/decoder
    layer; the fused implementation is not available yet, so both the
    constructor and `forward` raise NotImplementedError.
    """

    def __init__(self,
                 d_model,
                 dim_feedforward,
                 dropout=0.1,
                 activation="relu",
                 act_dropout=None,
                 normalize_before=False,
                 weight_attr=None,
                 bias_attr=None):
        super(FusedFeedForward, self).__init__()
        # Fused implementation is not available yet; this class is a stub.
        raise NotImplementedError()

    def forward(self, src, cache=None):
        # Fused implementation is not available yet; this method is a stub.
        raise NotImplementedError()
class FusedTransformerEncoderLayer(Layer):
    """
    TransformerEncoderLayer is composed of two sub-layers which are self (multi-head)
    attention and feedforward network. Before and after each sub-layer, pre-process
    and post-process would be applied on the input and output accordingly. If
    `normalize_before` is True, pre-process is layer normalization and post-process
    includes dropout, residual connection. Otherwise, no pre-process and post-process
    includes dropout, residual connection, layer normalization.
    Parameters:
        d_model (int): The expected feature size in the input and output.
        nhead (int): The number of heads in multi-head attention(MHA).
        dim_feedforward (int): The hidden layer size in the feedforward network(FFN).
        dropout (float, optional): The dropout probability used in pre-process
            and post-process of MHA and FFN sub-layer. Default 0.1
        activation (str, optional): The activation function in the feedforward
            network. Default relu.
        attn_dropout (float, optional): The dropout probability used
            in MHA to drop some attention target. If None, use the value of
            `dropout`. Default None
        act_dropout (float, optional): The dropout probability used after FFN
            activation. If None, use the value of `dropout`. Default None
        normalize_before (bool, optional): Indicate whether to put layer normalization
            into preprocessing of MHA and FFN sub-layers. If True, pre-process is layer
            normalization and post-process includes dropout, residual connection.
            Otherwise, no pre-process and post-process includes dropout, residual
            connection, layer normalization. Default False
        weight_attr(ParamAttr|list|tuple, optional): To specify the weight parameter property.
            If it is a list/tuple, `weight_attr[0]` would be used as `weight_attr` for
            MHA, and `weight_attr[1]` would be used as `weight_attr` for linear in FFN.
            Otherwise, MHA and FFN both use it as `weight_attr` to create parameters.
            Default: None, which means the default weight parameter property is used.
            See usage for details in :code:`ParamAttr` .
        bias_attr (ParamAttr|list|tuple|bool, optional): To specify the bias parameter property.
            If it is a list/tuple, `bias_attr[0]` would be used as `bias_attr` for
            MHA, and `bias_attr[1]` would be used as `bias_attr` for linear in FFN.
            Otherwise, MHA and FFN both use it as `bias_attr` to create parameters.
            The `False` value means the corresponding layer would not have trainable
            bias parameter. See usage for details in :code:`ParamAttr` . Default: None,
            which means the default bias parameter property is used.
    Examples:
        .. code-block:: python
            import paddle
            from paddle.nn import TransformerEncoderLayer
            # encoder input: [batch_size, src_len, d_model]
            enc_input = paddle.rand((2, 4, 128))
            # self attention mask: [batch_size, n_head, src_len, src_len]
            attn_mask = paddle.rand((2, 2, 4, 4))
            encoder_layer = TransformerEncoderLayer(128, 2, 512)
            enc_output = encoder_layer(enc_input, attn_mask)  # [2, 4, 128]
    """

    def __init__(self,
                 d_model,
                 nhead,
                 dim_feedforward,
                 dropout=0.1,
                 activation="relu",
                 attn_dropout=None,
                 act_dropout=None,
                 normalize_before=False,
                 weight_attr=None,
                 bias_attr=None):
        # Capture the constructor arguments so the layer configuration can
        # be reproduced later (same pattern as paddle's TransformerEncoderLayer).
        self._config = locals()
        self._config.pop("self")
        self._config.pop("__class__", None)  # py3
        super(FusedTransformerEncoderLayer, self).__init__()
        # Fused implementation is not available yet; this class is a stub.
        raise NotImplementedError()

    def forward(self, src, src_mask=None, cache=None):
        """
        Applies a Transformer encoder layer on the input.
        Parameters:
            src (Tensor): The input of Transformer encoder layer. It is
                a tensor with shape `[batch_size, sequence_length, d_model]`.
                The data type should be float32 or float64.
            src_mask (Tensor, optional): A tensor used in multi-head attention
                to prevent attention to some unwanted positions, usually the
                paddings or the subsequent positions. It is a tensor with shape
                broadcasted to `[batch_size, n_head, sequence_length, sequence_length]`.
                When the data type is bool, the unwanted positions have `False`
                values and the others have `True` values. When the data type is
                int, the unwanted positions have 0 values and the others have 1
                values. When the data type is float, the unwanted positions have
                `-INF` values and the others have 0 values. It can be None when
                nothing wanted or needed to be prevented attention to. Default None.
            cache (Tensor, optional): It is an instance of `MultiHeadAttention.Cache`.
                See `TransformerEncoderLayer.gen_cache` for more details. It is
                only used for inference and should be None for training. Default
                None.
        Returns:
            Tensor|tuple: It is a tensor that has the same shape and data type \
                as `enc_input`, representing the output of Transformer encoder \
                layer. Or a tuple if `cache` is not None, except for encoder \
                layer output, the tuple includes the new cache which is same \
                as input `cache` argument but `incremental_cache` has an \
                incremental length. See `MultiHeadAttention.gen_cache` and \
                `MultiHeadAttention.forward` for more details.
        """
        # Fused implementation is not available yet; this method is a stub.
        raise NotImplementedError()
class FusedTransformer(Layer):
    """
    A Transformer model composed of an instance of `TransformerEncoder` and an
    instance of `TransformerDecoder`. While the embedding layer and output layer
    are not included.
    Please refer to `Attention is all you need <http://papers.nips.cc/paper/7181-attention-is-all-you-need.pdf>`_ ,
    and see `TransformerEncoder` and `TransformerDecoder` for more details.
    Users can configure the model architecture with corresponding parameters.
    Note the usage of `normalize_before` representing where to apply layer
    normalization (in pre-process or post-process of multi-head attention or FFN),
    and some transformer like models are different on this, such as
    `BERT <https://arxiv.org/abs/1810.04805>`_ and `GPT2 <https://d4mucfpksywv.cloudfront.net/better-language-models/language-models.pdf>`_ .
    The default architecture here places layer normalization in post-process and
    applies another layer normalization on the output of last encoder/decoder layer.
    Parameters:
        d_model (int, optional): The expected feature size in the encoder/decoder input
            and output. Default 512
        nhead (int, optional): The number of heads in multi-head attention(MHA). Default 8
        num_encoder_layers (int, optional): The number of layers in encoder. Default 6
        num_decoder_layers (int, optional): The number of layers in decoder. Default 6
        dim_feedforward (int, optional): The hidden layer size in the feedforward network(FFN). Default 2048
        dropout (float, optional): The dropout probability used in pre-process
            and post-process of MHA and FFN sub-layer. Default 0.1
        activation (str, optional): The activation function in the feedforward
            network. Default relu.
        attn_dropout (float, optional): The dropout probability used
            in MHA to drop some attention target. If None, use the value of
            `dropout`. Default None
        act_dropout (float, optional): The dropout probability used after FFN
            activation. If None, use the value of `dropout`. Default None
        normalize_before (bool, optional): Indicate whether to put layer normalization
            into preprocessing of MHA and FFN sub-layers. If True, pre-process is layer
            normalization and post-process includes dropout, residual connection.
            Otherwise, no pre-process and post-process includes dropout, residual
            connection, layer normalization. Default False
        weight_attr(ParamAttr|list|tuple, optional): To specify the weight parameter property.
            If it is a list/tuple, the length of `weight_attr` could be 1, 2 or 3. If it is 3,
            `weight_attr[0]` would be used as `weight_attr` for self attention, `weight_attr[1]`
            would be used as `weight_attr` for cross attention of `TransformerDecoder`,
            and `weight_attr[2]` would be used as `weight_attr` for linear in FFN.
            If it is 2, `weight_attr[0]` would be used as `weight_attr` both for self attention
            and cross attention and `weight_attr[1]` would be used as `weight_attr` for
            linear in FFN. If it is 1, `weight_attr[0]` would be used as `weight_attr`
            for self attention, cross attention and linear in FFN. Otherwise,
            the three sub-layers all uses it as `weight_attr` to create parameters.
            Default: None, which means the default weight parameter property is used.
            See usage for details
            in :code:`ParamAttr` .
        bias_attr (ParamAttr|list|tuple|bool, optional): To specify the bias parameter property.
            If it is a list/tuple, the length of `bias_attr` could be 1, 2 or 3. If it is 3,
            `bias_attr[0]` would be used as `bias_attr` for self attention, `bias_attr[1]`
            would be used as `bias_attr` for cross attention of `TransformerDecoder`,
            and `bias_attr[2]` would be used as `bias_attr` for linear in FFN.
            If it is 2, `bias_attr[0]` would be used as `bias_attr` both for self attention
            and cross attention and `bias_attr[1]` would be used as `bias_attr` for
            linear in FFN. If it is 1, `bias_attr[0]` would be used as `bias_attr`
            for self attention, cross attention and linear in FFN. Otherwise,
            the three sub-layers all uses it as `bias_attr` to create parameters.
            The `False` value means the corresponding layer would not have trainable
            bias parameter. See usage for details in :code:`ParamAttr` .
            Default: None, which means the default bias parameter property is used.
        custom_encoder (Layer, optional): If custom encoder is provided, use it as the encoder.
            Default None
        custom_decoder (Layer, optional): If custom decoder is provided, use it as the decoder.
            Default None
    Examples:
        .. code-block:: python
            import paddle
            from paddle.nn import Transformer
            # src: [batch_size, tgt_len, d_model]
            enc_input = paddle.rand((2, 4, 128))
            # tgt: [batch_size, src_len, d_model]
            dec_input = paddle.rand((2, 6, 128))
            # src_mask: [batch_size, n_head, src_len, src_len]
            enc_self_attn_mask = paddle.rand((2, 2, 4, 4))
            # tgt_mask: [batch_size, n_head, tgt_len, tgt_len]
            dec_self_attn_mask = paddle.rand((2, 2, 6, 6))
            # memory_mask: [batch_size, n_head, tgt_len, src_len]
            cross_attn_mask = paddle.rand((2, 2, 6, 4))
            transformer = Transformer(128, 2, 4, 4, 512)
            output = transformer(enc_input,
                                 dec_input,
                                 enc_self_attn_mask,
                                 dec_self_attn_mask,
                                 cross_attn_mask)  # [2, 6, 128]
    """
    def __init__(self,
                 d_model=512,
                 nhead=8,
                 num_encoder_layers=6,
                 num_decoder_layers=6,
                 dim_feedforward=2048,
                 dropout=0.1,
                 activation="relu",
                 attn_dropout=None,
                 act_dropout=None,
                 normalize_before=False,
                 weight_attr=None,
                 bias_attr=None,
                 custom_encoder=None,
                 custom_decoder=None):
        # BUG FIX: this previously read `super(fusedTransformer, self)` with a
        # lowercase 'f', which raised NameError before the intended
        # NotImplementedError could even be reached.
        super(FusedTransformer, self).__init__()
        raise NotImplementedError()

    def forward(self, src, tgt, src_mask=None, tgt_mask=None, memory_mask=None):
        # Fused implementation is not available yet.
        raise NotImplementedError()
| [
"noreply@github.com"
] | paddlelaw.noreply@github.com |
dd831f71086ba26f6e526a01c435b01b2c54ac27 | 7fc24b36564f348298acac77cf4900b2204be283 | /scr_capture.py | e9ae16c23f21c4e041eb1d1eb93a3dea38226618 | [] | no_license | shadow-identity/screen_capture | ee7e41a4b754b2de2c732f66205b86454f8c0c90 | 4e4c5c3b41dfa6e445a3f1bf8a4b78e7f4145a3b | refs/heads/master | 2016-08-05T07:11:03.131181 | 2014-01-13T16:19:44 | 2014-01-13T16:19:44 | 12,612,290 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 796 | py | #!/bin/python
# -*- coding: utf_8 -*-
import subprocess
import sys
# Frames per second of the capture; change as needed.
fps = 25
# Query the current screen resolution from xrandr (e.g. "1920x1080");
# check_output returns bytes on Python 3, so decode it explicitly.
res = subprocess.check_output("xrandr | grep \*", shell=True).split()[0].decode()
# ffmpeg command template: lossless (crf 0) x264 grab of display :0.0.
# BUG FIX: the output placeholder was missing, so the requested output
# filename was never handed to ffmpeg; "{filename}" restores it.
command = ("ffmpeg -f x11grab -r {fps} -s {resolution} -i :0.0 -vcodec libx264 "
           "-preset ultrafast -crf 0 -threads 0 {filename}")
if len(sys.argv) != 2:
    # Exactly one argument (the output file) is required.
    print("You must specify filename to save, for example:")
    print("{my_name} screencast.mkv".format(my_name=__file__))
    print("python {my_name} screencast.mkv".format(my_name=__file__))
    sys.exit(1)
sys.exit(subprocess.call(command.format(resolution=res,
                                        filename=sys.argv[1],
                                        fps=fps),
                         shell=True))
| [
"pavel.nedr@gmail.com"
] | pavel.nedr@gmail.com |
0a53f26329b7e8f590b399d677a12e83e6704b2e | 28a124b6a2f22a53af3b6bb754e77af88b4138e1 | /DJANGO/companytodo/reports/migrations/0006_auto_20191209_0121.py | a29feb60b3e3cadd0f868274c2f14a8a99ef6f0e | [] | no_license | mebaysan/LearningKitforBeginners-Python | f7c6668a9978b52cad6cc2b969990d7bbfedc376 | 9e1a47fb14b3d81c5b009b74432902090e213085 | refs/heads/master | 2022-12-21T03:12:19.892857 | 2021-06-22T11:58:27 | 2021-06-22T11:58:27 | 173,840,726 | 18 | 4 | null | 2022-12-10T03:00:22 | 2019-03-04T23:56:27 | Python | UTF-8 | Python | false | false | 350 | py | # Generated by Django 2.2.7 on 2019-12-08 22:21
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: must be applied after migration 0005 of the
    # 'reports' app.
    dependencies = [
        ('reports', '0005_auto_20191209_0120'),
    ]
    # Change Report's Meta.ordering to newest-first ('-created');
    # this is an options-only change, no database schema is altered.
    operations = [
        migrations.AlterModelOptions(
            name='report',
            options={'ordering': ('-created',)},
        ),
    ]
| [
"menesbaysan@gmail.com"
] | menesbaysan@gmail.com |
a81f1658dd871e8e403dcf6b4e512ae458767a2f | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /HBKAGJZ62JkCTgYX3_15.py | 5c5665b4393c00c704f2eb04cb3ee08dfe0d3464 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py |
def last(l, n):
    """Return the last n elements of l.

    Returns [] when n is 0 and the string 'invalid' when n exceeds len(l).
    """
    if n > len(l):
        return 'invalid'
    if n == 0:
        return []
    return l[len(l) - n:]
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
cd4c7e6854eef7d83740ba16570aec3b7664d4ef | 0606f6af48d8e20c5231f3205d692f6a3943807f | /plot_hist.py | dd3b3b0b569537b8560a0ec250e48b02964f8d9f | [] | no_license | pahbs/HRSI | 9c95833ff89c2d4a72f8cf063781425892875fff | f83b790a04ad448589576f54bc5251781962831d | refs/heads/master | 2021-12-11T10:11:26.684339 | 2021-11-29T23:17:53 | 2021-11-29T23:17:53 | 75,756,652 | 4 | 1 | null | 2017-02-23T20:21:00 | 2016-12-06T17:51:32 | Python | UTF-8 | Python | false | false | 2,325 | py | #!/usr/bin/env python
#
# Utility to plot a histogram of a raster
import sys
import os
import argparse
import numpy as np
from pygeotools.lib import iolib
from pygeotools.lib import malib
from pygeotools.lib import geolib
from pygeotools.lib import filtlib
from pygeotools.lib import warplib
from dem_control import sample_ma
import matplotlib
##https://stackoverflow.com/questions/37604289/tkinter-tclerror-no-display-name-and-no-display-environment-variable
matplotlib.use('Agg')
import matplotlib.pyplot, matplotlib.mlab, math
import scipy.stats
def getparser():
    """Build and return the command-line argument parser for this utility."""
    p = argparse.ArgumentParser(description="Utility to get histogram from a raster")
    p.add_argument('ras_fn', type=str, help='Raster filename')
    p.add_argument('-min_val', type=float, default=None, help='Min value that will be included')
    p.add_argument('-max_val', type=float, default=None, help='Max value that will be included')
    p.add_argument('-sample_step', type=int, default=50, help='Sampling step value')
    p.add_argument('-axis_lab_x', type=str, default="X", help='X-axis label')
    return p
def main():
    """Clip, subsample and histogram a raster, saving the figure next to it.

    Returns the histogram figure's filename, or "" when the masked array
    is empty/None.  Python 2 code (print statements).
    """
    parser = getparser()
    args = parser.parse_args()
    ras_fn = args.ras_fn
    min_val = args.min_val
    max_val = args.max_val
    sample_step = args.sample_step
    # Get ma
    ma = iolib.fn_getma(ras_fn)
    # Mask values outside the requested range, then subsample.
    if min_val is not None:
        ma = np.ma.masked_less(ma, min_val)
    if max_val is not None:
        ma = np.ma.masked_greater(ma, max_val)
    ma = sample_ma(ma, sample_step)
    if ma is None:
        print "No histogram. Array is None."
        fig_name = ""
    else:
        # NOTE(review): sample_step_str is computed but never used.
        sample_step_str = "%03d" % (sample_step)
        histo = matplotlib.pyplot.hist(ma.compressed(), 300, normed=True, color='gray', alpha = 0.5)
        # NOTE(review): np.arange(None, None, 1.0) fails when -min_val/-max_val
        # are not given — confirm both are always supplied in this branch.
        matplotlib.pyplot.xticks(np.arange(min_val, max_val, 1.0))
        matplotlib.pyplot.xlabel(args.axis_lab_x, fontsize=12)
        #Write histogram
        # NOTE(review): strip('.tif') strips characters, not the suffix.
        fig_name = ras_fn.split('/')[-1].strip('.tif') + '_hist.png'
        matplotlib.pyplot.savefig(os.path.join(os.path.dirname(ras_fn),fig_name))
        matplotlib.pyplot.clf()
        print "Saved histogram fig:"
        print os.path.join(os.path.dirname(ras_fn),fig_name)
    return fig_name
if __name__ == "__main__":
    main()
"paul.montesano@gmail.com"
] | paul.montesano@gmail.com |
31f85f215a9f769b8f6cf5f1c88dce4b0be8c037 | 4f0f411d8d9abe3d520a962d30da67959e6bff2f | /tests/sequence/test_phylo.py | d40dbd398c6fe4c8b0188a102157cb630725e1f8 | [
"BSD-3-Clause"
] | permissive | ajshedivy/biotite | 685f196416cc7be74d299a13f23947f85eb5825e | 15fe39de165aba6e8bd6376fa8f8ddf069718fb5 | refs/heads/master | 2023-08-24T14:45:25.239920 | 2021-10-06T14:32:40 | 2021-10-06T14:32:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,165 | py | # This source code is part of the Biotite package and is distributed
# under the 3-Clause BSD License. Please see 'LICENSE.rst' for further
# information.
from os.path import join
import numpy as np
import pytest
import biotite
import biotite.sequence.phylo as phylo
from ..util import data_dir
@pytest.fixture
def distances():
    """Pairwise distance matrix used to construct the test tree."""
    # Distances are based on the example
    # "Dendrogram of the BLOSUM62 matrix"
    # with the small modification M[i,j] += i+j
    # to reduce ambiguity in the tree construction.
    return np.loadtxt(join(data_dir("sequence"), "distances.txt"), dtype=int)
@pytest.fixture
def upgma_newick():
    """Reference Newick string for the tree built from 'distances.txt'."""
    # Newick notation of the tree created from 'distances.txt',
    # created via DendroUPGMA
    with open(join(data_dir("sequence"), "newick_upgma.txt"), "r") as file:
        newick = file.read().strip()
    return newick
@pytest.fixture
def tree(distances):
    """UPGMA tree constructed from the reference distance matrix."""
    return phylo.upgma(distances)
def test_upgma(tree, upgma_newick):
    """
    Compare the results of `upgma()` with DendroUPGMA:
    distances approximately, topologies exactly.
    """
    ref_tree = phylo.Tree.from_newick(upgma_newick)
    # Cannot apply direct tree equality assertion because the distance
    # might not be exactly equal due to floating point rounding errors
    for i in range(len(tree)):
        for j in range(len(tree)):
            # Check for equal distances and equal topologies
            assert tree.get_distance(i,j) \
                == pytest.approx(ref_tree.get_distance(i,j), abs=1e-3)
            assert tree.get_distance(i,j, topological=True) \
                == ref_tree.get_distance(i,j, topological=True)
def test_neighbor_joining():
    """
    Compare the results of `neighbor_joining()` with a known tree.
    """
    dist = np.array([
        [ 0,  5,  4,  7,  6,  8],
        [ 5,  0,  7, 10,  9, 11],
        [ 4,  7,  0,  7,  6,  8],
        [ 7, 10,  7,  0,  5,  9],
        [ 6,  9,  6,  5,  0,  8],
        [ 8, 11,  8,  9,  8,  0],
    ])
    # Build the expected tree bottom-up instead of as one nested literal.
    leaves = [phylo.TreeNode(index=i) for i in range(6)]
    pair_01 = phylo.TreeNode([leaves[0], leaves[1]], [1, 4])
    left_subtree = phylo.TreeNode([pair_01, leaves[2]], [1, 2])
    right_subtree = phylo.TreeNode([leaves[3], leaves[4]], [3, 2])
    ref_tree = phylo.Tree(
        phylo.TreeNode([left_subtree, right_subtree, leaves[5]], [1, 1, 5])
    )
    assert phylo.neighbor_joining(dist) == ref_tree
def test_node_distance(tree):
    """
    Test whether `distance_to()` and `get_distance()` work
    correctly.
    """
    # Tree is created via UPGMA
    # -> The distances to root should be equal for all leaf nodes
    dist = tree.root.distance_to(tree.leaves[0])
    for leaf in tree.leaves:
        assert leaf.distance_to(tree.root) == dist
    # Example topological distances
    assert tree.get_distance(0, 19, True) == 9
    assert tree.get_distance(4, 2, True) == 10
    # All pairwise leaf node distances should be sufficient
    # to reconstruct the same tree via UPGMA
    ref_dist_mat = np.zeros((len(tree), len(tree)))
    for i in range(len(tree)):
        for j in range(len(tree)):
            ref_dist_mat[i,j] = tree.get_distance(i,j)
    # The distance matrix must be symmetric.
    assert np.allclose(ref_dist_mat, ref_dist_mat.T)
    new_tree = phylo.upgma(ref_dist_mat)
    test_dist_mat = np.zeros((len(tree), len(tree)))
    for i in range(len(tree)):
        for j in range(len(tree)):
            test_dist_mat[i,j] = new_tree.get_distance(i,j)
    assert np.allclose(test_dist_mat, ref_dist_mat)
def test_leaf_list(tree):
    """The `leaves` list must be ordered by the leaves' reference indices."""
    for expected_index, leaf in enumerate(tree.leaves):
        assert leaf.index == expected_index
def test_distances(tree):
    """Basic distance checks.

    NOTE(review): duplicates the first half of `test_node_distance` —
    consider consolidating.
    """
    # Tree is created via UPGMA
    # -> The distances to root should be equal for all leaf nodes
    dist = tree.root.distance_to(tree.leaves[0])
    for leaf in tree.leaves:
        assert leaf.distance_to(tree.root) == dist
    # Example topological distances
    assert tree.get_distance(0, 19, True) == 9
    assert tree.get_distance(4, 2, True) == 10
def test_get_leaves(tree):
    """Check index retrieval and leaf counting on manually chosen nodes.

    The unused local `node = tree.leaves[6]` from the original was removed.
    """
    # Manual example cases
    assert set(tree.leaves[6].parent.get_indices()) == set(
        [6,11,2,3,13,8,14,5,0,15,16]
    )
    assert set(tree.leaves[10].get_indices()) == set([10])
    assert tree.root.get_leaf_count() == 20
def test_copy(tree):
    """A copied tree is a distinct object that still compares equal."""
    duplicate = tree.copy()
    assert duplicate is not tree
    assert duplicate == tree
def test_immutability():
    """TreeNode relationships cannot be rearranged once they are established."""
    node = phylo.TreeNode(index=0)
    # Attributes are read-only
    with pytest.raises(AttributeError):
        node.children = None
    with pytest.raises(AttributeError):
        node.parent = None
    with pytest.raises(AttributeError):
        node.index = None
    # A root node cannot become a child
    root_node, other = phylo.TreeNode(index=0), phylo.TreeNode(index=1)
    root_node.as_root()
    with pytest.raises(phylo.TreeError):
        phylo.TreeNode([root_node, other], [0, 0])
    # A child node cannot become a root
    child, sibling = phylo.TreeNode(index=0), phylo.TreeNode(index=1)
    phylo.TreeNode([child, sibling], [0, 0])
    with pytest.raises(phylo.TreeError):
        child.as_root()
    # A node cannot be the child of two different parents
    first, second = phylo.TreeNode(index=0), phylo.TreeNode(index=1)
    phylo.TreeNode([first, second], [0, 0])
    with pytest.raises(phylo.TreeError):
        phylo.TreeNode([first, second], [0, 0])
    # A tree cannot be constructed from a child node
    child_a, child_b = phylo.TreeNode(index=0), phylo.TreeNode(index=0)
    # child_a and child_b now have a parent
    phylo.TreeNode([child_a, child_b], [0, 0])
    with pytest.raises(phylo.TreeError):
        phylo.Tree(child_a)
@pytest.mark.parametrize("newick, labels, error", [
    # Reference index out of range
    ("((1,0),4),2);", None, biotite.InvalidFileError),
    # Empty string
    ("", None, biotite.InvalidFileError),
    # Empty node
    ("();", None, biotite.InvalidFileError),
    # Missing brackets
    ("((0,1,(2,3));", None, biotite.InvalidFileError),
    # A node with three leaves
    ("((0,1),(2,3),(4,5));", None, None),
    # A node with one leaf
    ("((0,1),(2,3),(4));", None, None),
    # Named intermediate nodes
    ("((0,1,3)A,2)B;", None, None),
    # Named intermediate nodes and distances
    ("((0:1.0,1:3.0,3:5.0)A:2.0,2:5.0)B;", None, None),
    # Nodes with labels
    ("((((A,B),(C,D)),E),F);", ["A","B","C","D","E","F"], None),
    # Nodes with labels and distances
    ("((((A:1,B:2),(C:3,D:4)),E:5),F:6);", ["A","B","C","D","E","F"], None),
    # Newick with spaces
    (" ( 0 : 1.0 , 1 : 3.0 ) A ; ", None, None),
])
def test_newick_simple(newick, labels, error):
    """Round-trip a Newick string through parse -> write -> parse and expect
    the same tree from both reads, or expect the given parsing error."""
    if error is not None:
        with pytest.raises(error):
            phylo.Tree.from_newick(newick, labels)
        return
    tree1 = phylo.Tree.from_newick(newick, labels)
    newick = tree1.to_newick(labels, include_distance=True)
    tree2 = phylo.Tree.from_newick(newick, labels)
    assert tree1 == tree2
@pytest.mark.parametrize("use_labels", [False, True])
def test_newick_complex(upgma_newick, use_labels):
    """Round-trip a complex Newick string, with and without node labels."""
    labels = [str(i) for i in range(20)] if use_labels else None
    tree1 = phylo.Tree.from_newick(upgma_newick, labels)
    rewritten = tree1.to_newick(labels, include_distance=True)
    tree2 = phylo.Tree.from_newick(rewritten, labels)
    assert tree1 == tree2
# Each case maps an input Newick tree to its strictly binary equivalent.
@pytest.mark.parametrize("newick_in, exp_newick_out", [
    ("(0:1.0, 1:2.0);", "(0:1.0,1:2.0):0.0;" ),
    ("(0:1.0, 1:2.0, 2:3.0);", "((0:1.0,1:2.0):0.0,2:3.0):0.0;" ),
    ("(((0:1.0, 1:2.0):10.0):5.0, 2:8.0);", "((0:1.0,1:2.0):15.0,2:8.0):0.0;"),
    ("((0:1.0, 1:2.0):10.0):5.0;", "(0:1.0,1:2.0):0.0;" ),
])
def test_as_binary_cases(newick_in, exp_newick_out):
    """
    Test the `as_binary()` function based on known cases.
    """
    tree = phylo.Tree.from_newick(newick_in)
    bin_tree = phylo.as_binary(tree)
    assert bin_tree.to_newick() == exp_newick_out
def test_as_binary_distances():
    """
    Test the preservation of all pairwise leaf distances after calling
    `as_binary()`.
    """
    # Some random newick
    newick = "((((0:5, 1:1, 2:13, 5:9):4, (4:2, 6:9):7):18), 3:12);"
    tree = phylo.Tree.from_newick(newick)
    size = len(tree)
    ref_dist_mat = np.array(
        [[tree.get_distance(i, j) for j in range(size)] for i in range(size)]
    )
    bin_tree = phylo.as_binary(tree)
    test_dist_mat = np.array(
        [[bin_tree.get_distance(i, j) for j in range(size)] for i in range(size)]
    )
    assert np.allclose(test_dist_mat, ref_dist_mat)
def test_equality(tree):
    """
    Assert that equal trees equal each other, and non-equal trees do not
    equal each other.
    """
    assert tree == tree.copy()
    child0, child1 = tree.root.children[0], tree.root.children[1]
    # Order of children is not important
    assert tree == phylo.Tree(phylo.TreeNode(
        [child1.copy(), child0.copy()],
        [child1.distance, child0.distance]
    ))
    # Different distance -> Unequal tree
    assert tree != phylo.Tree(phylo.TreeNode(
        [child0.copy(), child1.copy()],
        [child0.distance, 42]
    ))
    # Additional node -> Unequal tree
    assert tree != phylo.Tree(phylo.TreeNode(
        [child0.copy(), child1.copy(), phylo.TreeNode(index=len(tree))],
        [child0.distance, child1.distance, 42]
    ))
| [
"patrick.kunzm@gmail.com"
] | patrick.kunzm@gmail.com |
c0300915f88b4cbb234193be8a08ceb789f7fd55 | c24b28c0dc4ad8f83845f4c61882f1e04d49b5cd | /Plotly_Graphs/Plotly_Introduction/plotly_charts.py | d17cd6a9de3a549f8ebb82ff2712db48bbb76398 | [] | no_license | Coding-with-Adam/Dash-by-Plotly | 759e927759513d96060a770b1e0b0a66db13f54f | 9f178f1d52536efd33827758b741acc4039d8d9b | refs/heads/master | 2023-08-31T17:23:02.029281 | 2023-08-08T05:12:50 | 2023-08-08T05:12:50 | 234,687,337 | 1,293 | 1,822 | null | 2023-07-31T15:47:07 | 2020-01-18T05:36:28 | Jupyter Notebook | UTF-8 | Python | false | false | 370 | py | import pandas as pd
import plotly.express as px

# Bird/window collision data ('pd' is imported at the top of the file).
dfb = pd.read_csv("bird-window-collision-death.csv")
# Removed dead code: `df = px.data.tips()` loaded a sample dataset that
# was never used.
# Donut chart (hole=0.3) of deaths per building, colored by building side.
fig = px.pie(dfb, values='Deaths', names='Bldg #', color="Side", hole=0.3)
fig.update_traces(textinfo="label+percent", insidetextfont=dict(color="white"))
# Disable legend-item click filtering.
fig.update_layout(legend={"itemclick": False})
fig.show()
fig.write_image("images/fig1.png")
| [
"noreply@github.com"
] | Coding-with-Adam.noreply@github.com |
023f8af1473e4b26b26918a3f4baac47e3c12473 | 7de9d5c8ca5bd65cc5322a96537c88d0389bc5c9 | /apps/main_app/views.py | 0219dfc2c8e6166c15a9fe498bc9f55a6f0b9c8d | [] | no_license | SeattleAmy/deploy | f3cb6f89bc4e0cdfe234d8b80acdef109d90072f | bf549dffb468913c6daba896acefb69dad03b835 | refs/heads/master | 2021-01-12T08:12:05.262309 | 2016-12-15T00:13:45 | 2016-12-15T00:13:45 | 76,500,631 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 729 | py | from django.shortcuts import render, redirect
from .models import Email
from django.contrib import messages
# Create your views here.
def index(request):
    """Render the landing page with the email signup form."""
    return render(request, 'main_app/index.html')
def create(request):
    """Validate and store a submitted email address, then redirect.

    On validation failure the error message is flashed and the user is sent
    back to the index page; on success they are sent to the success page.
    Non-POST requests are redirected home — previously they fell through and
    returned None, which makes Django raise a ValueError.
    """
    if request.method == "POST":
        result = Email.objects.register(request.POST['email'])
        if not result[0]:
            messages.error(request, result[1])
            return redirect('/')
        return redirect('/success')
    return redirect('/')
def success(request):
    """Render the success page listing every registered email."""
    emails = Email.objects.all()
    return render(request, 'main_app/success.html', {"emails": emails})
def destroy(request, id):
    """Delete the Email row with the given primary key, then go home."""
    # NOTE(review): an unknown id raises Email.DoesNotExist (HTTP 500);
    # consider get_object_or_404 if that matters.
    Email.objects.get(id=id).delete()
    return redirect('/')
| [
"Amy@Amys-Air.hsd1.wa.comcast.net"
] | Amy@Amys-Air.hsd1.wa.comcast.net |
1f354f1fd00dbff2a1beb175a30347ca857e0aa0 | 1f944a64873109fdc56627f5d82e8ac179f7ca83 | /inti_experiment.py | a1eaef52f47377bc58d162432c32edeb26cb7fe4 | [] | no_license | cqu-bdsc/probability-based-best-response-algorithm | 420fe8375f2be1fb84b79d5add625e150e33a318 | 82884e8f1af2c89c09ba25b6d823b64d97e47e02 | refs/heads/master | 2022-12-04T12:07:45.928258 | 2020-07-29T13:11:09 | 2020-07-29T13:11:09 | 285,524,488 | 0 | 1 | null | 2020-08-06T09:06:27 | 2020-08-06T09:06:26 | null | UTF-8 | Python | false | false | 5,880 | py | #!./venv python
# -*- encoding: utf-8 -*-
"""
@File : inti_experiment.py
@Contact : neard.ws@gmail.com
@Github : neardws
@Modify Time @Author @Version @Desciption
------------ ------- -------- -----------
2020/7/29 下午4:04 neardws 1.0 None
"""
from config import settings
import math
import numpy as np
import pandas as pd
import random
import matplotlib.pyplot as plot
def get_fog_node(zone_length, communication_range):
    """Place fog nodes on a square grid so their coverage circles tile the zone.

    Args:
        zone_length: side length of the square deployment zone.
        communication_range: coverage radius of one fog node.

    Returns:
        List of {"id", "x", "y"} dicts; ids start at 1 and advance column
        by column.
    """
    nodes_per_axis = math.floor(zone_length / (2 * communication_range))
    fog_nodes = []
    node_id = 0
    for column in range(nodes_per_axis):
        x = communication_range + column * 2 * communication_range
        for row in range(nodes_per_axis):
            node_id += 1
            y = communication_range + row * 2 * communication_range
            fog_nodes.append({"id": node_id, "x": x, "y": y})
    return fog_nodes
def get_vehicle_number_under_fog(fog_node, data_frame, communication_range):
    """For every fog node, collect the vehicles within its communication range.

    Args:
        fog_node: list of {"id", "x", "y"} dicts.
        data_frame: pandas DataFrame with vehicle 'x' and 'y' columns.
        communication_range: coverage radius of a fog node.

    Returns:
        One dict per fog node:
        {"node_id", "vehicle_number", "vehicle_under_fog"}.
    """
    coverage = []
    for node in fog_node:
        xs = data_frame["x"].tolist()
        ys = data_frame["y"].tolist()
        covered_vehicles = [
            {"x": xs[idx], "y": ys[idx]}
            for idx in range(len(xs))
            if np.sqrt(np.square(xs[idx] - node["x"]) + np.square(ys[idx] - node["y"])) <= communication_range
        ]
        coverage.append({"node_id": node["id"],
                         "vehicle_number": len(covered_vehicles),
                         "vehicle_under_fog": covered_vehicles})
    return coverage
def get_tasks_in_time_slots(fog_node, csv_file, time_slot, time_length, vehicle_task_number):
    """
    Args:
        fog_node: list of {"id", "x", "y"} dicts as built by `get_fog_node`.
        csv_file: path of the vehicle-trace CSV with 'time', 'x', 'y' columns.
        time_slot: step (in trace time units) between sampled slots.
        time_length: trace length; slots sampled are 1, 1+time_slot, ... < time_length.
        vehicle_task_number: tasks generated per vehicle per slot.
    """
    df = pd.read_csv(csv_file)
    time = []
    fog_id = []
    v_number = []
    vehicles_under_fog = []
    # Pass 1: per time slot, record the vehicles covered by each fog node;
    # the result is written to settings.init_csv_name.
    for i in range(1, time_length, time_slot):
        df_second = df[df['time'] == i]
        vehicle_number = get_vehicle_number_under_fog(fog_node,
                                                      df_second,
                                                      settings.communication_range)
        for number in vehicle_number:
            time.append(i)
            fog_id.append(number["node_id"])
            v_number.append(number["vehicle_number"])
            vehicles_under_fog.append(number["vehicle_under_fog"])
    init_df = pd.DataFrame({"time": time, "fog_id": fog_id, "vehicle_number": v_number, "vehicles": vehicles_under_fog})
    init_df.to_csv(settings.init_csv_name, index=False)
    task_fog_id = []
    task_time = []
    required_rate = []
    required_sinr = []
    task_x = []
    task_y = []
    # Pass 2: per fog node, slot and covered vehicle, draw random rate/SINR
    # demands for each generated task; written to settings.task_csv_name.
    for j in range(1, len(fog_node) + 1):
        init_df_id = init_df[init_df["fog_id"] == j]
        time = init_df_id["time"].tolist()
        num = init_df_id["vehicle_number"].tolist()
        vehicles = init_df_id["vehicles"]
        for k in range(len(time)):
            now_time = time[k]
            now_vehicles = vehicles.tolist()[k]
            for l in range(num[k]):
                for n in range(vehicle_task_number):
                    task_required_rate = random.randint(settings.task_request_rate_min,
                                                        settings.task_request_rate_max)
                    task_required_sinr = random.randint(settings.task_request_SINR_min,
                                                        settings.task_request_SINR_max)
                    task_fog_id.append(j)
                    task_time.append(now_time)
                    required_rate.append(task_required_rate)
                    required_sinr.append(task_required_sinr)
                    vehicle = now_vehicles[l]
                    task_x.append(vehicle["x"])
                    task_y.append(vehicle["y"])
    task_df = pd.DataFrame(
        {"fog_id": task_fog_id, "time": task_time, "required_rate": required_rate, "required_sinr": required_sinr, "x": task_x, "y": task_y})
    task_df.to_csv(settings.task_csv_name, index=False)
def draw_round(round_x, round_y, radius, width):
    """Draw a dashed gray circle of the given radius around (round_x, round_y)."""
    angles = np.arange(0, 2 * np.pi, 0.01)
    xs = round_x + radius * np.cos(angles)
    ys = round_y + radius * np.sin(angles)
    plot.plot(xs, ys, color="gray", linestyle="--", linewidth=width)
def draw_fog_task_in_the_map(fog_node, time, zone_length, communication_range):
    """Plot fog nodes (black triangles) with their coverage circles and the
    tasks of the given time slot (dark-red dots) on a square map."""
    plot.xlim(0, zone_length)
    plot.ylim(0, zone_length)
    for node in fog_node:
        node_x = node["x"]
        node_y = node["y"]
        plot.plot(int(node_x), int(node_y), color="black", marker="^", markersize=10, label="fog node")
        draw_round(node_x, node_y, communication_range, 1)
    # Tasks previously generated by get_tasks_in_time_slots for this slot.
    df = pd.read_csv(settings.task_csv_name)
    df = df[df["time"] == time]
    task_x = df["x"].tolist()
    task_y = df["y"].tolist()
    for i in range(len(task_x)):
        plot.plot(int(task_x[i]), int(task_y[i]), color="darkred", marker="o", label="task", markersize=3)
    plot.show()
if __name__ == '__main__':
    # Build the fog-node grid and visualise the tasks of time slot 1.
    # The commented call regenerates the task CSV from the vehicle trace.
    fog_node = get_fog_node(settings.zone_length, settings.communication_range)
    # # print(node)
    # get_tasks_in_time_slots(fog_node,
    #                         settings.fill_xy_csv_name,
    #                         settings.time_slot,
    #                         settings.time_length,
    #                         settings.vehicle_task_number)
    draw_fog_task_in_the_map(fog_node=fog_node, time=1, zone_length=settings.zone_length, communication_range=settings.communication_range)
| [
"singleangel@vip.qq.com"
] | singleangel@vip.qq.com |
6b90cca26ffb65e7c70dea2a4bb236f579e6791d | 948ce56cc061db1cd67a9ce6aee1619604dce1fb | /Code_Kinder/elias_02.py | 2198dda1fe82d2056b19e6e552b3972ad221e481 | [] | no_license | tomobones/hort | 3bde6e61e9b47b65b8e06be903470d2d348878ee | 3c5ffe0f861221de738334f0919424be4f1d1e38 | refs/heads/master | 2023-05-29T18:07:33.967243 | 2021-06-21T14:26:28 | 2021-06-21T14:26:28 | 316,201,887 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 571 | py | import turtle
stift = turtle.Pen()
turtle.bgcolor('navy')
# Each pass draws an ever-growing arm followed by fixed-length colored
# segments, producing a multicolored spiral.
for schritt in range(999):
    stift.forward(schritt)
    stift.left(200)
    stift.pencolor('green')
    stift.forward(100)
    stift.left(200)
    stift.pencolor('white')
    stift.forward(100)
    stift.left(200)
    stift.pencolor('purple')
    stift.forward(100)
    stift.left(200)
    stift.pencolor('cyan')
    stift.forward(schritt)
    stift.left(30)
    stift.pencolor('red')
    stift.forward(schritt)
    stift.left(2)
# Finishing touches after the spiral.
stift.circle(100)
stift.up()
stift.forward(123)
stift.down()
stift.pencolor('brown')
stift.forward(220)
stift.left(201)
turtle.Screen().exitonclick()
| [
"thomas.vogg@posteo.de"
] | thomas.vogg@posteo.de |
9ff2f22cb931ef1b4b6f3de6cb5ba468dace744c | ae613a880eecf783ba23e7ca871f9e165ec2ce6e | /calculate_root.py | f6e918aef989a07665376a59101b386c993edc8e | [
"MIT"
] | permissive | h-mayorquin/capacity_code | 52d7e81026cd804677d5a5a6312b434bdff6ed32 | f885f0e409d3f9c54b8e20c902f7ef28584ca8a2 | refs/heads/master | 2020-08-28T00:30:14.760936 | 2020-01-31T17:26:29 | 2020-01-31T17:26:29 | 217,534,700 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,388 | py | import warnings
import pickle
import pandas as pd
import numpy as np
import random
from math import ceil, floor
from copy import deepcopy
from functions import *
warnings.filterwarnings('ignore')
# Experiment configuration.
minicolumns = 10
hypercolumns = 5
sequence_length = 2
number_of_sequences = 20
# NOTE(review): this module-level pattern_seed is never used — it is redrawn
# inside the loop below (removing it would shift NumPy's RNG state, so it is
# deliberately kept as-is).
pattern_seed = np.random.randint(0, 20)
desired_root = 0.9
verbose = True
n_patterns = 100
# NOTE(review): the computed pairs list is immediately overridden by the
# hard-coded single pair below.
pairs = produce_pairs_with_constant_number_of_patterns(n_patterns)[3:-3]
# Format is hypercolumns, minicolumns, extra
pairs = [(3, 66, 0)]
# Do the calculations
for pair in pairs:
    hypercolumns, minicolumns, extra = pair
    print('hypercolumns', hypercolumns)
    print('minicolumns', minicolumns)
    print('extra', extra)
    pattern_seed = np.random.randint(0, 20)
    aux = find_root_empirical(desired_root, hypercolumns, minicolumns, sequence_length, pattern_seed, tolerance=0.01, verbose=verbose)
    capacity, p_root, trials = aux
    # Read
    data_frame = pd.read_csv('../storage_capacity_data.csv', index_col=0)
    # Write
    data_frame = data_frame.append({'hypercolumns':hypercolumns, 'minicolumns':minicolumns, 'sequence_length':sequence_length,
                                    'capacity':capacity, 'p_critical':p_root, 'trials':trials }, ignore_index=True)
    # Store the data base
    data_frame.to_csv('../storage_capacity_data.csv')
    print('Stored')
    print('================')
| [
"h.mayorquin@gmail.com"
] | h.mayorquin@gmail.com |
f5792eceda5ffb1389886679e52cd15a6aa98aec | 8161e5820ff5ce6d780ff58b428b760559c68838 | /py_bing_search/httputil.py | 5bf4f083276f517a2481001f038230da07b7524b | [
"MIT"
] | permissive | trusty/py-bing-search | 02f8e4760e72d75ae33985a140344986e6e25efc | 9e65adb1dfb24969e3ba90bfb259fa07c85fce5a | refs/heads/master | 2021-01-15T13:05:57.868091 | 2016-06-30T07:58:12 | 2016-06-30T07:58:12 | 49,337,988 | 0 | 1 | null | 2016-06-30T07:58:12 | 2016-01-09T19:23:28 | Python | UTF-8 | Python | false | false | 373 | py | import requests
import threading
def get_requests_session():
    """Return the requests.Session bound to the current thread, creating it on first use."""
    tld = get_requests_session.tld
    try:
        return tld.requests_session
    except AttributeError:
        # First call on this thread: lazily create and cache a session.
        tld.requests_session = requests.Session()
        return tld.requests_session

get_requests_session.tld = threading.local()
| [
"asj@vulcantechsoftware.com"
] | asj@vulcantechsoftware.com |
dc30cb6c2b1ae28d96b3d07b74b0dd419ceb9c5b | 34a2bad3033faba30cfa21d604291e5856020ee3 | /polint/q7.py | 72d0782862c0843707631d69f1ea58bc0a85eef0 | [] | no_license | karnatyrohit/computer_methods_assignment | 79b063935381b9e2406935fd8fbc1bbc7995ebed | bed9782f956c94f11ee37e47fbbc5cffad992436 | refs/heads/master | 2021-01-10T09:40:28.324788 | 2015-12-13T15:37:17 | 2015-12-13T15:37:17 | 47,776,469 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 895 | py | from scipy import *
from matplotlib.pyplot import *
from q1 import *
def f(x):
    """Evaluate sin(pi*x) / sqrt(1 - x**2) elementwise on the array x.

    The previous version allocated `zeros(x.shape)` and immediately
    overwrote it; that dead store has been removed.
    """
    # Undefined for |x| >= 1 (division by zero); callers keep x in (0, 1).
    return sin(pi * x) / sqrt(1 - x * x)
# 17 sample points of f on [0.1, 0.9] used as interpolation support points.
x = linspace(0.1,0.9,17)
#y = [f(x1) for x1 in x]
y = f(x)
figure(3)
plot(x, y)
# Maximum actual / estimated interpolation errors, indexed by order n.
max_exact=zeros(21)
max_est=zeros(21)
#n=3 # order of interpolation
#xarr=linspace(0,1,30)
#yarr=sin(xarr+xarr*xarr);
#t=linspace(0,pi,111)
# For each interpolation order, compare polint's interpolant (and its error
# estimate dyy) against the exact function on a dense grid.
for n in range(5,16):
    delta = 0
    xx=linspace(0.1- delta,0.9 + delta,1000)
    #xx=array([-pi])
    z=polint(x,y,xx,n)
    yy=z[0];dyy=z[1]
    y0=f(xx)
    figure(0)
    plot(xx,yy,'ro',xx,y0,'k')
    title("Interpolation by %dth order polynomial" % n)
    figure(1)
    semilogy(xx,abs(yy-y0),'ro',xx,abs(dyy),'k')
    title("Error in interpolation")
    legend(["Actual error","Error Est"])
    max_est[n]=amax(abs(dyy))
    max_exact[n]=amax(abs(yy-y0))
    figure("err")
    semilogy(n,max_exact[n],'ro',n,max_est[n],'bo')
    legend(["Actual error","Error Est"])
show()
| [
"karnaty.rohit@gmail.com"
] | karnaty.rohit@gmail.com |
927ca15778a1bf9acbf23fb2734e6a222231aeb2 | 7404a76d7c9820c77db6f73e6b37f81f6d7e40e4 | /preprocess_dreambox_ts_file.py | 23d9df49d12f5c69a4e6112aceb8d7374b09e998 | [] | no_license | svenXY/video_stuff | 2638eeb7d97890ec701b7c47cfdc8d7fb4931765 | 711d1f9a150b288c8cb5b5dfa6b2bf4c7a6ef9a7 | refs/heads/master | 2021-01-13T01:36:17.323282 | 2013-09-27T14:08:35 | 2013-09-27T14:08:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,554 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
# Name : preprocess_dreambox_ts_file.py
# Description : rename a ts and ts.meta file according to it's metadata
and place them in a properly named subdirectory for
further processing
# Author : Sven Hergenhahn
'''
import os
import sys
import argparse
class ExistsError(Exception):
    """Raised when a destination file already exists and must not be clobbered."""

    def __init__(self, file):
        self.msg = "File exists already: %s" % file
        # Initialize the Exception base so e.args / generic handlers see
        # the message too (the original skipped this, leaving args empty).
        super(ExistsError, self).__init__(self.msg)

    def __str__(self):
        return self.msg
def get_movie_name(meta_file):
    """Derive a filesystem-safe movie name from a dreambox .ts.meta file.

    The second line of the meta file is the recording name and the third
    its description; if they differ, both are joined with '-'.  Spaces are
    replaced by '_' and '/' by '-' so the result can be used as a file or
    directory name.  Exits the program if the meta file cannot be read.

    Note: fewer than three lines in the meta file raises ValueError on
    unpacking (unchanged from the original behavior).
    """
    try:
        with open(meta_file) as meta:
            (name, description) = meta.readlines()[1:3]
    # 'as' syntax and parenthesized print keep this valid on both
    # Python 2.6+ and Python 3 (the original 'except IOError, e' was py2-only).
    except IOError as e:
        print("Problem with meta file: %s" % e)
        sys.exit(1)
    # Compare the raw lines (including trailing newlines) exactly as read,
    # then strip whitespace only when building the final name.
    if name != description:
        name = name.rstrip() + '-' + description.rstrip()
    else:
        name = name.rstrip()
    name = name.replace(' ', '_')
    name = name.replace('/', '-')
    print("Movie name taken from meta file: %s" % name)
    return name
def restructure(tsfile, meta_file, movie_name, force=False):
    """Move tsfile and meta_file into a directory named movie_name.

    Both files are renamed to <movie_name>.ts and <movie_name>.ts.meta
    inside that directory.  If either destination already exists, the pair
    is skipped unless force is True, in which case it is overwritten.
    """
    try:
        os.mkdir(movie_name)
    except OSError as e:
        if e.errno == 17:  # 17 == errno.EEXIST: reuse the existing directory
            print("Directory %s exists, continuing anyway" % movie_name)
        else:
            # The original silently swallowed every other OSError (e.g.
            # permission denied), deferring the failure to a confusing
            # rename error later; surface it here instead.
            raise
    try:
        for ext in ('.ts', '.ts.meta'):
            dest = os.path.join(movie_name, movie_name + ext)
            if os.path.exists(dest):
                raise ExistsError(dest)
    except ExistsError as e:
        if force:
            print("--force is set, overwriting files")
        else:
            print(e)
            return
    os.rename(tsfile, os.path.join(movie_name, movie_name + '.ts'))
    os.rename(meta_file, os.path.join(movie_name, movie_name + '.ts.meta'))
if __name__ == '__main__':
    # Command-line entry point: accept one or more .ts recordings and
    # restructure each into its own properly named directory.
    parser = argparse.ArgumentParser(usage='%(prog)s [options] <movie.ts> [<movie2.ts>, ...]')
    parser.add_argument("--force", '-f', help="Overwrite files in destination directory", action="store_true")
    # REMAINDER collects every trailing positional as a TS file path.
    parser.add_argument("tsfile", help='one or more TS files', nargs=argparse.REMAINDER)
    args = parser.parse_args()
    if not args.tsfile:
        parser.error('Missing TS file(s)')
    for tsfile in args.tsfile:
        # Skip (rather than abort on) anything that is not a .ts file so a
        # batch invocation can still process the remaining arguments.
        if not tsfile.endswith('.ts'):
            parser.print_usage()
            print 'File %s is not a TS file. Skipping.' % tsfile
            continue
        print "## Processing %s" % tsfile
        # Dreambox keeps the recording metadata next to the .ts file.
        meta_file = tsfile + '.meta'
        movie_name = get_movie_name(meta_file)
        restructure(tsfile, meta_file, movie_name, force=args.force)
| [
"sven@hergenhahn-web.de"
] | sven@hergenhahn-web.de |
138d463273707cc372256d3eec3ca9c5d5fe1e11 | 590ca008f00869276f29ac6d3e1da6d82778584b | /api/migrations/0029_auto_20200914_0235.py | 4a0c74b45fc6b6bf5a3ddb60b84a2c1120fb68ac | [] | no_license | sali73/Donuts_App | 92b1226cf97968481c7be15588204c3aee6151e3 | 75911b8523cbe0792c7550466f1cedeba7ae01a6 | refs/heads/master | 2023-01-13T07:43:25.681619 | 2020-11-26T01:51:39 | 2020-11-26T01:51:39 | 293,178,428 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 381 | py | # Generated by Django 3.0.7 on 2020-09-14 02:35
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django's makemigrations; avoid hand-editing a
    # migration that may already be applied in any environment.

    dependencies = [
        ('api', '0028_auto_20200914_0231'),
    ]

    operations = [
        migrations.AlterField(
            model_name='product',
            name='qty',
            # NOTE(review): default is the *string* '0' on an IntegerField.
            # Django coerces it, but presumably the model declares
            # default=0 -- confirm against api.models.Product before
            # regenerating or editing this migration.
            field=models.IntegerField(default='0'),
        ),
    ]
| [
"sally.elgendy@hotmail.com"
] | sally.elgendy@hotmail.com |
c1337933143e4be73f495569475dbf98d651bfac | f0b52a3ae5115b9a839d6bd3e765de83ecb21a28 | /Payload_Type/Apollo/mythic/agent_functions/net_localgroup_member.py | 6b2fad53fcf068ef12c142ebdcfed4c9d96d878c | [
"BSD-3-Clause",
"MIT"
] | permissive | bopin2020/Apollo | ad98f1cb872bd2134509df55ee67a79c51e6d316 | 7660439cbc8d4f18af2b564a5b7a0ac4f8f3765a | refs/heads/master | 2023-01-12T23:50:01.266984 | 2020-11-12T07:03:13 | 2020-11-12T07:03:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,699 | py | from CommandBase import *
import json
class NetLocalgroupMemberArguments(TaskArguments):
    """Argument parser for net_localgroup_member.

    Accepts either a JSON blob or a space-separated command line of the
    form ``[computer] group``; ``computer`` is optional, ``group`` required.
    """

    def __init__(self, command_line):
        super().__init__(command_line)
        self.args = {
            "computer": CommandParameter(name="computer", required=False, type=ParameterType.String, description="Computer to enumerate."),
            "group": CommandParameter(name="group", type=ParameterType.String, description="Group to enumerate.")
        }

    def split_commandline(self):
        """Split self.command_line on spaces, honoring single/double quotes.

        Quoted segments may contain spaces; matching surrounding quotes are
        stripped from each token.  Raises if command_line is a JSON object.
        """
        # [:1] slicing avoids the IndexError the original hit on an empty
        # command line.
        if self.command_line[:1] == "{":
            raise Exception("split_commandline expected string, but got JSON object: " + self.command_line)
        cmds = []
        cur = ""
        in_quotes = False
        for c in self.command_line:
            if c == '"' or c == "'":
                in_quotes = not in_quotes
            if not in_quotes and c == ' ':
                # Only emit non-empty tokens: the original appended "" for
                # consecutive spaces and then crashed on cmds[x][0] below.
                if cur != "":
                    cmds.append(cur)
                cur = ""
            else:
                cur += c
        if cur != "":
            cmds.append(cur)
        # Strip matching surrounding quotes from each token.
        for i in range(len(cmds)):
            if cmds[i][0] == '"' and cmds[i][-1] == '"':
                cmds[i] = cmds[i][1:-1]
            elif cmds[i][0] == "'" and cmds[i][-1] == "'":
                cmds[i] = cmds[i][1:-1]
        return cmds

    async def parse_arguments(self):
        """Populate args from a JSON blob or a 1-2 token command line."""
        if self.command_line[:1] == "{":
            self.load_args_from_json_string(self.command_line)
        else:
            cmds = self.split_commandline()
            if len(cmds) == 1:
                self.add_arg("group", cmds[0])
            elif len(cmds) == 2:
                self.add_arg("computer", cmds[0])
                self.add_arg("group", cmds[1])
            else:
                raise Exception("Expected one or two arguments, but got: {}".format(cmds))
class NetLocalgroupMemberCommand(CommandBase):
    """Mythic command definition for 'net_localgroup_member'.

    Declarative metadata consumed by the Mythic framework; enumerates the
    members of a local group on the target (or a remote computer).
    """
    cmd = "net_localgroup_member"
    needs_admin = False
    help_cmd = "net_localgroup_member [computer] [group]"
    description = "Retrieve local group membership of the group specified by [group]. If [computer] is omitted, defaults to localhost."
    version = 1
    is_exit = False
    is_file_browse = False
    is_process_list = False
    is_download_file = False
    is_upload_file = False
    is_remove_file = False
    author = "@djhohnstein"
    # Argument parsing is delegated to the companion TaskArguments class.
    argument_class = NetLocalgroupMemberArguments
    attackmapping = []
    # Client-side rendering of agent output is handled by this browser script.
    browser_script = BrowserScript(script_name="net_localgroup_member", author="@djhohnstein")

    async def create_tasking(self, task: MythicTask) -> MythicTask:
        """No server-side transformation needed; forward the task unchanged."""
        return task

    async def process_response(self, response: AgentResponse):
        """Agent responses require no extra server-side processing."""
        pass
"djhohnstein@gmail.com"
] | djhohnstein@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.