blob_id
stringlengths
40
40
directory_id
stringlengths
40
40
path
stringlengths
3
288
content_id
stringlengths
40
40
detected_licenses
listlengths
0
112
license_type
stringclasses
2 values
repo_name
stringlengths
5
115
snapshot_id
stringlengths
40
40
revision_id
stringlengths
40
40
branch_name
stringclasses
684 values
visit_date
timestamp[us]date
2015-08-06 10:31:46
2023-09-06 10:44:38
revision_date
timestamp[us]date
1970-01-01 02:38:32
2037-05-03 13:00:00
committer_date
timestamp[us]date
1970-01-01 02:38:32
2023-09-06 01:08:06
github_id
int64
4.92k
681M
star_events_count
int64
0
209k
fork_events_count
int64
0
110k
gha_license_id
stringclasses
22 values
gha_event_created_at
timestamp[us]date
2012-06-04 01:52:49
2023-09-14 21:59:50
gha_created_at
timestamp[us]date
2008-05-22 07:58:19
2023-08-21 12:35:19
gha_language
stringclasses
147 values
src_encoding
stringclasses
25 values
language
stringclasses
1 value
is_vendor
bool
2 classes
is_generated
bool
2 classes
length_bytes
int64
128
12.7k
extension
stringclasses
142 values
content
stringlengths
128
8.19k
authors
listlengths
1
1
author_id
stringlengths
1
132
d8cd8bc92697bfc2939abe105712ee2065fda300
85574bab97569bae7368dc4e2d2aa73c73743a9b
/MultirateDSP/Lectures/Examples/11_MultirateSigProcTransfasFB/Transform Matrix.py
239810763f195419050739997bd2e31ea965b71f
[]
no_license
saradhimpardha/UdemyDSPFromGroundUpOnARMProcessors
3c0fcd7272e892f222871dc412fc214851477aea
576d4a38992533ed0733278d6b4b6444db58706b
refs/heads/main
2023-05-04T15:45:30.184864
2021-05-28T14:40:46
2021-05-28T14:40:46
458,248,148
1
0
null
null
null
null
UTF-8
Python
false
false
3,573
py
# coding: utf-8 # # Python example for Transform matrix: # ## Take a DFT of size N =4 . Its transform matrix T can be obtained with # In[1]: import numpy as np import warnings warnings.filterwarnings('ignore') T = np.fft.fft(np.eye(4)) T # **Observe that we have complex values in the transform, and hence obtain complex valued filters. To evaluate complex valued filters, we need the full circle in the frequency domain, from 0 to 2pi.** # # **If we want to obtain the frequency response of subband k=1 of this DFT filter bank, we take the second column, time-reverse it, and plot the frequency response with freqz. Because Python starts indexing at 1, we need the index 2 for the subband k. In Python, we use "fft", which stands for Fast Fourier Transform, the fast implementation of the DFT, and command "freqz" uses the fft function internally,** # In[2]: from Freqz import freqz freqz(np.flipud(T[:,1]), whole=True) # Observe: We have a bad stopband attenuation (less than 20dB). # # Also observe: The frequency axis is going from 0 to 2 (instead of just 1). This is because we have a complex impulse response. The normalized frequency 2 is the sampling frequency 2 π . Since we have a 2 π periodic frequency, this is identical to frequency 0, and the frequencies from 1 to 2 can also be seen as the negative frequencies from -1 to 0. This shows that this filter has a pass band at the positive frequencies, but not at the negative frequencies. # The equivalent passband at the negative frequencies is obtained from subband k=3, # In[3]: freqz(np.flipud(T[:,3]), whole=True) # Observe: This looks like the frequency mirrored version of the filter for k=1. This also shows how to separate positive and negative frequencies. # --- # # Example Transform as Filter Bank: # Now we show in an example that the **transform** is indeed a special case of **a critically sampled filter bank** with the above computed filters. 
# ## Take the example signal of length 8: # In[4]: x = np.sin(2 * np.pi/8 * np.arange(8)) # ## and its decomposition into blocks of length 4: # In[5]: xm = np.zeros((2,4)) xm[0,:] = x[0:4] xm[1,:] = x[4:8] xm # In[6]: T = np.fft.fft(np.eye(4)) T # ## We obtain the transformed blocks with: # In[7]: yt = np.dot(xm, T) np.around(yt, decimals=4) # Here, each row contains the spectrum of each corresponding block. # # Now we process the input signal **`x`** through a critically sampled filter bank with the equivalent filter impulse responses, the transform matrix columns, flipped up-down, and down-sampled with the last phase of the blocks, $n_0 =N −1=3$ (as it appeared in our derivation of the equivalent impulse responses), # In[8]: import scipy.signal as sp y = np.zeros((2,4), dtype=complex) for k in range(4): # in Octave: downsample(filter( flipud(T(:,k)),1,x).T,4,3) # Downsampling by 4 with an offset of 3 samples(indexed by 0, 1, & 2) y[:,k] = (sp.lfilter(np.flipud(T[:,k]).T, 1, x))[3::4] # Set the complex terms(real or imaginary) less than 10^(-4) as zero np.imag(y)[np.abs(np.imag(y))<= 1e-4], np.real(y)[np.abs(np.real(y))<= 1e-4] = 0, 0 #Display the result in y np.around(y, decimals=4) # We can see that **`yt`** from the **transform** and **`y`** from the critically sampled **filter bank** are indeed the **same**! # # **In conclusion**: We see that a **transform** is a special case of a filter bank. The tool of reading out the impulse responses from a transform matrix allows us to **analyze the resulting filters**, and to judge if they fulfill our requirements.
[ "franco.polo@ii-vi.com" ]
franco.polo@ii-vi.com
77e3dc899fe559151fd510ac34830257a83d6ac5
e6b4cc91077b1ab227a430ef2d6fdce1b4f30529
/blog_tastypie/myapp/api.py
41dbe547923a8dc3bd76ae6189dd555cfe437d09
[]
no_license
werberth/tastypie-tutorial
71bd5683f664b97edda448650ff3d78f17effb06
89eacd78a76787d4d5676ce1a3efe8a97ab27c17
refs/heads/master
2021-04-29T17:45:18.910239
2018-02-15T23:05:12
2018-02-16T17:53:39
121,677,319
0
0
null
null
null
null
UTF-8
Python
false
false
611
py
from django.contrib.auth.models import User
from tastypie import fields
from tastypie.resources import ModelResource
from blog_tastypie.myapp.models import Entry


class UserResource(ModelResource):
    """Read-only REST resource exposing Django auth users at /user/.

    Sensitive and internal fields are excluded from the serialized output,
    and only GET is allowed.
    """
    class Meta:
        queryset = User.objects.all()
        resource_name = 'user'
        # Never expose credentials or permission flags over the API.
        excludes = [
            'email', 'password', 'is_active', 'is_staff', 'is_superuser'
        ]
        allowed_methods = ['get']


class EntryResource(ModelResource):
    """REST resource for blog Entry objects at /entry/.

    The author is serialized as a link to the related UserResource
    (Entry.user foreign key).
    """
    user = fields.ForeignKey(UserResource, 'user')

    class Meta:
        queryset = Entry.objects.all()
        resource_name = 'entry'
[ "werberthvinicius@gmail.com" ]
werberthvinicius@gmail.com
b4952a963d7ea011b62b131d19cfd7d814cac378
862631b5d9cf251c7067261a8ff5f3a40d382378
/test_tkinter.py
42dc0f24426f5fc8eac8e356b22422515dbe646a
[]
no_license
isakura313/codabra_neuro
ebd7ddd312cbbfd14f3e45413ac5f175c8c7f1e0
14cb6341898ef2a2d5f8608534de973dd264d89a
refs/heads/master
2021-02-07T10:47:42.799156
2020-02-29T18:12:18
2020-02-29T18:12:18
244,016,405
0
0
null
null
null
null
UTF-8
Python
false
false
681
py
from tkinter import *

# Minimal tkinter demo: a window with an entry field and a button that
# appends a label with computed powers of the entered number.
root = Tk()
root.title("Codabra Neuro")
root.geometry("300x400")


def toSquare():
    """Read an integer from the entry and display its powers 2..9.

    NOTE(review): despite the button/label naming ("square"), this computes
    result**i for i in 2..9, not just the square — confirm intended behavior.
    """
    result = int(entry_data.get())  # raises ValueError on non-integer input
    int_arr = []
    for i in range(2,10):
        res = result**i
        int_arr.append(str(res))
    s = ", "
    final_res = s.join(int_arr)  # join into one comma-separated string
    # Each click packs a NEW label under the previous ones.
    res_square = Label(root, text=final_res)
    res_square.pack()


entry_data = Entry()
entry_data.pack()
label_square = Label(root, text="Результат квадрата")
label_square.pack()
# the result is written here (label created on demand in toSquare)
# res_square.pack()
square_btn = Button(root, text = "square", command=toSquare)
square_btn.pack()
root.mainloop()
[ "isakura313@gmail.com" ]
isakura313@gmail.com
c33af6349fac4324a2cc782f576adb2d203ebb5e
15f321878face2af9317363c5f6de1e5ddd9b749
/solutions_python/Problem_116/1125.py
9b069d88d81a582746184f915ca61375d37c3487
[]
no_license
dr-dos-ok/Code_Jam_Webscraper
c06fd59870842664cd79c41eb460a09553e1c80a
26a35bf114a3aa30fc4c677ef069d95f41665cc0
refs/heads/master
2020-04-06T08:17:40.938460
2018-10-14T10:12:47
2018-10-14T10:12:47
null
0
0
null
null
null
null
UTF-8
Python
false
false
6,162
py
#!/usr/bin/python
# Python 2 solution for a 4x4 tic-tac-toe variant ("Tic-Tac-Toe-Tomek"):
# the board holds 'X', 'O', 'T' (wildcard that counts for either player)
# and '.' (empty).  For each test case, decide whether X won, O won, the
# game is a draw, or has not completed.
# Usage: python file.py inputFilename
import sys

if len(sys.argv) != 2:
    print "Please run program: python file.py inputFilename"
    sys.exit()
try:
    f = open(sys.argv[1],'r')
    count = int(f.readline())  # number of test cases on the first line
except IOError:
    print "Input File could not be opened"
    sys.exit()
case = 1
while count > 0:
    complete = True          # False once any '.' is seen on the board
    count = count - 1
    Matrix = [[0 for x in xrange(4)] for x in xrange(4)]
    # Read the four board rows; any missing line ends input early.
    line = f.readline() # XXXT
    if not line:
        break
    if '.' in line:
        complete = False
    Matrix[0][0] = line[0]
    Matrix[0][1] = line[1]
    Matrix[0][2] = line[2]
    Matrix[0][3] = line[3]
    line = f.readline() # XXXT
    if not line:
        break
    if '.' in line:
        complete = False
    Matrix[1][0] = line[0]
    Matrix[1][1] = line[1]
    Matrix[1][2] = line[2]
    Matrix[1][3] = line[3]
    line = f.readline() # XXXT
    if not line:
        break
    if '.' in line:
        complete = False
    Matrix[2][0] = line[0]
    Matrix[2][1] = line[1]
    Matrix[2][2] = line[2]
    Matrix[2][3] = line[3]
    line = f.readline() # XXXT
    if not line:
        break
    if '.' in line:
        complete = False
    Matrix[3][0] = line[0]
    Matrix[3][1] = line[1]
    Matrix[3][2] = line[2]
    Matrix[3][3] = line[3]
    #print Matrix
    # check Rows: a line wins when all four cells match, or exactly one cell
    # is 'T' and the other three match (and the line contains no '.').
    draw = False             # set True if two DIFFERENT winners are found
    winner = '.'
    row_i = 0
    while row_i < 4:
        test = False
        test = (Matrix[row_i][0] == Matrix[row_i][1] and Matrix[row_i][1] == Matrix[row_i][2] and Matrix[row_i][2] == Matrix[row_i][3] and Matrix[row_i][0] != '.')
        test = test or (Matrix[row_i][0] == 'T' and Matrix[row_i][1] == Matrix[row_i][2] and Matrix[row_i][2] == Matrix[row_i][3] and Matrix[row_i][3] != '.')
        test = test or (Matrix[row_i][0] == Matrix[row_i][2] and Matrix[row_i][1] == 'T' and Matrix[row_i][2] == Matrix[row_i][3] and Matrix[row_i][0] != '.')
        test = test or (Matrix[row_i][0] == Matrix[row_i][1] and Matrix[row_i][2] == 'T' and Matrix[row_i][1] == Matrix[row_i][3] and Matrix[row_i][3] != '.')
        test = test or (Matrix[row_i][0] == Matrix[row_i][1] and Matrix[row_i][1] == Matrix[row_i][2] and Matrix[row_i][3] == 'T' and Matrix[row_i][2] != '.')
        if test:
            if winner == '.':
                # First winning line: record the winning symbol (skip 'T').
                if Matrix[row_i][0] != 'T':
                    winner = Matrix[row_i][0]
                else:
                    winner = Matrix[row_i][3]
            else:
                # Another winning line: flag a conflict if the symbol differs.
                if Matrix[row_i][0] != 'T':
                    temp = Matrix[row_i][0]
                else:
                    temp = Matrix[row_i][3]
                if temp != winner:
                    draw = True
                    break
        #print "Row " + winner
        row_i = row_i + 1
    # check columns (same five-condition pattern as rows)
    if not draw:
        col_i = 0
        while col_i < 4:
            test = False
            test = (Matrix[0][col_i] == Matrix[1][col_i] and Matrix[1][col_i] == Matrix[2][col_i] and Matrix[2][col_i] == Matrix[3][col_i] and Matrix[0][col_i] != '.')
            test = test or (Matrix[0][col_i] == 'T' and Matrix[1][col_i] == Matrix[2][col_i] and Matrix[2][col_i] == Matrix[3][col_i] and Matrix[1][col_i] != '.')
            test = test or (Matrix[0][col_i] == Matrix[2][col_i] and Matrix[1][col_i] == 'T' and Matrix[2][col_i] == Matrix[3][col_i] and Matrix[0][col_i] != '.')
            test = test or (Matrix[0][col_i] == Matrix[1][col_i] and Matrix[2][col_i] == 'T' and Matrix[1][col_i] == Matrix[3][col_i] and Matrix[0][col_i] != '.')
            test = test or (Matrix[0][col_i] == Matrix[1][col_i] and Matrix[1][col_i] == Matrix[2][col_i] and Matrix[3][col_i] == 'T' and Matrix[0][col_i] != '.')
            if test:
                if winner == '.':
                    if Matrix[0][col_i] != 'T':
                        winner = Matrix[0][col_i]
                    else:
                        winner = Matrix[3][col_i]
                else:
                    if Matrix[0][col_i] != 'T':
                        temp = Matrix[0][col_i]
                    else:
                        temp = Matrix[3][col_i]
                    if temp != winner:
                        draw = True
                        break
            #print "Column " + winner
            col_i = col_i + 1
    # check daigonals (main diagonal, then anti-diagonal)
    if not draw:
        test = False
        test = (Matrix[0][0] == Matrix[1][1] and Matrix[1][1] == Matrix[2][2] and Matrix[2][2] == Matrix[3][3] and Matrix[0][0] != '.')
        test = test or (Matrix[0][0] == 'T' and Matrix[1][1] == Matrix[2][2] and Matrix[2][2] == Matrix[3][3] and Matrix[1][1] != '.')
        test = test or (Matrix[0][0] == Matrix[2][2] and Matrix[1][1] == 'T' and Matrix[2][2] == Matrix[3][3] and Matrix[0][0] != '.')
        test = test or (Matrix[0][0] == Matrix[1][1] and Matrix[2][2] == 'T' and Matrix[1][1] == Matrix[3][3] and Matrix[0][0] != '.')
        test = test or (Matrix[0][0] == Matrix[1][1] and Matrix[1][1] == Matrix[2][2] and Matrix[3][3] == 'T' and Matrix[0][0] != '.')
        if test:
            if winner == '.':
                if Matrix[0][0] != 'T':
                    winner = Matrix[0][0]
                else:
                    winner = Matrix[3][3]
            else:
                if Matrix[0][0] != 'T':
                    temp = Matrix[0][0]
                else:
                    temp = Matrix[3][3]
                if temp != winner:
                    draw = True
        #print "Doagonal 1 " + winner
    if not draw:
        test = False
        test = (Matrix[0][3] == Matrix[1][2] and Matrix[1][2] == Matrix[2][1] and Matrix[2][1] == Matrix[3][0] and Matrix[0][3] != '.')
        test = test or (Matrix[0][3] == 'T' and Matrix[1][2] == Matrix[2][1] and Matrix[2][1] == Matrix[3][0] and Matrix[2][1] != '.')
        test = test or (Matrix[0][3] == Matrix[2][1] and Matrix[1][2] == 'T' and Matrix[2][1] == Matrix[3][0] and Matrix[0][3] != '.')
        test = test or (Matrix[0][3] == Matrix[1][2] and Matrix[2][1] == 'T' and Matrix[1][2] == Matrix[3][0] and Matrix[0][3] != '.')
        test = test or (Matrix[0][3] == Matrix[1][2] and Matrix[1][2] == Matrix[2][1] and Matrix[3][0] == 'T' and Matrix[0][3] != '.')
        if test:
            if winner == '.':
                if Matrix[0][3] != 'T':
                    winner = Matrix[0][3]
                else:
                    winner = Matrix[3][0]
            else:
                if Matrix[0][3] != 'T':
                    temp = Matrix[0][3]
                else:
                    temp = Matrix[3][0]
                if temp != winner:
                    draw = True
        #print "Doagonal 2 " + winner
    # Final verdict: a winner beats everything; otherwise an incomplete
    # board means the game continues, else it is a draw.
    if winner == "X":
        answer = "X won"
    if winner == "O":
        answer = "O won"
    elif winner!= 'X' and winner != 'O':
        if not complete:
            answer = "Game has not completed"
        elif draw or complete:
            answer = "Draw"
    print "Case #" + str(case) + ": " + answer
    case = case + 1
    #empty line (blank separator between boards in the input file)
    line = f.readline()
    if not line:
        break
[ "miliar1732@gmail.com" ]
miliar1732@gmail.com
8112918af873a81f416c9e43e9ef631e9c7748c3
d5c89ac3a62093f1b5e7ab892afed771bfbe88b0
/university/c9/NJURescruitment.py
15a55498c498a80a3dfc84a1f0065c2918ea56d7
[ "Apache-2.0" ]
permissive
WallfacerRZD/UniversityRecruitment-sSurvey
14b9197f5f67acd9570a66a69966f32b4262682c
2b7952fc78d7698154ce0f231f4e4eff97a54b8f
refs/heads/master
2021-08-27T22:27:39.856648
2017-12-10T15:22:30
2017-12-10T15:22:30
108,350,617
1
0
null
2017-10-26T02:19:44
2017-10-26T02:19:44
null
UTF-8
Python
false
false
2,514
py
# coding = utf-8
# Scraper for Nanjing University's campus recruitment listings.  Pages are
# fetched over HTTP, parsed with BeautifulSoup, and (date, company) pairs
# are stored in Redis via the project-local `jedis` wrapper.
import requests
from bs4 import BeautifulSoup
from jedis import jedis
from util import util

# Redis list/key name all results are saved under.
table_name = "nju_company_info"


# Fetch Nanjing University recruitment data.
def get_nju_rescruit():
    """Crawl NJU recruitment pages 1..119, then the job-fair pages.

    Clears any previous results for `table_name`, stores the freshly
    scraped entries in Redis and dumps them to a file.
    """
    print("NJU Begin===================================================")
    base_url = "http://job.nju.edu.cn:9081/login/nju/home.jsp?type=zph&DZPHBH=&sfss=sfss&zphzt=&jbksrq=&jbjsrq=&sfgq=&pageSearch=2&pageNow="
    req = requests.Session()
    header = util.get_header("job.nju.edu.cn")
    # NOTE: `re` here is a Redis client instance, not the regex module.
    re = jedis.jedis()
    re.connect_redis()
    re.clear_list(table_name)
    for i in range(1, 120):
        print(i)
        content = req.get(headers=header, url=base_url + str(i)).content.decode("utf-8")
        parse_nju_info(content, re)
    get_zph_info(req, header, re)
    re.add_university(table_name)
    re.add_to_file(table_name)
    print("NJU finish ===================================================")


# Fetch information about the large on-campus job fair.
def get_zph_info(req, header, re):
    """Crawl job-fair pages 1..9 using the shared session/headers."""
    base_url = "http://job.nju.edu.cn:9081/login/nju/home.jsp?type=dw&DZPHBH=61or8m5y-vn4s-kqae-zahn-zp4epxsp0mt4&pageNow="
    for i in range(1, 10):
        url = base_url + str(i)
        print("专场招聘会:" + str(i))
        content = req.get(headers=header, url=url).content.decode("utf-8")
        parse_zph_info(content, re)


def parse_zph_info(content, re):
    """Parse one job-fair page and save each company under a fixed date.

    NOTE(review): the date is hard-coded to "2017-11-25" — presumably the
    fair date at the time of writing; confirm before reuse.
    """
    soup = BeautifulSoup(content, "html5lib")
    company_list = soup.find_all("li")
    for item in company_list:
        info = item.text.split("\n")
        company_name = info[3].strip()
        date = "2017-11-25"
        re.save_info(table_name, date, company_name)


def parse_nju_info(content, re):
    """Parse one recruitment listing page into (date, company) records.

    The first 12 <li> elements are page chrome and are skipped; malformed
    rows raise IndexError and are logged and skipped.
    """
    soup = BeautifulSoup(content, "html5lib")
    company_list = soup.find_all("li")
    length = len(company_list)
    for i in range(12, length):
        try:
            info = company_list[i].text.split("\n")
            company_name = info[3].split("\t")[1].strip()
            # Normalize one known company name that appears with extra text.
            if company_name.find('澳门国际银行股份有限公司') != -1:
                company_name = '澳门国际银行股份有限公司'
            # Date field parts are separated by double non-breaking spaces.
            time = info[5].strip().split("\xa0\xa0")
            if len(time) == 3:
                date = time[1]
            else:
                date = time[0]
            print("南京大学:" + date + "\t" + company_name)
            re.save_info(table_name, date, company_name)
        except IndexError:
            print("error:" + company_list[i].text)
            continue


if __name__ == '__main__':
    get_nju_rescruit()
[ "maicius@outlook.com" ]
maicius@outlook.com
c1739b2ad22895d37350225e7b63c48eed95e0b0
df21c9510edb6bbd325fb432d7f47b2cc0e7afb7
/.history/Shoes/models_20200323120713.py
a75f953aa9f6b39ef9d3cf365a665663f146038e
[]
no_license
Imraj423/drf_shoestore
ff5eb00bfc890c49f2dd3fcd63bcd0479a4a674d
41421a68cba0cabb7916bbddea49f640a2ce42d6
refs/heads/master
2021-04-23T23:52:40.520524
2020-03-25T21:17:21
2020-03-25T21:17:21
250,036,753
0
0
null
null
null
null
UTF-8
Python
false
false
1,462
py
from django.db import models # Create your models here. class Manufacturer(models.Model): name = models.CharField(max_length=150) website = models.URLField(max_length=200) def __str__(self): return self.name class ShoeType(models.Model): style = models.CharField(max_length=50) def __str__(self): return self.style class ShoeColor(models.Model): RED = 'RED' ORANGE = 'ORANGE' YELLOW = 'YELLOW' GREEN = 'GREEN' BLUE = 'BLUE' INDIGO = 'INDIGO' VIOLET = 'VIOLET' WHITE = 'WHITE' BLACK = 'BLACK' COLOR_CHOICES = [ (RED, 'Red'), (ORANGE, 'Orange'), (YELLOW, 'Yellow'), (GREEN, 'Green'), (BLUE, 'Blue'), (INDIGO, 'Indigo'), (VIOLET, 'Violet'), (WHITE, 'White'), (BLACK, 'Black'), ] color = models.CharField( max_length=50, choices=COLOR_CHOICES, default=RED ) def __str__(self): return self.color class Shoe(models.Model): size = models.IntegerField() brand_name = models.CharField(max_length=50) material = models.CharField(max_length=50) fasten_type = models.CharField(max_length=50) manufacturer = models.ForeignKey( Manufacturer, on_delete=models.CASCADE ) color = models.ForeignKey( ShoeColor, on_delete=models.CASCADE ) shoe_type = models.ForeignKey( ShoeType, on_delete=models.CASCADE )
[ "dahqniss@gmail.com" ]
dahqniss@gmail.com
eed162c7d9d433e50103d017d6431475cd6f8621
d3f442a963ab75af3ed310201767b8c113886576
/webnlg/utils_graph2text.py
169f40e6e834e93d4b74eee0631f05ca9e9c45a0
[ "Apache-2.0" ]
permissive
cqsss/plms-graph2text
71afbb32c9d0042988e03ecae445ac95924e21a3
62c522c84aee00ae8aeaa465287b783eb853971e
refs/heads/master
2023-09-03T01:15:34.204916
2021-11-18T10:28:38
2021-11-18T10:28:38
null
0
0
null
null
null
null
UTF-8
Python
false
false
3,835
py
import re
import os


def convert_text(text):
    """Lowercase *text* and put one space around every non-word character.

    Example: "Hello, World!" -> "hello , world !"
    """
    #return text
    text = text.lower()
    # FIX: use a raw string for the regex — '(\W)' is an invalid escape
    # sequence and raises a SyntaxWarning on modern Python.  The capturing
    # group keeps the delimiters in the split result.
    text = ' '.join(re.split(r'(\W)', text))
    text = ' '.join(text.split())  # collapse runs of whitespace
    return text


def eval_meteor_test_webnlg(folder_data, pred_file, dataset):
    """Run METEOR (external jar) on *pred_file* and return its last line."""
    dir_path = os.path.dirname(os.path.realpath(__file__))
    folder_data_before = dir_path + "/../utils"
    cmd_string = "java -jar " + folder_data_before + "/meteor-1.5.jar " + pred_file + " " \
                 + folder_data + "/" + dataset + ".target_eval_meteor -l en -norm -r 3 > " + pred_file.replace("txt", "meteor")
    os.system(cmd_string)
    meteor_info = open(pred_file.replace("txt", "meteor"), 'r').readlines()[-1].strip()
    return meteor_info


def eval_chrf_test_webnlg(folder_data, pred_file, dataset):
    """Run chrF++ (external script) and return its two summary lines."""
    dir_path = os.path.dirname(os.path.realpath(__file__))
    folder_data_before = dir_path + "/../utils"
    cmd_string = "python " + folder_data_before + "/chrf++.py -H " + pred_file + " -R " \
                 + folder_data + "/" + dataset + ".target_eval_crf > " + pred_file.replace("txt", "chrf")
    os.system(cmd_string)
    chrf_info_1 = open(pred_file.replace("txt", "chrf"), 'r').readlines()[1].strip()
    chrf_info_2 = open(pred_file.replace("txt", "chrf"), 'r').readlines()[2].strip()
    return chrf_info_1 + " " + chrf_info_2


def eval_bleu(folder_data, pred_file, dataset):
    """Run multi-bleu.perl against the three reference files; -1 on failure."""
    dir_path = os.path.dirname(os.path.realpath(__file__))
    folder_data_before = dir_path + "/data/"
    cmd_string = "perl " + folder_data_before + "/multi-bleu.perl -lc " + folder_data + "/" + dataset + ".target_eval " \
                 + folder_data + "/" + dataset + ".target2_eval " + folder_data + "/" + dataset + ".target3_eval < " \
                 + pred_file + " > " + pred_file.replace("txt", "bleu")
    os.system(cmd_string)
    try:
        bleu_info = open(pred_file.replace("txt", "bleu"), 'r').readlines()[0].strip()
    except:
        # The bleu output file may be empty if the perl script failed.
        bleu_info = -1
    return bleu_info


def eval_bleu_sents_tok(pred_file, folder_data, dataset):
    """Tokenize predictions with tokenizer.perl, then score with multi-bleu."""
    dir_path = os.path.dirname(os.path.realpath(__file__))
    folder_data_before = dir_path + "/../utils"
    cmd_string = "perl " + folder_data_before + "/tokenizer.perl -threads 4 -no-escape < " + pred_file + " > " +\
                 pred_file + "_tok"
    os.system(cmd_string)
    cmd_string = "perl " + folder_data_before + "/multi-bleu.perl -lc " + folder_data + "/" + dataset + ".target.tok"\
                 + " < " + pred_file + "_tok" + " > " + pred_file.replace("txt", "bleu_data")
    os.system(cmd_string)
    try:
        bleu_info_data = open(pred_file.replace("txt", "bleu_data"), 'r').readlines()[0].strip()
    except:
        bleu_info_data = 'no data'
    return bleu_info_data


def eval_meteor(ref_file, pred_file):
    """METEOR against an explicit reference file; returns the summary line."""
    dir_path = os.path.dirname(os.path.realpath(__file__))
    folder_data_before = dir_path + "/../utils"
    cmd_string = "java -jar " + folder_data_before + "/meteor-1.5.jar " + pred_file + " " \
                 + ref_file + " > " + pred_file.replace("txt", "meteor")
    os.system(cmd_string)
    meteor_info = open(pred_file.replace("txt", "meteor"), 'r').readlines()[-1].strip()
    return meteor_info


def eval_chrf(ref_file, pred_file):
    """chrF++ against an explicit reference file; 'no data' on failure."""
    dir_path = os.path.dirname(os.path.realpath(__file__))
    folder_data_before = dir_path + "/../utils"
    cmd_string = "python " + folder_data_before + "/chrf++.py -H " + pred_file + " -R " \
                 + ref_file + " > " + pred_file.replace("txt", "chrf")
    os.system(cmd_string)
    try:
        chrf_info_1 = open(pred_file.replace("txt", "chrf"), 'r').readlines()[1].strip()
        chrf_info_2 = open(pred_file.replace("txt", "chrf"), 'r').readlines()[2].strip()
        chrf_data = chrf_info_1 + " " + chrf_info_2
    except:
        chrf_data = "no data"
    return chrf_data
[ "ribeiro@aiphes.tu-darmstadt.de" ]
ribeiro@aiphes.tu-darmstadt.de
4c9b85c9333e30316e2ddc81da95e00914409219
699a43917ce75b2026a450f67d85731a0f719e01
/using_python/208_trie_tree.py
e353415550d33ab4dfcd2955ad9d96a0f51e3235
[]
no_license
wusanshou2017/Leetcode
96ab81ae38d6e04739c071acfc0a5f46a1c9620b
c4b85ca0e23700b84e4a8a3a426ab634dba0fa88
refs/heads/master
2021-11-16T01:18:27.886085
2021-10-14T09:54:47
2021-10-14T09:54:47
107,402,187
0
0
null
null
null
null
UTF-8
Python
false
false
1,065
py
class TrieTree():
    """Trie built from nested dicts; the key "end" marks a complete word."""

    def __init__(self):
        # Root node of the trie.
        self.d = {}

    def insert(self, word: str):
        """Insert *word* into the trie.

        BUG FIX: the original tested ``c not in self.d`` (the ROOT node)
        instead of the current node ``t``.  That raised KeyError whenever a
        character already present at the root appeared deeper in another
        word (e.g. insert("ab") then insert("ba")), and silently skipped
        node creation in other cases.
        """
        t = self.d
        for c in word:
            if c not in t:
                t[c] = {}
            t = t[c]
        t["end"] = True

    def search(self, word: str):
        """Return True iff *word* was inserted as a complete word."""
        t = self.d
        for c in word:
            if c not in t:
                return False
            t = t[c]
        # FIX: removed leftover debug print of the internal node.
        return "end" in t

    def startswith(self, word: str):
        """Return True iff some inserted word starts with prefix *word*."""
        t = self.d
        for c in word:
            if c not in t:
                return False
            t = t[c]
        return True

    def delete(self, word: str):
        """Remove *word* if stored, pruning nodes that become empty.

        BUG FIX: the original walked to the word's last node and then
        deleted the reversed word's characters from that SAME node,
        corrupting unrelated entries.  Now we unmark "end" and prune
        empty nodes bottom-up.
        """
        nodes = [self.d]
        for c in word:
            if c not in nodes[-1]:
                return  # word not present
            nodes.append(nodes[-1][c])
        if "end" not in nodes[-1]:
            return      # stored only as a prefix, not a word
        del nodes[-1]["end"]
        for i in range(len(word) - 1, -1, -1):
            if nodes[i + 1]:
                break   # node still has children / other words
            del nodes[i][word[i]]

    def info(self):
        # Debug helper: dump the raw nested-dict structure.
        print(self.d)


trie= TrieTree()
trie.insert("apple")
# trie.insert("appbl")
trie.insert("boo")
trie.info()
print (trie.search("a"))
[ "252652905@qq.com" ]
252652905@qq.com
fb4e6b6459e6d09ed5087263ac66e7b48cd82123
0dbbbb1dc8065ed0d27d6ef30fd585eafb481e52
/port_xnn_time/load_and_pred.py
f02df8fcd69b350ac7a412f6fd9bb8eda87c822f
[]
no_license
ethan-jiang-1/ANN-HAPT
149549e60544f5b7d2759b415940957133bf4e64
d91175f2b58986146a118a006237137cf24fac3f
refs/heads/master
2023-01-03T18:50:10.027300
2020-11-02T05:22:29
2020-11-02T05:22:29
304,639,314
0
0
null
null
null
null
UTF-8
Python
false
false
2,134
py
# Load a checkpointed MLP classifier exported as a Java class and evaluate it
# on a (subsampled) test set by shelling out to `java MLPClassifier` per
# sample, then print/plot a confusion matrix.
import numpy as np
import os, sys

# Make the parent directory importable (dataset_loader, s_confusion live there)
currentdir = os.path.dirname(os.path.realpath(__file__))
parentdir = os.path.dirname(currentdir)
sys.path.append(parentdir)

# Run relative to this script's directory so cp_xnn/ paths resolve.
cdr = os.path.dirname(__file__)
if len(cdr) != 0:
    os.chdir(cdr)

from dataset_loader import load_dataset
rx_train, ry_train, rx_test, ry_test, labels, skip_ratio = load_dataset()

# Checkpoint info file: test count, feature count, skip ratio, model name.
cpi_filename = "cp_xnn/cp_info.text"
if not os.path.isfile(cpi_filename):
    print("Error no cpi_filename {}".format(cpi_filename))
    sys.exit(-1)
with open(cpi_filename, 'r') as file:
    test_num = int(file.readline().rstrip())
    feature_num = int(file.readline().rstrip())
    skip_ratio = int(file.readline().rstrip())
    model_name = file.readline()
    print("CPI", test_num, feature_num, skip_ratio, model_name)

# The exported Java classifier must exist next to the checkpoint info.
cpe_filename = "cp_xnn/MLPClassifier.class"
if not os.path.isfile(cpe_filename):
    print("Error no cpe_filename {}".format(cpe_filename))
    sys.exit(-1)

import subprocess
root_dir = os.getcwd()
working_dir = root_dir + "/cp_xnn"
os.chdir(working_dir)

# Only evaluate every 10th sample to keep the java round-trips fast.
quick_skip_ratio = 10
label_raw = []
for i in range(0, test_num, quick_skip_ratio):
    tdat = "dat/{}_{:04d}.tdat".format(feature_num, i)
    with open(tdat, "r") as tdatf:
        line = tdatf.readline()
        nums = line.split(" ")
        # ret, stdout = run_command(["java", "MLPClassifier"], nums, cwd=working_dir)
        ret = True
        # Feature values are passed as command-line arguments; the classifier
        # prints the predicted (0-based) class index.
        cmds = ["java", "MLPClassifier"]
        cmds += nums
        stdout = subprocess.check_output(cmds)
        print(i, test_num, ret, stdout, ry_test[i]-1)
        if ret:
            num_result = stdout.decode("utf-8").strip()
            if int(num_result) >= 0:
                # Dataset labels are 1-based, classifier output is 0-based.
                label = int(num_result) + 1
                label_raw.append(label)
            else:
                print("error at {} result: {} {}".format(tdat, ret, num_result))
                label_raw.append(1)
        else:
            # BUG FIX: original called ".foramt(...)" which would itself
            # raise AttributeError on this error path.
            print("error unknown at {}".format(tdat))
os.chdir(root_dir)

# Align ground truth with the subsampled predictions and report.
ry_pred = np.array(label_raw)
ry_test = ry_test[::quick_skip_ratio]

from s_confusion import print_confusion_report
print_confusion_report(ry_pred, ry_test, labels)
from s_confusion import plot_confusion
plot_confusion(ry_pred, ry_test, labels)
[ "ethanmac@mac.com" ]
ethanmac@mac.com
27475e4dd82c4907aaa8047080839df7079d8c1c
498e99bae2b0a107a4f1c8563a74470e8516f465
/common/parse_settings.py
b06519ca6f1b8e057a56a2ea0a1be4735a283337
[ "MIT" ]
permissive
xiaoxiaolulu/MagicTestPlatform
91bcf9125c4c7f254bf8aaf425b7c72ca40b7a49
dc9b4c55f0b3ace180c30b7f080eb5d88bb38fdb
refs/heads/master
2022-05-29T00:05:48.030392
2020-01-20T09:16:44
2020-01-20T09:16:44
219,256,372
5
1
MIT
2022-05-25T02:24:22
2019-11-03T05:31:53
Python
UTF-8
Python
false
false
773
py
from tornado.util import import_object

from common.storage import storage


class Settings(object):
    """Lazy accessor over the project's `settings.base` module.

    Attribute access (``settings.FOO``) resolves against the imported
    settings module; dict-valued settings are wrapped in `storage` for
    attribute-style access.
    """

    def __init__(self):
        pass

    def get_settings(self, name):
        """Look up configuration item *name*.

        :param name: configuration item name
        :return: the configuration value
        :raises Exception: if the item is not defined in settings.base
        """
        global_settings = import_object('settings.base')
        self._config = global_settings
        if hasattr(self._config, name):
            return getattr(self._config, name)
        # BUG FIX: the original had a second `elif hasattr(self._config, name)`
        # branch identical to the `if` above — unreachable dead code
        # (presumably a leftover from a planned local-settings fallback).
        raise Exception('config "%s" not exist!' % name)

    def __getattr__(self, item):
        # Wrap dict settings so nested keys read as attributes.
        setting = self.get_settings(item)
        return storage(setting) if type(setting) is dict else setting


settings = Settings()
[ "546464268@qq.com" ]
546464268@qq.com
26f41c89d89c34f30ded1f8a1a36420cc7a518b1
186216449b5834830ef979a3c9cdd89ac8e6b0e3
/backend/vr_test_5031/urls.py
fc050338642bcfd7aeceffb784e23dd4f800d730
[]
no_license
crowdbotics-apps/vr_test-5031
df28f2e5b56e08662899201eaf2146d70dae3b1e
06b0655fa3c64d0129cea0d81383b138c02345e0
refs/heads/master
2022-12-10T20:20:29.871847
2019-06-22T17:11:02
2019-06-22T17:11:02
193,259,529
0
0
null
2022-12-09T07:20:45
2019-06-22T17:10:46
Python
UTF-8
Python
false
false
1,021
py
"""vr_test_5031 URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/2.2/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: path('', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.urls import include, path 2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) """ from django.contrib import admin from django.urls import path, include urlpatterns = [ path('', include('home.urls')), path('accounts/', include('allauth.urls')), path('api/v1/', include('home.api.v1.urls')), path('admin/', admin.site.urls), ] admin.site.site_header = 'VR_Test' admin.site.site_title = 'VR_Test Admin Portal' admin.site.index_title = 'VR_Test Admin'
[ "team@crowdbotics.com" ]
team@crowdbotics.com
31e8a3bbd1b390162f599cee38aabc820177b162
0be5e52bd25f7bf78275fb7a6bc8de8597ad78b7
/chapter03-functions/guessTheNumber.py
b22287de7a99fedc4137557e5a67d581b763fe43
[ "MIT" ]
permissive
t04glovern/automatingPython
aaecf115212daf138ab1a42e8b1e806680531eb6
c5f310e2c647c6513ab31917645669e10a7853c5
refs/heads/master
2021-01-18T22:26:35.452915
2016-07-19T05:08:42
2016-07-19T05:08:42
62,141,046
1
1
null
null
null
null
UTF-8
Python
false
false
590
py
# Number-guessing game: the player has 6 tries to find a secret integer.
import random

# Inclusive range for the secret number.
lower = 1
upper = 20
# BUG FIX: the original called random.randint(1, 20), ignoring the
# lower/upper variables — changing the bounds would have desynchronized
# the secret number from the printed prompt.
secretNumber = random.randint(lower, upper)
print("I am thinking of a number between " + str(lower) + " and " + str(upper))

# Up to 6 guesses; break out early on a correct guess so guessesTaken
# records the attempt that succeeded.
for guessesTaken in range(1, 7):
    print("Take a guess")
    guess = int(input())  # raises ValueError on non-integer input
    if guess < secretNumber:
        print("Your guess is too low")
    elif guess > secretNumber:
        print("Your guess is too high")
    else:
        break

if guess == secretNumber:
    print("Good job! You guessed my number in " + str(guessesTaken) + " guesses!")
else:
    print("Nope. The number I was thinking of was " + str(secretNumber))
[ "nathan@glovers.id.au" ]
nathan@glovers.id.au
b9a692de8ede4d5a96df12f34121b3fc2ff3e44d
66313e23c7a7c4202724efb500c25b009b6a207a
/tests/test_api_example.py
7bf83f3fa6e3218c04560c387d3bb8aed48df419
[ "MIT" ]
permissive
Tomasz-Kluczkowski/Weather-App
5097a2613038321155ed2acb637ababb124b7344
560f6bb84f48618f38da50b94bb19ad73941c596
refs/heads/master
2022-12-23T09:58:05.459246
2018-01-08T22:30:50
2018-01-08T22:30:50
90,778,050
6
3
MIT
2022-11-22T01:52:27
2017-05-09T18:17:26
Python
UTF-8
Python
false
false
507
py
import pytest
from unittest import mock

from api_example import API


@pytest.fixture(scope="module")
def api_class():
    """Single shared API instance for all tests in this module."""
    api = API()
    return api


@pytest.fixture()
def mock_response():
    """Mock standing in for requests.get.

    Calling it returns `response.return_value`, whose status_code is set
    to 2100.  NOTE(review): the test below asserts 200 — presumably
    API.get_stuff normalizes/overrides the status code, or this value is
    intentionally wrong; confirm against api_example.
    """
    response = mock.Mock()
    response.return_value.status_code = 2100
    return response


def test_get_stuff(monkeypatch, api_class, mock_response):
    # Patch the requests.get used inside api_example so no network I/O occurs.
    monkeypatch.setattr("api_example.requests.get", mock_response)
    assert api_class.get_stuff().status_code == 200


if __name__ == "__main__":
    pytest.main()
[ "tomaszk1@hotmail.co.uk" ]
tomaszk1@hotmail.co.uk
93cfc322da21e171eac1ebf0353acf2ec9eca6d0
9bb83bf5f6c2b5d2da4dda711591ef9987490c66
/3DdetectionPrototype/Yolo-Pytorch-nms-updated/pruning/l1norm.py
89cac02f44b91d0e9cd7e121cd8ef6e4c5453184
[]
no_license
nudlesoup/DeepLearning
cb5b7039a9de6098194b56143d1a72a564fed1c9
336e415b0353d6e18d106f894a97d8873a55e544
refs/heads/master
2021-06-25T00:50:40.339768
2020-12-23T02:38:58
2020-12-23T02:38:58
172,002,661
2
0
null
null
null
null
UTF-8
Python
false
false
5,382
py
from pruning.BasePruner import BasePruner from pruning.Block import * from models.backbone.baseblock import InvertedResidual, conv_bn, sepconv_bn,conv_bias class l1normPruner(BasePruner): def __init__(self, Trainer, newmodel, pruneratio=0.1): super().__init__(Trainer, newmodel) self.pruneratio = pruneratio def prune(self): blocks = [None] name2layer = {} for midx, (name, module) in enumerate(self.model.named_modules()): if type(module) not in [InvertedResidual, conv_bn, nn.Linear, sepconv_bn,conv_bias]: continue idx = len(blocks) if isinstance(module, InvertedResidual): blocks.append(InverRes(name, idx, [blocks[-1]], list(module.state_dict().values()))) if isinstance(module, conv_bn): blocks.append(CB(name, idx, [blocks[-1]], list(module.state_dict().values()))) if isinstance(module, nn.Linear): blocks.append(FC(name, idx, [blocks[-1]], list(module.state_dict().values()))) if isinstance(module, sepconv_bn): blocks.append(DCB(name, idx, [blocks[-1]], list(module.state_dict().values()))) if isinstance(module, conv_bias): blocks.append(Conv(name, idx, [blocks[-1]], list(module.state_dict().values()))) name2layer[name] = blocks[-1] blocks = blocks[1:] for b in blocks: if b.layername == 'mergelarge.conv7': b.inputlayer=[name2layer['headslarge.conv4']] if b.layername == 'headsmid.conv8': b.inputlayer.append(name2layer['mobilev2.features.13']) if b.layername == 'mergemid.conv15': b.inputlayer=[name2layer['headsmid.conv12']] if b.layername == 'headsmall.conv16': b.inputlayer.append(name2layer['mobilev2.features.6']) for b in blocks: if isinstance(b, CB): pruneweight = torch.sum(torch.abs(b.statedict[0]), dim=(1, 2, 3)) numkeep = int(pruneweight.shape[0] * (1 - self.pruneratio)) _ascend = torch.argsort(pruneweight) _descend = torch.flip(_ascend, (0,))[:numkeep] mask = torch.zeros_like(pruneweight).long() mask[_descend] = 1 b.prunemask = torch.where(mask == 1)[0] if isinstance(b, InverRes): if b.numlayer == 2: b.prunemask = torch.arange(b.outputchannel) if b.numlayer == 3: 
pruneweight = torch.sum(torch.abs(b.statedict[0]), dim=(1, 2, 3)) numkeep = int(pruneweight.shape[0] * (1 - self.pruneratio)) _ascend = torch.argsort(pruneweight) _descend = torch.flip(_ascend, (0,))[:numkeep] mask = torch.zeros_like(pruneweight).long() mask[_descend] = 1 b.prunemask = torch.where(mask == 1)[0] if isinstance(b, DCB): pruneweight = torch.sum(torch.abs(b.statedict[5]), dim=(1, 2, 3)) numkeep = int(pruneweight.shape[0] * (1 - self.pruneratio)) _ascend = torch.argsort(pruneweight) _descend = torch.flip(_ascend, (0,))[:numkeep] mask = torch.zeros_like(pruneweight).long() mask[_descend] = 1 b.prunemask = torch.where(mask == 1)[0] blockidx = 0 for name, m0 in self.newmodel.named_modules(): if type(m0) not in [InvertedResidual, conv_bn, nn.Linear, sepconv_bn,conv_bias]: continue block = blocks[blockidx] curstatedict = block.statedict if (len(block.inputlayer) == 1): if block.inputlayer[0] is None: inputmask = torch.arange(block.inputchannel) else: inputmask = block.inputlayer[0].outmask elif (len(block.inputlayer) == 2): first = block.inputlayer[0].outmask second = block.inputlayer[1].outmask second+=block.inputlayer[0].outputchannel second=second.to(first.device) inputmask=torch.cat((first,second),0) else: raise AttributeError if isinstance(block, CB): # conv(1weight)->bn(4weight)->relu assert len(curstatedict) == (1 + 4) block.clone2module(m0, inputmask) if isinstance(block, DCB): # conv(1weight)->bn(4weight)->relu assert len(curstatedict) == (1 + 4 + 1 + 4) block.clone2module(m0, inputmask) if isinstance(block, InverRes): # dw->project or expand->dw->project assert len(curstatedict) in (10, 15) block.clone2module(m0, inputmask) if isinstance(block, FC): block.clone2module(m0) if isinstance(block, Conv): block.clone2module(m0,inputmask) blockidx += 1 if blockidx > (len(blocks) - 1): break print("l1 norm Pruner done") # print(name,block.outmask.shape) # for k,v in self.newmodel.state_dict().items(): # print(k,v.shape) # assert 0
[ "ameyad1995@gmail.com" ]
ameyad1995@gmail.com
8cd0dc8a0badc0b48d9f1277a147dd28fdbfe85d
a59deecc5d91214601c38bd170605d9d080e06d2
/27-iterators-and-generators/04-generator/app.py
3d498d3f72f8667c2d8f8187457450c724001da9
[]
no_license
reyeskevin9767/modern-python-bootcamp-2018
a6a3abdb911716d19f6ab516835ed1a04919a13d
d0234f10c4b8aaa6a20555348aec7e3571e3d4e7
refs/heads/master
2022-12-03T18:48:50.035054
2020-08-09T03:00:55
2020-08-09T03:00:55
286,109,881
0
0
null
null
null
null
UTF-8
Python
false
false
226
py
# * Generator def count_up_to(max): count = 1 while count <= max: yield count count += 1 counter = count_up_to(5) print(next(counter)) print(next(counter)) print(next(counter)) print(next(counter))
[ "reyeskevin9767@gmail.com" ]
reyeskevin9767@gmail.com
751cb3df98c725246e7254643151d9e12a025791
cad999eacee16dc0e001a57f50b5d8b0f4d4ebf6
/p2*.py
8354a4da33be2f8d899d2544041a0abd50d9cc97
[]
no_license
divyanarra0/pythonprogram
8694a41ba3b39eb44a94a693eac3f7f5f18b588b
10d8f59a472ccd4548771bad29be84a1a44854d8
refs/heads/master
2020-03-27T10:32:21.664657
2019-05-14T07:31:00
2019-05-14T07:31:00
146,427,260
0
0
null
null
null
null
UTF-8
Python
false
false
274
py
num = 2 #num = int(input("Enter a number: ") factorial = 1 if num < 0: print("Sorry, factorial does not exist for negative numbers") elif num == 0: print("The factorial of 0 is 1") else: for i in range(1,num + 1): factorial = factorial*i print(factorial)
[ "noreply@github.com" ]
divyanarra0.noreply@github.com
e14b35975410ed918ee3102ed48d7daa49a1ed83
4be5c172c84e04c35677f5a327ab0ba592849676
/python/interviewbit/arrays/first_missing_integer/first_missing_integer_efficient.py
376fca45765107b58a416abf20705b168acf15e2
[]
no_license
niranjan-nagaraju/Development
3a16b547b030182867b7a44ac96a878c14058016
d193ae12863971ac48a5ec9c0b35bfdf53b473b5
refs/heads/master
2023-04-06T20:42:57.882882
2023-03-31T18:38:40
2023-03-31T18:38:40
889,620
9
2
null
2019-05-27T17:00:29
2010-09-05T15:58:46
Python
UTF-8
Python
false
false
3,050
py
''' https://www.interviewbit.com/problems/first-missing-integer/ First Missing Integer Given an unsorted integer array, find the first missing positive integer. Example: Given [1,2,0] return 3, [3,4,-1,1] return 2, [-8, -7, -6] returns 1 Your algorithm should run in O(n) time and use constant space. ''' ''' Solution Outline: O(n) time, O(1) memory Consider an array, A, with only +ve integers. A way to remember if a +ve integer, x, is in the array A is by flipping A[x] to -A[x] (assuming 1 <= x <= n-1) Scan the array, registering if x is present in the array if 1 <= x <= n-1. Scan 1-N, find the first missing integer. NOTE: If the array A contains -ve and 0s as well, Bubble them to the end of the array and consider only the subarray of positive integers. NOTE: In reality, Track x by A[x-1] = -A[x-1], so if A has all [1..n], we'll know to return (n+1) Sample run 1: A: [-4, 7, 1, 2, 3, 5, 4, 2, -3] Preprocess: Bubble [-4, -3] to the end of the list A: [2, 7, 1, 2, 3, 5, 4, -4, -3] Truncate -ve numbers A: [2, 7, 1, 2, 3, 5, 4] 0 1 2 3 4 5 6 i: 0, |x| = 2 A[1] = -|A[1]| = -7 A: [2, -7, 1, 2, 3, 5, 4] i: 1, |x| = 7 A[6] = -[A[6]| = -4 A: [2, -7, 1, 2, 3, 5, -4] i: 2, |x| = 1 A[0] = -|A[0]| = -2 A: [-2, -7, 1, 2, 3, 5, -4] i: 3, |x| = 2 A[1] = -|A[1]| = -7 A: [-2, -7, 1, 2, 3, 5, -4] i: 4, |x| = 3 A[2] = -|A[2]| = -1 A: [-2, -7, -1, 2, 3, 5, -4] i: 5, |x| = 5 A[4] = -|A[4]| = -3 A: [-2, -7, -1, 2, -3, 5, -4] i: 6, |x| = 4 A[3] = -|A[3]| = -2 A: [-2, -7, -1, -2, -3, 5, -4] 0 1 2 3 4 5 6 Find missing number fromm [1-7] A[0] is -ve: 1 is in A A[1] is -ve A[2] is -ve A[3] is -ve A[4] is -ve A[5] is +ve => 6 is missing return 6 ''' class Solution: # Bubble all non-positive numbers to the right in-place # and return the count of positive numbers # in the array def bubble(self, a): j = 0 n = len(a) for i in xrange(n): if a[i] > 0: a[i], a[j] = a[j], a[i] j += 1 return j # @param a : list of integers # @return an integer def firstMissingPositive(self, a): n = len(a) # Truncate non 
+ve numbers n = self.bubble(a) for i in xrange(n): x = abs(a[i]) if x <= n: # Flip a[x-1]'s sign to hint that 'x' is in the array a[x-1] = -abs(a[x-1]) # Look for missing number between [1..n] for i in xrange(n): if a[i] > 0: return i+1 # All [1..n] are found in the list # => (n+1) is the first +ve integer missing return (n+1) if __name__ == '__main__': s = Solution() assert s.firstMissingPositive([1,2,0]) == 3 assert s.firstMissingPositive([3,4,-1,1]) == 2 assert s.firstMissingPositive([-8, -7, -6]) == 1 assert s.firstMissingPositive([1,2,3,4,5]) == 6 assert s.firstMissingPositive([1,3,2,5,0,4]) == 6 assert s.firstMissingPositive([1,5,2,4]) == 3 assert s.firstMissingPositive([6,0,1,4,3,2]) == 5 assert s.firstMissingPositive([-4, 7, 1, 2, 3, 5, 4, 2, -3]) == 6
[ "vinithepooh@gmail.com" ]
vinithepooh@gmail.com
d66c704541687ca99b889a95adb06548fafbae94
36de14c6b188886df6a284ee9ce4a464a5ded433
/Solutions/0825/0825.py
afacf0d9a45858a176d7e3671a8f9fce69478278
[]
no_license
washing1127/LeetCode
0dca0f3caa5fddd72b299e6e8f59b5f2bf76ddd8
b910ddf32c7e727373449266c9e3167c21485167
refs/heads/main
2023-03-04T23:46:40.617866
2023-02-21T03:00:04
2023-02-21T03:00:04
319,191,720
3
0
null
null
null
null
UTF-8
Python
false
false
554
py
# -*- coding:utf-8 -*- # Author: washing # DateTime: 2021/12/27 10:11 # File: 0825.py # Desc: CV class Solution: def numFriendRequests(self, ages: List[int]) -> int: n = len(ages) ages.sort() left = right = ans = 0 for age in ages: if age < 15: continue while ages[left] <= 0.5 * age + 7: left += 1 while right + 1 < n and ages[right + 1] <= age: right += 1 ans += right - left return ans
[ "1014585392@qq.com" ]
1014585392@qq.com
fe85ae6997230fb2e31fef2b1763950eb98af560
93e5b82332af9f0d3e203d086e30794fb90a2086
/unrar/unrar.py
8d645265f998ebee2702c42c88aaa8275463ed6b
[]
no_license
swell1009/ex
cfaae0b5fe917f12416170dce60f7dea8194f368
29b274fb51adbdc43af6ebecaec89c97bc58be6f
refs/heads/master
2020-04-04T10:15:20.578932
2018-11-22T06:27:30
2018-11-22T06:27:30
155,848,187
0
0
null
null
null
null
UTF-8
Python
false
false
2,017
py
# 压缩文件破解代码 # https://blog.csdn.net/qq_41262248/article/details/79316385 import os import sys # zipfile是Python标准库 import zipfile # 尝试导入扩展库unrar from unrar import rarfile def decryptRarZipFile(filename): # 根据文件扩展名,使用不同的库 if filename.endswith('.zip'): fp = zipfile.ZipFile(filename) elif filename.endswith('.rar'): fp = rarfile.RarFile(filename) # 解压缩的目标文件夹 desPath = filename[:-4] if not os.path.exists(desPath): os.mkdir(desPath) # 先尝试不用密码解压缩,如果成功则表示压缩文件没有密码 try: fp.extractall(desPath) fp.close() print('No password') return # 使用密码字典进行暴力破解 except: try: fpPwd = open('pwddict.txt') # Open dict file except: print('No dict file pwddict.txt in current directory.') return for pwd in fpPwd: pwd = pwd.rstrip() try: if filename.endswith('.zip'): for file in fp.namelist(): # 对zip文件需要重新编码再解码,避免中文乱码 fp.extract(file, path=desPath, pwd=pwd.encode()) os.rename(desPath + '\\' + file, desPath + '\\' + file.encode('cp437').decode('gbk')) print('Success! ====>' + pwd) fp.close() break elif filename.endswith('.rar'): fp.extractall(path=desPath, pwd=pwd) print('Success! ====>' + pwd) fp.close() break except: pass fpPwd.close() if __name__ == '__main__': filename = sys.argv[1] if os.path.isfile(filename) and filename.endswith(('.zip', '.rar')): decryptRarZipFile(filename) else: print('Must be Rar or Zip file')
[ "swell1009@qq.com" ]
swell1009@qq.com
e570148bb74539c9bb9e4f7155c054907ec188c5
0e4860fecfdd34a3255003cc8c8df086c14083dd
/program/algorithm/ProgrammerXiaohuiPython/src/chapter6/part4/AStar.py
5a9d918f35d886cf2e2d469ba38b5ab5095fdd6c
[]
no_license
anzhihe/learning
503ab9a58f280227011da5eaa4b14b46c678e6f3
66f7f801e1395207778484e1543ea26309d4b354
refs/heads/master
2023-08-08T11:42:11.983677
2023-07-29T09:19:47
2023-07-29T09:19:47
188,768,643
1,443
617
null
2023-08-24T02:10:34
2019-05-27T04:04:10
Python
UTF-8
Python
false
false
3,581
py
def a_star_search(start, end): # 待访问的格子 open_list = [] # 已访问的格子 close_list = [] # 把起点加入open_list open_list.append(start) # 主循环,每一轮检查一个当前方格节点 while len(open_list) > 0: # 在open_list中查找 F值最小的节点作为当前方格节点 current_grid = find_min_gird(open_list) # 当前方格节点从openList中移除 open_list.remove(current_grid) # 当前方格节点进入 closeList close_list.append(current_grid) # 找到所有邻近节点 neighbors = find_neighbors(current_grid, open_list, close_list) for grid in neighbors: # 邻近节点不在openList中,标记父亲、G、H、F,并放入openList grid.init_grid(current_grid, end) open_list.append(grid) # 如果终点在openList中,直接返回终点格子 for grid in open_list: if (grid.x == end.x) and (grid.y == end.y): return grid # openList用尽,仍然找不到终点,说明终点不可到达,返回空 return None def find_min_gird(open_list=[]): temp_grid = open_list[0] for grid in open_list: if grid.f < temp_grid.f: temp_grid = grid return temp_grid def find_neighbors(grid, open_list=[], close_list=[]): grid_list = [] if is_valid_grid(grid.x, grid.y-1, open_list, close_list): grid_list.append(Grid(grid.x, grid.y-1)) if is_valid_grid(grid.x, grid.y+1, open_list, close_list): grid_list.append(Grid(grid.x, grid.y+1)) if is_valid_grid(grid.x-1, grid.y, open_list, close_list): grid_list.append(Grid(grid.x-1, grid.y)) if is_valid_grid(grid.x+1, grid.y, open_list, close_list): grid_list.append(Grid(grid.x+1, grid.y)) return grid_list def is_valid_grid(x, y, open_list=[], close_list=[]): # 是否超过边界 if x < 0 or x >= len(MAZE) or y < 0 or y >= len(MAZE[0]): return False # 是否有障碍物 if MAZE[x][y] == 1: return False # 是否已经在open_list中 if contain_grid(open_list, x, y): return False # 是否已经在closeList中 if contain_grid(close_list, x, y): return False return True def contain_grid(grids, x, y): for grid in grids: if (grid.x == x) and (grid.y == y): return True return False class Grid: def __init__(self, x, y): self.x = x self.y = y self.f = 0 self.g = 0 self.h = 0 self.parent = None def init_grid(self, parent, end): self.parent = parent self.g = parent.g + 1 self.h = abs(self.x - end.x) + abs(self.y - end.y) self.f = 
self.g + self.h # 迷宫地图 MAZE = [ [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0] ] # 设置起点和终点 start_grid = Grid(2, 1) end_grid = Grid(2, 5) # 搜索迷宫终点 result_grid = a_star_search(start_grid, end_grid) # 回溯迷宫路径 path = [] while result_grid is not None: path.append(Grid(result_grid.x, result_grid.y)) result_grid = result_grid.parent # 输出迷宫和路径,路径用星号表示 for i in range(0, len(MAZE)): for j in range(0, len(MAZE[0])): if contain_grid(path, i, j): print("*, ", end='') else: print(str(MAZE[i][j]) + ", ", end='') print()
[ "anzhihe1218@gmail.com" ]
anzhihe1218@gmail.com
b520402a863f0a03e2b8838f352802afe71d727e
bf772ff5616774dfbcace27a9d80ad318409d046
/codenode/twisted/plugins/python.py
3bd798bd6ddd2a46beb468ceedd5c2fee7a86f8a
[ "BSD-3-Clause", "LicenseRef-scancode-unknown-license-reference" ]
permissive
jamescasbon/codenode
c51db69f61659ab0459af8eabcb7f2ef04b5df2d
f6402cc5a6405cd906e2c2751a970f3a644a9c30
refs/heads/master
2020-12-25T02:25:28.149849
2010-11-15T18:52:37
2010-11-15T18:52:37
248,694
1
0
null
null
null
null
UTF-8
Python
false
false
576
py
import os from codenode.backend.engine import EngineConfigurationBase boot = """from codenode.engine.server import EngineRPCServer from codenode.engine.interpreter import Interpreter from codenode.engine import runtime namespace = runtime.build_namespace port = runtime.find_port() server = EngineRPCServer(('localhost', port), Interpreter, namespace) runtime.ready_notification(port) server.serve_forever() """ class Python(EngineConfigurationBase): bin = 'python' args = ['-c', boot] env = os.environ path = os.path.expanduser('~') python = Python()
[ "deldotdr@gmail.com" ]
deldotdr@gmail.com
61b1e90f7e2041c22e774b010a6fc728e53f501e
3d4332662fca27b70c7d77998f535cd2325a6d16
/quiz/model/__init__.py
c7f5dbe9f9a5fb1b6e153f982571b52cdeceb5cd
[ "MIT" ]
permissive
Huangkai1008/quiz
1a3438c1e57b840f9373c558410009d6f0b8617e
3da2f7486d24b6d18a064cf8cfb50cfc0fa4ee6f
refs/heads/master
2022-12-14T18:42:16.707700
2020-03-06T13:31:50
2020-03-06T13:31:50
196,193,129
3
0
MIT
2022-12-08T10:55:28
2019-07-10T11:31:23
Python
UTF-8
Python
false
false
143
py
from .user import User from .question import Question, Answer, AnswerVote, Comment, CommentVote from .topic import Article, ArticleVote, Topic
[ "18778335525@163.com" ]
18778335525@163.com
4f658f83bdcba0be5997f78534b1972dfcd2b3a9
da4c71cef520269843aa3daf10d9cb5fcc0da644
/seleniumbase/fixtures/constants.py
1d15d0436001d1cc66ca49d9689cb87bd13548b1
[ "MIT" ]
permissive
mehmetgul/SeleniumBase
ae3e9ba45a285f7ad253170ad6f29cdf61ed775b
205aa70221e858b70a6692b190f8e44f54fcf723
refs/heads/master
2020-08-04T13:22:05.538453
2019-10-01T04:58:12
2019-10-01T04:58:12
212,150,478
0
1
MIT
2019-10-01T16:54:23
2019-10-01T16:54:23
null
UTF-8
Python
false
false
5,647
py
""" This class containts some frequently-used constants """ class Environment: # Usage Example => "--env=qa" => Then access value in tests with "self.env" QA = "qa" STAGING = "staging" DEVELOP = "develop" PRODUCTION = "production" MASTER = "master" LOCAL = "local" TEST = "test" class Files: DOWNLOADS_FOLDER = "downloaded_files" ARCHIVED_DOWNLOADS_FOLDER = "archived_files" class VisualBaseline: STORAGE_FOLDER = "visual_baseline" class JQuery: VER = "3.4.1" MIN_JS = "//cdnjs.cloudflare.com/ajax/libs/jquery/%s/jquery.min.js" % VER # MIN_JS = "//ajax.aspnetcdn.com/ajax/jQuery/jquery-%s.min.js" % VER # MIN_JS = "//ajax.googleapis.com/ajax/libs/jquery/%s/jquery.min.js" % VER class Messenger: VER = "1.5.0" MIN_CSS = ("//cdnjs.cloudflare.com/ajax/libs/" "messenger/%s/css/messenger.min.css" % VER) MIN_JS = ("//cdnjs.cloudflare.com/ajax/libs/" "messenger/%s/js/messenger.min.js" % VER) THEME_FLAT_JS = ("//cdnjs.cloudflare.com/ajax/libs/" "messenger/%s/js/messenger-theme-flat.js" % VER) THEME_FUTURE_JS = ("//cdnjs.cloudflare.com/ajax/libs/" "messenger/%s/js/messenger-theme-future.js" % VER) THEME_FLAT_CSS = ("//cdnjs.cloudflare.com/ajax/libs/" "messenger/%s/css/messenger-theme-flat.css" % VER) THEME_FUTURE_CSS = ("//cdnjs.cloudflare.com/ajax/libs/" "messenger/%s/css/messenger-theme-future.css" % VER) THEME_BLOCK_CSS = ("//cdnjs.cloudflare.com/ajax/libs/" "messenger/%s/css/messenger-theme-block.css" % VER) THEME_AIR_CSS = ("//cdnjs.cloudflare.com/ajax/libs/" "messenger/%s/css/messenger-theme-air.css" % VER) THEME_ICE_CSS = ("//cdnjs.cloudflare.com/ajax/libs/" "messenger/%s/css/messenger-theme-ice.css" % VER) SPINNER_CSS = ("//cdnjs.cloudflare.com/ajax/libs/" "messenger/%s/css/messenger-spinner.css" % VER) class Underscore: VER = "1.9.1" MIN_JS = ("//cdnjs.cloudflare.com/ajax/libs/" "underscore.js/%s/underscore-min.js" % VER) class Backbone: VER = "1.4.0" MIN_JS = ("//cdnjs.cloudflare.com/ajax/libs/" "backbone.js/%s/backbone-min.js" % VER) class BootstrapTour: VER = "0.11.0" 
MIN_CSS = ("//cdnjs.cloudflare.com/ajax/libs/" "bootstrap-tour/%s/css/bootstrap-tour-standalone.min.css" % VER) MIN_JS = ("//cdnjs.cloudflare.com/ajax/libs/" "bootstrap-tour/%s/js/bootstrap-tour-standalone.min.js" % VER) class Hopscotch: VER = "0.3.1" MIN_CSS = ("//cdnjs.cloudflare.com/ajax/libs/" "hopscotch/%s/css/hopscotch.min.css" % VER) MIN_JS = ("//cdnjs.cloudflare.com/ajax/libs/" "hopscotch/%s/js/hopscotch.min.js" % VER) class IntroJS: VER = "2.9.3" MIN_CSS = ("//cdnjs.cloudflare.com/ajax/libs/" "intro.js/%s/introjs.css" % VER) MIN_JS = ("//cdnjs.cloudflare.com/ajax/libs/" "intro.js/%s/intro.min.js" % VER) class JqueryConfirm: VER = "3.3.4" MIN_CSS = ("//cdnjs.cloudflare.com/ajax/libs/" "jquery-confirm/%s/jquery-confirm.min.css" % VER) MIN_JS = ("//cdnjs.cloudflare.com/ajax/libs/" "jquery-confirm/%s/jquery-confirm.min.js" % VER) class Shepherd: VER = "1.8.1" MIN_JS = ("//cdnjs.cloudflare.com/ajax/libs/" "shepherd/%s/js/shepherd.min.js" % VER) THEME_ARROWS_CSS = ("//cdnjs.cloudflare.com/ajax/libs/" "shepherd/%s/css/shepherd-theme-arrows.css" % VER) THEME_ARR_FIX_CSS = ("//cdnjs.cloudflare.com/ajax/libs/" "shepherd/%s/css/shepherd-theme-arrows-fix.css" % VER) THEME_DEFAULT_CSS = ("//cdnjs.cloudflare.com/ajax/libs/" "shepherd/%s/css/shepherd-theme-default.css" % VER) THEME_DARK_CSS = ("//cdnjs.cloudflare.com/ajax/libs/" "shepherd/%s/css/shepherd-theme-dark.css" % VER) THEME_SQ_CSS = ("//cdnjs.cloudflare.com/ajax/libs/" "shepherd/%s/css/shepherd-theme-square.css" % VER) THEME_SQ_DK_CSS = ("//cdnjs.cloudflare.com/ajax/libs/" "shepherd/%s/css/shepherd-theme-square-dark.css" % VER) class Tether: VER = "1.4.7" MIN_JS = ("//cdnjs.cloudflare.com/ajax/libs/" "tether/%s/js/tether.min.js" % VER) class ValidBrowsers: valid_browsers = ( ["chrome", "edge", "firefox", "ie", "opera", "phantomjs", "safari", "android", "iphone", "ipad", "remote"]) class Browser: GOOGLE_CHROME = "chrome" EDGE = "edge" FIREFOX = "firefox" INTERNET_EXPLORER = "ie" OPERA = "opera" PHANTOM_JS = 
"phantomjs" SAFARI = "safari" ANDROID = "android" IPHONE = "iphone" IPAD = "ipad" REMOTE = "remote" VERSION = { "chrome": None, "edge": None, "firefox": None, "ie": None, "opera": None, "phantomjs": None, "safari": None, "android": None, "iphone": None, "ipad": None, "remote": None } LATEST = { "chrome": None, "edge": None, "firefox": None, "ie": None, "opera": None, "phantomjs": None, "safari": None, "android": None, "iphone": None, "ipad": None, "remote": None } class State: NOTRUN = "NotRun" ERROR = "Error" FAILURE = "Fail" PASS = "Pass" SKIP = "Skip" BLOCKED = "Blocked" DEPRECATED = "Deprecated"
[ "mdmintz@gmail.com" ]
mdmintz@gmail.com
f98f0ae7ad8095b1778616da4f0e09383fa205d2
47d3e3149269277b164fecb176b5d0297d398b2e
/Python_coding_dojang/Unit 21/practice.py
5ea92bf4f5e5a6252de63048d4b18b8cedea1aa7
[]
no_license
heechul90/study-python-basic-1
325e8c81fe35cd0cd22934869413e475b6734652
82d778e5960c0bde102bdc4c52fc61f61ba27745
refs/heads/master
2022-10-31T07:03:54.213599
2022-10-24T10:54:40
2022-10-24T10:54:40
null
0
0
null
null
null
null
UTF-8
Python
false
false
469
py
### Unit 21. 터틀 그래픽스로 그림 그리기 ## 21.5 연습문제: 오각별 그리기 ## 다음 소스 코드를 완성하여 오각별이 그려지게 만드세요. ## 각 변의 길이는 100 ## 별의 꼭지점은 72도를 두 번 회전해서 144도 회전 ## 별의 다음 꼭지점을 그릴 때는 72도 회전 import turtle as t t.shape('turtle') for i in range(5): t.fd(100) t.right(360 - 72) t.fd(100) t.right(72 * 2) t.mainloop()
[ "heechul4296@gmail.com" ]
heechul4296@gmail.com
4267cfa167cf839e86c323aa0bff898ae2b2b7a4
58afefdde86346760bea40690b1675c6639c8b84
/leetcode/shuffle-string/376399352.py
10f38707e3fa99e2a8a25a199abcca25cad841c6
[]
no_license
ausaki/data_structures_and_algorithms
aaa563f713cbab3c34a9465039d52b853f95548e
4f5f5124534bd4423356a5f5572b8a39b7828d80
refs/heads/master
2021-06-21T10:44:44.549601
2021-04-06T11:30:21
2021-04-06T11:30:21
201,942,771
1
0
null
null
null
null
UTF-8
Python
false
false
350
py
# title: shuffle-string # detail: https://leetcode.com/submissions/detail/376399352/ # datetime: Wed Aug 5 16:40:38 2020 # runtime: 108 ms # memory: 13.9 MB class Solution: def restoreString(self, s: str, indices: List[int]) -> str: l = [None] * len(s) for c, i in zip(s, indices): l[i] = c return ''.join(l)
[ "ljm51689@gmail.com" ]
ljm51689@gmail.com
1b8e6b868d02f908235620ff1799906d5fcf28ee
e904bd56e931e3b06f2542f8c77e579ceaf4270b
/src/src_import.py
30caff366cef0782892e4a6cba76ddca807b0c68
[]
no_license
QMSimProject/trapped_ion_sim
5d3419cdce0373d4fb9067aded691e02c9795912
668821841530cc9055403b00e879edbbd28b297f
refs/heads/master
2021-01-01T18:08:05.632322
2015-06-25T06:17:02
2015-06-25T06:17:02
22,006,635
1
0
null
null
null
null
UTF-8
Python
false
false
342
py
#!/usr/bin/python2.7 # -*- coding: utf-8 -*- # # Author: Mario S. Könz <mskoenz@gmx.net> # Date: 01.05.2014 19:53:33 CEST # File: src_import.py # all central import that are needed in the all src-files are imported here import qutip as q import pylab as pl import numpy as np import copy import cPickle as pickle from addon import *
[ "mskoenz@gmx.net" ]
mskoenz@gmx.net
fbdd1c86e4afbd015b8a4f91b7720f00a881b949
a5829ec0c213de2489b73d2a79cd17e62e27d40c
/docs/DSDC/miniprez/miniprez/continuous_integration.py
31e76ef0dddf511d5e363ce2b9c0502413fbe8c1
[ "MIT" ]
permissive
thoppe/Presentation_Topics
13d4ea7a8dd66463f2e37023bf7e986542fd6ee0
e9aba07e9ab087b44e6044c6082ba8e873a9b4fd
refs/heads/master
2021-11-24T18:02:10.944169
2021-10-26T21:11:03
2021-10-26T21:11:03
160,071,657
0
1
MIT
2019-02-19T15:12:23
2018-12-02T17:23:32
null
UTF-8
Python
false
false
1,366
py
import asyncio import os from parser import miniprez_markdown, build_body import logging logger = logging.getLogger("miniprez") async def file_watcher(target_file, sleep_time=0.5): """ Watchs a file. If modified, yield the filename. Yield the filename once to start. """ # Yield the file first yield target_file, 0 latest_modification_time = os.path.getmtime(target_file) while True: current_time = os.path.getmtime(target_file) if current_time > latest_modification_time: delta = current_time - latest_modification_time latest_modification_time = current_time yield target_file, delta await asyncio.sleep(sleep_time) async def parser_loop(f_markdown, sleep_time=0.5): """ Main event loop. If the target file is modified, or new start a build. """ async for f_target, dt in file_watcher(f_markdown, sleep_time): build_html(f_target) def build_html(f_target): """ Build the html from the markdown. """ f_html_output = f_target.replace(".md", ".html") logger.info(f"Building {f_target} to {f_html_output}") with open(f_target) as FIN: markdown = FIN.read() html = miniprez_markdown(markdown) soup = build_body(html) with open(f_html_output, "w") as FOUT: FOUT.write(soup.prettify())
[ "travis.hoppe@gmail.com" ]
travis.hoppe@gmail.com
5c8c89e31867e31e5cba92b8c42fc023695a819c
f020d00342311f84376f8b39c09f5c0f7d6b9196
/tratum/document_manager/migrations/0023_document_template_path.py
e860d31dcb878b5e7f79f134ed39bcf19652a7c2
[]
no_license
dmontoya1/minutas
a7aa5e4a652af47b4a70e881d114e241d5bf49cc
e6447cd030e34c0fd3a980021f23624da2d2d5a3
refs/heads/master
2023-05-12T16:48:53.830140
2021-03-26T02:33:43
2021-03-26T02:33:43
371,433,520
0
0
null
null
null
null
UTF-8
Python
false
false
410
py
# Generated by Django 2.0.6 on 2018-06-21 22:38 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('document_manager', '0022_auto_20180619_1635'), ] operations = [ migrations.AddField( model_name='document', name='template_path', field=models.TextField(blank=True, null=True), ), ]
[ "dcastano@apptitud.com.co" ]
dcastano@apptitud.com.co
a3df573c4c19c7aceb0f95047d6a1a7da5d4da45
571d36f865b545c0a72134e586fbcddd6953a68b
/tools/pallette/pallette.py
3ed90c377e2e34f1b599e066a115e29e1f7b1fdb
[]
no_license
andrew-turner/Ditto
a5a79faaf31cc44d08ac5f70fa2ac51e51d1b60f
72841fc503c716ac3b524e42f2311cbd9d18a092
refs/heads/master
2020-12-24T14:19:01.164846
2015-05-20T08:42:26
2015-05-20T08:42:26
35,935,568
0
0
null
null
null
null
UTF-8
Python
false
false
3,527
py
import sys import traceback import functools from tkinter import * import tkinter.colorchooser import tkinter.filedialog import tkinter.messagebox import Image import ImageTk def myfunc(event): print("X") class app(): def __init__(self, master): self.master = master print("C") self.fn = tkinter.filedialog.askopenfilename() try: self.im = Image.open(self.fn) except IOError: tkinter.messagebox.showerror("Error", "Unable to open image: %s" % self.fn) sys.exit() return self.imageFrame = Frame(self.master) self.canvas = Canvas(self.imageFrame) self.canvas.config(bg="white", width=self.im.size[0], height=self.im.size[1]) self.canvas.bind("<Button-1>", self.onCanvasClick) self.canvas.pack(side=TOP) b = Button(self.imageFrame, text="Save", command=self.save) b.pack(side=TOP) self.imageFrame.pack(side=LEFT) self.coloursFrame = Frame(self.master) self.buttons = {} cols = getColours(self.im) if len(cols) > 50: l = Label(self.coloursFrame, text="Too many colours (%i)." % len(cols)) l.grid() else: row = 0 column = 0 for col in cols: b = Button(self.coloursFrame, text=str(col), background=convertColourString(col), command=functools.partial(self.swapColour, col)) self.buttons[col] = b b.grid(row=row, column=column, sticky=W+E) row += 1 if row > 10: row = 0 column += 1 self.coloursFrame.pack(side=LEFT) self.updateCanvas() def updateCanvas(self): self.tkIm = ImageTk.PhotoImage(self.im) self.canvas.create_image(1, 1, image=self.tkIm, anchor=NW) def swapColour(self, col): colTuple, colString = tkinter.colorchooser.askcolor(col) if colString is not None: b = self.buttons[col] b.config(background=colString, text=str(colTuple), command=functools.partial(self.swapColour, colTuple)) del self.buttons[col] self.buttons[colTuple] = b new = replaceColour(self.im, col, colTuple) self.im = new self.updateCanvas() def save(self): fn = tkinter.filedialog.asksaveasfilename(initialfile=self.fn) if fn is not None: self.fn = fn self.im.save(fn) def onCanvasClick(self, event): pos = event.x, event.y 
try: col = self.im.getpixel(pos) except IndexError: return self.swapColour(col) def convertColourString(col): return "#%02x%02x%02x" % col def getColours(im): ans = [] pix = im.load() for x in range(0, im.size[0]): for y in range(0, im.size[1]): col = pix[x,y] if not col in ans: ans.append(col) return ans def replaceColour(im, oldCol, newCol): pix = im.load() for x in range(0, im.size[0]): for y in range(0, im.size[1]): if pix[x,y] == oldCol: pix[x,y] = newCol return im if __name__ == "__main__": try: root = Tk() myApp = app(root) root.mainloop() except Exception: print("Python exception generated!") print("-"*20) traceback.print_exc(file=sys.stdout) print("-"*20) input()
[ "andrew.turner@merton.ox.ac.uk" ]
andrew.turner@merton.ox.ac.uk
a215a3acc6128563bc49d271d002ea4ae0233711
d50685a3f3d612349b1f6627ed8b807f0eec3095
/start/fayuan/selenium/fayuan_download_firefox.py
5dd44f918ecca4b522e3809e69c2a499937ea1d5
[]
no_license
Erich6917/python_littlespider
b312c5d018bce17d1c45769e59243c9490e46c63
062206f0858e797945ce50fb019a1dad200cccb4
refs/heads/master
2023-02-12T23:22:27.352262
2021-01-05T06:21:20
2021-01-05T06:21:20
113,631,826
1
0
null
null
null
null
UTF-8
Python
false
false
6,626
py
# -*- coding: utf-8 -*- # @Time : 2019/4/14 # @Author : ErichLee ErichLee@qq.com # @File : fayuan_download.py # @Comment : # import sys import re import sys import requests import json import time from selenium import webdriver from bs4 import BeautifulSoup from util.file_check_util import * from util.logger_util import * class mainAll(object): __path_source = 'output' def __init__(self): self.file_url = None # URL读取文件 self.file_error = None # 日志错误记录 self.file_log = None # 日志常规记录 try: self.init() # 初始化日志 self.scrapy_start() except Exception as e: infos('异常退出!{}'.format(e)) finally: self.the_end() print("ok,the work is done!") def scrapy_start(self): url_list = self.get_source_url() for each in url_list: init_url, url = each.split('\t')[0], each.split('\t')[1] try: # url = 'http://tingshen.court.gov.cn/live/5202044' # url = 'http://player.videoincloud.com/vod/3243925?src=gkw&cc=1' print 'telnet', url driver = self.login(url) if not driver: errors('登录失败!') return source_target = self.search_real_url(driver) if not source_target: print '获取是失败', url err_msg = '{}\t{}\n'.format(init_url, url) self.file_error.write(err_msg) self.flush() else: rt_msg = '{}\t{}\t{}\n'.format(init_url, url, source_target) self.file_url.write(rt_msg) self.file_url.flush() except Exception as e: print url, e err_msg = '{}\n'.format(each) self.file_error.write(err_msg) self.file_error.flush() continue finally: if driver: driver.close() # print url_jump # driver.get(url_jump) # time.sleep(1) # # source_target = self.search_real_url(driver) # if source_target: # print 'SUCCESS ! ! 
!', source_target # rt_msg = '{}\t{}\n'.format(url, source_target) # self.file_url.write(rt_msg) # self.file_url.flush() # else: # err_msg = '{}\n'.format(url) # self.file_error.write(err_msg) # self.flush() def search_real_url(self, driver): download_href = driver.find_element_by_xpath('' '//video[@id="index_player_html5_api"]' '') if download_href: return download_href.get_attribute('src') def search_jump_url(self, driver): # download_href = driver.find_element_by_xpath('//div[@id="container"]').text # http://player.videoincloud.com/vod/3248777?src=gkw&cc=1 download_href = driver.find_element_by_xpath('' '//div[@class="live-video-content"]' '/div' '/iframe[@id="player"]' '' ) # download_href = driver.find_element_by_xpath('//video[@id="index_player_html5_api"]/source') if download_href: return download_href.get_attribute('src') def login(self, url): driver = webdriver.Chrome() # driver.implicitly_wait(20) driver.get(url) time.sleep(1) return driver def get_source_url(self): with open('url_source.txt', 'r+') as file_url: lines = file_url.readlines() rt_list = [line.replace('\n', '') for line in lines] return rt_list def init(self): self.file_url = open('source_mp4.txt', 'a+') # URL读取文件 self.file_error = open('error_mp4.txt', 'a+') # 日志错误记录 self.file_log = None # 日志常规记录 def the_end(self): if self.file_url: self.file_url.close() if self.file_error: self.file_error.close() if self.file_log: self.file_log.close() if __name__ == '__main__': mainAll() # # def search_jump_url(url): # if True: # return # res = requests.get(url, headers=hd) # rsoup = BeautifulSoup(res.text, 'html5lib') # iframe = rsoup.select_one('iframe#player') # if iframe: # return iframe['src'] # else: # print res.text # # # def search_real_url(url): # # url = 'http://player.videoincloud.com/vod/3242311?src=gkw&cc=1' # res = requests.get(url, headers=hd_jump2) # matcher = re.findall('src:\'(.+\.mp4)', res.text) # if matcher: # return str(matcher[0]) # # # def start_download(self): # # url = 
'http://tingshen.court.gov.cn/live/5191388' # file_rt = open('url_source.txt', 'a+') # file_error = open('error.txt', 'a+') # # with open('url_telnet.txt', 'r+') as file_url: # lines = file_url.readlines() # for line in lines: # try: # url = line.replace('\n', '') # url_jump = search_jump_url(url) # if not url_jump: # file_error.write(line) # file_error.flush() # fail_msg = 'TEL ERROR \t{}'.format(url) # print fail_msg # continue # # url_real = search_real_url(url_jump) # if url_real: # msg = '{}\n'.format(url_real) # file_rt.write(msg) # file_rt.flush() # time.sleep(0.2) # else: # file_error.write(line) # file_error.flush() # fail_msg = 'TEL ERROR \t{}\t{}'.format(url, url_jump) # print fail_msg # except Exception as e: # file_error.write(line) # file_error.flush() # print 'Code err \t{}'.format(url), e
[ "1065120559@qq.com" ]
1065120559@qq.com
8429a1879154030926ba2b938b313d4f4f44f0c2
45520037aa5b1a71764a856ce96cb99c008e437e
/Section 8 Introduction to PyMongo/src/4_counting_documents.py
24d5f2b275eb37f33b5dc50dda70d0bfc2c373c6
[]
no_license
syurskyi/Learn_How_Python_Works_with_NoSql_Database_MongoDB_PyMongo
1a9fabf062b57f36e86a920584b74f350e463906
505d68b3eb16fc5e0d5dd35f64756df1369c11ba
refs/heads/master
2020-09-01T08:09:42.629734
2019-11-06T04:31:29
2019-11-06T04:31:29
218,916,529
1
0
null
null
null
null
UTF-8
Python
false
false
258
py
import pymongo from pymongo import MongoClient # connect to host and server client = MongoClient('localhost', 27017) # connect to database myFirstE db = client.myFirstE # connect to collection stud1 = db.stud results = stud1.find().count() print(results)
[ "sergejyurskyj@yahoo.com" ]
sergejyurskyj@yahoo.com
ba821ab7ffc4db06873819b566c29b3eda27c646
d67aac932b0c1a91bc064884260504e46c526839
/page_objects/CatalogPage.py
242d796358e13163213769082593fd114cf4c106
[]
no_license
maslovaleksandr/opencart_tests
8ad3abc57d2a3e5f26fa41a92fdd35825f43ebe8
a11d3cba601b1282523022cad67c19ac660a1a9a
refs/heads/master
2023-05-07T08:55:45.080737
2020-08-24T20:41:48
2020-08-24T20:41:48
284,944,354
0
1
null
2021-06-02T02:45:07
2020-08-04T10:12:14
Python
UTF-8
Python
false
false
2,021
py
from .DefaultPage import DefaultPage from ..locators.Catalog import Catalog from selenium.webdriver.support.ui import Select from ..locators.Main import Main class CatalogPage(DefaultPage): def open_phones_catalog(self): self._click(Main.main_menu.phones_and_pda) return self def sort_opened_catalog_by_name(self, param): """A_Z --- Z_A""" sort = self.select_from(Catalog.Sort.options) if param == "A_Z": sort.select_by_visible_text(Catalog.Sort.by_name_A_Z) elif param == "Z_A": sort.select_by_visible_text(Catalog.Sort.by_name_Z_A) else: raise("No such sort option") return self def sort_opened_catalog_by_price(self, param): """low_high --- high_low""" sort = self.select_from(Catalog.Sort.options) if param == "low_high": sort.select_by_visible_text(Catalog.Sort.by_price_low_high) elif param == "high_low": sort.select_by_visible_text(Catalog.Sort.by_price_high_low) else: raise("No such sort option") return self def sort_opened_catalog_by_model_name(self, param): """A_Z --- Z_A""" sort = self.select_from(Catalog.Sort.options) if param == "A_Z": sort.select_by_visible_text(Catalog.Sort.by_model_A_Z) elif param == "Z_A": sort.select_by_visible_text(Catalog.Sort.by_model_Z_A) else: raise("No such sort option") return self def change_view(self): if "active" in self._get_attr(Catalog.Navigation.grid_view, "class"): self._click(Catalog.Navigation.list_view) else: self._click(Catalog.Navigation.grid_view) return self def add_to_cart(self, index): self._click(Catalog.Buttons.add_to_cart_button, index) return self def add_wish_list(self, index): self._click(Catalog.Buttons.add_to_wish_list_button, index) return self
[ "a.mslv@outlook.com" ]
a.mslv@outlook.com
daab9dc5727a358eb8f51da3f48cb1743571831e
077c91b9d5cb1a6a724da47067483c622ce64be6
/trigger_priority_mismatch_small_mcs/replay_config.py
104c6f06cb0af0fdbcea1e53bc81c5b86fb3cdae
[]
no_license
Spencerx/experiments
0edd16398725f6fd9365ddbb1b773942e4878369
aaa98b0f67b0d0c0c826b8a1565916bf97ae3179
refs/heads/master
2020-04-03T10:11:40.671606
2014-06-11T23:55:11
2014-06-11T23:55:11
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,299
py
from config.experiment_config_lib import ControllerConfig from sts.topology import * from sts.control_flow import Replayer from sts.simulation_state import SimulationConfig from sts.input_traces.input_logger import InputLogger simulation_config = SimulationConfig(controller_configs=[ControllerConfig(start_cmd='./pox.py --verbose openflow.discovery forwarding.l2_multi forwarding.capabilities_manager sts.util.socket_mux.pox_monkeypatcher openflow.of_01 --address=__address__ --port=__port__', label='c1', address='127.0.0.1', cwd='pox')], topology_class=MeshTopology, topology_params="num_switches=2", patch_panel_class=BufferedPatchPanel, multiplex_sockets=True, kill_controllers_on_exit=True) control_flow = Replayer(simulation_config, "experiments/trigger_priority_mismatch_small_mcs/mcs.trace.notimeouts", input_logger=InputLogger(), wait_on_deterministic_values=False, allow_unexpected_messages=False, delay_flow_mods=False, pass_through_whitelisted_messages=True, invariant_check_name='check_for_flow_entry', bug_signature="123Found")
[ "cs@cs.berkeley.edu" ]
cs@cs.berkeley.edu
b4b375c2227967f88a29066916bb8465946bfb35
511f572c5c6d0e73e3f57495c065af4a3e8b3b76
/4.19.1.py
e4f436847e7b7ae74a880ac8ec310b64be6ecf16
[]
no_license
lancecopper/python_cookbook_exercises
595b1c9dea0b5197ca951f85012821f5d6a25e1f
58f76503f01941b65fa884049b2fa9589e98bc38
refs/heads/master
2021-01-01T04:39:49.823408
2016-05-08T14:34:41
2016-05-08T14:34:41
56,480,930
0
0
null
null
null
null
UTF-8
Python
false
false
1,573
py
from ply.lex import lex from ply.yacc import yacc #Token list tokens = ['NUM', 'PLUS', 'MINUS', 'TIMES', 'DIVIDE', 'LPAREN', 'RPAREN'] #Ignored characters t_ignore = ' \t\n' #Token splecifications(as regexs) t_PLUS = r'\+' t_MINUS = r'-' t_TIMES = r'\*' t_DIVIDE = r'/' t_LPAREN = r'\(' t_RPAREN = r'\)' #Token processing functions def t_NUM(t): r'\d+' t.value = int(t.value) return t #Error handler def t_error(t): print('Bad character: {!r}'.format(t.value[0])) t.skip(1) #Build the lexer lexer = lex() #Grammar rules and handler functions def p_expr(p): """ expr : expr PLUS term | expr MINUS term """ if p[2] == '+': p[0] = p[1] + p[3] elif p[2] == '-': p[0] = p[1] - p[3] def p_expr_term(p): """ expr : term """ p[0] = p[1] def p_term(p): """ term : term TIMES factor | term DIVIDE factor """ if p[2] == '*': p[0] = p[1] * p[3] elif p[2] == '/': p[0] = p[1] / p[3] def p_term_factor(p): """ term : factor """ p[0] = p[1] def p_factor(p): """ factor : NUM """ p[0] = p[1] def p_factor_group(p): """ factor : LPAREN expr RPAREN """ p[0] = p[2] def p_error(p): print('Syntax error') parser = yacc() if __name__ == '__main__': print("testing!") a=parser.parse('2') b=parser.parse('2+3') c=parser.parse('2+(3+4)*5') print(a) print(b) print(c)
[ "she47637370@gamil.com" ]
she47637370@gamil.com
bb451f850f72923c84366c71aa4b2e63ca0f862f
1c53494183e0011cfa31a31dba366a41ce5e05a7
/Section 3/git_compex_use_case/fabfile.py
92c39c61b6639a9dd353920f4585673b96d95d53
[ "MIT" ]
permissive
abdallahbouhannache/Enterprise-Automation-with-Python-
ff41031469357847de4a680f5579c205b4fba894
8b8de8b56adf7df781578ee110988d915a46cb59
refs/heads/master
2021-09-15T09:46:19.959568
2018-05-30T07:00:22
2018-05-30T07:00:22
null
0
0
null
null
null
null
UTF-8
Python
true
false
829
py
from __future__ import with_statement from fabric.api import * from fabric.contrib.console import confirm env.hosts = ['localhost'] def test(): with settings(warn_only=True): result = local('./manage.py test my_app', capture=True) if result.failed and not confirm("Tests failed. Continue anyway?"): abort("Aborting at user request.") def commit(): local("git add manage.py && git commit -m'automatic message'") def push(): local("git push") def prepare_deploy(): test() commit() push() def deploy(): code_dir = '/srv/django/myproject' with settings(warn_only=True): if run("test -d %s" % code_dir).failed: run("git clone user@vcshost:/path/to/repo/.git %s" % code_dir) with cd(code_dir): run("git pull") run("touch app.wsgi")
[ "35484449+cleonb-packt@users.noreply.github.com" ]
35484449+cleonb-packt@users.noreply.github.com
7355f43c3d17c1f39e35428d76ecdd9778bd2102
d2e634676dc1d35f129b1a5c3f4c519c80e5be38
/cross_lingual/pre_process/parse_args.py
18be617d868f508cb9b8abdc5d02ab2adb7ff165
[]
no_license
livelifeyiyi/DLworks
4442b121abb17c0a8daf73c9e23c16527584a34a
6a1d44069f33c9c5a04abe641eb0970b592459a9
refs/heads/master
2020-04-11T15:39:12.291709
2019-11-10T13:47:00
2019-11-10T13:47:00
161,898,477
0
1
null
null
null
null
UTF-8
Python
false
false
4,385
py
import os from collections import OrderedDict def parse_args(args): # Parse parameters params = OrderedDict() params['train'] = args.train params['bi_train'] = args.bi_train params['dev'] = args.dev params['test'] = args.test params['model_dp'] = args.model_dp params['tag_scheme'] = args.tag_scheme params['lower'] = args.lower == 1 params['zeros'] = args.zeros == 1 params['char_dim'] = args.char_dim params['char_lstm_dim'] = args.char_lstm_dim params['char_cnn'] = args.char_cnn params['char_conv'] = args.char_conv params['word_dim'] = args.word_dim params['word_lstm_dim'] = args.word_lstm_dim # params['pre_emb'] = args.pre_emb # params['bi_pre_emb'] = args.bi_pre_emb params['all_emb'] = args.all_emb == 1 # params['feat'] = args.feat params['crf'] = args.crf == 1 params['dropout'] = args.dropout params['tagger_learning_rate'] = args.tagger_learning_rate params['tagger_optimizer'] = args.tagger_optimizer params['dis_seq_learning_rate'] = args.dis_seq_learning_rate params['dis_seq_optimizer'] = args.dis_seq_optimizer params['mapping_seq_learning_rate'] = args.mapping_seq_learning_rate params['mapping_seq_optimizer'] = args.mapping_seq_optimizer params['num_epochs'] = args.num_epochs params['batch_size'] = args.batch_size params['gpu'] = args.gpu params['cuda'] = args.cuda params['signal'] = args.signal params['target_lang'] = args.target_lang params['related_lang'] = args.related_lang params['max_vocab'] = args.max_vocab params['map_id_init'] = args.map_id_init params['map_beta'] = args.map_beta params['dis_layers'] = args.dis_layers params['dis_hid_dim'] = args.dis_hid_dim params['dis_dropout'] = args.dis_dropout params['dis_input_dropout'] = args.dis_input_dropout params['dis_steps'] = args.dis_steps params['dis_lambda'] = args.dis_lambda params['dis_most_frequent'] = args.dis_most_frequent params['dis_smooth'] = args.dis_smooth params['dis_clip_weights'] = args.dis_clip_weights params['adversarial'] = args.adversarial params['adv_epochs'] = args.adv_epochs 
params['adv_iteration'] = args.adv_iteration params['adv_batch_size'] = args.adv_batch_size params['map_learning_rate'] = args.map_learning_rate params['map_optimizer'] = args.map_optimizer params['dis_learning_rate'] = args.dis_learning_rate params['dis_optimizer'] = args.dis_optimizer params['lr_decay'] = args.lr_decay params['min_lr'] = args.min_lr params['lr_shrink'] = args.lr_shrink params['n_refinement'] = args.n_refinement params['dico_eval'] = args.dico_eval params['dico_method'] = args.dico_method params['dico_build'] = args.dico_build params['dico_threshold'] = args.dico_threshold params['dico_max_rank'] = args.dico_max_rank params['dico_min_size'] = args.dico_min_size params['dico_max_size'] = args.dico_max_size params['target_emb'] = args.target_emb params['related_emb'] = args.related_emb params['normalize_embeddings'] = args.normalize_embeddings params['seq_dis_smooth'] = args.seq_dis_smooth # Check parameters validity # assert os.path.isfile(args.dev) # assert os.path.isfile(args.test) assert params['char_dim'] > 0 or params['word_dim'] > 0 assert 0. <= params['dropout'] < 1.0 assert params['tag_scheme'] in ['iob', 'iobes', 'classification'] assert not params['related_emb'] or params['target_emb'] assert not params['target_emb'] or params['word_dim'] > 0 assert not params['target_emb'] or os.path.isfile(params['target_emb']) model_names = OrderedDict() model_names = {'signal': args.signal, 'tag_scheme': args.tag_scheme, 'lower': args.lower == 1, 'zeros': args.zeros == 1, 'char_dim': args.char_dim, 'char_lstm_dim': args.char_lstm_dim, 'char_conv': args.char_conv, 'word_dim': args.word_dim, 'word_lstm_dim': args.word_lstm_dim, 'target_emb': args.target_emb, 'related_emb': args.related_emb == 1, 'crf': args.crf == 1, 'dropout': args.dropout, 'tagger_learning_rate': args.tagger_learning_rate, 'num_epochs': args.num_epochs, 'batch_size': args.batch_size, 'tagger_optimizer': args.tagger_optimizer} return params, model_names
[ "xiaoya_0922@126.com" ]
xiaoya_0922@126.com
813170ffbc1c200e9d01cd3b6c7fe63ff19c4ce6
aaa4eb09ebb66b51f471ebceb39c2a8e7a22e50a
/Lista 09/exercício 13.py
5b758ff4ae3bf0877f20e01225830517e2e1e44d
[ "MIT" ]
permissive
Brenda-Werneck/Listas-CCF110
c0a079df9c26ec8bfe194072847b86b294a19d4a
271b0930e6cce1aaa279f81378205c5b2d3fa0b6
refs/heads/main
2023-09-03T09:59:05.351611
2021-10-17T00:49:03
2021-10-17T00:49:03
411,115,920
0
1
null
null
null
null
UTF-8
Python
false
false
514
py
#Crie um algoritmo que leia uma matriz A[NxN] (N ≤ 10) e calcule a respectiva matriz transposta At. N = int(input("Digite um valor: ")) if N <= 10: A = [[0 for i in range(N)] for j in range(N)] for i in range(N): for j in range(N): A[i][j] = int(input(f"Digite o valor para o índice ({i + 1}, {j + 1}): ")) At = [[0 for i in range(N)] for j in range(N)] for i in range(N): for j in range(N): At[i][j] = A[j][i] for i in range(N): print(At[i])
[ "89711195+Brenda-Werneck@users.noreply.github.com" ]
89711195+Brenda-Werneck@users.noreply.github.com
87d15f8ee0f387eb5b5b0513d59853680e217b9b
282a4529e74349989b54201cba3be4bcff5349d5
/todo/apps/activity/templatetags/activity_tags.py
8140818a58ca7766d1bdc0fd264af741a3fc2dd8
[ "WTFPL" ]
permissive
arthuralvim/todo
e34c2a92431574bacca76afda5de5dfa40391d51
a159b17f45b002b0bff9bdd70f79b27173865689
refs/heads/master
2016-09-06T09:14:02.116711
2014-06-03T11:56:02
2014-06-03T11:56:02
null
0
0
null
null
null
null
UTF-8
Python
false
false
446
py
from django import template register = template.Library() @register.filter def attr(field, arg): s = arg.split(':') attr = s[0] value = s[1] return field.as_widget(attrs={attr: value}) @register.filter def attrs(field, args): el = args.split(';') attrs = {} for e in el: s = e.split(':') attr = s[0] value = s[1] attrs.update({attr: value}) return field.as_widget(attrs=attrs)
[ "afmalvim@gmail.com" ]
afmalvim@gmail.com
cbeacded22e0ac363e3028fad703bdf1b4a84750
e76f6fdb1a2ea89d4f38ac1ed28e50a7625e21b7
/qytdjg_learning/views/DB.py
7ce9dfc397aefc79f1d8899ca6196b489352c7fd
[]
no_license
collinsctk/qytdjg_learning
4d61a2a236f0bc4bf9be8d999352a8e3c1b87408
72a6d6153f6ca6bf9fccad76612450fdaf83d9fd
refs/heads/master
2020-03-24T19:45:31.145059
2018-07-31T06:51:14
2018-07-31T06:51:14
142,943,470
0
0
null
null
null
null
UTF-8
Python
false
false
3,556
py
#!/usr/bin/env python3 # -*- coding=utf-8 -*- # 本脚由亁颐堂现任明教教主编写,用于乾颐盾Python课程! # 教主QQ:605658506 # 亁颐堂官网www.qytang.com # 教主技术进化论拓展你的技术新边疆 # https://ke.qq.com/course/271956?tuin=24199d8a from mt.models import Movie from django.http import HttpResponse def testORM(request): result = Movie.objects.all() name = '' for x in result: name = x.name break return HttpResponse(name) def changeData(request): # 插入 m1 = Movie(name='狂暴巨兽', show_time='2018-4', type='科幻') m1.save() m2 = Movie(name='托尔斯泰', show_time='2018-4', type='科幻') m2.save() m3 = Movie(name='烦烦烦', show_time='2018-5', type='科幻') m3.save() # 修改 # m = Movie.objects.get(name='托尔斯泰') # m.name = '太空堡垒' # m.save() # 删除 # m = Movie.objects.get(name='烦烦烦') # m.delete() # 删除所有 # Movie.objects.all().delete() return HttpResponse('数据修改成功') def filter(request): # 过滤所有数据 # all = Movie.objects.all() # result = "" # for d in all: # result = result + d.name + ',' # 过滤特定对象 # ok = Movie.objects.filter(name='托尔斯泰', type='科幻') # 包含字段 ok = Movie.objects.filter(name__icontains='泰', type='科幻') return HttpResponse(ok[0].type) def oneObject(request): # filter return QuerySet # get 返回一个对象(模型对象) try: m = Movie.objects.get(name='托尔斯泰1') print(type(m)) # class 'mt.models.Movie'> print(type(Movie.objects.filter(name='托尔斯泰'))) # class 'django.db.models.query.QuerySet'> # 如果没有查询到任何数据会抛出DoesNotExist异常,或多余一条记录会抛出MultiObjectsReturned异常, except Movie.DoesNotExist: return HttpResponse('没有查到任何数据') except Movie.MultipleObjectsReturned: return HttpResponse('查询结果多余一条记录') return HttpResponse('查询成功') def dataOrder(requets): # 默认为升序, 加'-'为降序 # dataSet = Movie.objects.order_by('name') # 正序 dataSet = Movie.objects.order_by('-name') # 倒序 # dataSet = Movie.objects.order_by('-show_time', type) # 复合排序 result = '' for d in dataSet: result = result + d.name + ',' return HttpResponse(result) # 连锁查询 def multiQuery(request): dataSet = Movie.objects.filter(type__icontains='科幻').order_by('name') result = '' for d in dataSet: result = result + d.name + ',' return 
HttpResponse(result) # 限制返回的数量(返回查询结果集的子集) """ select * from table1 where name like '%abc' limit 1,10 # 返回从第2记录开始的10条记录,2-11条记录 """ def limitData(request): # 不支持负数索引 dataSet = Movie.objects.order_by('name')[1:3] # limit 1,2 result = '' for d in dataSet: result = result + d.name + ',' return HttpResponse(result) """ 指定更新列 m = Movie.objects.get(name='托尔斯泰') m.name = '太空堡垒' m.save() update mt_movie set name='太空堡垒',type='科幻',show_time='2018-4' where name='托尔斯泰' """ def update(request): # update mt_movie set name='太空堡垒' where name='托尔斯泰' Movie.objects.filter(name='托尔斯泰').update(name='西虹市首富') return HttpResponse('更新成功')
[ "collinsctk@qytang.com" ]
collinsctk@qytang.com
d85945315cbd2eaeee8a064729d59b1d75757973
f9c59022d1e5c60345f9bc369df5d6a86f6f64e4
/src/synthetic/create_simple.py
e16fbf952c9af26033ffd13841cafe7957a5aca0
[]
no_license
YingjingLu/Music_Onset
3530c78ac1cf370b9cbb8487aeeea829b80eaa06
99af3c6a1f06bb86e3a09375ab2a8a628e6716da
refs/heads/master
2020-05-26T18:12:40.387918
2019-06-06T03:52:55
2019-06-06T03:52:55
188,331,780
0
1
null
null
null
null
UTF-8
Python
false
false
6,075
py
from mido import Message, MetaMessage, MidiFile, MidiTrack from utils import * import numpy as np from random import randint import os import math from midi2audio import FluidSynth import os # store sound fonts in this dir ~/.fluidsynth/default_sound_font.sf2 sound_font = "gs.sf2" fs = FluidSynth( sound_font, sample_rate = 44100 ) def get_sample_index_from_tick( cur_tick, tick_per_beat, tempo, sample_rate ): beat = cur_tick / tick_per_beat ms = beat * tempo return math.ceil( ms / 1000000 * sample_rate ) def create_gaussian( array, position, window_resolution = 10 ): pass # length in miliseconds # tempo 500000 is 120 beats per minute def create_midi_file( file_name, length, instrument = 1, _range = ( 21, 108 ), simul = 1, key_signature = "C", tempo = 500000, sample_rate = 44100, include_offset = True, ticks_per_beat = 480 ): note_floor, note_ceil = get_instrument_note_range( instrument ) mid = MidiFile() mid.ticks_per_beat = ticks_per_beat track = MidiTrack() mid.tracks.append(track) num_sample = math.ceil( length / 1000000 * sample_rate ) num_beat = length / tempo num_tick = math.floor( mid.ticks_per_beat * num_beat ) # assert( num_tick % num_sample == 0, "sample not divisible by ticks" ) label = np.zeros( num_sample, dtype = np.float32 ) """ ALL IN TICKS """ # setup metadata and control track.append( MidoMessage.track_name( name = file_name ) ) track.append( MidoMessage.key_signature( key = key_signature ) ) track.append( MidoMessage.set_tempo( tempo = tempo ) ) track.append( MidoMessage.time_signature() ) track.append( MidoMessage.program_change( chan = 0, prog = instrument, dt = 0 ) ) track.append( MidoMessage.control_change( chan = 0, control = 64, value = 0, dt = 0 ) ) track.append( MidoMessage.control_change( chan = 0, control = 91, value = 48, dt = 0 ) ) track.append( MidoMessage.control_change( chan = 0, control = 10, value = 51, dt = 0 ) ) track.append( MidoMessage.control_change( chan = 0, control = 7, value = 100, dt = 0 ) ) portion = math.floor( 
num_tick / 8 ) start_tick = randint( portion, 2*portion ) sound_length = randint( 2*portion, 5*portion ) lower, upper = _range # if single note if simul == 1: note = randint( lower, upper ) track.append( MidoMessage.note_on( chan = 0, note = note, vel = 100, dt = start_tick ) ) track.append( MidoMessage.note_off( chan = 0, note = note, vel = 100, dt = sound_length ) ) else: # if multi note num_note = randint( 2, simul ) note_list = [] for i in range( num_note ): note = randint( lower, upper ) note_list.append( note ) note_start = True for note in note_list: if note_start: track.append( MidoMessage.note_on( chan = 0, note = note, vel = 100, dt = start_tick ) ) note_start = False else: track.append( MidoMessage.note_on( chan = 0, note = note, vel = 100, dt = 0 ) ) note_off = True for note in note_list: if note_off: track.append( MidoMessage.note_off( chan = 0, note = note, vel = 100, dt = sound_length ) ) note_off = False else: track.append( MidoMessage.note_off( chan = 0, note = note, vel = 100, dt = 0 ) ) label[ get_sample_index_from_tick( start_tick, mid.ticks_per_beat, tempo, sample_rate ) ] = 1. if include_offset: label[ get_sample_index_from_tick( start_tick + sound_length, mid.ticks_per_beat, tempo, sample_rate ) ] = 1. 
return mid, label name_dict = { 1:"piano", 41:"violin", 43:"cello", 47:"harp", 26:"acoustic_guitar_steel", 65:"oprano_sax" } range_dict = { 1:(21,108), 41:(55,105), 43:(36,84), 47:(24,100), 26:(52,88), 65:(59,91) } skip_list = [] os.mkdir( "train" ) if not os.path.isdir( "train" ) else print() for instrument_index, name in name_dict.items(): try: instrument = name _range = range_dict[ instrument_index ] base_dir = "train/" + instrument os.mkdir( base_dir ) if not os.path.isdir( base_dir ) else print() single_dir = base_dir + "/" + "single" os.mkdir( single_dir ) if not os.path.isdir( single_dir ) else print() sound_folder = single_dir + "/" + "mono_sound" label_folder = single_dir + "/" + "mono_label" midi_folder = single_dir + "/" + "mono_midi" os.mkdir( sound_folder ) if not os.path.isdir( sound_folder ) else print() os.mkdir( label_folder ) if not os.path.isdir( label_folder ) else print() os.mkdir( midi_folder ) if not os.path.isdir( midi_folder ) else print() for i in range( 1000 ): file_name = instrument + "_mono_" + str( i ) mid, arr = create_midi_file( file_name, length = 6000000, instrument = instrument_index, _range = _range, tempo = 441000, include_offset = False ) mid.save( midi_folder + "/" + file_name + '.mid' ) fs.midi_to_audio( midi_folder + '/' + file_name + '.mid', sound_folder + "/" + file_name + ".wav" ) np.save( label_folder + "/" + file_name + ".npy", arr ) # double_dir = base_dir + "/" + "double" # os.mkdir( double_dir ) if not os.path.isdir( double_dir ) else print() # sound_folder = double_dir + "/" + "mono_sound" # label_folder = double_dir + "/" + "mono_label" # midi_folder = double_dir + "/" + "mono_midi" # os.mkdir( sound_folder ) if not os.path.isdir( sound_folder ) else print() # os.mkdir( label_folder ) if not os.path.isdir( label_folder ) else print() # os.mkdir( midi_folder ) if not os.path.isdir( midi_folder ) else print() # for i in range( 1000 ): # file_name = instrument + "_mono_" + str( i ) # mid, arr = create_midi_file( 
file_name, length = 6000000, instrument = instrument_index, _range = _range, # simul = 5, tempo = 441000, include_offset = False ) # mid.save( midi_folder + "/" + file_name + '.mid' ) # fs.midi_to_audio( midi_folder + '/' + file_name + '.mid', sound_folder + "/" + file_name + ".wav" ) # np.save( label_folder + "/" + file_name + ".npy", arr ) except: skip_list.append( name ) print( "Skipping instrument", skip_list )
[ "yingjinglu2019us@gmail.com" ]
yingjinglu2019us@gmail.com
02ac0c4c993e42497f95af752fe4e03c17a76bc3
c97af07addfa42c282d6b5d6ae8c05d00f9d8c50
/Lib/asyncio/windows_utils.py
de7b71d809255b9e76caca102cfbf095f86e277c
[ "Python-2.0", "MIT" ]
permissive
khg0712/RustPython
6c5a4842f557e617c36793f9e5a735130e2fc40e
a04c19ccb0f5e7e1774d5e6f267ffed3ee27aeae
refs/heads/master
2020-11-24T05:02:01.820429
2019-12-06T22:30:58
2019-12-06T22:30:58
227,976,324
1
0
MIT
2019-12-14T06:20:46
2019-12-14T06:20:45
null
UTF-8
Python
false
false
6,915
py
""" Various Windows specific bits and pieces """ import sys if sys.platform != 'win32': # pragma: no cover raise ImportError('win32 only') import _winapi import itertools # XXX RustPython TODO: msvcrt # import msvcrt import os import socket import subprocess import tempfile import warnings __all__ = ['socketpair', 'pipe', 'Popen', 'PIPE', 'PipeHandle'] # Constants/globals BUFSIZE = 8192 PIPE = subprocess.PIPE STDOUT = subprocess.STDOUT _mmap_counter = itertools.count() if hasattr(socket, 'socketpair'): # Since Python 3.5, socket.socketpair() is now also available on Windows socketpair = socket.socketpair else: # Replacement for socket.socketpair() def socketpair(family=socket.AF_INET, type=socket.SOCK_STREAM, proto=0): """A socket pair usable as a self-pipe, for Windows. Origin: https://gist.github.com/4325783, by Geert Jansen. Public domain. """ if family == socket.AF_INET: host = '127.0.0.1' elif family == socket.AF_INET6: host = '::1' else: raise ValueError("Only AF_INET and AF_INET6 socket address " "families are supported") if type != socket.SOCK_STREAM: raise ValueError("Only SOCK_STREAM socket type is supported") if proto != 0: raise ValueError("Only protocol zero is supported") # We create a connected TCP socket. Note the trick with setblocking(0) # that prevents us from having to create a thread. 
lsock = socket.socket(family, type, proto) try: lsock.bind((host, 0)) lsock.listen(1) # On IPv6, ignore flow_info and scope_id addr, port = lsock.getsockname()[:2] csock = socket.socket(family, type, proto) try: csock.setblocking(False) try: csock.connect((addr, port)) except (BlockingIOError, InterruptedError): pass csock.setblocking(True) ssock, _ = lsock.accept() except: csock.close() raise finally: lsock.close() return (ssock, csock) # Replacement for os.pipe() using handles instead of fds def pipe(*, duplex=False, overlapped=(True, True), bufsize=BUFSIZE): """Like os.pipe() but with overlapped support and using handles not fds.""" address = tempfile.mktemp(prefix=r'\\.\pipe\python-pipe-%d-%d-' % (os.getpid(), next(_mmap_counter))) if duplex: openmode = _winapi.PIPE_ACCESS_DUPLEX access = _winapi.GENERIC_READ | _winapi.GENERIC_WRITE obsize, ibsize = bufsize, bufsize else: openmode = _winapi.PIPE_ACCESS_INBOUND access = _winapi.GENERIC_WRITE obsize, ibsize = 0, bufsize openmode |= _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE if overlapped[0]: openmode |= _winapi.FILE_FLAG_OVERLAPPED if overlapped[1]: flags_and_attribs = _winapi.FILE_FLAG_OVERLAPPED else: flags_and_attribs = 0 h1 = h2 = None try: h1 = _winapi.CreateNamedPipe( address, openmode, _winapi.PIPE_WAIT, 1, obsize, ibsize, _winapi.NMPWAIT_WAIT_FOREVER, _winapi.NULL) h2 = _winapi.CreateFile( address, access, 0, _winapi.NULL, _winapi.OPEN_EXISTING, flags_and_attribs, _winapi.NULL) ov = _winapi.ConnectNamedPipe(h1, overlapped=True) ov.GetOverlappedResult(True) return h1, h2 except: if h1 is not None: _winapi.CloseHandle(h1) if h2 is not None: _winapi.CloseHandle(h2) raise # Wrapper for a pipe handle class PipeHandle: """Wrapper for an overlapped pipe handle which is vaguely file-object like. The IOCP event loop can use these instead of socket objects. 
""" def __init__(self, handle): self._handle = handle def __repr__(self): if self._handle is not None: handle = 'handle=%r' % self._handle else: handle = 'closed' return '<%s %s>' % (self.__class__.__name__, handle) @property def handle(self): return self._handle def fileno(self): if self._handle is None: raise ValueError("I/O operatioon on closed pipe") return self._handle def close(self, *, CloseHandle=_winapi.CloseHandle): if self._handle is not None: CloseHandle(self._handle) self._handle = None def __del__(self): if self._handle is not None: warnings.warn("unclosed %r" % self, ResourceWarning, source=self) self.close() def __enter__(self): return self def __exit__(self, t, v, tb): self.close() # Replacement for subprocess.Popen using overlapped pipe handles class Popen(subprocess.Popen): """Replacement for subprocess.Popen using overlapped pipe handles. The stdin, stdout, stderr are None or instances of PipeHandle. """ def __init__(self, args, stdin=None, stdout=None, stderr=None, **kwds): assert not kwds.get('universal_newlines') assert kwds.get('bufsize', 0) == 0 stdin_rfd = stdout_wfd = stderr_wfd = None stdin_wh = stdout_rh = stderr_rh = None if stdin == PIPE: stdin_rh, stdin_wh = pipe(overlapped=(False, True), duplex=True) stdin_rfd = msvcrt.open_osfhandle(stdin_rh, os.O_RDONLY) else: stdin_rfd = stdin if stdout == PIPE: stdout_rh, stdout_wh = pipe(overlapped=(True, False)) stdout_wfd = msvcrt.open_osfhandle(stdout_wh, 0) else: stdout_wfd = stdout if stderr == PIPE: stderr_rh, stderr_wh = pipe(overlapped=(True, False)) stderr_wfd = msvcrt.open_osfhandle(stderr_wh, 0) elif stderr == STDOUT: stderr_wfd = stdout_wfd else: stderr_wfd = stderr try: super().__init__(args, stdin=stdin_rfd, stdout=stdout_wfd, stderr=stderr_wfd, **kwds) except: for h in (stdin_wh, stdout_rh, stderr_rh): if h is not None: _winapi.CloseHandle(h) raise else: if stdin_wh is not None: self.stdin = PipeHandle(stdin_wh) if stdout_rh is not None: self.stdout = PipeHandle(stdout_rh) if 
stderr_rh is not None: self.stderr = PipeHandle(stderr_rh) finally: if stdin == PIPE: os.close(stdin_rfd) if stdout == PIPE: os.close(stdout_wfd) if stderr == PIPE: os.close(stderr_wfd)
[ "33094578+coolreader18@users.noreply.github.com" ]
33094578+coolreader18@users.noreply.github.com
38a90db6d164678ec8455e591bfe98bf3218361d
fe096ed06c34ae3adf958760886dd5f2fc64fa90
/reverseInteger.py
926bf94629cb20ed9c275d76abe51bbefa820f53
[]
no_license
harshmalviya7/LeetCode_Coding_Questions
c9d8a93f4a5664dcf57098cd58f3f1d95667b0c0
47edb51e55e390861ed539972d8bf66b41b4cdd7
refs/heads/master
2023-08-23T01:09:40.110710
2021-10-21T12:53:36
2021-10-21T12:53:36
373,072,675
4
0
null
null
null
null
UTF-8
Python
false
false
361
py
# 7. Reverse Integer # https://leetcode.com/problems/reverse-integer/ class Solution: def reverse(self, x: int) -> int: def no(x): a=0 while(x>0): a=a*10+x%10 x=x//10 return 0 if a> 0x7fffffff else a if x<0: return -1*no(abs(x)) else: return no(x)
[ "harsh.malviya.9869@gmail.com" ]
harsh.malviya.9869@gmail.com
c766d0590cce8bc6d1c6f198c01824d8154a8701
26dbbed416176ec2e929d54c689d5206357f6fb6
/course/migrations/0003_post_author.py
953378e3d0edbaa11fa3ea9da57ff7b7b4762560
[]
no_license
coderrohanpahwa/dynamic_blog
12084d8f3336525ef400d6891ec231a5260c90f7
081b32691c3edd9208c654a6c101f786ff514a3f
refs/heads/master
2022-12-22T09:46:30.059928
2020-09-15T17:06:48
2020-09-15T17:06:48
295,687,840
0
1
null
2020-09-30T19:15:49
2020-09-15T10:07:25
Python
UTF-8
Python
false
false
382
py
# Generated by Django 3.1 on 2020-09-14 17:29 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('course', '0002_post'), ] operations = [ migrations.AddField( model_name='post', name='author', field=models.CharField(default='Rohan', max_length=50), ), ]
[ "coderrohanpahwa@gmail.com" ]
coderrohanpahwa@gmail.com
ffcae26716030284c8ceff004efda0e5f3cff23d
20da592bc2e37f18e172aa1ad8219b773e30290c
/plotConfiguration/WH_SS/Full2017nanov6/cuts.py
acec7312774cbfe76f166f6eae0f02438edbb52d
[]
no_license
LambdaFramework/Analyzer-Nano
000721114a7f568474406727c2dbf614f49dd81b
39cfd650037ac4e852d5b5cba36f94782c5dcdd9
refs/heads/master
2022-11-27T12:41:31.007899
2020-08-03T09:00:43
2020-08-03T09:00:43
null
0
0
null
null
null
null
UTF-8
Python
false
false
3,856
py
# cuts supercut = 'mll>12 \ && Lepton_pt[0]>25 && Lepton_pt[1]>20 \ && bVeto \ && PuppiMET_pt > 30 \ ' cuts={} cuts['OSee'] = 'nCleanJet>=2 && nLepton>=2 && isbVeto && ((Lepton_pdgId[0]*Lepton_pdgId[1]==11*-11)||(Lepton_pdgId[0]*Lepton_pdgId[1]==-11*11)) \ && Electron_pt[Lepton_electronIdx[0]]>25 && Electron_pt[Lepton_electronIdx[1]]>20 \ && Electron_pfRelIso03_all[Lepton_electronIdx[0]]<0.15 && Electron_pfRelIso03_all[Lepton_electronIdx[1]]<0.15 \ && mll>30 && ht>100 \ ' ''' ## SR 2jets cuts['hww2l2v_13TeV_of2j_WH_SS_uu_2j'] = '(Lepton_pdgId[0]*Lepton_pdgId[1] == 13*13) \ && nLepton==2 \ && Alt$(CleanJet_pt[0],0)>30 \ && Alt$(CleanJet_pt[1],0)>30 \ && mjj < 100 \ && abs(Lepton_eta[0] - Lepton_eta[1])<2.0 \ && abs(mll-91.2)>15 \ && mlljj20_whss > 50. \ ' cuts['hww2l2v_13TeV_of2j_WH_SS_eu_2j'] = '(Lepton_pdgId[0]*Lepton_pdgId[1] == 11*13) \ && nLepton==2 \ && Alt$(CleanJet_pt[0],0)>30 \ && Alt$(CleanJet_pt[1],0)>30 \ && mjj < 100 \ && abs(Lepton_eta[0] - Lepton_eta[1])<2.0 \ && mlljj20_whss > 50. \ ' ## SR 1jet cuts['hww2l2v_13TeV_of2j_WH_SS_uu_1j'] = '(Lepton_pdgId[0]*Lepton_pdgId[1] == 13*13) \ && nLepton==2 \ && Alt$(CleanJet_pt[0],0)>30 \ && Alt$(CleanJet_pt[1],0)<30 \ && abs(Lepton_eta[0] - Lepton_eta[1])<2.0 \ && abs(mll-91.2)>15 \ && mlljj20_whss > 50. \ ' cuts['hww2l2v_13TeV_of2j_WH_SS_eu_1j'] = '(Lepton_pdgId[0]*Lepton_pdgId[1] == 11*13) \ && nLepton==2 \ && Alt$(CleanJet_pt[0],0)>30 \ && Alt$(CleanJet_pt[1],0)<30 \ && abs(Lepton_eta[0] - Lepton_eta[1])<2.0 \ && mlljj20_whss > 50. 
\ ' ''' ### WZ CR ''' cuts['hww2l2v_13TeV_of2j_WH_SS_WZ_1j'] = '((Lepton_pdgId[0]*Lepton_pdgId[1] == 13*13) || (Lepton_pdgId[0]*Lepton_pdgId[1] == 11*13))\ && (nLepton>=3 && Alt$(Lepton_pt[3],0)<10) \ && Lepton_pt[2]>15 \ && Alt$(CleanJet_pt[0],0)>30 \ && Alt$(CleanJet_pt[1],0)<30 \ && WH3l_mlll > 100 \ && abs(WH3l_chlll) == 1 \ ' cuts['hww2l2v_13TeV_of2j_WH_SS_WZ_2j'] = '((Lepton_pdgId[0]*Lepton_pdgId[1] == 13*13) || (Lepton_pdgId[0]*Lepton_pdgId[1] == 11*13)) \ && (nLepton>=3 && Alt$(Lepton_pt[3],0)<10) \ && Lepton_pt[2]>15 \ && Alt$(CleanJet_pt[0],0)>30 \ && Alt$(CleanJet_pt[1],0)>30 \ && WH3l_mlll > 100 \ && abs(WH3l_chlll) == 1 \ ' '''
[ "siew.yan.hoh@cern.ch" ]
siew.yan.hoh@cern.ch
2b600a43cf5e371445358316b0fd5c47d22183da
de24f83a5e3768a2638ebcf13cbe717e75740168
/moodledata/vpl_data/59/usersdata/189/29370/submittedfiles/testes.py
61a1f9b2b067f7ce6955f2759012caf516befc2c
[]
no_license
rafaelperazzo/programacao-web
95643423a35c44613b0f64bed05bd34780fe2436
170dd5440afb9ee68a973f3de13a99aa4c735d79
refs/heads/master
2021-01-12T14:06:25.773146
2017-12-22T16:05:45
2017-12-22T16:05:45
69,566,344
0
0
null
null
null
null
UTF-8
Python
false
false
374
py
# -*- coding: utf-8 -*- import math f=float(input('digite f:')) l=float(input('digite l:')) q=float(input('digite q:')) deltaH=float(input('digite deltaH:')) v=float(input('digite v:')) d=((8*f*l)*(q*q)/((math.pi*math.pi)*(9.81*deltaH)))**(1/5) rey=(4*q)/(math.pi*d*v) k=0.25/(math.log10(0.000002/3.7*d+5.74/reu**0.9))**2 print('%.4f' %d) print('%.4f' %rey) print('%.4f' %k)
[ "rafael.mota@ufca.edu.br" ]
rafael.mota@ufca.edu.br
6787d8861d092534356152dbdb92a603032e13df
9a69af2f759f336c039865c3070d6f089b5f99c5
/src/users/urls.py
2920b743c267f900eb4733933316205ab4235c29
[]
no_license
henryfrstr/clarusway_quiz
ae108a7b19713dae0566d31dc35a6d0b221160d6
0788624bc9ec409a6381d2ad2210f0a92d91d5fb
refs/heads/master
2023-02-16T04:51:01.574223
2021-01-16T18:04:27
2021-01-16T18:04:27
329,702,753
1
4
null
null
null
null
UTF-8
Python
false
false
143
py
from django.urls import path

from .views import RegisterView

# Route table for the users app: a single registration endpoint,
# reversible by the name "register".
register_route = path("register/", RegisterView.as_view(), name="register")

urlpatterns = [register_route]
[ "63148122+henryfrstr@users.noreply.github.com" ]
63148122+henryfrstr@users.noreply.github.com
bc8cd076fe7eadc1630d631831ac486840b85d24
d1742451b25705fc128acc245524659628ab3e7d
/Hacker Rank/Circular Array Rotation.py
3e38d7459203e8054ccb009e6296f48f481c5dee
[]
no_license
Shovon588/Programming
ebab793a3c97aedddfcad5ea06e7e22f5c54a86e
e4922c9138998358eed09a1be7598f9b060c685f
refs/heads/master
2022-12-23T18:29:10.141117
2020-10-04T17:29:32
2020-10-04T17:29:32
256,915,133
1
0
null
null
null
null
UTF-8
Python
false
false
168
py
n, k , q = map(int,input().split()) a = list(map(int,input().split())) for i in range(q): x = int(input()) x -= k if x<0: x += n print(a[x])
[ "mainulislam588@gmail.com" ]
mainulislam588@gmail.com
a1452afad856f94cafa5affff3204f3068558ba1
5b6b2018ab45cc4710cc5146040bb917fbce985f
/78_longest-common-prefix/longest-common-prefix.py
6572b867bf8bc37e729ca4c4843bc0ed87c6fc1e
[]
no_license
ultimate010/codes_and_notes
6d7c7d42dcfd84354e6fcb5a2c65c6029353a328
30aaa34cb1c840f7cf4e0f1345240ac88b8cb45c
refs/heads/master
2021-01-11T06:56:11.401869
2016-10-30T13:46:39
2016-10-30T13:46:39
72,351,982
0
0
null
null
null
null
UTF-8
Python
false
false
806
py
# coding:utf-8
'''
@Copyright:LintCode
@Author: ultimate010
@Problem: http://www.lintcode.com/problem/longest-common-prefix
@Language: Python
@Datetime: 16-05-19 13:22
'''


class Solution:
    # @param strs: A list of strings
    # @return: The longest common prefix
    def longestCommonPrefix(self, strs):
        """Return the longest string every entry of ``strs`` starts with."""
        # write your code here
        if not strs:
            return ''
        shared = []
        # zip(*strs) walks the strings column by column and stops at the
        # shortest string; the first column holding more than one distinct
        # character ends the common prefix.
        for column in zip(*strs):
            if len(set(column)) > 1:
                break
            shared.append(column[0])
        return ''.join(shared)
[ "ultimate010@gmail.com" ]
ultimate010@gmail.com
a0ccbf9ce3fca0cb7257b67b8b9efa6a5a764607
a275cec1fddb6e034b4e9df72f8039536c009990
/codes/leetcode/jump-game.py
4526b397d6eea0cef6435139c00d07e5f17ed4b2
[]
no_license
taoste/dirtysalt
a3cbd16710c81de65f00aa919f4e67a1fc66d226
bd68294fb7727d598ea1c8bf0a559247e07c1aea
refs/heads/master
2021-08-24T10:44:47.607924
2017-12-09T08:47:12
2017-12-09T08:47:12
113,807,519
1
0
null
null
null
null
UTF-8
Python
false
false
613
py
#!/usr/bin/env python
# coding:utf-8
# Copyright (C) dirlt


class Solution(object):
    def canJump(self, nums):
        """
        :type nums: List[int]
        :rtype: bool

        Greedy single pass replacing the original O(n^2) DP, which also
        used the Python-2-only ``xrange`` and would raise NameError on
        Python 3.  Track the furthest index reachable so far; if the scan
        ever stands past that frontier, the end is unreachable.
        """
        reach = 0  # furthest index reachable with the jumps seen so far
        last = len(nums) - 1
        for i, step in enumerate(nums):
            if i > reach:
                # Position i can never be reached, so neither can the end.
                return False
            reach = max(reach, i + step)
            if reach >= last:
                return True
        # Covers the trivial cases (e.g. empty / single-element input).
        return True
[ "dirtysalt1987@gmail.com" ]
dirtysalt1987@gmail.com
25333131e13d1ee7e948085d5402a8cf5caee001
8d29fd856250e746f19e086975e83d2dea2cf6a3
/ResourceStatusSystem/Client/test/Test_ResourceStatusClient/Test_Unit_withDB.py
aec1f7ea0f55ffd3b55801e7ffdf86002f39bbea
[]
no_license
hanyl/DIRAC
048c749154192e3940e17b24396afe0e667444b2
82eb56888fc039f94ba1033ea4b6d3ad503bf96e
refs/heads/master
2021-01-16T00:23:00.344192
2013-01-19T00:01:05
2013-01-19T00:02:12
null
0
0
null
null
null
null
UTF-8
Python
false
false
795
py
################################################################################ # $HeadURL $ ################################################################################ __RCSID__ = "$Id: $" ''' Set the fixture needed to run this test. ''' from DIRAC.ResourceStatusSystem.Client.test.Test_ResourceStatusClient import fixtures _fixture = fixtures.Description_withDB ################################################################################ ''' Add test cases to the suite ''' from DIRAC.ResourceStatusSystem.Client.test.Test_ResourceStatusClient.TestCase_Unit import TestCase_Unit Test_Unit_withDB = TestCase_Unit ################################################################################ #EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
[ "mario.ubeda.garcia@cern.ch" ]
mario.ubeda.garcia@cern.ch
f8959fe24507665771a88e6aecc9952fc4c48693
3aa63e91138b03cfed43b614dc11b90b1b12a395
/tests/python/py4j_signals_test.py
8696aa82f864214cbdd76dba6af8b02f0fa90a55
[ "BSD-3-Clause" ]
permissive
karpierz/jtypes.py4j
d73ba4c062fa8415bd9e486156766fbecd0445c7
1bf48c022357c558da4d0df45fe4a0100df99a99
refs/heads/master
2021-05-09T08:07:48.050913
2018-11-13T15:55:42
2018-11-13T15:55:42
119,380,797
0
0
null
null
null
null
UTF-8
Python
false
false
6,556
py
# -*- coding: UTF-8 -*- from __future__ import unicode_literals, absolute_import from collections import defaultdict import unittest from jt.py4j.java_gateway import ( server_connection_started, server_connection_stopped, server_started, server_stopped, pre_server_shutdown, post_server_shutdown, JavaGateway, GatewayParameters, CallbackServerParameters) from jt.py4j.clientserver import ( ClientServer, JavaParameters, PythonParameters) from .client_server_test import ( # <AK> was: from py4j.tests. clientserver_example_app_process) from .java_callback_test import ( # <AK> was: from py4j.tests. IHelloImpl, gateway_example_app_process) from .py4j_callback_recursive_example import ( # <AK> was: from py4j.tests. HelloState) class MockListener(object): def __init__(self, test_case): self.test_case = test_case self.received = defaultdict(int) def started(self, sender, **kwargs): self.test_case.assertTrue(kwargs["server"] is not None) self.received["started"] += 1 def connection_started(self, sender, **kwargs): self.test_case.assertTrue(kwargs["connection"] is not None) self.received["connection_started"] += 1 def connection_stopped(self, sender, **kwargs): self.test_case.assertTrue(kwargs["connection"] is not None) self.received["connection_stopped"] += 1 def stopped(self, sender, **kwargs): self.test_case.assertTrue(kwargs["server"] is not None) self.received["stopped"] += 1 def pre_shutdown(self, sender, **kwargs): self.test_case.assertTrue(kwargs["server"] is not None) self.received["pre_shutdown"] += 1 def post_shutdown(self, sender, **kwargs): self.test_case.assertTrue(kwargs["server"] is not None) self.received["post_shutdown"] += 1 @unittest.skip("jt.py4j: !!!") class JavaGatewayTest(unittest.TestCase): def test_all_regular_signals_auto_start(self): listener = MockListener(self) with gateway_example_app_process(None): server_started.connect(listener.started) gateway = JavaGateway( gateway_parameters=GatewayParameters(), 
callback_server_parameters=CallbackServerParameters()) server_stopped.connect( listener.stopped, sender=gateway.get_callback_server()) server_connection_started.connect( listener.connection_started, sender=gateway.get_callback_server()) server_connection_stopped.connect( listener.connection_stopped, sender=gateway.get_callback_server()) pre_server_shutdown.connect( listener.pre_shutdown, sender=gateway.get_callback_server()) post_server_shutdown.connect( listener.post_shutdown, sender=gateway.get_callback_server()) example = gateway.entry_point.getNewExample() impl = IHelloImpl() self.assertEqual("This is Hello!", example.callHello(impl)) gateway.shutdown() self.assertEqual(1, listener.received["started"]) self.assertEqual(1, listener.received["stopped"]) self.assertEqual(1, listener.received["pre_shutdown"]) self.assertEqual(1, listener.received["post_shutdown"]) self.assertEqual(1, listener.received["connection_started"]) self.assertEqual(1, listener.received["connection_stopped"]) @unittest.skip("jt.py4j: !!!") class ClientServerTest(unittest.TestCase): def test_all_regular_signals(self): listener = MockListener(self) server_started.connect(listener.started) hello_state = HelloState() client_server = ClientServer( JavaParameters(), PythonParameters(), hello_state) server_stopped.connect( listener.stopped, sender=client_server.get_callback_server()) server_connection_started.connect( listener.connection_started, sender=client_server.get_callback_server()) server_connection_stopped.connect( listener.connection_stopped, sender=client_server.get_callback_server()) pre_server_shutdown.connect( listener.pre_shutdown, sender=client_server.get_callback_server()) post_server_shutdown.connect( listener.post_shutdown, sender=client_server.get_callback_server()) with clientserver_example_app_process(True): client_server.shutdown() self.assertEqual(1, listener.received["started"]) self.assertEqual(1, listener.received["stopped"]) self.assertEqual(1, 
listener.received["pre_shutdown"]) self.assertEqual(1, listener.received["post_shutdown"]) self.assertEqual(1, listener.received["connection_started"]) self.assertEqual(1, listener.received["connection_stopped"]) def test_signals_started_from_python(self): listener = MockListener(self) with clientserver_example_app_process(): server_started.connect(listener.started) client_server = ClientServer( JavaParameters(), PythonParameters()) example = client_server.entry_point.getNewExample() impl = IHelloImpl() self.assertEqual("This is Hello!", example.callHello(impl)) server_stopped.connect( listener.stopped, sender=client_server.get_callback_server()) server_connection_started.connect( listener.connection_started, sender=client_server.get_callback_server()) server_connection_stopped.connect( listener.connection_stopped, sender=client_server.get_callback_server()) pre_server_shutdown.connect( listener.pre_shutdown, sender=client_server.get_callback_server()) post_server_shutdown.connect( listener.post_shutdown, sender=client_server.get_callback_server()) client_server.shutdown() self.assertEqual(1, listener.received["started"]) self.assertEqual(1, listener.received["stopped"]) self.assertEqual(1, listener.received["pre_shutdown"]) self.assertEqual(1, listener.received["post_shutdown"]) # Connection initiated from JavaClient, so no signal sent. self.assertEqual(0, listener.received["connection_started"]) self.assertEqual(0, listener.received["connection_stopped"])
[ "akarpierz@gmail.com" ]
akarpierz@gmail.com
6341b6eb5cb3884055973d5c3798b6f351e076eb
cd876d32aa66112892dc9550837ad843e3e03afd
/env_carzone/Lib/site-packages/django/contrib/postgres/apps.py
55b9aa66774ce3be3618cf456cc52abf25d0aa3e
[ "BSD-3-Clause" ]
permissive
viplavdube/Car-Yard-App
7665b7e6e54f3b0e4a4da563151f85d65c225cef
65381a50f828e80f31d25d4f35e497f51c2d224d
refs/heads/master
2023-04-19T03:49:18.991604
2021-04-27T17:51:10
2021-04-27T17:51:10
349,094,392
0
0
null
null
null
null
UTF-8
Python
false
false
3,112
py
from psycopg2.extras import ( DateRange, DateTimeRange, DateTimeTZRange, NumericRange, ) from django.apps import AppConfig from django.db import connections from django.db.backends.signals import connection_created from django.db.migrations.writer import MigrationWriter from django.db.models import CharField, TextField from django.test.signals import setting_changed from django.utils.translation import gettext_lazy as _ from .lookups import SearchLookup, TrigramSimilar, Unaccent from .serializers import RangeSerializer from .signals import register_type_handlers RANGE_TYPES = (DateRange, DateTimeRange, DateTimeTZRange, NumericRange) def uninstall_if_needed(setting, value, enter, **kwargs): """ Undo the effects of PostgresConfig.ready() when django.contrib.postgres is "uninstalled" by override_settings(). """ if ( not enter and setting == "INSTALLED_APPS" and "django.contrib.postgres" not in set(value) ): connection_created.disconnect(register_type_handlers) CharField._unregister_lookup(Unaccent) TextField._unregister_lookup(Unaccent) CharField._unregister_lookup(SearchLookup) TextField._unregister_lookup(SearchLookup) CharField._unregister_lookup(TrigramSimilar) TextField._unregister_lookup(TrigramSimilar) # Disconnect this receiver until the next time this app is installed # and ready() connects it again to prevent unnecessary processing on # each setting change. setting_changed.disconnect(uninstall_if_needed) MigrationWriter.unregister_serializer(RANGE_TYPES) class PostgresConfig(AppConfig): name = "django.contrib.postgres" verbose_name = _("PostgreSQL extensions") def ready(self): setting_changed.connect(uninstall_if_needed) # Connections may already exist before we are called. 
for conn in connections.all(): if conn.vendor == "postgresql": conn.introspection.data_types_reverse.update( { 3802: "django.contrib.postgres.fields.JSONField", 3904: "django.contrib.postgres.fields.IntegerRangeField", 3906: "django.contrib.postgres.fields.DecimalRangeField", 3910: "django.contrib.postgres.fields.DateTimeRangeField", 3912: "django.contrib.postgres.fields.DateRangeField", 3926: "django.contrib.postgres.fields.BigIntegerRangeField", } ) if conn.connection is not None: register_type_handlers(conn) connection_created.connect(register_type_handlers) CharField.register_lookup(Unaccent) TextField.register_lookup(Unaccent) CharField.register_lookup(SearchLookup) TextField.register_lookup(SearchLookup) CharField.register_lookup(TrigramSimilar) TextField.register_lookup(TrigramSimilar) MigrationWriter.register_serializer(RANGE_TYPES, RangeSerializer)
[ "viplav45@gmail.com" ]
viplav45@gmail.com
76a0bbfd022c6f4c1d86600c0f0904057ee9bda2
ce819ddd76427722d967e06190fc24ac98758009
/rename_text.py
ae28d659413d875c5c7e82598cf7ca20e2735792
[]
no_license
huilizhou/Deeplearning_Python_DEMO
cb4164d21899757a4061836571b389dad0e63094
0a2898122b47b3e0196966a2fc61468afa99f67b
refs/heads/master
2021-08-16T10:28:51.992892
2020-04-04T08:26:07
2020-04-04T08:26:07
148,308,575
0
0
null
null
null
null
UTF-8
Python
false
false
472
py
import os

# Renumber the files in this folder (assign new file names).
path = r"D:\11Graduate program\2019(5)huili_cloud_data_operate\test\1"
entries = os.listdir(path)

# First pass: show what the folder currently contains.
for name in entries:
    print(name)

# Second pass: rename every regular file to "<running number><original
# extension>".  Sub-directories keep their names and do not consume a number.
count = 1
for name in entries:
    source = os.path.join(path, name)
    if os.path.isdir(source):
        continue
    stem, extension = os.path.splitext(name)
    target = os.path.join(path, str(count) + extension)
    os.rename(source, target)
    count += 1
[ "2540278344@qq.com" ]
2540278344@qq.com
6c8d6ad8b9978edd217bc465cff7ad8587cc0f93
05ba1957e63510fd8f4f9a3430ec6875d9ecb1cd
/.history/fh/a_20200817000254.py
f0c59d004ec636878ebb5c8b26bca4ded5475ef2
[]
no_license
cod-lab/try
906b55dd76e77dbb052603f0a1c03ab433e2d4d1
3bc7e4ca482459a65b37dda12f24c0e3c71e88b6
refs/heads/master
2021-11-02T15:18:24.058888
2020-10-07T07:21:15
2020-10-07T07:21:15
245,672,870
0
0
null
null
null
null
UTF-8
Python
false
false
544
py
import fileinput as fi # for line in fi.FileInput("a.md",inplace=1): # print(line) # for i,line in enumerate(fi.FileInput("a.md",inplace=1)): # # for line in fi.FileInput("a.md",inplace=1): # # print(line, end='') # if 37<i<43: # # print(i+1, line, end='') # # print("type of line: ",type(line)) # # line="" # print("deleted") temp={} for line in fi.input("a.md",inplace=1): if 38<fi.lineno()<44: # line=line+ temp[fi.lineno()] = line print(temp) # print(temp)
[ "arihant806@gmail.com" ]
arihant806@gmail.com
f29b21a1e7edffc022ca91ee7d3c3b2e2dbef0ee
7792b03540784a0d28073899dd4ad78689e9a9fb
/ProxyPool/schedules/check_available.py
45aeed3af5049802012d255d7e782367cef804fb
[]
no_license
ayiis/coding
3b1362f813a22a7246af3725162cfb53dea2f175
c73e4622e1811cc3fd8729a92df6537bd73dc802
refs/heads/master
2021-06-02T14:55:38.451288
2021-04-26T08:39:16
2021-04-26T08:39:16
134,660,001
0
0
null
2020-06-05T04:03:58
2018-05-24T04:14:14
CSS
UTF-8
Python
false
false
2,072
py
#!/usr/bin/env python2.7 # -*- coding: utf-8 -*- import tornado, tornado.gen, tornado.web from tornado.options import define, options import datetime import config import do_check from common import my_logger logging = my_logger.Logger("schedules.check_available.py", False, True, True) setting = { "running": False, # when i was running, Lock me "db": None, "count_remove": 0, "count_update": 0, } @tornado.gen.coroutine def do_check_available(proxy_item, ip_data): if ip_data: setting["count_update"] += 1 yield setting["db"][config.setting["available_pool"]["db_name"]].update({ "_id": proxy_item["_id"], }, { "$set": ip_data }) else: setting["count_remove"] += 1 # move to unavailable_pool yield [ setting["db"][config.setting["available_pool"]["db_name"]].remove({"_id": proxy_item["_id"]}), setting["db"][config.setting["unavailable_pool"]["db_name"]].update({ "proxy_host": proxy_item["proxy_host"], "proxy_port": proxy_item["proxy_port"], }, { x:proxy_item[x] for x in proxy_item if x != "_id" }, upsert=True, multi=False) ] @tornado.gen.coroutine def do(db): logging.info("Job check_available start!") if setting["running"] == True: logging.warn("Another Job check_available still running!") while setting["running"] == True: yield tornado.gen.sleep(1) setting["db"] = db job_check = do_check.DoCheck({ "collection": setting["db"][config.setting["available_pool"]["db_name"]], "callback_func": do_check_available, "page_size":config.setting["available_pool"]["count"] * 2, "timeout":config.setting["available_pool"]["timeout"], }) try: setting["running"] = True yield job_check.do() except: logging.my_exc("Exception on check_available.") finally: setting["running"] = False logging.info("Job check_available Done!") logging.info(setting)
[ "ayiis@126.com" ]
ayiis@126.com
0d17ac87bad6a6dd7c52846005bd600179e5ad9e
f160d992d0ea5fa4e36af0025b5637c8962f2a29
/achihuo-mini/achihuo_mini/adapter.py
0901a2ddd7aa3613733d0e09659843fc1a5050eb
[]
no_license
Zachypentakill/Afanti_tiku
369dde43a32cecb136eb1207bf4223f6decd9843
aebee5b3d8dce76f95620cb52fda5a0f19965945
refs/heads/master
2021-07-11T15:59:03.099600
2017-10-11T10:27:43
2017-10-11T10:27:43
null
0
0
null
null
null
null
UTF-8
Python
false
false
4,245
py
import os import json import signal from functools import wraps from .models import CacheInferface, QueueInferface from .arg import parse_arguments from .exceptions import NotCommand from .utils import ( md5_string, serialize_obj, unserialize_obj ) from .settings import ( LOCAL_CACHE_DIR, PIDS, KILL_SIGNAL, KILL_TERM, ) def hash_key(func): @wraps(func) def wrapper(*args): self = args[0] h = md5_string(args[1]) rs = func(self, h) return rs return wrapper class Adapter: def __init__(self, queue, cache): assert isinstance(queue, QueueInferface) assert isinstance(cache, CacheInferface) if not cache.initiated: cache.initiate() self._queue = queue self._cache = cache self._current_tasks = {} self.tdx = 0 @property def queue(self): return self._queue @property def cache(self): return self._cache def parse_args(self, argv): args = parse_arguments(argv) if not args.xxx: raise NotCommand() args.comd = args.xxx[0] args.opts = args.xxx[1:] return args def register(self, namespace): self._cache.register(namespace) def get_pids(self, script_filename): pid_file = os.path.join(LOCAL_CACHE_DIR, script_filename + '.pids.json') if not os.path.exists(pid_file): return dict(PIDS) pids = json.load(open(pid_file)) return pids def save_pids(self, pids, script_filename): pid_file = os.path.join(LOCAL_CACHE_DIR, script_filename + '.pids.json') if not os.path.exists(LOCAL_CACHE_DIR): os.makedirs(LOCAL_CACHE_DIR) with open(pid_file, 'w') as fd: json.dump(pids, fd) def kill(self, pid): os.kill(pid, KILL_SIGNAL.value) def restore_current_tasks(self): ''' put all current tasks to queue ''' for k, v in self._current_tasks.items(): self._queue.put(v, priority=True) # max_priority def get_task(self): ''' get one task from queue ''' task = self._queue.get() if not task: return 0, task else: tdx = self.tdx self.tdx += 1 # add task to current tasks self._current_tasks[tdx] = task u_task = unserialize_obj(task) return tdx, u_task def task_over(self, tdx): ''' asynchronous task is over, then remove it 
from current tasks ''' try: del self._current_tasks[tdx] except Exception: pass def add_task(self, task, priority=0): ''' add one task to queue ''' s_task = serialize_obj(task) self._queue.put(s_task, priority=priority) @hash_key def task_done(self, task_name): ''' task is done, then save it to task_table ''' self._cache.table_set(0, task_name, 1) @hash_key def is_task_done(self, task_name): ''' task_name in task_table ''' return self._cache.in_table(0, task_name) @hash_key def queue_add(self, task_name): ''' add one 'task_name' to current_task_table ''' self._cache.table_incr(1, task_name, 1) @hash_key def is_in_queue(self, task_name): ''' task_name in current_task_table ''' return self._cache.in_table(1, task_name) @hash_key def queue_remove(self, task_name): ''' subtract one 'task_name' from current_task_table ''' val = self._cache.table_incr(1, task_name, -1) if not val or val < 1: self._cache.table_del(1, task_name) @hash_key def miss_task(self, task_name): ''' ignore one task, remove it from task_table and current_task_table ''' self._cache.table_del(0, task_name) self._cache.table_del(1, task_name) def clear_cache(self): self._cache.table_clear(0) self._cache.table_clear(1) def clear_queue(self): self._queue.delete()
[ "yanfeng.li@lejent.com" ]
yanfeng.li@lejent.com
eba7e3aebbc608c07fc39591f5589a89cda60824
05709017c8f6f329c939b5984b02704c34b1119d
/hurst.py
12ebcf4f4d0d413229129edee3837fb15b0aaf2f
[]
no_license
fndjjx/emd
4fed1f09e1657bf7b31421365d88f6593ffe8b9c
d12b7cf3182d1e3bf43a706943f50298684cb72a
refs/heads/master
2021-01-19T05:15:28.908832
2020-06-16T06:55:17
2020-06-16T06:55:17
61,512,684
0
1
null
null
null
null
UTF-8
Python
false
false
768
py
from numpy.fft import fft
from numpy.random import randn
from numpy import zeros, floor, log10, log, mean, array, sqrt, vstack, cumsum, ones, log2, std
from numpy.linalg import svd, lstsq
import time

######################## Functions contributed by Xin Liu #################


def hurst(X):
    """Estimate the Hurst exponent of the 1-D series ``X`` by rescaled-range
    (R/S) analysis: regress log(R/S) on log(window length) and return the
    slope.

    Ported from Python 2 (``xrange`` / print statement) to Python 3; the
    algorithm itself is unchanged.
    """
    N = len(X)
    T = array([float(i) for i in range(1, N + 1)])
    Y = cumsum(X)
    Ave_T = Y / T
    S_T = zeros((N))
    R_T = zeros((N))
    for i in range(1, N):
        S_T[i] = std(X[:i + 1])
        # Deviation of the cumulative sum from the running-mean trend
        # over the first i+1 points.
        X_T = Y - T * Ave_T[i]
        R_T[i] = max(X_T[:i + 1]) - min(X_T[:i + 1])
    # Index 0 has no spread; copy the first defined value to avoid 0/0.
    R_T[0] = R_T[1]
    S_T[0] = S_T[1]
    R_S = R_T / S_T
    R_S = log(R_S)
    n = log(T).reshape(N, 1)
    # rcond=None selects numpy's modern cutoff and silences the
    # FutureWarning raised when rcond is omitted.
    H = lstsq(n[1:], R_S[1:], rcond=None)[0]
    return H[0]


if __name__ == "__main__":
    a = randn(4096)
    print(hurst(a))
[ "thunderocean@163.com" ]
thunderocean@163.com
d2ca3476978606ac4c7790dfc6731e1e08443cb3
6c1527b2dc3f944b8907d0de5bda6cdfbaeb1f7f
/otree-core-master/otree/bots/__init__.pyi
10d160cfdad49ca7bf4a07ebd7a9b551ac431221
[ "MIT" ]
permissive
dcthomas4679/otree
f0a9204b12cd395e55fd9b77ac90584c2cd3c049
363a05d2f70f9225628e4857473dedcb449018dc
refs/heads/master
2021-06-23T20:07:02.499724
2020-11-18T15:32:30
2020-11-18T15:32:30
37,225,765
1
1
NOASSERTION
2021-06-10T23:28:55
2015-06-10T22:22:33
Python
UTF-8
Python
false
false
400
pyi
from otree.models import Participant from typing import Any, List class Bot: html = '' # type: str case = None # type: Any cases = [] # type: List participant = None # type: Participant session = None # type: Participant def Submission(PageClass, post_data: dict={}, check_html=True): pass def SubmissionMustFail(PageClass, post_data: dict={}, check_html=True): pass
[ "dcthomas@gmail.com" ]
dcthomas@gmail.com
2f9cf4817c20491d9cc8f559864e3b8932b249eb
e5329001263e67a4d3c13d57bb91f2502280e206
/InvTL/lm_py/pypy/doc/tool/makecontributor.py
aad3b3c64f28255071d5bea67690c5e5ec5af08f
[]
no_license
yanhongliu/DARLAB
d9432db6e005a39e33501d7ffffe6e648b95b3fc
f739318c9620b44ef03d155f791c7ed4111d80fa
refs/heads/master
2021-05-27T19:58:58.458846
2014-02-04T12:09:26
2014-02-04T12:09:26
null
0
0
null
null
null
null
UTF-8
Python
false
false
934
py
""" generates a contributor list """ import py try: path = py.std.sys.argv[1] except IndexError: print "usage: %s ROOTPATH" %(py.std.sys.argv[0]) raise SystemExit, 1 d = {} for logentry in py.path.svnwc(path).log(): a = logentry.author if a in d: d[a] += 1 else: d[a] = 1 items = d.items() items.sort(lambda x,y: -cmp(x[1], y[1])) import uconf # http://codespeak.net/svn/uconf/dist/uconf # Authors that don't want to be listed excluded = set("anna gintas ignas".split()) cutoff = 5 # cutoff for authors in the LICENSE file mark = False for author, count in items: if author in excluded: continue user = uconf.system.User(author) try: realname = user.realname.strip() except KeyError: realname = author if not mark and count < cutoff: mark = True print '-'*60 print " ", realname #print count, " ", author
[ "mickg10@gmail.com" ]
mickg10@gmail.com
772fff4ae83dea2282200c1e01eea64b1f9523a4
be4df0f5ef586ad6d1b54bad9eb82ccd4544d5ac
/rook/build/interfaces/notify.py
2c07b830767195089069e9143c08e14c37117f08
[]
no_license
FlorianLudwig/rook
9ac1575fe84d90747cb1c96c17b50ebb895063ab
1698cd03d6f0f73045d9cc153f8c214474a3775d
refs/heads/master
2020-09-26T22:03:44.032131
2016-08-29T16:23:28
2016-08-29T16:23:28
66,860,073
0
0
null
null
null
null
UTF-8
Python
false
false
248
py
import rbusys class EMailInterface(rbusys.SinglePlug): def send(self, toaddrs, subject, body): """send e-mail toaddrs - List of receipients subject - E-Mail subject body - E-Mail text """ pass
[ "f.ludwig@greyrook.com" ]
f.ludwig@greyrook.com
30a6e9335281d9b6a98149191d44c4c9a3653317
c8dcfaa31695fe8167b8f5e4e8fd2ce50576026c
/order_book_recorder/opportunity.py
46e17ba130371112839c1fd888ddd8c064dedc4c
[]
no_license
smartiesss/arbitrage-opportunity-tracker
b362613c748b70eb40fc607b8f5bce5257c6b180
2703d0652298358447914e05b7647be85727ee7d
refs/heads/master
2023-08-17T17:45:58.658479
2021-10-14T19:30:39
2021-10-14T19:30:39
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,680
py
"""Find trading opportunitiess in different depths.""" from dataclasses import dataclass from typing import Dict, List @dataclass class Opportunity: """Describe a found arbitrage opportunity.""" market: str buy_exchange: str sell_exchange: str #: Market depth for this opportunity quantity: float buy_price: float sell_price: float @property def profit_without_fees(self) -> float: """Get % arbitrage profit this trade would make""" return (self.sell_price - self.buy_price) / self.buy_price @property def diff(self) -> float: """Fiat arbitrage window""" return self.sell_price - self.buy_price def find_opportunities(market: str, depth_quantity: float, depth_asks: Dict[str, float], depth_bids: Dict[str, float]) -> List[Opportunity]: """Get a list of opportunities, for each depth level, ranked from the best to high. A wasteful way of computation to evaluate every opportunity there is. :param depth_asks: (exchange, price) tuples of prices :param depth_bids: (exchange, price) tuples of prices :return: """ opportunities = [] for ask_exchange, ask_price in depth_asks.items(): for bid_exchange, bid_price in depth_bids.items(): o = Opportunity( market=market, buy_exchange=ask_exchange, sell_exchange=bid_exchange, buy_price=ask_price, sell_price=bid_price, quantity=depth_quantity, ) opportunities.append(o) opportunities.sort(key=lambda o: o.profit_without_fees, reverse=True) return opportunities
[ "mikko@opensourcehacker.com" ]
mikko@opensourcehacker.com
815ab5db55c70d2d819c9d5761b7e9b6761aa48f
4c6fd81d318a008a70e7a8af3e6ec34f3ea1006b
/src/www/index.py
184dd6d66cc5bc01f7fcfc0ef9bb032ab4871add
[ "MIT" ]
permissive
janhybs/automate
46a777cef593eac330b07142b6c827b49a18a5f5
ba8d7d812ca4211d69ece9442ff212a6c59629fc
refs/heads/master
2020-03-28T20:46:55.180930
2019-04-11T11:16:29
2019-04-11T12:05:10
149,101,207
0
0
null
null
null
null
UTF-8
Python
false
false
1,538
py
#!/bin/python3 # author: Jan Hybs import sys from flask import redirect, url_for, send_file from env import Env from www import app, login_required from loguru import logger from utils.crypto import b64decode max_file_view_limit = 1024*1024 @app.route('/') @app.route('/index') @login_required def index(): return redirect(url_for('view_courses')) @app.route('/log') def print_log(): log = Env.log_file.read_text() import ansi2html converter = ansi2html.Ansi2HTMLConverter() html = converter.convert(log) return '<h1>Automate log</h1>' + html @app.route('/log/clear') @login_required def clear_log(): Env.log_file.unlink() Env.log_file.touch() logger.configure(handlers=[ dict(sink=sys.stdout), dict(sink=Env.log_file, colorize=True) ]) logger.info('--- log file cleared ---') return redirect('/log') @app.route('/file/<string:data>/<string:as_name>') def serve_file(data: str, as_name: str): result = b64decode(data) local = Env.root.joinpath(*result['url']) assert local.parts[-2] in ('input', 'output', '.error') if local.exists(): if local.stat().st_size > max_file_view_limit: return send_file(str(local), mimetype='text/plain', as_attachment=True, attachment_filename=as_name) else: return send_file(str(local), mimetype='text/plain', attachment_filename=as_name) return 'File not found' # return send_file(str(local), mimetype='text/plain', as_attachment=True, attachment_filename=as_name)
[ "jan.hybs@tul.cz" ]
jan.hybs@tul.cz
78b7c58db3ccbe2c99a05e756341db5ea0bc7605
5335dbd5e836ecb816fd69e5e4b623eea4366e0d
/max/apps/portfolio/migrations/0003_auto__chg_field_portfolioitem_pub_date.py
7594b989144e25fca96cbbb6d4e8c21b7e3d94d0
[]
no_license
rootart/elayarch.com
c8c162f16b06ebdb578dd6d967cfb43add9575d1
d5287bc335a7c86a6e18be3943719ee5b3ffa014
refs/heads/master
2021-01-01T17:17:08.638798
2013-03-20T14:21:00
2013-03-20T14:21:00
null
0
0
null
null
null
null
UTF-8
Python
false
false
3,434
py
# encoding: utf-8 import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Changing field 'PortfolioItem.pub_date' db.alter_column('portfolio_portfolioitem', 'pub_date', self.gf('django.db.models.fields.DateField')(null=True)) def backwards(self, orm): # Changing field 'PortfolioItem.pub_date' db.alter_column('portfolio_portfolioitem', 'pub_date', self.gf('django.db.models.fields.DateTimeField')(null=True)) models = { 'portfolio.itemimage': { 'Meta': {'object_name': 'ItemImage'}, 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'descrption': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}), 'portfolioitem': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['portfolio.PortfolioItem']"}), 'position': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) }, 'portfolio.portfolioitem': { 'Meta': {'object_name': 'PortfolioItem'}, 'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['portfolio.PortfolioItemCategory']"}), 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'pub_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}), 'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}), 'title': 
('django.db.models.fields.CharField', [], {'max_length': '255'}), 'top_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}), 'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) }, 'portfolio.portfolioitemcategory': { 'Meta': {'ordering': "('-position',)", 'object_name': 'PortfolioItemCategory'}, 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'position': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) } } complete_apps = ['portfolio']
[ "dijakroot@gmail.com" ]
dijakroot@gmail.com
0c4091b7b05386040beb712b8027b0dcc48e17ba
bb85c41f3e6aad131d4725ad400cbd028d60d098
/EraseMask/core/Model.py
4b0e8f547072053a28ca37c7ddb59d7cc7832613
[ "Apache-2.0" ]
permissive
CoinCheung/Mossaic-Handling
c946c0496896a694af5e647e1b2fa425774e52a8
231e8b168c36d5ca4be527b19d8aa4821cda2ed5
refs/heads/master
2021-01-24T22:52:56.467185
2018-06-18T08:20:46
2018-06-18T08:20:46
123,270,887
2
0
null
null
null
null
UTF-8
Python
false
false
5,642
py
#!/usr/bin/python # -*- encoding: utf-8 -*- import mxnet as mx import mxnet.gluon as gluon ### models ## generator unet block class UnetSkipBlock(gluon.nn.HybridBlock): def __init__(self, inner_channels, outer_channels, innermost=False, outermost=False, use_dropout=False, unit=None): super(UnetSkipBlock, self).__init__() self.outermost = outermost with self.name_scope(): conv = gluon.nn.Conv2D(channels=inner_channels, in_channels=outer_channels, kernel_size=4, strides=(2,2), padding=(1,1)) leaky = gluon.nn.LeakyReLU(alpha=0.2) down_BN = gluon.nn.BatchNorm(momentum=0.1, in_channels=inner_channels) relu = gluon.nn.Activation(activation='relu') up_BN = gluon.nn.BatchNorm(momentum=0.1, in_channels=outer_channels) tanh = gluon.nn.Activation(activation='tanh') dropout = gluon.nn.Dropout(0.5) if innermost: deconv = gluon.nn.Conv2DTranspose(channels=outer_channels, in_channels=inner_channels, kernel_size=4, strides=(2,2), padding=(1,1)) down = [leaky, conv] up = [relu, deconv, up_BN] model = down + up elif outermost: deconv = gluon.nn.Conv2DTranspose(channels=outer_channels, in_channels=2*inner_channels, kernel_size=4, strides=(2,2), padding=(1,1)) down = [conv] up = [relu, deconv, tanh] model = down + [unit] + up else: deconv = gluon.nn.Conv2DTranspose(channels=outer_channels, in_channels=2*inner_channels, kernel_size=4, strides=(2,2), padding=(1,1)) down = [leaky, conv, down_BN] up = [relu, deconv, up_BN] model = down + [unit] + up if use_dropout: model += [dropout] self.mod = gluon.nn.HybridSequential() with self.mod.name_scope(): for layer in model: self.mod.add(layer) # self.mod.hybridize() def hybrid_forward(self, F, x): if self.outermost: return self.mod(x) else: return F.concat(self.mod(x), x, dim=1) ## Unet generator HybridBlock implementation class gen_unet(gluon.nn.HybridBlock): def __init__(self, in_channels, filter_base=64, use_dropout=True, num_down=5): super(gen_unet, self).__init__() # inner most layer unet = UnetSkipBlock(filter_base*8, filter_base*8, 
innermost=True) # middle layers with the most layers to be 8xfilter_base for i in range(num_down - 5): unet = UnetSkipBlock(filter_base*8, filter_base*8, unit=unet, use_dropout=use_dropout) unet = UnetSkipBlock(filter_base*8, filter_base*4, unit=unet) unet = UnetSkipBlock(filter_base*4, filter_base*2, unit=unet) unet = UnetSkipBlock(filter_base*2, filter_base, unit=unet) # outer most layer unet = UnetSkipBlock(filter_base, in_channels, unit=unet, outermost=True) with self.name_scope(): self.gen_model = unet def hybrid_forward(self, F, x): return self.gen_model(x) def generator_unet(in_channels, filter_base=64, use_dropout=True, num_down=5): # inner most layer unet = UnetSkipBlock(filter_base*8, filter_base*8, innermost=True) # middle layers with the most layers to be 8xfilter_base for i in range(num_down - 5): unet = UnetSkipBlock(filter_base*8, filter_base*8, unit=unet, use_dropout=use_dropout) unet = UnetSkipBlock(filter_base*8, filter_base*4, unit=unet) unet = UnetSkipBlock(filter_base*4, filter_base*2, unit=unet) unet = UnetSkipBlock(filter_base*2, filter_base, unit=unet) # outer most layer unet = UnetSkipBlock(filter_base, in_channels, unit=unet, outermost=True) gen_model = gluon.nn.HybridSequential() with gen_model.name_scope(): gen_model.add(unet) return gen_model def discriminator(in_channels, filter_base=64, nlayer=3): model = gluon.nn.HybridSequential() with model.name_scope(): conv = gluon.nn.Conv2D(channels=filter_base, in_channels=in_channels, kernel_size=(4,4), strides=(2,2), padding=(1,1)) leaky = gluon.nn.LeakyReLU(0.2) model.add(conv) model.add(leaky) filter_num = filter_base for i in range(1, nlayer): filter_prev = filter_num filter_num = min(filter_base*8, filter_prev*2) conv = gluon.nn.Conv2D(channels=filter_num, in_channels=filter_prev, kernel_size=(4,4), strides=(2,2), padding=(1,1)) bn = gluon.nn.BatchNorm(momentum=0.1, in_channels=filter_num) leaky = gluon.nn.LeakyReLU(0.2) model.add(conv) model.add(bn) model.add(leaky) filter_prev = 
filter_num filter_num = min(filter_base*8, filter_prev*2) conv = gluon.nn.Conv2D(channels=filter_num, in_channels=filter_prev, kernel_size=(4,4), strides=(1,1), padding=(1,1)) bn = gluon.nn.BatchNorm(momentum=0.1, in_channels=filter_num) leaky = gluon.nn.LeakyReLU(0.2) out = gluon.nn.Conv2D(channels=1, in_channels=filter_num, kernel_size=(4,4), strides=(1,1), padding=(1,1)) model.add(conv) model.add(bn) model.add(leaky) model.add(out) return model
[ "867153576@qq.com" ]
867153576@qq.com
71496e68c06c89690d2027aa5e33a9b6bfb11e5a
55c53e4ffbb7bd5ff6b2f43cbf7d37c1b969b4a4
/talentbuddy/rains.py
eaca0a81f7bfa927ac67b56c56eb20875bfbc641
[]
no_license
Xuefeng-Zhu/Code_Practice
a005e558c8627e83abc961ad12e2c6ae84c44e02
7d25aa7cc46a66ef176796f5dd23addd4734ebf2
refs/heads/master
2021-01-13T02:27:28.263472
2014-11-16T06:53:44
2014-11-16T06:53:44
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,260
py
from collections import deque def search(i, j, m, heights): visited = [0] * len(heights) count = 0 min = max(heights) d = deque() d.append((i,j)) while len(d): x, y = d.popleft() if x == 0 or y == 0 or x == m -1 or y == len(heights)/m - 1: if heights[y * m + x] < heights[j * m + i]: return 0 else: continue if not visited[y * m + x]: if heights[y * m + x] <= heights[j * m + i]: visited[y * m + x] = 1 count += 1 d.append((x-1, y)) d.append((x+1, y)) d.append((x, y-1)) d.append((x, y+1)) else: if heights[y * m + x] < min: min = heights[y * m + x] return count * (min - heights[j * m + i]) def rain(m, heights): # Write your code here # To print results to the standard output you can use print # Example: print "Hello world!" result = 0 for j in range(1, len(heights)/m-1): for i in range(1, m-1): result += search(i, j, m, heights) print result if __name__ == '__main__': rain(3, [5, 5, 5, 5, 1, 5, 5, 5, 5])
[ "xzhu15@illinois.edu" ]
xzhu15@illinois.edu
e93b91455a7985a732cad91ba8a1867878da50a4
f07a42f652f46106dee4749277d41c302e2b7406
/Data Set/bug-fixing-5/7207c78e101a0620b8fe2891c74f94de181e15fc-<legacy_dense_support>-bug.py
31700e1d3c92d9c07050e7386873e55e0e1f4113
[]
no_license
wsgan001/PyFPattern
e0fe06341cc5d51b3ad0fe29b84098d140ed54d1
cc347e32745f99c0cd95e79a18ddacc4574d7faa
refs/heads/main
2023-08-25T23:48:26.112133
2021-10-23T14:11:22
2021-10-23T14:11:22
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,209
py
def legacy_dense_support(func): 'Function wrapper to convert the `Dense` constructor from Keras 1 to 2.\n\n # Arguments\n func: `__init__` method of `Dense`.\n\n # Returns\n A constructor conversion wrapper.\n ' @six.wraps(func) def wrapper(*args, **kwargs): if (len(args) > 2): raise TypeError('The `Dense` layer can have at most one positional argument (the `units` argument).') if ('output_dim' in kwargs): if (len(args) > 1): raise TypeError('Got both a positional argument and keyword argument for argument `units` (`output_dim` in the legacy interface).') if ('units' in kwargs): raise_duplicate_arg_error('output_dim', 'units') output_dim = kwargs.pop('output_dim') args = (args[0], output_dim) conversions = [('init', 'kernel_initializer'), ('W_regularizer', 'kernel_regularizer'), ('b_regularizer', 'bias_regularizer'), ('W_constraint', 'kernel_constraint'), ('b_constraint', 'bias_constraint'), ('bias', 'use_bias')] kwargs = convert_legacy_kwargs('Dense', args, kwargs, conversions) return func(*args, **kwargs) return wrapper
[ "dg1732004@smail.nju.edu.cn" ]
dg1732004@smail.nju.edu.cn
758b577b9d36079667cb04f3eaf5089cdf36d64a
7159e9970ce2cc58482416392d1b489087b913c1
/tests/gis_tests/inspectapp/tests.py
c272b65aa841e9617eec552f04cb53c518c83025
[ "BSD-3-Clause" ]
permissive
ufgstores/django19
d4a53f12aa6d89405e2332d0c53a29447e6f8650
644716493ecac37271610de0e9cf97fc1fe46f10
refs/heads/master
2020-09-20T13:54:04.608149
2019-11-28T14:07:07
2019-11-28T14:29:02
224,502,004
0
0
BSD-3-Clause
2019-11-28T14:08:25
2019-11-27T19:23:16
Python
UTF-8
Python
false
false
8,022
py
from __future__ import unicode_literals import os import re from unittest import skipUnless from django.contrib.gis.gdal import HAS_GDAL from django.core.management import call_command from django.db import connection, connections from django.test import TestCase, skipUnlessDBFeature from django.test.utils import modify_settings from django.utils.six import StringIO from ..test_data import TEST_DATA if HAS_GDAL: from django.contrib.gis.gdal import Driver, GDALException, GDAL_VERSION from django.contrib.gis.utils.ogrinspect import ogrinspect from .models import AllOGRFields @skipUnless(HAS_GDAL, "InspectDbTests needs GDAL support") class InspectDbTests(TestCase): @skipUnlessDBFeature("gis_enabled") def test_geom_columns(self): """ Test the geo-enabled inspectdb command. """ out = StringIO() call_command( 'inspectdb', table_name_filter=lambda tn: tn == 'inspectapp_allogrfields', stdout=out ) output = out.getvalue() if connection.features.supports_geometry_field_introspection: self.assertIn('geom = models.PolygonField()', output) self.assertIn('point = models.PointField()', output) else: self.assertIn('geom = models.GeometryField(', output) self.assertIn('point = models.GeometryField(', output) @skipUnlessDBFeature("supports_3d_storage") def test_3d_columns(self): out = StringIO() call_command( 'inspectdb', table_name_filter=lambda tn: tn == 'inspectapp_fields3d', stdout=out ) output = out.getvalue() if connection.features.supports_geometry_field_introspection: self.assertIn('point = models.PointField(dim=3)', output) self.assertIn('line = models.LineStringField(dim=3)', output) self.assertIn('poly = models.PolygonField(dim=3)', output) else: self.assertIn('point = models.GeometryField(', output) self.assertIn('line = models.GeometryField(', output) self.assertIn('poly = models.GeometryField(', output) @skipUnless(HAS_GDAL, "OGRInspectTest needs GDAL support") @modify_settings( INSTALLED_APPS={'append': 'django.contrib.gis'}, ) class OGRInspectTest(TestCase): maxDiff 
= 1024 def test_poly(self): shp_file = os.path.join(TEST_DATA, 'test_poly', 'test_poly.shp') model_def = ogrinspect(shp_file, 'MyModel') expected = [ '# This is an auto-generated Django model module created by ogrinspect.', 'from django.contrib.gis.db import models', '', 'class MyModel(models.Model):', ' float = models.FloatField()', ' int = models.{}()'.format('BigIntegerField' if GDAL_VERSION >= (2, 0) else 'FloatField'), ' str = models.CharField(max_length=80)', ' geom = models.PolygonField(srid=-1)', ] self.assertEqual(model_def, '\n'.join(expected)) def test_poly_multi(self): shp_file = os.path.join(TEST_DATA, 'test_poly', 'test_poly.shp') model_def = ogrinspect(shp_file, 'MyModel', multi_geom=True) self.assertIn('geom = models.MultiPolygonField(srid=-1)', model_def) # Same test with a 25D-type geometry field shp_file = os.path.join(TEST_DATA, 'gas_lines', 'gas_leitung.shp') model_def = ogrinspect(shp_file, 'MyModel', multi_geom=True) self.assertIn('geom = models.MultiLineStringField(srid=-1)', model_def) def test_date_field(self): shp_file = os.path.join(TEST_DATA, 'cities', 'cities.shp') model_def = ogrinspect(shp_file, 'City') expected = [ '# This is an auto-generated Django model module created by ogrinspect.', 'from django.contrib.gis.db import models', '', 'class City(models.Model):', ' name = models.CharField(max_length=80)', ' population = models.{}()'.format('BigIntegerField' if GDAL_VERSION >= (2, 0) else 'FloatField'), ' density = models.FloatField()', ' created = models.DateField()', ' geom = models.PointField(srid=-1)', ] self.assertEqual(model_def, '\n'.join(expected)) def test_time_field(self): # Getting the database identifier used by OGR, if None returned # GDAL does not have the support compiled in. 
ogr_db = get_ogr_db_string() if not ogr_db: self.skipTest("Unable to setup an OGR connection to your database") try: # Writing shapefiles via GDAL currently does not support writing OGRTime # fields, so we need to actually use a database model_def = ogrinspect(ogr_db, 'Measurement', layer_key=AllOGRFields._meta.db_table, decimal=['f_decimal']) except GDALException: self.skipTest("Unable to setup an OGR connection to your database") self.assertTrue(model_def.startswith( '# This is an auto-generated Django model module created by ogrinspect.\n' 'from django.contrib.gis.db import models\n' '\n' 'class Measurement(models.Model):\n' )) # The ordering of model fields might vary depending on several factors (version of GDAL, etc.) self.assertIn(' f_decimal = models.DecimalField(max_digits=0, decimal_places=0)', model_def) self.assertIn(' f_int = models.IntegerField()', model_def) self.assertIn(' f_datetime = models.DateTimeField()', model_def) self.assertIn(' f_time = models.TimeField()', model_def) self.assertIn(' f_float = models.FloatField()', model_def) self.assertIn(' f_char = models.CharField(max_length=10)', model_def) self.assertIn(' f_date = models.DateField()', model_def) # Some backends may have srid=-1 self.assertIsNotNone(re.search(r' geom = models.PolygonField\(([^\)])*\)', model_def)) def test_management_command(self): shp_file = os.path.join(TEST_DATA, 'cities', 'cities.shp') out = StringIO() call_command('ogrinspect', shp_file, 'City', stdout=out) output = out.getvalue() self.assertIn('class City(models.Model):', output) def get_ogr_db_string(): """ Construct the DB string that GDAL will use to inspect the database. GDAL will create its own connection to the database, so we re-use the connection settings from the Django test. """ db = connections.databases['default'] # Map from the django backend into the OGR driver name and database identifier # http://www.gdal.org/ogr/ogr_formats.html # # TODO: Support Oracle (OCI). 
drivers = { 'django.contrib.gis.db.backends.postgis': ('PostgreSQL', "PG:dbname='%(db_name)s'", ' '), 'django.contrib.gis.db.backends.mysql': ('MySQL', 'MYSQL:"%(db_name)s"', ','), 'django.contrib.gis.db.backends.spatialite': ('SQLite', '%(db_name)s', '') } db_engine = db['ENGINE'] if db_engine not in drivers: return None drv_name, db_str, param_sep = drivers[db_engine] # Ensure that GDAL library has driver support for the database. try: Driver(drv_name) except: return None # SQLite/Spatialite in-memory databases if db['NAME'] == ":memory:": return None # Build the params of the OGR database connection string params = [db_str % {'db_name': db['NAME']}] def add(key, template): value = db.get(key, None) # Don't add the parameter if it is not in django's settings if value: params.append(template % value) add('HOST', "host='%s'") add('PORT', "port='%s'") add('USER', "user='%s'") add('PASSWORD', "password='%s'") return param_sep.join(params)
[ "slenart@alefajnie.pl" ]
slenart@alefajnie.pl
0eb85f794e58990a2c1f81d075205d9ae6c00710
15f321878face2af9317363c5f6de1e5ddd9b749
/solutions_python/Problem_118/1407.py
da7697f02acb65b0afe0600629f286dd3dd3792c
[]
no_license
dr-dos-ok/Code_Jam_Webscraper
c06fd59870842664cd79c41eb460a09553e1c80a
26a35bf114a3aa30fc4c677ef069d95f41665cc0
refs/heads/master
2020-04-06T08:17:40.938460
2018-10-14T10:12:47
2018-10-14T10:12:47
null
0
0
null
null
null
null
UTF-8
Python
false
false
526
py
import math def is_square(integer): root = math.sqrt(integer) if int(root + 0.5) ** 2 == integer: return int(root) else: return 0 def palindrome(L): return L == L[::-1] f=open('C-small-attempt1.in') T=int(f.readline()) for i in range(0,T): AB=f.readline().split() AB = map(int, AB) A=AB[0] B=AB[1] c=0 for k in range(A,B+1): p=is_square(k) if(p!=0 and palindrome(str(k)) and palindrome(str(p))): c=c+1 g=open('output','a') g.write('Case #'+str(i+1)+': '+str(c)+'\n')
[ "miliar1732@gmail.com" ]
miliar1732@gmail.com
89731aa069ceb76f20e907b1477225369950f2fd
fd173195d07b5a5ce229a0c1a20ee61884d8c8a1
/python_practice/rock_paper_scissors.py
c75e9f18562123af4d9b33b82a0891f081a6ec1a
[]
no_license
anitrajpurohit28/PythonPractice
f7e71946144e04b7f9cb9682087e5d4f79839789
8b75b67c4c298a135a5f8ab0b3d15bf5738859f1
refs/heads/master
2023-04-12T07:04:12.150646
2021-04-24T19:52:24
2021-04-24T19:52:24
293,912,662
0
0
null
null
null
null
UTF-8
Python
false
false
2,400
py
from random import choice options = ["rock", "paper", "scissors", "garbage"] print("...rock...") print("...paper...") print("...scissors...") # === implementation 1 start === # for i in range(10): # p1 = choice(options) # p2 = choice(options) # print(f"(enter Player 1's choice): {p1}") # print(f"(enter Player 2's choice): {p2}") # if p1 == "rock" and p2 == "paper": # print("player 2 wins") # elif p1 == "rock" and p2 == "scissors": # print("player 1 wins") # elif p1 == "paper" and p2 == "rock": # print("player 1 wins") # elif p1 == "paper" and p2 == "scissors": # print("player 2 wins") # elif p1 == "scissors" and p2 == "rock": # print("player 2 wins") # elif p1 == "scissors" and p2 == "paper": # print("player 1 wins") # else: # print("draw! same choice") # === implementation 1 ends === # === implementation 2 start === # for i in range(10): # p1 = choice(options) # p2 = choice(options) # print(f"(enter Player 1's choice): {p1}") # print(f"(enter Player 2's choice): {p2}") # if (p1 == "rock" and p2 == "scissors") or\ # (p1 == "paper" and p2 == "rock") or\ # (p1 == "scissors" and p2 == "paper"): # print("player 1 wins") # elif (p1 == "scissors" and p2 == "rock") or\ # (p1 == "rock" and p2 == "paper") or\ # (p1 == "paper" and p2 == "scissors"): # print("player 2 wins") # else: # print("draw! same choice") # === implementation 2 start === # === implementation 3 start === for i in range(10): p1 = choice(options) p2 = choice(options) print(f"(enter Player 1's choice): {p1}") print(f"(enter Player 2's choice): {p2}") if p1 == p2: print("draw! same choice") elif p1 == "garbage" or p2 == "garbage": print("Please select rock scissors or paper!") else: if p1 == "rock": if p2 == "scissors": print("player 1 wins") else: print("player 2 wins") elif p1 == "paper": if p2 == "scissors": print("player 2 wins") else: print("player 1 wins") elif p1 == "scissors": if p2 == "rock": print("player 2 wins") else: print("player 2 wins") # === implementation 3 start ===
[ "anitrajpurohit28@gmail.com" ]
anitrajpurohit28@gmail.com
fb733c6e8e48fc3ab9f1c4c7fd9d997c88c5f216
13cf27e496536b31f158fd117af1cb22841e53da
/year_2017/day_08/solution_part_2.py
7130d61f1ea755a4db35d2991785bf6db3857f04
[]
no_license
timofurrer/advent-of-code
8ab34e4ae58c103d351697b0bd80eef276708a04
45839eff32b1bfec2e2560438a6c1f002b9a9269
refs/heads/master
2021-10-08T05:31:29.844886
2018-12-08T13:59:02
2018-12-08T13:59:02
112,767,579
2
0
null
null
null
null
UTF-8
Python
false
false
2,930
py
""" Solution for the first puzzle of Day 4 """ import os import operator import functools from collections import namedtuple, defaultdict from typing import Tuple, List import pytest # My puzzle input MY_PUZZLE_INPUT = os.path.join(os.path.dirname(__file__), 'input.txt') Instruction = namedtuple('Instruction', [ 'target_operator', 'cond_operator', ]) OPERATORS = { 'inc': operator.add, 'dec': operator.sub, '==': operator.eq, '!=': operator.ne, '>': operator.gt, '>=': operator.ge, '<': operator.lt, '<=': operator.le } def parse_input(raw_instructions: str) -> List[Instruction]: """ Parse instructions """ instructions = [] for raw_instruction in raw_instructions.splitlines(): (target_register, target_operator, target_value, _, cond_register, cond_operator, cond_value) = raw_instruction.split() def __target_operator(target_register, target_operator, target_value, registers): registers[target_register] = OPERATORS[target_operator]( registers[target_register], int(target_value)) def __cond_operator(cond_register, cond_operator, cond_value, registers): return OPERATORS[cond_operator]( registers[cond_register], int(cond_value)) target_operator_func = functools.partial( __target_operator, target_register, target_operator, target_value) cond_operator_func = functools.partial( __cond_operator, cond_register, cond_operator, cond_value) instructions.append( Instruction(target_operator_func, cond_operator_func)) return instructions def run_instructions(instructions: List[Instruction]) -> Tuple[dict, int]: """ Run instructions and return registers """ registers = defaultdict(lambda: 0) max_value = 0 for instruction in instructions: if instruction.cond_operator(registers): instruction.target_operator(registers) current_max_value = max(registers.values()) if current_max_value > max_value: max_value = current_max_value return registers, max_value def find_largest_register_value(instructions: list) -> int: """ Find the largest register value """ registers, max_value = 
run_instructions(instructions) return max_value @pytest.mark.parametrize('raw_instructions, expected_value', [ (open(os.path.join(os.path.dirname(__file__), 'sample_input.txt')).read(), 10), # my puzzle input (open(MY_PUZZLE_INPUT).read(), 5443) ]) def test_find_largest_register_value(raw_instructions, expected_value): """Test the solution""" # given parsed_input = parse_input(raw_instructions) # when actual_value = find_largest_register_value(parsed_input) # then assert actual_value == expected_value
[ "tuxtimo@gmail.com" ]
tuxtimo@gmail.com
649c1843d6146a2e3ecd918e102c168566d7362a
d5da22b51c111f3d0ede725dddd97bb4cf94de4e
/calib/calib/misc.py
422549fd63db29a3ed3e856530ac20f20edde71d
[ "BSD-2-Clause" ]
permissive
jabozzo/delta_sigma_pipe_lascas_2020
bb5a18fa6fcf3753b9e7901d313546bc9368ef67
1351371aa03a12385c6fd9b29cf606ddd421ee18
refs/heads/master
2023-01-09T07:46:48.246573
2020-02-19T01:59:27
2020-02-19T01:59:27
241,505,258
0
1
BSD-2-Clause
2022-12-27T15:36:34
2020-02-19T01:24:33
Python
UTF-8
Python
false
false
4,124
py
#! /usr/bin/env python import copy import json import numpy as np NO_PARAM = object() """ Object to use instead of None when None is a valid value and cannot be used as default. """ def default(value, default): """ Same as default if value is None else value, but less efficient. :param value: Value to return if not None. :param default: Value to return if value is None. """ return default if value is None else value def push_random_state(state=None): """ Returns a context manager that will reset the numpy.random state once the context is left to the state that was before entering the context. Useful for setting a seed or another state temporarily. :param state: A valid numpy random state .. seealso :: :func:`numpy.random.get_state` :type state: tuple """ class StateStore(object): def __init__(self, state): self.state = copy.deepcopy(state) def __enter__(self): self._prev_state = np.random.get_state() if self.state is not None: np.random.set_state(self.state) return self def __exit__(self, type, value, tb): self.state = copy.deepcopy(np.random.get_state()) np.random.set_state(self._prev_state) return StateStore(state) class Namespace(object): def __init__(self, **kwargs): self._my_attrs = set() for key, value in kwargs.items(): setattr(self, key, value) def __setattr__(self, name, value): if name != "_my_attrs": self._my_attrs.add(name) super().__setattr__(name, value) def __delattr__(self, name): if name != "_my_attrs": self._my_attrs.discard(name) super().__delattr__(name) def __copy__(self, memo=None): return Namespace(**{name : getattr(self, name) for name in self._my_attrs}) def __deepcopy__(self, memo=None): return Namespace(**{name : copy.deepcopy(getattr(self, name), memo) for name in self._my_attrs}) def getitem(iterable, idx): if len(idx) > 1: return getitem(iterable[idx[0]], idx[1:]) elif len(idx) == 1: return iterable[idx[0]] else: return iterable def setitem(iterable, idx, value): if len(idx) > 1: setitem(iterable[idx[0]], idx[1:], value) elif len(idx) 
== 1: iterable[idx[0]] = value else: raise ValueError("Does not work on scalars") def multiget(dct, *keys): return tuple(dct[key] for key in keys) def json_args(lst): result = [] for element in lst: element = json.loads(element) if isinstance(element, str): # If quotes where escaped element = json.loads(element) assert isinstance(element, dict) result.append(element) return result def ogrid(idx, length, tot_len=None): tot_len = default(tot_len, idx + 1) assert tot_len >= idx + 1 shape = (1,)*idx + (length,) + (1,)*(tot_len - idx - 1) return np.reshape(list(range(length)), shape) def iterate_combinations(n, k): if k > n: raise ValueError("k ({}) must be less or equal than n ({}).".format(k, n)) elif k <= 0: raise ValueError("k ({}) must be greater than 0.".format(k)) state = tuple(range(k)) def update(state, index): inv_index = k-index limit = n - inv_index if state[index] >= limit: return update(state, index-1) else: start_point = state[index] + 1 return state[0:index] + tuple(range(start_point, start_point + inv_index)) while state[0] <= n - k: yield state try: state = update(state, k-1) except IndexError: break def iterate_permutations(lst): if len(lst) > 1: for ii in range(len(lst)): element = lst[ii:ii+1] rest = lst[:ii] + lst[ii+1:] for combination in iterate_permutations(rest): yield element + combination elif len(lst) == 1: yield lst[0:1] else: yield []
[ "unconfigured@null.spigotmc.org" ]
unconfigured@null.spigotmc.org
3517a4562f9c5c46f13d3beb5eb663b05736e6e4
134267f2244954d48c65daae0b58051aba757fed
/236B.py
8f53d741c174b829cf1bd613281e0eee391ae21a
[]
no_license
mobin-zaman/misc_python
47fe836d1eae154210912b8b353f241303523e6b
7a22329ae38b2d5ee9cd9ce29d995686759f5f87
refs/heads/master
2020-04-28T00:48:06.774434
2019-07-24T15:28:15
2019-07-24T15:28:15
174,829,343
0
0
null
null
null
null
UTF-8
Python
false
false
354
py
a, b, c = map(int, input().split()) max_number = 100 * 100 * 100 d=[0]*(max_number+1) for i in range(1, (max_number+1)): j = i while j <= max_number: d[j] += 1 j += i count = 0 for i in range(1, a + 1): for j in range(1, b + 1): for k in range(1, c + 1): count += d[i * j * k] print(count % 1073741824)
[ "mobin_zaman@hotmail.com" ]
mobin_zaman@hotmail.com
3e5288257df2d5b216c1c97b8925e7775fc045e1
ca7aa979e7059467e158830b76673f5b77a0f5a3
/Python_codes/p03569/s275849554.py
ddfe141423dd23ba336384b2047d8b947eccc03c
[]
no_license
Aasthaengg/IBMdataset
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
refs/heads/main
2023-04-22T10:22:44.763102
2021-05-13T17:27:22
2021-05-13T17:27:22
367,112,348
0
0
null
null
null
null
UTF-8
Python
false
false
455
py
from collections import deque def main(): s = deque(input()) ans = 0 while len(s)>1: if s[0]==s[-1]: s.popleft() s.pop() continue if s[0]!='x' and s[-1]!='x': ans = -1 break if s[0]=='x': ans+=1 s.popleft() if s[-1]=='x': ans+=1 s.pop() print(ans) if __name__ == "__main__": main()
[ "66529651+Aastha2104@users.noreply.github.com" ]
66529651+Aastha2104@users.noreply.github.com
defefdf63b8f66c146cde4e733d11504ee4ef9d3
97eb35e37480f2b6cfb7c8055d467cec1e1c018e
/script.module.resolveurl/lib/resolveurl/plugins/cda.py
7fa2ddcdbc0f79a0437d7eb925fc94b4abf3931b
[]
no_license
CYBERxNUKE/xbmc-addon
46163c286197e21edfdeb3e590ca4213eade0994
eb4d38f11da7ea54d194359c7dcaeddfb3a40e52
refs/heads/master
2023-01-10T20:24:08.629399
2023-01-07T21:28:06
2023-01-07T21:28:06
55,860,463
7
2
null
2022-12-08T10:39:26
2016-04-09T18:27:34
Python
UTF-8
Python
false
false
2,694
py
""" Plugin for ResolveURL Copyright (C) 2020 gujal This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. """ import re from six.moves import urllib_parse import json from resolveurl.lib import helpers from resolveurl import common from resolveurl.resolver import ResolveUrl, ResolverError class CdaResolver(ResolveUrl): name = 'cda' domains = ['m.cda.pl', 'cda.pl', 'www.cda.pl', 'ebd.cda.pl'] pattern = r'(?://|\.)(cda\.pl)/(?:.\d+x\d+|video)/([0-9a-zA-Z]+)' def get_media_url(self, host, media_id): web_url = self.get_url(host, media_id) headers = {'Referer': web_url, 'User-Agent': common.RAND_UA} html = self.net.http_GET(web_url, headers=headers).content match = re.search(r"player_data='([^']+)", html) if match: qdata = json.loads(match.group(1)).get('video', {}).get('qualities') sources = [(q, '?wersja={0}'.format(q)) for q in qdata.keys()] if len(sources) > 1: html = self.net.http_GET(web_url + helpers.pick_source(helpers.sort_sources_list(sources)), headers=headers).content match = re.search(r"player_data='([^']+)", html) src = json.loads(match.group(1)).get('video').get('file') return self.cda_decode(src) + helpers.append_headers(headers) raise ResolverError('Video Link Not Found') def get_url(self, host, media_id): return self._default_get_url(host, media_id, template='https://ebd.cda.pl/647x500/{media_id}') def cda_decode(self, a): a = a.replace("_XDDD", "") a = a.replace("_CDA", "") a = a.replace("_ADC", "") a = 
a.replace("_CXD", "") a = a.replace("_QWE", "") a = a.replace("_Q5", "") a = a.replace("_IKSDE", "") a = urllib_parse.unquote(a) a = ''.join([chr(33 + (ord(char) + 14) % 94) if 32 < ord(char) < 127 else char for char in a]) a = a.replace(".cda.mp4", "") a = a.replace(".2cda.pl", ".cda.pl") a = a.replace(".3cda.pl", ".cda.pl") return "https://{0}.mp4".format(a)
[ "cyberxnuke@computertechs.org" ]
cyberxnuke@computertechs.org
a3e1822a2db66eb4ca0a66cf3086570f3deaaee9
2a61b02c26e77686e38cd9039e6f4b0530ddb7c9
/bitbots_misc/bitbots_live_tool_rqt/scripts/position_msg.py
fbe1130cfa173d142c489a4d9e0284074fa48118
[ "MIT" ]
permissive
fly-pigTH/bitbots_thmos_meta
931413e86929751024013b8e35f87b799243e22c
f45ccc362dc689b69027be5b0d000d2a08580de4
refs/heads/master
2023-08-27T02:58:08.397650
2021-10-22T17:17:11
2021-10-22T17:17:11
null
0
0
null
null
null
null
UTF-8
Python
false
false
2,018
py
from geometry_msgs.msg import PoseWithCovarianceStamped, Point, Pose from std_msgs.msg import Header import yaml import rospy import tf from name import Name class PositionMsg: label_orientation = "o" label_pos = "p" label_yaw = "yw" title = "position_msg" def __init__(self): # the dict containing message data self.data = {} #self.param_robot_id = "live_tool_id" # labels for dict #self.s_position = "pos" #self.s_orientation = "orientation" # Takes a PoseWithCovarianceStamped Msg and writes necessary information in data def setPoseWithCovarianceStamped(self, data): """ dictionary with information about the position of the robot :param data: a dictionary with the transmitted information :return: """ self.data[Name.last_update] = data.header.stamp.secs self.data[PositionMsg.label_pos] = {"x": data.pose.pose.position.x, "y": data.pose.pose.position.y} #"z": data.pose.pose.position.z} quat = data.pose.pose.orientation euler = tf.transformations.euler_from_quaternion([quat.x, quat.y, quat.z, quat.w]) #roll = euler[0] #pitch = euler[1] yaw = euler[2] self.data[PositionMsg.label_orientation] = {PositionMsg.label_yaw: yaw} #, #"pitch": pitch, #"roll": roll} # Returns yaml string containing relevant information def getMsg(self): """ :return: """ # makes time stamp before sending udp package time = rospy.get_rostime() self.data[Name.timestamp] = {Name.secs: time.secs, Name.nsecs: time.nsecs} id = "NO_ID" if rospy.has_param( Name.param_robot_id ): id = rospy.get_param(Name.param_robot_id) return id + "::" + PositionMsg.title + "::" + yaml.dump(self.data)
[ "759074800@qq.com" ]
759074800@qq.com
168c09948d663d36f0a4ff326f00c0c1ade3eec7
e2081f2f873825a3cc8b529614eb784f5cf5e8c5
/queue2.py
db104feb8fff4a762dc98e1e2cba55592c776324
[]
no_license
yilinanyu/Leetcode-with-Python
17b454058c673381dbafa5a2a154c4e84b449399
a55d2a3e383f858477170effbf8f6454e5dfd218
refs/heads/master
2021-01-21T04:55:31.025194
2016-07-11T20:10:18
2016-07-11T20:10:18
36,630,923
5
1
null
null
null
null
UTF-8
Python
false
false
366
py
class Queue: def __init__(self): self.items = [] def isEmpty(self): return self.items == [] def enqueue(self, item): self.items.insert(0,item) def dequeue(self): return self.items.pop() def size(self): return len(self.items) q=Queue() q.enqueue(4) q.enqueue('dog') q.enqueue(True) print(q.size())
[ "ly783@nyu.edu" ]
ly783@nyu.edu
f2d296fe0c7e4c778ff85a9d45217c4f02b624de
52b2e3470cd4b91975b2e1caed8d1c93c20e5d05
/examples/python/amod/__init__.py
220052cc03a60dd492d9e8d8c3160016148fad78
[]
no_license
xprime480/projects
c2f9a82bbe91e00859568dc27ae17c3b5dd873e3
3c5eb2d53bd7fa198edbe27d842ee5b5ff56e226
refs/heads/master
2020-04-27T03:51:29.456979
2019-04-12T14:34:39
2019-04-12T14:34:39
174,037,060
0
0
null
null
null
null
UTF-8
Python
false
false
165
py
__version__ = '0.0.1' __all__ = [ 'basic' ] def basic(a, b) : from .first import fnfirst from .second import fnsecond return fnfirst(a) + fnsecond(b)
[ "mi.davis@sap.com" ]
mi.davis@sap.com
1979f8687f6086dc166fa09e25c94817cd1bcf04
53edf6b0f4262ee76bb4e3b943394cfeafe54865
/animated_tests/sine_wave.py
9b6aed48f367410b1b6083d3726e44fa4928ee59
[]
no_license
Yoshi2112/hybrid
f86265a2d35cb0a402ba6ab5f718717d8eeb740c
85f3051be9368bced41af7d73b4ede9c3e15ff16
refs/heads/master
2023-07-07T21:47:59.791167
2023-06-27T23:09:23
2023-06-27T23:09:23
82,878,960
0
1
null
2020-04-16T18:03:59
2017-02-23T03:14:49
Python
UTF-8
Python
false
false
1,487
py
# -*- coding: utf-8 -*- """ Created on Thu Feb 7 23:10:32 2019 @author: Yoshi """ """ Matplotlib Animation Example author: Jake Vanderplas email: vanderplas@astro.washington.edu website: http://jakevdp.github.com license: BSD Please feel free to use and modify this, but keep the above information. Thanks! """ import numpy as np from matplotlib import pyplot as plt from matplotlib import animation # First set up the figure, the axis, and the plot element we want to animate fig = plt.figure() ax = plt.axes(xlim=(0, 2), ylim=(-2, 2)) line, = ax.plot([], [], lw=2) # initialization function: plot the background of each frame def init(): line.set_data([], []) return line, # animation function. This is called sequentially def animate(i): x = np.linspace(0, 2, 1000) y = np.sin(2 * np.pi * (x - 0.01 * i)) line.set_data(x, y) return line, # call the animator. blit=True means only re-draw the parts that have changed. anim = animation.FuncAnimation(fig, animate, init_func=init, frames=200, interval=20, blit=True) # save the animation as an mp4. This requires ffmpeg or mencoder to be # installed. The extra_args ensure that the x264 codec is used, so that # the video can be embedded in html5. You may need to adjust this for # your system: for more information, see # http://matplotlib.sourceforge.net/api/animation_api.html #anim.save('basic_animation.mp4', fps=30, extra_args=['-vcodec', 'libx264']) plt.show()
[ "joshua.s.williams@uon.edu.au" ]
joshua.s.williams@uon.edu.au
405182fd5a3647c295d939623d3db62d878ff687
4e8ac215b672b333f19da87787c0d8768fee439e
/MIDI Remote Scripts/ableton/v2/control_surface/components/undo_redo.py
b00227651ef063ea6efa1784468fe529d1d3cedb
[ "MIT" ]
permissive
aarkwright/ableton_devices
593f47293c673aa56f6e0347ca6444b7fce2812a
fe5df3bbd64ccbc136bba722ba1e131a02969798
refs/heads/master
2020-07-02T08:11:21.137438
2019-08-09T13:48:06
2019-08-09T13:48:06
201,467,890
0
0
null
null
null
null
UTF-8
Python
false
false
903
py
# uncompyle6 version 3.3.5 # Python bytecode 2.7 (62211) # Decompiled from: Python 3.7.3 (default, Apr 24 2019, 15:29:51) [MSC v.1915 64 bit (AMD64)] # Embedded file name: c:\Jenkins\live\output\win_64_static\Release\python-bundle\MIDI Remote Scripts\ableton\v2\control_surface\components\undo_redo.py # Compiled at: 2019-04-23 16:19:13 from __future__ import absolute_import, print_function, unicode_literals from .. import Component from ..control import ButtonControl class UndoRedoComponent(Component): undo_button = ButtonControl() redo_button = ButtonControl() @undo_button.pressed def undo_button(self, button): self._undo() @redo_button.pressed def redo_button(self, button): self._redo() def _redo(self): if self.song.can_redo: self.song.redo() def _undo(self): if self.song.can_undo: self.song.undo()
[ "apollo.arkwright@gmail.com" ]
apollo.arkwright@gmail.com
6982cf3dba3e276b311b1ddba1e3ea9aeb85fe86
760e1c14d056dd75958d367242c2a50e829ac4f0
/string/20_valid_parentheses.py
4eb8636be7ce1b2fe9b74d2985cb111214afc6a1
[]
no_license
lawtech0902/py_imooc_algorithm
8e85265b716f376ff1c53d0afd550470679224fb
74550d68cd3fd2cfcc92e1bf6579ac3b8f31aa75
refs/heads/master
2021-04-26T22:54:42.176596
2018-09-23T15:45:22
2018-09-23T15:45:22
123,894,744
0
0
null
null
null
null
UTF-8
Python
false
false
843
py
# _*_ coding: utf-8 _*_ """ 给定一个只包括 '(',')','{','}','[',']' 的字符串,判断字符串是否有效。 有效字符串需满足: 左括号必须用相同类型的右括号闭合。 左括号必须以正确的顺序闭合。 注意空字符串可被认为是有效字符串。 __author__ = 'lawtech' __date__ = '2018/4/28 下午3:11' """ class Solution: def isValid(self, s): """ :type s: str :rtype: bool """ stack = [] left = ['(', '[', '{'] right = [')', ']', '}'] for ch in s: if ch in left: stack.append(ch) else: if not stack: return False if left.index(stack.pop()) != right.index(ch): return False return not stack
[ "584563542@qq.com" ]
584563542@qq.com
b0d859ae3fd1591ce741d3883f653465621fb851
af9268e1ead8cdb491868c14a2240d9e44fb3b56
/last-minute-env/lib/python2.7/site-packages/django/views/decorators/http.py
a1fb98382d892ffad2f8bbdc655d574b650903d8
[]
no_license
frosqh/Cousinade2017
d5154c24c93ca8089eeba26b53c594e92cb6bd82
c34d5707af02402bf2bb7405eddc91297da399ff
refs/heads/master
2021-01-20T07:57:34.586476
2017-10-22T18:42:45
2017-10-22T18:42:45
90,074,802
1
0
null
null
null
null
UTF-8
Python
false
false
4,752
py
""" Decorators for views based on HTTP headers. """ import logging from calendar import timegm from functools import wraps from django.http import HttpResponseNotAllowed from django.middleware.http import ConditionalGetMiddleware from django.utils.cache import get_conditional_response from django.utils.decorators import available_attrs, decorator_from_middleware from django.utils.http import http_date, quote_etag conditional_page = decorator_from_middleware(ConditionalGetMiddleware) logger = logging.getLogger('django.request') def require_http_methods(request_method_list): """ Decorator to make a view only accept particular request methods. Usage:: @require_http_methods(["GET", "POST"]) def my_view(request): # I can assume now that only GET or POST requests make it this far # ... Note that request methods should be in uppercase. """ def decorator(func): @wraps(func, assigned=available_attrs(func)) def inner(request, *args, **kwargs): if request.method not in request_method_list: logger.warning( 'Method Not Allowed (%s): %s', request.method, request.path, extra={'status_code': 405, 'request': request} ) return HttpResponseNotAllowed(request_method_list) return func(request, *args, **kwargs) return inner return decorator require_GET = require_http_methods(["GET"]) require_GET.__doc__ = "Decorator to require that a view only accepts the GET method." require_POST = require_http_methods(["POST"]) require_POST.__doc__ = "Decorator to require that a view only accepts the POST method." require_safe = require_http_methods(["GET", "HEAD"]) require_safe.__doc__ = "Decorator to require that a view only accepts safe methods: GET and HEAD." def condition(etag_func=None, last_modified_func=None): """ Decorator to support conditional retrieval (or change) for a view function. The parameters are callables to compute the ETag and last modified time for the requested resource, respectively. The callables are passed the same parameters as the view itself. 
The ETag function should return a string (or None if the resource doesn't exist), while the last_modified function should return a datetime object (or None if the resource doesn't exist). The ETag function should return a complete ETag, including quotes (e.g. '"etag"'), since that's the only way to distinguish between weak and strong ETags. If an unquoted ETag is returned (e.g. 'etag'), it will be converted to a strong ETag by adding quotes. This decorator will either pass control to the wrapped view function or return an HTTP 304 response (unmodified) or 412 response (precondition failed), depending upon the request method. In either case, it will add the generated ETag and Last-Modified headers to the response if it doesn't already have them. """ def decorator(func): @wraps(func, assigned=available_attrs(func)) def inner(request, *args, **kwargs): # Compute values (if any) for the requested resource. def get_last_modified(): if last_modified_func: dt = last_modified_func(request, *args, **kwargs) if dt: return timegm(dt.utctimetuple()) # The value from etag_func() could be quoted or unquoted. res_etag = etag_func(request, *args, **kwargs) if etag_func else None res_etag = quote_etag(res_etag) if res_etag is not None else None res_last_modified = get_last_modified() response = get_conditional_response( request, etag=res_etag, last_modified=res_last_modified, ) if response is None: response = func(request, *args, **kwargs) # Set relevant headers on the response if they don't already exist. if res_last_modified and not response.has_header('Last-Modified'): response['Last-Modified'] = http_date(res_last_modified) if res_etag and not response.has_header('ETag'): response['ETag'] = res_etag return response return inner return decorator # Shortcut decorators for common cases based on ETag or Last-Modified only def etag(etag_func): return condition(etag_func=etag_func) def last_modified(last_modified_func): return condition(last_modified_func=last_modified_func)
[ "frosqh@gmail.com" ]
frosqh@gmail.com
a38b83a00e47e07950f6c79fb27eddfac0d2ae1e
5a61eb222fda029d2b0a8169d6508bf8b3222d57
/group_reputation_v2/one_learn_group_network_new/long_time_pgg_original_price_positive_hete.py
7bd98b75c6b236f97a34eb07cde340c2d903c430
[]
no_license
Dcomplexity/research
f7b5ed539ce63b16026bddad0d08b3d23c3aa2a8
7e487f765b7eee796464b6a1dc90baa5d3e5d5db
refs/heads/master
2022-04-16T19:02:38.634091
2020-04-13T02:31:28
2020-04-13T02:31:28
199,882,553
0
0
null
null
null
null
UTF-8
Python
false
false
6,282
py
import numpy as np import pandas as pd import random import math import matplotlib.pyplot as plt import networkx as nx import datetime def pgg_game(a_l, gamma): # r in (0, 1] a_n = len(a_l) p = np.array([np.sum(a_l) * gamma * a_n / a_n for _ in range(a_n)]) - np.array(a_l) return p def price_model(n, m, r): p = 1 / (r - 1) G = nx.Graph() t0 = 3 all_node = np.arange(n) node_array = [] for i in range(t0): for j in range(t0): if i != j: G.add_edge(i, j) node_array.append(j) for t in range(t0, n): to_link_list = [] m_flag = 0 while m_flag < m: if random.random() < p: to_link_node = np.random.choice(node_array) if to_link_node not in to_link_list and t != to_link_node: if (to_link_node, t) not in G.edges and (t, to_link_node) not in G.edges: G.add_edge(t, to_link_node) to_link_list.append(to_link_node) m_flag += 1 else: to_link_node = np.random.choice(t) if to_link_node not in to_link_list and t != to_link_node: if (to_link_node, t) not in G.edges and (t, to_link_node) not in G.edges: G.add_edge(t, to_link_node) to_link_list.append(to_link_node) m_flag += 1 node_array.extend(to_link_list) return G.to_undirected() def generate_price(n, m, r): g_network = price_model(n, m, r) adj_array = nx.to_numpy_array(g_network) adj_link = [] for i in range(adj_array.shape[0]): adj_link.append(np.where(adj_array[i] == 1)[0]) g_edge = nx.Graph() for i in range(len(adj_link)): for j in range(len(adj_link[i])): g_edge.add_edge(i, adj_link[i][j]) return np.array(adj_link), np.array(g_edge.edges()) class SocialStructure(): def __init__(self, g_s, g_n, t_n): self.g_s = g_s # group_size self.g_n = g_n # group_num self.t_n = t_n # total_num if self.t_n != self.g_s * self.g_n: print ("Error: The total num of individuals does not correspond to the social structure") def build_social_structure(self): ind_pos = [0 for x in range(self.t_n)] pos_ind = [[] for x in range(self.g_n)] for i in range(self.g_n): for j in range(i*self.g_s, (i+1)*self.g_s): ind_pos[j] = i pos_ind[i].append(j) return 
ind_pos, pos_ind def build_structure(g_s, g_n): t_n = g_s * g_n s_s = SocialStructure(g_s, g_n, t_n) ind_pos, pos_ind = s_s.build_social_structure() return ind_pos, pos_ind def initial_action(f_0, ind_pos, pos_ind): init_a = [] g_n = len(pos_ind) for pos in range(g_n): group_ind = pos_ind[pos] group_ind_n = len(group_ind) group_ind_a = np.random.choice([0, 1], group_ind_n, p = [1 - f_0[pos], f_0[pos]]) init_a.append(group_ind_a) return np.array(init_a) def game_one_round(a_l, gamma, ind_pos, pos_ind, g_s, w, mu, adj_link): ind_n = len(ind_pos) pos_n = len(pos_ind) a_l_old = np.copy(a_l) ind_a_l = a_l.flatten() ind_a_l_old = a_l_old.flatten() p_l = [] for pos in range(pos_n): g_a = np.copy(a_l[pos]) g_p = pgg_game(g_a, gamma) p_l.append(g_p) p_l = np.array(p_l) ind_p_l = p_l.flatten() for pos in range(pos_n): if random.random() < mu: g_ind = pos_ind[pos] ind = random.choice(g_ind) ind_a_l[ind] = 1 - ind_a_l_old[ind] else: g_ind = pos_ind[pos] ind = random.choice(g_ind) potential_pos = adj_link[pos] potential_pos = np.append(potential_pos, pos) while True: oppon_pos = random.choice(potential_pos) oppon_ind = pos_ind[oppon_pos] oppon = random.choice(oppon_ind) if oppon != ind: break ind_p = ind_p_l[ind] oppon_p = ind_p_l[oppon] t1 = 1 / (1 + math.e ** (2.0 * (ind_p - oppon_p))) # t1 = (1 / 2 + w / (2 * delta_pi) * (oppon_p - ind_p)) t2 = random.random() if t2 < t1: ind_a_l[ind] = ind_a_l_old[oppon] return ind_a_l.reshape((pos_n, int(ind_n / pos_n))) def run_game(f_0, init_time, run_time, gamma, ind_pos, pos_ind, g_s, w, mu): f_history = [] for round in range(init_time): a_l = initial_action(f_0, ind_pos, pos_ind) adj_link, edge = generate_price(g_n, 2, r_value) for step in range(run_time): if round == 0: f_history.append(a_l.mean(axis=1)) else: f_history[step] = round / (round + 1) * f_history[step] + 1 / (round + 1) * a_l.mean(axis=1) a_l = game_one_round(a_l, gamma, ind_pos, pos_ind, g_s, w, mu, adj_link) if round == 0: f_history.append(a_l.mean(axis=1)) else: 
f_history[run_time] = round / (round + 1) * f_history[run_time] + 1 / (round + 1) * a_l.mean(axis=1) return f_history if __name__ == '__main__': g_s = 5; g_n = 30; w = 1.0; run_time = 2000; init_time = 100 c = 1.0; mu = 0.01 # gamma = 0.5; r = gamma * g_s ind_pos, pos_ind = build_structure(g_s, g_n) gamma_l = np.round(np.arange(0.1, 1.51, 0.05), 2) step_l = np.arange(run_time + 1) for r_value in [2, 2.2, 2.5, 3, 5, 7, 10]: print(r_value) gamma_frac_history = [] for gamma in gamma_l: print(gamma) f_0 = [(g_n - 1 - _ + 0.001)/ g_n for _ in range(g_n)] history_sim_r = run_game(f_0, init_time, run_time, gamma, ind_pos, pos_ind, g_s, w, mu) gamma_frac_history.extend(history_sim_r) m_index = pd.MultiIndex.from_product([gamma_l, step_l], names=['gamma', 'step']) gamma_frac_history_pd = pd.DataFrame(gamma_frac_history, index=m_index) file_name = './results/long_time_pgg_original_price_positive_hete_%.1f.csv' % r_value gamma_frac_history_pd.to_csv(file_name) print(gamma_frac_history_pd)
[ "cdengcnc@sjtu.edu.cn" ]
cdengcnc@sjtu.edu.cn
ac98451edb913e03fa32c02e54b21bf44a487584
ec21d4397a1939ac140c22eca12491c258ed6a92
/Zope-2.9/bin/test.py
e92ddcc1fb7fd8e3d16f64fc27a88170c6935365
[]
no_license
wpjunior/proled
dc9120eaa6067821c983b67836026602bbb3a211
1c81471295a831b0970085c44e66172a63c3a2b0
refs/heads/master
2016-08-08T11:59:09.748402
2012-04-17T07:37:43
2012-04-17T07:37:43
3,573,786
0
0
null
null
null
null
UTF-8
Python
false
false
3,448
py
#!/var/interlegis/SAPL-2.3/Python-2.4/bin/python2.4 ############################################################################## # # Copyright (c) 2004 Zope Corporation and Contributors. # All Rights Reserved. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE. # ############################################################################## """Zope 2 test script see zope.testing testrunner.txt $Id: test.py 33303 2005-07-13 22:28:33Z jim $ """ import os.path, sys # Remove script directory from path: scriptdir = os.path.realpath(os.path.dirname(sys.argv[0])) sys.path[:] = [p for p in sys.path if os.path.realpath(p) != scriptdir] shome = os.environ.get('SOFTWARE_HOME') zhome = os.environ.get('ZOPE_HOME') ihome = os.environ.get('INSTANCE_HOME') if zhome: zhome = os.path.realpath(zhome) if shome: shome = os.path.realpath(shome) else: shome = os.path.join(zhome, 'lib', 'python') elif shome: shome = os.path.realpath(shome) zhome = os.path.dirname(os.path.dirname(shome)) elif ihome: print >> sys.stderr, ''' If INSTANCE_HOME is set, then at least one of SOFTWARE_HOME or ZOPE_HOME must be set ''' else: # No zope home, derive it from script directory: # (test.py lives in either ZOPE_HOME or ZOPE_HOME/bin) parentdir, lastpart = os.path.split(scriptdir) if lastpart == 'bin': zhome = parentdir else: zhome = scriptdir shome = os.path.join(zhome, 'lib', 'python') sys.path.insert(0, shome) defaults = '--tests-pattern ^tests$ -v'.split() defaults += ['-m', '!^(' 'ZConfig' '|' 'BTrees' '|' 'persistent' '|' 'ThreadedAsync' '|' 'transaction' '|' 'ZEO' '|' 'ZODB' '|' 'ZopeUndo' '|' 'zdaemon' '|' 'zope[.]testing' '|' 'zope[.]app' ')[.]'] if ihome: 
ihome = os.path.abspath(ihome) defaults += ['--path', os.path.join(ihome, 'lib', 'python')] products = os.path.join(ihome, 'Products') if os.path.exists(products): defaults += ['--package-path', products, 'Products'] else: defaults += ['--test-path', shome] from zope.testing import testrunner def load_config_file(option, opt, config_file, *ignored): config_file = os.path.abspath(config_file) print "Parsing %s" % config_file import Zope2 Zope2.configure(config_file) testrunner.setup.add_option( '--config-file', action="callback", type="string", dest='config_file', callback=load_config_file, help="""\ Initialize Zope with the given configuration file. """) def filter_warnings(option, opt, *ignored): import warnings warnings.simplefilter('ignore', Warning, append=True) testrunner.other.add_option( '--nowarnings', action="callback", callback=filter_warnings, help="""\ Install a filter to suppress warnings emitted by code. """) sys.exit(testrunner.run(defaults))
[ "root@cpro5106.publiccloud.com.br" ]
root@cpro5106.publiccloud.com.br
168c90051272f897704b82df305c78fc89fbe16e
6b63f4fc5105f3190014e1dd5685a891a74f8c63
/0024_desafio.py
8cbac1cdce046f3a4fbfd895992c622db4b75a04
[]
no_license
matheuszei/Python_DesafiosCursoemvideo
a711c7c9c6db022cc8a16a3a1dc59afabb586105
5b216908dd0845ba25ee6d2e6f8b3e9419c074d2
refs/heads/main
2023-05-10T18:13:09.785651
2021-06-04T13:50:48
2021-06-04T13:50:48
370,851,791
0
0
null
null
null
null
UTF-8
Python
false
false
294
py
#Crie um programa que leia o nome de uma cidade e diga se ela começa ou não com o nome "SANTO" cidade = input('Digite o nome de uma cidade: ').strip().upper() print('SANTO' in cidade[0:5]) #cidade = input('Digite o nome de uma cidade: ').strip() #print(cidade[:5].upper() == 'SANTO)
[ "noreply@github.com" ]
matheuszei.noreply@github.com
13c0f8ebf1ba81fb4c8befc8cd426cbb0791be34
ce828d2c95b445ecf928f74626bac6397b4af202
/otter/assign/r_adapter/tests.py
54e09c829845e88e9a79d9cae513e36f9735c0c3
[ "BSD-3-Clause" ]
permissive
JinwooPark00/otter-grader
fe8df82ae5b325d6e620e42997a7e334ff19154e
1e037fa56e7833650980347fc8db64bd0a152e09
refs/heads/master
2023-06-13T02:38:01.583389
2021-06-20T20:50:34
2021-06-20T20:50:34
375,860,574
0
0
BSD-3-Clause
2021-06-22T03:44:07
2021-06-11T00:14:53
null
UTF-8
Python
false
false
6,008
py
""" ottr test adapters for Otter Assign """ import re import pprint import yaml import nbformat from collections import namedtuple from ..constants import TEST_REGEX, OTTR_TEST_NAME_REGEX, OTTR_TEST_FILE_TEMPLATE from ..tests import write_test from ..utils import get_source, lock Test = namedtuple('Test', ['name', 'hidden', 'body']) def read_test(cell, question, assignment, rmd=False): """ Returns the contents of a test as a ``(name, hidden, body)`` named tuple Args: cell (``nbformat.NotebookNode``): a test cell question (``dict``): question metadata assignment (``otter.assign.assignment.Assignment``): the assignment configurations rmd (``bool``, optional): whether the cell is from an Rmd file; if true, the first and last lines of ``cell``'s source are trimmed, since they should be backtick delimeters Returns: ``Test``: test named tuple """ if rmd: source = get_source(cell)[1:-1] else: source = get_source(cell) hidden = bool(re.search("hidden", source[0], flags=re.IGNORECASE)) lines = source[1:] assert sum("test_that(" in line for line in lines) == 1, \ f"Too many test_that calls in test cell (max 1 allowed):\n{cell}" test_name = None for line in lines: match = re.match(OTTR_TEST_NAME_REGEX, line) if match: test_name = match.group(1) break assert test_name is not None, f"Could not parse test name:\n{cell}" return Test(test_name, hidden, '\n'.join(lines)) def gen_test_cell(question, tests, tests_dict, assignment): """ Parses a list of test named tuples and creates a single test file. Adds this test file as a value to ``tests_dict`` with a key corresponding to the test's name, taken from ``question``. Returns a code cell that runs the check on this test. 
Args: question (``dict``): question metadata tests (``list`` of ``Test``): tests to be written tests_dict (``dict``): the tests for this assignment assignment (``otter.assign.assignment.Assignment``): the assignment configurations Returns: ``nbformat.NotebookNode``: code cell calling ``ottr::check`` on this test """ cell = nbformat.v4.new_code_cell() cell.source = ['. = ottr::check("tests/{}.R")'.format(question['name'])] points = question.get('points', len(tests)) if isinstance(points, int): if points % len(tests) == 0: points = [points // len(tests) for _ in range(len(tests))] else: points = [points / len(tests) for _ in range(len(tests))] assert isinstance(points, list) and len(points) == len(tests), \ f"Points for question {question['name']} could not be parsed:\n{points}" test = gen_suite(question['name'], tests, points) tests_dict[question['name']] = test lock(cell) return cell def gen_suite(name, tests, points): """ Generates an R-formatted test file for ottr Args: name (``str``): the test name tests (``list`` of ``Test``): the test case named tuples that define this test file points (``float`` or ``int`` or ``list`` of ``float`` or ``int``): th points per question Returns: ``str``: the rendered R test file """ metadata = {'name': name, 'cases': []} cases = metadata['cases'] for test, p in zip(tests, points): cases.append({ 'name': test.name, 'points': p, 'hidden': test.hidden }) metadata = yaml.dump(metadata) return OTTR_TEST_FILE_TEMPLATE.render( metadata = metadata, tests = tests ) def remove_hidden_tests_from_dir(nb, test_dir, assignment, use_files=True): """ Rewrites test files in a directory to remove hidden tests Args: test_dir (``pathlib.Path``): path to test files directory assignment (``otter.assign.assignment.Assignment``): the assignment configurations """ for f in test_dir.iterdir(): if f.suffix != '.R': continue with open(f) as f2: test = f2.read() metadata, in_metadata, start_lines, test_names = "", False, {}, [] metadata_start, metadata_end = 
-1, -1 lines = test.split("\n") for i, line in enumerate(lines): match = re.match(OTTR_TEST_NAME_REGEX, line) if line.strip() == "test_metadata = \"": in_metadata = True metadata_start = i elif in_metadata and line.strip() == "\"": in_metadata = False metadata_end = i elif in_metadata: metadata += line + "\n" elif match: test_name = match.group(1) test_names.append(test_name) start_lines[test_name] = i assert metadata and metadata_start != -1 and metadata_end != -1, \ f"Failed to parse test metadata in {f}" metadata = yaml.full_load(metadata) cases = metadata['cases'] lines_to_remove, cases_to_remove = [], [] for i, case in enumerate(cases): if case['hidden']: start_line = start_lines[case['name']] try: next_test = test_names[test_names.index(case['name']) + 1] end_line = start_lines[next_test] except IndexError: end_line = len(lines) lines_to_remove.extend(range(start_line, end_line)) cases_to_remove.append(i) metadata['cases'] = [c for i, c in enumerate(cases) if i not in set(cases_to_remove)] lines = [l for i, l in enumerate(lines) if i not in set(lines_to_remove)] lines[metadata_start:metadata_end + 1] = ["test_metadata = \""] + \ yaml.dump(metadata).split("\n") + ["\""] test = "\n".join(lines) write_test({}, f, test, use_file=True)
[ "cpyles@berkeley.edu" ]
cpyles@berkeley.edu
e4f3d2a23983b4dbfb4c02e219aab16dd2417d1a
7a4cfe0e9f6c87780c53ecc2431473ca71b9e841
/crypto-caesar/hack.py
c6bad2c7a10cf1f10414a0402aeb797e3dd59883
[]
no_license
chakra7/wechall
6c7985c17ee34411c24df7bac5147ec07f04ce98
b5c35cc4fc5ee9014a83c190e53cbf7b4e07efbe
refs/heads/master
2021-01-12T17:30:42.404953
2016-10-21T18:25:39
2016-10-21T18:25:39
null
0
0
null
null
null
null
UTF-8
Python
false
false
389
py
allchars = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' def decrypto(encs, shift): decs = '' for char in encs: if char == ' ': decs += char else : # 65 + (k-65) % 26 decs += str(unichr(65 + (ord(char) + shift - 65)%26)) return decs for i in range(1, 26): d = decrypto('ESP BFTNV MCZHY QZI UFXAD ZGPC ESP WLKJ OZR ZQ NLPDLC LYO JZFC FYTBFP DZWFETZY TD SZPLDLPAXTDO', i) print(d)
[ "root@localhost.localdomain" ]
root@localhost.localdomain
6f1db04426b6bc0855a1be58dab102ebc1ffd1f3
15f321878face2af9317363c5f6de1e5ddd9b749
/solutions_python/Problem_200/5429.py
e440769130e46909840dd9ff35500848ec3a3e50
[]
no_license
dr-dos-ok/Code_Jam_Webscraper
c06fd59870842664cd79c41eb460a09553e1c80a
26a35bf114a3aa30fc4c677ef069d95f41665cc0
refs/heads/master
2020-04-06T08:17:40.938460
2018-10-14T10:12:47
2018-10-14T10:12:47
null
0
0
null
null
null
null
UTF-8
Python
false
false
577
py
def isTydy(a) : b=1 while a>100 and b==1 : if a%10==0 : return(0) elif a%10>=(a//10)%10 : a=a//10 else : b=0 if b==1 : if a%10==0 : return(0) elif a%10>=(a//10)%10 : return(1) else : return(0) else : return(0) fichier=open("B-small-attempt9.in","r") output=open("output","w") ligne =(int)(fichier.readline()) cnt=0 while cnt<ligne : cnt=cnt+1 nligne=(int)(fichier.readline()) while(isTydy(nligne)==0) : nligne=nligne-1 output.write("Case #") output.write(str(cnt)) output.write(": ") output.write(str(nligne)) output.write("\n")
[ "miliar1732@gmail.com" ]
miliar1732@gmail.com
8d92d0a2a04fcd4de1fe0744867226ff24e482df
bfbf65193a6aaa5527f8a58b01bdabfdb87cc924
/fix-lint.py
9e8fe0a109126ce5b82ea50899380d4ce75e7924
[]
no_license
jaredly/file-scripts
924f8a5a66178273244152e27befebfcee90fd6a
038ed7a6db1a2eef7a868b63f5d19ec9ea568314
refs/heads/master
2020-05-16T23:08:24.869806
2015-08-14T23:02:35
2015-08-14T23:02:35
40,739,318
0
0
null
null
null
null
UTF-8
Python
false
false
864
py
#!/usr/bin/env python import sys import re rx = re.compile('(?P<line>\d+):(?P<col>\d+)\s+(?P<level>\w+).+?(?P<type>[-\w]+)$') def parse_rule(line): return rx.match(line.strip()).groupdict() text = open(sys.argv[1]).read() files = {} block = {} for line in text.split('\n'): if not line.strip(): continue if ' problems ' in line: continue if line[0] != ' ': block = [] files[line.strip()] = block else: block.append(parse_rule(line.strip())) print block for fname in files: lines = open(fname).read().split('\n') dirty = False for item in files[fname]: if item['type'] != 'semi': continue dirty = True lno = int(item['line']) - 1 col = int(item['col']) line = lines[lno] lines[lno] = line[:col] + ';' + line[col:] if dirty: open(fname, 'w').write('\n'.join(lines)) # vim: et sw=4 sts=4
[ "jared@jaredforsyth.com" ]
jared@jaredforsyth.com
552260db3609384a5cbb9c1629cffe94180ee301
df019e648d75d37a05f3219d27fffad4147fbf3c
/stockprice/stock/views.py
6d413d5bdbf60847297ccc4c48d97675c1cf969e
[]
no_license
AmrElsayedEG/stock-market-price-USA
8b6fcccc02fb14dff4ce31af403b2f87a409855d
cdceb7e6e774972bf790f61ba75b1d4ee2cb205e
refs/heads/master
2023-01-31T13:19:01.652792
2020-12-15T09:59:48
2020-12-15T09:59:48
304,368,158
0
0
null
null
null
null
UTF-8
Python
false
false
4,298
py
from django.shortcuts import render, redirect, HttpResponse from rest_framework.authentication import TokenAuthentication from rest_framework.permissions import IsAuthenticated import json from . import stockapi from .models import DailyPrice from django.db import IntegrityError api_key = 'SMRUPFC8PX39RTRT' #API Key from django.http import JsonResponse def home(request): s_l = stockapi.get_symbol() #Get all symbols list wrong_sym = False #Default Value to know if the user searched for wrong symbol if request.GET: #If we have search request symbol = request.GET.get("symbol","") #Get value from GET try: #Try to get the prices for that symbol price = stockapi.get_price_list_daily(symbol,api_key) price.reverse() #Revese so we can view chart from older to newer data except: #If the symbol is wrong wrong_sym = True #Wrong Symbol and show pop-up price = None else: price = None symbol = None context = { 's_l': s_l, 'price':price, 'symbol':symbol, 'lenstock':len(s_l), 'wrong_sym':wrong_sym } return render(request,'index.html',context) #AJAX Request View def home_ajax(request): if request.method == 'POST' and request.is_ajax(): symbol = request.POST.get("symbol","") try: #Try to get the prices for that symbol price = stockapi.get_price_list_daily(symbol,api_key) price.reverse() #Revese so we can view chart from older to newer data price = json.dumps(price) print("Done") except: #If the symbol is wrong wrong_sym = True #Wrong Symbol and show pop-up price = 'None' print("Fail") ctx = {'symbol':symbol,'price':price} return HttpResponse(json.dumps(ctx), content_type='application/json') s_l = stockapi.get_symbol() #Get all symbols list wrong_sym = False #Default Value to know if the user searched for wrong symbol price = None symbol = None context = { 's_l': s_l, 'price':price, 'symbol':symbol, 'lenstock':len(s_l), 'wrong_sym':wrong_sym } return render(request,'index-ajax.html',context) ############################### #Rest API from rest_framework import generics from 
.serializers import DailyPriceSerializer from .models import DailyPrice class TokenAuthSupportQueryString(TokenAuthentication): #Override TokenAuthentication to have token inside url not in headers def authenticate(self, request): # Check if 'token_auth' is in the request query params. # Give precedence to 'Authorization' header. if 'auth_token' in request.query_params and \ 'HTTP_AUTHORIZATION' not in request.META: return self.authenticate_credentials(request.query_params.get('auth_token')) else: return super(TokenAuthSupportQueryString, self).authenticate(request) class symbolData(generics.ListAPIView): queryset = DailyPrice.objects.all() #Gett All objects serializer_class = DailyPriceSerializer #add serializer def get_queryset(self, *args, **kwargs): #Get Symbol from url try: #Check if we have the latest prices saved on our db or not price = stockapi.get_price_list_daily(self.kwargs.get('symbol'), api_key) #Get last 100 prices for symbol if price: #If symbol Valied for i in price: try: DailyPrice.objects.get(symbol=self.kwargs.get('symbol'), date=i[0]) #Check whether we have the last date in our db as the last date in the fresh data from api except: #If we don't have refreshed data add them to our db new = DailyPrice(symbol=self.kwargs.get('symbol'), date=i[0], open=i[2], high=i[4], low=i[1], close=i[3]) new.save() #Save except: #If the symbol is wrong pass and print blank list pass res = self.queryset.filter(symbol=self.kwargs.get('symbol')) #Get all stock price for particular symbol return res #return data to api authentication_classes = [TokenAuthSupportQueryString] permission_classes = [IsAuthenticated]
[ "newsifb@gmail.com" ]
newsifb@gmail.com
a7d0319984b9bfb560fa0fa0df10e5c9b81149c6
553dce745ab6e97c2588ddec3b1aec1cd0266907
/Crawler/tools/adjust_tags.py
3dc3bbf6d90699de629ee88335e43fc4bfb2d73f
[]
no_license
foamliu/hackathon-ocw
ee8879882534a32a3ee7eed2767ae281b77ff029
2ec8e045f68ca3cac1333a7b0a045416beba01d9
refs/heads/master
2021-01-21T04:40:37.845867
2016-07-18T05:24:33
2016-07-18T05:24:33
52,413,648
8
11
null
2016-07-18T05:24:36
2016-02-24T04:14:08
Python
UTF-8
Python
false
false
1,453
py
#批量调整标签 import json import codecs input_file = open(r'C:\Users\foamliu.FAREAST\Documents\GitHub\hackathon-ocw\FeedAPI\app\assets\jsons\items.json', "r", encoding="utf-8") #output_file = codecs.open(r'C:\Users\foamliu.FAREAST\Documents\GitHub\hackathon-ocw\FeedAPI\app\assets\jsons\output.json', "w", encoding="utf-8") items = json.load(input_file, encoding='utf-8') count = 0 for item in items: #item['tags'] = item['tags'].replace('计算机', '互联网') #item['tags'] = item['tags'].replace('国际名校公开课', '').strip() #item['tags'] = item['tags'].replace('中国大学视频公开课 国内', '').strip() #item['tags'] = item['tags'].replace('可汗学院', '').strip() #item['tags'] = item['tags'].replace('大气', '地球科学').strip() #item['tags'] = item['tags'].replace('InfoQ', '').strip() #item['tags'] = item['tags'].replace('赏课', '').strip() #item['tags'] = item['tags'].replace(' ', ' ').strip() if '国内' in item['tags']: print(item['item_id']) print(item['title']) count += 1 print (count) #技能 地球科学 建筑 医学 社会 生物 物理 教育 艺术 建筑 历史 机器人学 教育 经济 天文 法律 演讲 教育 #json.dump(items ,output_file, indent=4,ensure_ascii=False,sort_keys=True) #http://imgsize.ph.126.net/?enlarge=true&amp;imgurl=http://img4.cache.netease.com/video/2013/9/4/20130904155017c21a3.jpg_280x158x1x95.jpg
[ "foamliu@yeah.net" ]
foamliu@yeah.net
548bc3fd3789a2b19f445b59af724f7360dcddd8
8ca88f155c3fa4bc4838ffbb3bcfdc0f79d5b923
/ymage/helpers.py
2c0bb4b7a8f2db87686772a652278401a411746c
[ "MIT" ]
permissive
cuchac/ymage
b9620da69c54fd7481c2eed236df5aae9c302881
025be067acbe002d9443c485c24be4cbacf3ec95
refs/heads/master
2021-01-15T23:28:19.709296
2011-10-28T10:38:25
2011-10-28T10:38:25
2,650,282
0
0
null
null
null
null
UTF-8
Python
false
false
1,903
py
# Copyright (c) 2011 Bogdan Popa # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. import os from pyglet import clock def reschedule(callback, interval, *args, **kwargs): clock.unschedule(callback) clock.schedule_interval(callback, interval, *args, **kwargs) def reschedule_once(callback, interval, *args, **kwargs): clock.unschedule(callback) clock.schedule_once(callback, interval, *args, **kwargs) def valid_type(file_): types = ("bmp", "jpg", "jpeg", "png") for type_ in types: if file_.lower().endswith(type_): return True return False def get_files(path): for root, dirs, files in os.walk(path): for file_ in files: if valid_type(file_): yield os.path.join(root, file_) def get_paths(path): paths = [] for file_ in get_files(path): paths.append(file_) return sorted(paths)
[ "popa.bogdanp@gmail.com" ]
popa.bogdanp@gmail.com
0af2cab361db209fe3c3b299f67b3750fcb8ade9
6ef3fc3ffa5f33e6403cb7cb0c30a35623a52d0d
/samples/generated_samples/vision_v1_generated_product_search_get_product_async.py
347cd239d8975ec76e5bf13895cab884a24f35b2
[ "Apache-2.0" ]
permissive
vam-google/python-vision
61405506e3992ab89e6a454e4dda9b05fe2571f2
09e969fa30514d8a6bb95b576c1a2ae2c1e11d54
refs/heads/master
2022-08-15T08:40:35.999002
2022-07-18T16:04:35
2022-07-18T16:04:35
254,789,106
0
0
Apache-2.0
2020-04-11T03:59:02
2020-04-11T03:59:01
null
UTF-8
Python
false
false
1,435
py
# -*- coding: utf-8 -*- # Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Generated code. DO NOT EDIT! # # Snippet for GetProduct # NOTE: This snippet has been automatically generated for illustrative purposes only. # It may require modifications to work in your environment. # To install the latest published package dependency, execute the following: # python3 -m pip install google-cloud-vision # [START vision_v1_generated_ProductSearch_GetProduct_async] from google.cloud import vision_v1 async def sample_get_product(): # Create a client client = vision_v1.ProductSearchAsyncClient() # Initialize request argument(s) request = vision_v1.GetProductRequest( name="name_value", ) # Make the request response = await client.get_product(request=request) # Handle the response print(response) # [END vision_v1_generated_ProductSearch_GetProduct_async]
[ "noreply@github.com" ]
vam-google.noreply@github.com
4c73136669dd6d941e998a444deea0ce5e842594
c934f728ee4ef42eaa276ae30769ad9e7c51c285
/pyhpecw7/utils/templates/cli.py
eb52caad3c82da0e1a3842346f7d5f85b67a3a58
[ "Apache-2.0" ]
permissive
frepkovsky/pyhpecw7
c43ccd78cef952b723dbd641818d82d760aef685
4f0d1dd696db9ef1620832f460ef3983fde5ec0c
refs/heads/master
2021-06-26T05:59:48.081377
2020-12-10T15:17:25
2020-12-10T15:17:25
188,609,179
0
0
NOASSERTION
2019-05-25T20:25:16
2019-05-25T20:25:15
null
UTF-8
Python
false
false
1,383
py
#!/usr/bin/env python # Copyright 2015 Jason Edelman <jedelman8@gmail.com> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import textfsm def get_structured_data(template, rawtxt): """Returns structured data given raw text using TextFSM templates """ path = os.path.dirname(os.path.abspath(__file__)) \ + '/textfsm_temps/' + template fsm = textfsm.TextFSM(open(path)) # an object is what is being extracted # based on the template, it may be one objecst or multiple # as is the case with neighbors, interfaces, etc. objects = fsm.ParseText(rawtxt) structured_data = [] for each in objects: index = 0 temp = {} for template_value in each: temp[fsm.header[index].lower()] = str(template_value) index += 1 structured_data.append(temp) return structured_data
[ "chris_young@me.com" ]
chris_young@me.com
3bec5e02db0fed7c25830eaa2c018eb686a47f52
491f9ca49bbb275c99248134c604da9fb43ee9fe
/MD_analysis/gatherdata_diffusion_D_vs_Ktheta.py
f3a1bea862b4e2f4a07913147a98f5f403728180
[]
no_license
KineOdegardHanssen/PhD-subprojects
9ef0facf7da4b2a80b4bea9c890aa04f0ddcfd1a
c275539689b53b94cbb85c0fdb3cea5885fc40e9
refs/heads/Windows
2023-06-08T13:32:15.179813
2023-06-05T08:40:10
2023-06-05T08:40:10
195,783,664
2
0
null
2020-08-18T14:42:21
2019-07-08T09:49:14
Python
UTF-8
Python
false
false
6,515
py
from skimage import morphology, measure, io, util # For performing morphology operations (should maybe import more from skimage...) from mpl_toolkits.mplot3d import Axes3D # Plotting in 3D import matplotlib.pyplot as plt # To plot from scipy.optimize import curve_fit from pylab import * from scipy.ndimage import measurements, convolve # Should have used the command below, but changing now will lead to confusion from scipy import ndimage # For Euclidean distance measurement import numpy as np import random import math import time import os import glob def rmsd(x,y): Nx = len(x) Ny = len(y) if Nx!=Ny: print('WARNING! Nx!=Ny. Could not calculate rmsd value') return 'WARNING! Nx!=Ny. Could not calculate rmsd value' delta = 0 for i in range(Nx): delta += (x[i]-y[i])*(x[i]-y[i]) delta = np.sqrt(delta/(Nx-1)) return delta # Input parameters for file selection: # I will probably add more, but I want to make sure the program is running first psigma = 1 # For instance spacing = 3 Kthetas = [0.1,1,14,100,1000] pmass = 1 # I don't use this anymore, but it got stuck in the file names. Have constant mass density now. 
damp = 10 N = len(Kthetas) # Input booleans for file selection: bulkdiffusion = False substrate = False confignrs = np.arange(1,1001) # Ds DRs = np.zeros(N) Dxs = np.zeros(N) Dys = np.zeros(N) Dzs = np.zeros(N) Dparallel = np.zeros(N) # Ds, stdv DRs_stdv = np.zeros(N) Dxs_stdv = np.zeros(N) Dys_stdv = np.zeros(N) Dzs_stdv = np.zeros(N) Dparallel_stdv = np.zeros(N) # bs bRs = np.zeros(N) bxs = np.zeros(N) bys = np.zeros(N) bzs = np.zeros(N) bparallel = np.zeros(N) # bs, stdv bRs_stdv = np.zeros(N) bxs_stdv = np.zeros(N) bys_stdv = np.zeros(N) bzs_stdv = np.zeros(N) bparallel_stdv = np.zeros(N) if bulkdiffusion==True: parentfolder = 'Pure_bulk/' filestext = '_seed'+str(confignrs[0])+'to'+str(confignrs[-1]) systemtype = 'bulk' if substrate==True: parentfolder = 'Bulk_substrate/' systemtype = 'substrate' else: parentfolder = 'Brush/' systemtype = 'brush' filestext = '_config'+str(confignrs[0])+'to'+str(confignrs[-1]) endlocation_out = 'C:/Users/Kine/Documents/Projects_PhD/P2_PolymerMD/Planar_brush/Diffusion_bead_near_grid/D_vs_Ktheta/Spacing'+str(spacing)+'/Sigma_bead_' +str(psigma) + '/' outfilename = endlocation_out+'D_vs_Ktheta.txt' plotname = endlocation_out+'D_vs_Ktheta.png' plotname_fit = endlocation_out+'D_vs_Ktheta_fit.png' logplotname = endlocation_out + 'D_vs_Ktheta_log.png' indfilename = endlocation_out+'D_vs_Ktheta_fitindices.txt' outfile = open(outfilename, 'w') outfile.write('Ktheta D_R2 sigmaD_R2 b_R2 sigmab_R2; D_z2 sigmaD_z2 b_z2 sigmaD_z2; D_par2 sigmaD_par2 b_par2 sigmab_par2\n') indexfile = open(indfilename, 'w') indexfile.write('Start_index_R end_index_R Start_index_ort end_index_ort Start_index_par end_index_par\n') for i in range(N): Ktheta = Kthetas[i] endlocation_in = 'C:/Users/Kine/Documents/Projects_PhD/P2_PolymerMD/Planar_brush/Diffusion_bead_near_grid/Spacing'+str(spacing)+'/damp%i_diffseedLgv/' % damp +parentfolder+ 'Sigma_bead_' +str(psigma) + '/VaryKtheta/Ktheta'+str(Ktheta)+'/' infilename = endlocation_in+'diffusion'+filestext+'.txt' 
metaname = endlocation_in+'diffusion_metadata'+filestext+'.txt' #print('infilename_all:',infilename_all) # 0 1 2 3 4 5 6 7 8 9 10 11 #D_R2 sigmaD_R2 b_R2 sigmab_R2; D_z2 sigmaD_z2 b_z2 sigmaD_z2; D_par2 sigmaD_par2 b_par2 sigmab_par2 # Read in: #### Automatic part ## Find the extent of the polymers: Max z-coord of beads in the chains infile = open(infilename, "r") lines = infile.readlines() # This takes some time # Getting the number of lines, etc. line = lines[1] words = line.split() # Ds DRs[i] = float(words[0]) Dzs[i] = float(words[4]) Dparallel[i] = float(words[8]) # Ds, stdv DRs_stdv[i] = float(words[1]) Dzs_stdv[i] = float(words[5]) Dparallel_stdv[i] = float(words[9]) # bs bRs[i] = float(words[2]) bzs[i] = float(words[6]) bparallel[i] = float(words[10]) # bs, stdv bRs_stdv[i] = float(words[3]) bzs_stdv[i] = float(words[7]) bparallel_stdv[i] = float(words[11]) infile.close() outfile.write('%.5e %.5e %.5e %.5e %.5e %.5e %.5e %.5e %.5e %.5e %.5e %.5e %.5e \n' % (Ktheta, DRs[i], DRs_stdv[i], bRs[i], bRs_stdv[i], Dzs[i], Dzs_stdv[i], bzs[i], bzs_stdv[i], Dparallel[i], Dparallel_stdv[i], bparallel[i], bparallel[i])) metafile = open(metaname, 'r') mlines = metafile.readlines() startindex_R = int(mlines[0].split()[1]) endindex_R = int(mlines[1].split()[1]) startindex_ort = int(mlines[2].split()[1]) endindex_ort = int(mlines[3].split()[1]) startindex_par = int(mlines[4].split()[1]) endindex_par = int(mlines[5].split()[1]) metafile.close() indexfile.write('%i %i %i %i %i %i\n' % (startindex_R, endindex_R, startindex_ort, endindex_ort, startindex_par, endindex_par)) outfile.close() plt.figure(figsize=(6,5)) plt.errorbar(Kthetas, DRs, yerr=DRs_stdv, capsize=2, label=r'$D_R$') plt.errorbar(Kthetas, Dzs, yerr=Dzs_stdv, capsize=2, label=r'$D_\perp$') plt.errorbar(Kthetas, Dparallel, yerr=Dparallel_stdv, capsize=2, label=r'$D_\parallel$') plt.xlabel(r'$K_\theta$') plt.ylabel(r'Diffusion constant $D$') plt.title('Diffusion constant $D$, d = %i nm, dynamic %s' % (spacing, 
systemtype)) plt.tight_layout() plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0)) plt.legend(loc='upper left') plt.savefig(plotname) fig = plt.figure(figsize=(6,5)) ax = fig.add_subplot(1,1,1) plt.errorbar(Kthetas, DRs, yerr=DRs_stdv, capsize=2, label=r'$D_R$') plt.errorbar(Kthetas, Dzs, yerr=Dzs_stdv, capsize=2, label=r'$D_\perp$') plt.errorbar(Kthetas, Dparallel, yerr=Dparallel_stdv, capsize=2, label=r'$D_\parallel$') ax.set_xscale('log') plt.xlabel(r'$K_\theta$') plt.ylabel(r'Diffusion constant $D$') plt.title('Diffusion constant $D$, d = %i nm, dynamic %s' % (spacing, systemtype)) plt.tight_layout() plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0)) plt.legend(loc='upper left') plt.savefig(logplotname)
[ "noreply@github.com" ]
KineOdegardHanssen.noreply@github.com
760fe60455ccfa4e6c8013edef2dbcc1d00d6bb3
62e58c051128baef9452e7e0eb0b5a83367add26
/x12/4020/858004020.py
82bf10a750b1dab5753ae2fbdef20cf4efa23175
[]
no_license
dougvanhorn/bots-grammars
2eb6c0a6b5231c14a6faf194b932aa614809076c
09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d
refs/heads/master
2021-05-16T12:55:58.022904
2019-05-17T15:22:23
2019-05-17T15:22:23
105,274,633
0
0
null
2017-09-29T13:21:21
2017-09-29T13:21:21
null
UTF-8
Python
false
false
4,886
py
from bots.botsconfig import * from records004020 import recorddefs syntax = { 'version' : '00403', #version of ISA to send 'functionalgroup' : 'SI', } structure = [ {ID: 'ST', MIN: 1, MAX: 1, LEVEL: [ {ID: 'ZC1', MIN: 0, MAX: 1}, {ID: 'BX', MIN: 1, MAX: 1}, {ID: 'BNX', MIN: 0, MAX: 1}, {ID: 'M3', MIN: 0, MAX: 1}, {ID: 'N9', MIN: 0, MAX: 30}, {ID: 'CM', MIN: 0, MAX: 3}, {ID: 'Y6', MIN: 0, MAX: 4}, {ID: 'Y7', MIN: 0, MAX: 1}, {ID: 'C3', MIN: 0, MAX: 1}, {ID: 'ITD', MIN: 0, MAX: 1}, {ID: 'G62', MIN: 0, MAX: 10}, {ID: 'PER', MIN: 0, MAX: 3}, {ID: 'NA', MIN: 0, MAX: 999}, {ID: 'F9', MIN: 0, MAX: 1}, {ID: 'D9', MIN: 0, MAX: 1}, {ID: 'R1', MIN: 0, MAX: 1}, {ID: 'R2', MIN: 0, MAX: 13}, {ID: 'R3', MIN: 0, MAX: 13}, {ID: 'R4', MIN: 0, MAX: 5}, {ID: 'MEA', MIN: 0, MAX: 10}, {ID: 'H3', MIN: 0, MAX: 20}, {ID: 'PS', MIN: 0, MAX: 5}, {ID: 'H6', MIN: 0, MAX: 6}, {ID: 'V4', MIN: 0, MAX: 1}, {ID: 'V5', MIN: 0, MAX: 1}, {ID: 'E1', MIN: 0, MAX: 2, LEVEL: [ {ID: 'E4', MIN: 0, MAX: 1}, {ID: 'E5', MIN: 0, MAX: 13}, {ID: 'PI', MIN: 0, MAX: 1}, ]}, {ID: 'M1', MIN: 0, MAX: 1}, {ID: 'M2', MIN: 0, MAX: 1}, {ID: 'L7', MIN: 0, MAX: 30}, {ID: 'NTE', MIN: 0, MAX: 50}, {ID: 'XH', MIN: 0, MAX: 1}, {ID: 'N7', MIN: 0, MAX: 600, LEVEL: [ {ID: 'EM', MIN: 0, MAX: 1}, {ID: 'NA', MIN: 0, MAX: 30}, {ID: 'M7', MIN: 0, MAX: 5}, {ID: 'N5', MIN: 0, MAX: 1}, {ID: 'G62', MIN: 0, MAX: 2}, {ID: 'REF', MIN: 0, MAX: 5}, {ID: 'IC', MIN: 0, MAX: 1}, {ID: 'VC', MIN: 0, MAX: 21}, {ID: 'GA', MIN: 0, MAX: 15}, {ID: 'E1', MIN: 0, MAX: 2, LEVEL: [ {ID: 'E4', MIN: 0, MAX: 1}, {ID: 'E5', MIN: 0, MAX: 13}, {ID: 'PI', MIN: 0, MAX: 1}, ]}, ]}, {ID: 'N1', MIN: 0, MAX: 12, LEVEL: [ {ID: 'N2', MIN: 0, MAX: 2}, {ID: 'N3', MIN: 0, MAX: 2}, {ID: 'N4', MIN: 0, MAX: 1}, {ID: 'REF', MIN: 0, MAX: 12}, {ID: 'PER', MIN: 0, MAX: 3}, {ID: 'H3', MIN: 0, MAX: 5}, ]}, {ID: 'S5', MIN: 0, MAX: 999, LEVEL: [ {ID: 'G62', MIN: 0, MAX: 6}, {ID: 'N9', MIN: 0, MAX: 10}, {ID: 'H6', MIN: 0, MAX: 6}, {ID: 'N1', MIN: 0, MAX: 5, LEVEL: [ {ID: 'N2', MIN: 0, 
MAX: 2}, {ID: 'N3', MIN: 0, MAX: 2}, {ID: 'N4', MIN: 0, MAX: 1}, {ID: 'REF', MIN: 0, MAX: 12}, {ID: 'PER', MIN: 0, MAX: 3}, ]}, ]}, {ID: 'FA1', MIN: 0, MAX: 99999, LEVEL: [ {ID: 'FA2', MIN: 1, MAX: 99999}, {ID: 'L10', MIN: 0, MAX: 1}, ]}, {ID: 'HL', MIN: 1, MAX: 99999, LEVEL: [ {ID: 'N7', MIN: 0, MAX: 1}, {ID: 'NA', MIN: 0, MAX: 1}, {ID: 'M7', MIN: 0, MAX: 5}, {ID: 'N5', MIN: 0, MAX: 1}, {ID: 'REF', MIN: 0, MAX: 5}, {ID: 'IC', MIN: 0, MAX: 1}, {ID: 'VC', MIN: 0, MAX: 24}, {ID: 'L7', MIN: 0, MAX: 10}, {ID: 'SL1', MIN: 0, MAX: 1}, {ID: 'N9', MIN: 0, MAX: 10}, {ID: 'H3', MIN: 0, MAX: 1}, {ID: 'X1', MIN: 0, MAX: 6}, {ID: 'X2', MIN: 0, MAX: 1}, {ID: 'L5', MIN: 0, MAX: 10}, {ID: 'PER', MIN: 0, MAX: 5}, {ID: 'LH2', MIN: 0, MAX: 6}, {ID: 'LHR', MIN: 0, MAX: 1}, {ID: 'LH6', MIN: 0, MAX: 5}, {ID: 'Y7', MIN: 0, MAX: 2}, {ID: 'G62', MIN: 0, MAX: 10}, {ID: 'NTE', MIN: 0, MAX: 100}, {ID: 'LP', MIN: 0, MAX: 1}, {ID: 'AXL', MIN: 0, MAX: 12}, {ID: 'L0', MIN: 0, MAX: 20, LEVEL: [ {ID: 'L1', MIN: 0, MAX: 20}, {ID: 'MEA', MIN: 0, MAX: 10}, ]}, {ID: 'LH1', MIN: 0, MAX: 100, LEVEL: [ {ID: 'LH2', MIN: 0, MAX: 4}, {ID: 'LH3', MIN: 0, MAX: 12}, {ID: 'LFH', MIN: 0, MAX: 20}, {ID: 'LEP', MIN: 0, MAX: 3}, {ID: 'LH4', MIN: 0, MAX: 1}, {ID: 'LHT', MIN: 0, MAX: 3}, {ID: 'LHR', MIN: 0, MAX: 10}, {ID: 'PER', MIN: 0, MAX: 5}, {ID: 'LHE', MIN: 0, MAX: 1}, ]}, {ID: 'FA1', MIN: 0, MAX: 99999, LEVEL: [ {ID: 'FA2', MIN: 1, MAX: 99999}, {ID: 'L10', MIN: 0, MAX: 1}, ]}, {ID: 'NM1', MIN: 0, MAX: 4, LEVEL: [ {ID: 'N3', MIN: 0, MAX: 2}, {ID: 'N4', MIN: 0, MAX: 1}, ]}, {ID: 'N1', MIN: 0, MAX: 4, LEVEL: [ {ID: 'N2', MIN: 0, MAX: 2}, {ID: 'N3', MIN: 0, MAX: 2}, {ID: 'N4', MIN: 0, MAX: 1}, ]}, ]}, {ID: 'L3', MIN: 0, MAX: 1}, {ID: 'SE', MIN: 1, MAX: 1}, ]} ]
[ "jason.capriotti@gmail.com" ]
jason.capriotti@gmail.com
d3e9bc7a33216b4305b5ef54b7857edaf7042ddf
c2bf671e929b33bb74362559f5afb3e765662382
/colossus/apps/clientapp/forms.py
9d594ab69e9939a368fa67fd83eda1ff61eb1f8a
[ "MIT" ]
permissive
EnockOMONDI/jihus-dev
b7bf0e880dcaa6664b42eb9d942e3addb3b10b8e
a63ea595750748daf56a4fd5e0e9d53a7cfa3cd4
refs/heads/main
2022-12-31T21:45:48.417080
2020-10-22T17:05:33
2020-10-22T17:05:33
306,346,324
0
0
null
null
null
null
UTF-8
Python
false
false
213
py
from django import forms class SendMessageForm(forms.Form): title= forms.CharField(max_length=100) message = forms.CharField(widget=forms.Textarea) recepients = forms.CharField(widget=forms.Textarea)
[ "enockomondike@gmail.com" ]
enockomondike@gmail.com
ac89c6b2a7ab4aa411c8a6ba2e5e23ec635bb727
2251d71bc3ecb589ce1a8b274a08370c3240bf51
/0083 Remove Duplicates from Sorted List.py
dac2eed98424509c281b1b681789beb06ad5f2f3
[]
no_license
YuanyuanQiu/LeetCode
3495a3878edc2028f134bddb5b9ec963069562cb
6f5d0ef6a353713c0b41fa7ec0fb8c43a7e8dc55
refs/heads/master
2022-12-11T04:04:01.686226
2022-12-06T18:42:14
2022-12-06T18:42:14
231,168,173
0
0
null
null
null
null
UTF-8
Python
false
false
735
py
# Definition for singly-linked list. class ListNode: def __init__(self, x): self.val = x self.next = None node1 = ListNode(1) node2 = ListNode(1) node3 = ListNode(2) node1.next = node2 node2.next = node3 # def printList(node): # while node: # print(node.val) # node = node.next # printList(node1) head = node1 def deleteDuplicates(head): node = head # 不为空链表和最后一个node while node and node.next: if node.val == node.next.val: node.next = node.next.next else: node = node.next # head是头节点,位置没变,node做了引用,用node做了链表的操作 return head print(deleteDuplicates(head))
[ "50243732+YuanyuanQiu@users.noreply.github.com" ]
50243732+YuanyuanQiu@users.noreply.github.com
e07f19ebcb868a53956f4158c34c0e154545cca5
9b96c37db1f61065094d42bc5c8ad6eb3925961b
/level2/target_number.py
bb5973bc44e3a2e092e766a33bd4c3b539f9a973
[]
no_license
Taeheon-Lee/Programmers
a97589498c866c498c1aa9192fdf8eec9f8e31f4
c38b1c7dc4114c99191b77e5d19af432eaf6177e
refs/heads/master
2023-07-09T21:10:25.064947
2021-08-30T05:17:49
2021-08-30T05:17:49
394,327,802
0
1
null
null
null
null
UTF-8
Python
false
false
901
py
"타겟 넘버" # 문제 링크 "https://programmers.co.kr/learn/courses/30/lessons/43165" def count(numbers, target, tmp, total): if tmp == len(numbers): # 깊이 우선 탐색 사용, 마지막까지 이동했을 때, if total == target: # 결과 값이 타겟 넘버와 동일할 경우, 1개를 추가하기 위해 1 리턴 return 1 return 0 answer = 0 # 타겟 넘버와 매칭되는 개수 초기화 answer += count(numbers, target, tmp+1, total+numbers[tmp]) # 매칭되는 것을 더해줌 (더하기 탐색) answer += count(numbers, target, tmp+1, total-numbers[tmp]) # 매칭되는 것을 더해줌 (빼기 탐색) return answer def solution(numbers, target): answer = count(numbers, target, 0, 0) return answer
[ "taeheon714@gmail.com" ]
taeheon714@gmail.com
9112b55172eef632eca15f787a9bb1457535a1cd
09e57dd1374713f06b70d7b37a580130d9bbab0d
/benchmark/startPyquil2557.py
4208349c9bbf8ccc828c76303636edb4bd953026
[ "BSD-3-Clause" ]
permissive
UCLA-SEAL/QDiff
ad53650034897abb5941e74539e3aee8edb600ab
d968cbc47fe926b7f88b4adf10490f1edd6f8819
refs/heads/main
2023-08-05T04:52:24.961998
2021-09-19T02:56:16
2021-09-19T02:56:16
405,159,939
2
0
null
null
null
null
UTF-8
Python
false
false
1,801
py
# qubit number=4 # total number=35 import pyquil from pyquil.api import local_forest_runtime, QVMConnection from pyquil import Program, get_qc from pyquil.gates import * import numpy as np conn = QVMConnection() def make_circuit()-> Program: prog = Program() # circuit begin prog += CNOT(0,3) # number=13 prog += CNOT(0,3) # number=17 prog += X(3) # number=18 prog += RX(-3.1101767270538954,1) # number=27 prog += CNOT(0,3) # number=19 prog += CNOT(0,3) # number=15 prog += H(1) # number=2 prog += H(2) # number=3 prog += H(3) # number=4 prog += Y(3) # number=12 prog += H(1) # number=26 prog += H(0) # number=5 prog += H(1) # number=6 prog += X(3) # number=29 prog += H(2) # number=7 prog += CNOT(3,0) # number=20 prog += CNOT(3,0) # number=23 prog += CNOT(3,0) # number=32 prog += Z(3) # number=33 prog += CNOT(3,0) # number=34 prog += CNOT(3,0) # number=25 prog += CNOT(3,0) # number=22 prog += H(3) # number=8 prog += Z(3) # number=28 prog += H(0) # number=9 prog += Y(2) # number=10 prog += Y(2) # number=11 prog += X(1) # number=30 prog += X(1) # number=31 # circuit end return prog def summrise_results(bitstrings) -> dict: d = {} for l in bitstrings: if d.get(l) is None: d[l] = 1 else: d[l] = d[l] + 1 return d if __name__ == '__main__': prog = make_circuit() qvm = get_qc('4q-qvm') results = qvm.run_and_measure(prog,1024) bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T bitstrings = [''.join(map(str, l)) for l in bitstrings] writefile = open("../data/startPyquil2557.csv","w") print(summrise_results(bitstrings),file=writefile) writefile.close()
[ "wangjiyuan123@yeah.net" ]
wangjiyuan123@yeah.net
0a7e55342d8a4b7b2f538f9bececc852e87badf0
04ae1836b9bc9d73d244f91b8f7fbf1bbc58ff29
/655/Solution.py
2d8edc9e127ac5e52942f9488260f0e9792c9b4d
[]
no_license
zhangruochi/leetcode
6f739fde222c298bae1c68236d980bd29c33b1c6
cefa2f08667de4d2973274de3ff29a31a7d25eda
refs/heads/master
2022-07-16T23:40:20.458105
2022-06-02T18:25:35
2022-06-02T18:25:35
78,989,941
14
6
null
null
null
null
UTF-8
Python
false
false
1,013
py
# Definition for a binary tree node. # class TreeNode: # def __init__(self, x): # self.val = x # self.left = None # self.right = None class Solution: def printTree(self, root: TreeNode) -> List[List[str]]: if not root: return [] def get_depth(root): if not root: return 0 return max(get_depth(root.left), get_depth(root.right)) + 1 def print_tree(res,root,h,left,right): if not root: return mid = (right - left) // 2 + left res[h][mid] = str(root.val) print_tree(res,root.left,h+1,left,mid-1) print_tree(res,root.right,h+1,mid+1,right) depth = get_depth(root) w = 2 ** depth - 1 res = [[""]*w for i in range(depth)] print_tree(res,root,0,0,w-1) return res
[ "zrc720@gmail.com" ]
zrc720@gmail.com