blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
768aeb458d3c1a278bb6c13bfb68415378dea271
|
76e931912629c37beedf7c9b112b53e7de5babd7
|
/2-mouth02/day09/insert_many.py
|
f985b8313dcbcd47dce05b6061d60655455b3c3e
|
[
"Apache-2.0"
] |
permissive
|
gary-gggggg/gary
|
c59ac21d8e065f296ff986d11a0e4cbf186a1bc4
|
d8ba30ea4bc2b662a2d6a87d247f813e5680d63e
|
refs/heads/main
| 2023-02-23T06:54:34.500683
| 2021-02-01T10:17:02
| 2021-02-01T10:17:02
| 334,905,744
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 696
|
py
|
import pymysql

# Connection parameters for the local MySQL server.
db_dic = {
    "host": "localhost",
    "port": 3306,
    "user": "root",
    "password": "123456",
    "database": "gary",
    "charset": "utf8"
}
# Open the database connection.
db = pymysql.connect(**db_dic)
# Create a cursor: the object used to execute SQL and fetch results.
cur = db.cursor()
# Rows to insert: (name, age, gender, grade).
list1 = [
    ("张三", 21, 'male', 65),
    ("李四", 18, 'female', 47),
    ("王五", 16, 'others', 94)
]
try:
    sql = "insert into school (name,age,gender,grade) values (%s,%s,%s,%s);"
    cur.executemany(sql, list1)
    db.commit()
except Exception as e:
    print(e)
    db.rollback()
finally:
    # BUGFIX: release the cursor and connection even when commit or
    # rollback raises; previously cleanup ran only on the happy path.
    cur.close()
    db.close()
|
[
"673248932@qq.com"
] |
673248932@qq.com
|
b69d77cf2d656cfdd59cd2d926c4ff2a3d3b483e
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_85/45.py
|
6c238130b6c65c8e115cbd376dfa2b64f752cdeb
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,569
|
py
|
def dist_at(a, c, dist):
    """Distance for stop *a*, with the pattern repeating every *c* stops."""
    index = a % c
    return dist[index]
def normal_time(n,c,dist):
    # Total travel time over n stops when each leg costs twice its
    # length (base walking speed 1/2).  Python 2 code: uses xrange.
    time = 0
    for star in xrange(0,n):
        length = dist_at(star,c,dist)
        time += length*2
    return time
def solve():
    # Code Jam driver (Python 2: raw_input/xrange/print statement).
    # Each case line holds: l (boosts), t (booster time budget),
    # n (stop count), c (pattern length), then the distance pattern.
    test = int(raw_input())
    for case in xrange(1,test+1):
        dist = []
        # NOTE(review): `input` shadows the builtin; harmless here but
        # worth renaming if this file is ever touched again.
        input = map(int,raw_input().split())
        for i in xrange(0,len(input)):
            if i == 0:
                l = input[i]
            elif i == 1:
                t = input[i]
            elif i == 2:
                n = input[i]
            elif i == 3:
                c = input[i]
            else:
                dist.append(input[i])
        # Walk forward spending the booster budget t; find the stop we
        # are at (current) and how far past it (dist_from_current) when
        # the budget runs out.
        current = 0
        dist_from_current = 0.0
        for star in xrange(0,n):
            # `next` shadows the builtin as well.
            next = dist_at(star,c,dist)
            time = next*2
            if time < t:
                t = t - time
                current = star+1
            elif time == t:
                current = star+1
                dist_from_current = 0.0
            else:
                current = star
                dist_from_current = 0.5*t
                break
        # print current,dist_from_current
        a = current
        b = current + 1
        dist_left_to_b = dist_at(a,c,dist) - dist_from_current
        # Collect the legs where boosting can still help: the remainder
        # of the current leg plus every later leg.
        # NOTE(review): `list` shadows the builtin list type.
        list = []
        for star in xrange(b,n):
            list.append(dist_at(star,c,dist))
        list.append(dist_left_to_b)
        count = 0
        normal = normal_time(n,c,dist)
        saved = 0
        # Greedily boost the l longest remaining legs: each saves its
        # full length (halving the 2x cost).
        if l != 0:
            for val in sorted(list,reverse=True):
                saved += val
                count += 1
                if count == l:
                    break
        if l == 0:
            print "Case #" + str(case) + ": " + str(int(normal))
        else:
            print "Case #" + str(case) + ": " + str(int(normal-saved))
solve()
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
cb6cde984e9367c6b77787362767d0684e97c498
|
11bb0cbe6de2a0a4e94fc0ba610f61894d5593a1
|
/VBS_Zgamma/AQGC/combine/run/zz/test/th2_to_txt.py
|
9f8455d301e3b0c8e56d9350a9029ec067096327
|
[] |
no_license
|
AnYpku/PKU-Cluster
|
0dc4a88445aeb3ca239b2d7d7f796c6a67f3f69c
|
f9ffbcb7988053f4618fd015c1bb656d92ff51c6
|
refs/heads/master
| 2022-11-01T23:46:59.442037
| 2022-10-21T06:37:43
| 2022-10-21T06:37:43
| 188,202,345
| 0
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,830
|
py
|
#!/usr/bin/env python
from ROOT import gROOT, THStack, TH1D, TList, TFile, TH1F
print '-----begin to transfer TH2D to txt for Higgs-combine tool----- \n'
f1 = TFile.Open("chAll.root")
th1_ZA_sig = f1.Get("diboson")
th1_ZA = f1.Get("QCD_gg")
th1_non_prompt = f1.Get("QCD_qq")
print '>>>>begin to read bin content to the txt file>>>>'
for i in range(1,7):
f = open('./%s_bin_%d.txt'%('chAll', i),'w')
f.write('imax 1 number of channels\n')
f.write('jmax 2 number of processes-1\n')
f.write('kmax 1 number of nuisance parameters (sources of systematical uncertainties)\n')
f.write('------------\n')
f.write('# we have just one channel, in which we observe 0 events\n')
f.write('bin chAll%i\n'%(i))
bin_content = th1_non_prompt.GetBinContent(i)+th1_ZA.GetBinContent(i)+th1_ZA_sig.GetBinContent(i)
# bincontent of each precess
non_prompt_bincontent = th1_non_prompt.GetBinContent(i) if th1_non_prompt.GetBinContent(i)>0 else 0
ZA_bincontent = th1_ZA.GetBinContent(i) if th1_ZA.GetBinContent(i) else 0
ZA_sig_bincontent = th1_ZA_sig.GetBinContent(i) if th1_ZA_sig.GetBinContent(i)>0 else 0
# bin error
non_prompt_binerror = th1_non_prompt.GetBinError(i)/non_prompt_bincontent if non_prompt_bincontent>0 else 0
non_prompt_binerror = non_prompt_binerror if non_prompt_binerror<1 else 1
non_prompt_binerror =non_prompt_binerror+1
ZA_binerror = th1_ZA.GetBinError(i)/ZA_bincontent if ZA_bincontent>0 else 0
ZA_binerror = ZA_binerror if ZA_binerror<1 else 1
ZA_binerror = ZA_binerror+1
ZA_sig_binerror = th1_ZA_sig.GetBinError(i)/ZA_sig_bincontent if ZA_sig_bincontent>0 else 0
ZA_sig_binerror = ZA_sig_binerror if ZA_sig_binerror<1 else 1
ZA_sig_binerror = ZA_sig_binerror+1
f.write('observation %.2f\n'%bin_content)
f.write('------------\n')
f.write('# now we list the expected events for signal and all backgrounds in that bin\n')
f.write('# the second process line must have a positive number for backgrounds, and 0 for signal\n')
f.write('# then we list the independent sources of uncertainties, and give their effect (syst. error)\n')
f.write('# on each process and bin\n')
f.write('bin\tchAll%i\tchAll%i\tchAll%i\n'%(i,i,i))
f.write('process\tsig\tQCDgg\tQCDqq\n')
f.write('process\t0\t1\t2\n')
f.write('rate\t%0.2f\t%0.2f\t%0.2f\n'%(ZA_sig_bincontent,ZA_bincontent, non_prompt_bincontent, ))
f.write('------------\n')
f.write('lumi\tlnN\t1.06\t1.06\t-\t#lumi\n')
# f.write('VBS_stat_%s_%s_bin_%d\tlnN\t%0.2f\t-\t-\n'%('chAll','18',i,ZA_sig_binerror))
# f.write('QCDgg_stat_%s_%s_bin_%d\tlnN\t-\t%0.2f\t-\n'%('chAll','18',i,ZA_binerror))
# f.write('QCDqq_stat_%s_%s_bin_%d\tlnN\t-\t-\t%0.2f\n'%('chAll','18',i,non_prompt_binerror))
print 'bin ',i,' ',ZA_sig_binerror,' ',ZA_binerror,' ',non_prompt_binerror,' '
|
[
"ying.an@cern.ch"
] |
ying.an@cern.ch
|
c755e0f999df38cc3bc6191386c7b4da163fc42e
|
6b2a8dd202fdce77c971c412717e305e1caaac51
|
/solutions_5670465267826688_1/Python/YeOldeLancer/d___stra.py
|
f523443b1ff964dc7109509426fe56b1cce4791e
|
[] |
no_license
|
alexandraback/datacollection
|
0bc67a9ace00abbc843f4912562f3a064992e0e9
|
076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf
|
refs/heads/master
| 2021-01-24T18:27:24.417992
| 2017-05-23T09:23:38
| 2017-05-23T09:23:38
| 84,313,442
| 2
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,098
|
py
|
# Google Code Jam 2015, Qualification Round
# Problem C. Dijkstra
# Lance C. Simons
def dijkstra_eval(s):
    """Fold the character string *s* into a single dijkstra_digit by
    left-to-right multiplication.

    NOTE(review): when s starts with '-', the sign prefix consumes s[:2]
    but the loop still re-reads from s[1:]; that is only correct for
    "-1" (the identity), which is the sole signed form this program
    ever feeds back in.
    """
    prefix_len = 2 if s.startswith("-") else 1
    result = dijkstra_digit(s[:prefix_len])
    for ch in s[1:]:
        result = result * ch
    return result
class dijkstra_digit:
    """A signed quaternion unit: one of 1, i, j, k plus a sign flag.

    Implements just enough quaternion arithmetic (negation,
    multiplication, powers, equality) for the Dijkstra problem.
    """

    # Multiplication table for the unsigned units; a cell beginning
    # with '-' encodes a negative product (e.g. i*i = -1, i*k = -j).
    mtable = {"1":{"1": "1", "i": "i", "j": "j", "k": "k"},
              "i":{"1": "i", "i":"-1", "j": "k", "k":"-j"},
              "j":{"1": "j", "i":"-k", "j":"-1", "k": "i"},
              "k":{"1": "k", "i": "j", "j":"-i", "k":"-1"} }

    def __init__(self, *args):
        # Three accepted forms:
        #   dijkstra_digit("i") / dijkstra_digit("-i") -- string, optional sign
        #   dijkstra_digit(value)                      -- bare value, positive
        #   dijkstra_digit(positive, value)            -- explicit pair
        self.positive = True
        if type(args[0]) == type(""):
            self.positive = len(args[0]) == 1  # 2 chars means a '-' prefix
            self.value = args[0][-1]
        elif len(args) == 1:
            self.value = args[0]
        else:
            self.positive, self.value = args

    def __neg__(self):
        return dijkstra_digit(not self.positive, self.value)

    def __mul__(self, other):
        # Strings are evaluated first, so `digit * "i"` works.
        if type(other) == type(""):
            other = dijkstra_eval(other)
        d = dijkstra_digit(self.mtable[self.value][other.value])
        # Signs multiply: differing signs flip the product.
        if self.positive != other.positive:
            d = -d
        return d

    def __rmul__(self, other):
        # Supports a string on the left-hand side: `"i" * digit`.
        return dijkstra_eval(other) * self

    def __pow__(self, exp):
        # Unit powers repeat with period 4; the sign works out by parity
        # in each branch below.
        exp = exp % 4
        if self.value == "1":
            if exp == 0:
                return dijkstra_digit("1")
            elif exp == 1:
                return dijkstra_digit(self.positive, "1")
            elif exp == 2:
                return dijkstra_digit(True, "1")  # (+-1)^2 == +1
            else:
                return dijkstra_digit(self.positive, "1")
        else:
            if exp == 0:
                return dijkstra_digit("1")
            elif exp == 1:
                return dijkstra_digit(self.positive, self.value)
            elif exp == 2:
                return dijkstra_digit("-1")  # i^2 == j^2 == k^2 == -1
            else:
                return dijkstra_digit(not self.positive, self.value)

    def __eq__(self, other):
        # Comparison against a string evaluates the string first.
        if type(other) == type(""):
            other = dijkstra_eval(other)
        return self.positive == other.positive and self.value == other.value

    def __ne__(self, other):
        return not self == other

    def __str__(self):
        return ("" if self.positive else "-") + self.value

    def __repr__(self):
        return str(self)
def correctable(count, substr, maxtest=6):
    # Decide whether `substr`, repeated `count` times, can be split into
    # three consecutive pieces evaluating to i, j and k.  Python 2 code.
    # Exit early if they are not equal
    if (dijkstra_eval(substr) ** count) != "ijk":
        return False
    strlen = len(substr)
    def at(i):
        # Character at position i of the infinite repetition of substr.
        return substr[i % strlen]
    def search_fwd(goal, *start):
        # Length of the shortest prefix (beginning at offset sum(start))
        # whose running product equals `goal`, scanning forward at most
        # maxtest repetitions; None if not found or a start is None.
        if not all(start): return None
        start = sum(start)
        i = 0
        fwd = dijkstra_digit(at(i+start))
        while fwd != goal and i < strlen * maxtest:
            i += 1
            fwd = fwd * at(i+start)
        if fwd != goal:
            return None
        return i+1
    def search_rev(goal, *end):
        # Mirror of search_fwd: scan backward from offset sum(end),
        # multiplying new characters on the LEFT.
        if not all(end): return None
        end = sum(end)
        i = 0
        rev = dijkstra_digit(at(end-1-i))
        while rev != goal and i < strlen * maxtest:
            i += 1
            rev = at(end-1-i) * rev
        if rev != goal:
            return None
        return i+1
    def valid(*args):
        # True when every search produced a length (no None).
        return all(args)
    def will_fit(*chars):
        # Do the consumed characters fit inside `count` copies of substr?
        # NOTE(review): relies on Python 2 integer division; under
        # Python 3 this would need `//`.
        chars_used = sum(chars)
        words_used = ((chars_used - 1) / strlen) + 1
        return words_used <= count
    # Forward search
    i_used_fwd = search_fwd("i")
    j_used_fwd = search_fwd("j", i_used_fwd)
    k_used_fwd = search_fwd("k", i_used_fwd, j_used_fwd)
    if valid(i_used_fwd, j_used_fwd, k_used_fwd):
        return will_fit(i_used_fwd, j_used_fwd, k_used_fwd)
    # Reverse search
    k_used_rev = search_rev("k")
    j_used_rev = search_rev("j", k_used_rev)
    i_used_rev = search_rev("i", k_used_rev, j_used_rev)
    if valid(i_used_rev, j_used_rev, k_used_rev):
        return will_fit(i_used_rev, j_used_rev, k_used_rev)
    # Mixed searches: prefix from the front, suffix from the back.
    if valid(i_used_fwd, j_used_fwd, k_used_rev):
        return will_fit(i_used_fwd, j_used_fwd, k_used_rev)
    if valid(i_used_fwd, j_used_rev, k_used_rev):
        return will_fit(i_used_fwd, j_used_rev, k_used_rev)
    return False
def go(infilename, outfilename):
    # Read Code Jam input and write one "Case #n: YES/NO" line per case.
    # Python 2 code: uses file.next() to pull lines.
    inf = open(infilename, "r")
    outf = open(outfilename, "w")
    runs = int(inf.next().strip())
    for i in range(runs):
        # L is the stated substring length (unused: len(substr) is used
        # instead); X is the repetition count.
        L,X = map(int, inf.next().strip().split())
        substr = inf.next().strip()
        outf.write("Case #%d: %s\n" % (i+1, {True:"YES", False:"NO"}[correctable(X,substr)]))
if __name__ == "__main__":
    import sys
    # Output name is derived from the input name: foo.in -> foo.out.
    go(sys.argv[1], sys.argv[1].replace(".in", ".out"))
|
[
"eewestman@gmail.com"
] |
eewestman@gmail.com
|
dffce93acf81be40e78272c45459c21d33a2780a
|
3ff28c714fef7f568e8dfce0a2d4a16d6d10e8ef
|
/Using Python to Access Web Data - University of Michigan/get_contents_between_tags.py
|
86f3d5bb546264bc8aadff7a06c6be873c6ea2f3
|
[] |
no_license
|
avishkakavindu/Fun-Times
|
a0325c045a3d9316fd00d1c9b025a994498762d5
|
6861558c668892ce2a0b1b37ecfac30883f0f3b5
|
refs/heads/master
| 2022-08-13T15:23:22.939576
| 2020-05-22T10:18:30
| 2020-05-22T10:18:30
| 264,172,566
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 743
|
py
|
from urllib.request import urlopen
from bs4 import BeautifulSoup
import ssl

# Build an SSL context that skips certificate verification so the
# scraper also works against hosts with self-signed/bad certificates.
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE

# Prompt for a URL, fetch it, and parse the HTML.
url = input('Enter - ')
soup = BeautifulSoup(urlopen(url, context=ctx).read(), "html.parser")

# Dump each anchor tag piece by piece.
for tag in soup('a'):
    print('TAG:', tag)                    # the entire tag
    print('URL:', tag.get('href', None))  # value of the href attribute
    print('Contents:', tag.contents[0])   # first child between the tags
    print('Attrs:', tag.attrs)            # all attributes as a dict
|
[
"avishkakavindud@gmail.com"
] |
avishkakavindud@gmail.com
|
c8e6c842ce09125be40f318e5d67694aa1bf17f4
|
91e18177b07a842b84863cee8cad118666107b4b
|
/schedule/migrations/0001_initial.py
|
59d213cde1acd6355cf40fbd8e7f4b4590b2abd1
|
[] |
no_license
|
HyeonGyuChi/NewBeTon_2019
|
6c55797af34715a803cf4eee245b1c7b77584f2a
|
1d93bdaec9dbf1eb82ea689eb01b106e835d373f
|
refs/heads/master
| 2020-05-19T08:21:20.622579
| 2019-05-05T00:43:19
| 2019-05-05T00:43:19
| 184,919,650
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,486
|
py
|
# Generated by Django 2.2.1 on 2019-05-04 15:06
from django.db import migrations, models


class Migration(migrations.Migration):
    """Initial schema: a User table and a TimeTable of weekly classes."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=20)),
            ],
        ),
        migrations.CreateModel(
            name='TimeTable',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # NOTE(review): 'subjuect_name' is a typo for 'subject_name';
                # renaming the column now requires a follow-up migration.
                ('subjuect_name', models.CharField(max_length=30)),
                # NOTE(review): int choice keys on CharFields — stored values
                # will be str() of the ints; confirm this is intended.
                ('day', models.CharField(choices=[(0, 'Monday'), (1, 'Tuesday'), (2, 'Wednesday'), (3, 'Thursday'), (4, 'Friday'), (5, 'Saturday'), (6, 'Sunday')], max_length=1)),
                ('start_time', models.CharField(choices=[(9, '9시'), (10, '10시'), (11, '11시'), (12, '12시'), (13, '13시'), (14, '14시'), (15, '15시'), (16, '16시'), (17, '17시'), (18, '18시')], max_length=2)),
                ('end_time', models.CharField(choices=[(9, '9시'), (10, '10시'), (11, '11시'), (12, '12시'), (13, '13시'), (14, '14시'), (15, '15시'), (16, '16시'), (17, '17시'), (18, '18시')], max_length=2)),
                # NOTE(review): on_delete should be the callable
                # models.CASCADE, not the string 'CASCADE' — deleting a
                # User will raise TypeError. Fix via a new migration rather
                # than editing this applied one.
                ('user_id', models.ForeignKey(on_delete='CASCADE', to='schedule.User')),
            ],
        ),
    ]
|
[
"hyeongyuc96@gmail.com"
] |
hyeongyuc96@gmail.com
|
ea87a335075397221ad96fc8a450587dcbe0c2c2
|
273eb20546083f0e23a8077a3f6d383ed37ffef6
|
/Bricks/Qt4_MultipleMotorsBrick.py
|
dda64ad67cca45f17bcc363e8cba0386acbdb934
|
[] |
no_license
|
douglasbeniz/BlissFramework
|
fd886b161b9ba6f246424b1352a99820303d48aa
|
11486d6c91fc0077e967cb2321743466a7c1aa8b
|
refs/heads/master
| 2021-01-24T15:22:54.876055
| 2017-09-21T19:12:44
| 2017-09-21T19:12:44
| 55,637,790
| 0
| 0
| null | 2016-04-06T20:20:11
| 2016-04-06T20:20:11
| null |
UTF-8
|
Python
| false
| false
| 4,972
|
py
|
#
# Project: MXCuBE
# https://github.com/mxcube.
#
# This file is part of MXCuBE software.
#
# MXCuBE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# MXCuBE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with MXCuBE. If not, see <http://www.gnu.org/licenses/>.
from PyQt4 import QtGui
from PyQt4 import QtCore
from Qt4_MotorSpinBoxBrick import Qt4_MotorSpinBoxBrick
from BlissFramework import Qt4_Icons
from BlissFramework.Qt4_BaseComponents import BlissWidget
from BlissFramework.Utils import Qt4_widget_colors
__category__ = 'Motor'


class Qt4_MultipleMotorsBrick(BlissWidget):
    """Brick showing one motor spin-box widget per hardware object.

    Properties:
      mnemonic            -- whitespace-separated hardware object names
      labels              -- whitespace-separated labels, one per motor
      predefinedPositions -- whitespace-separated named positions; one
                             button per name moves ALL motors there
    """

    def __init__(self, *args):
        BlissWidget.__init__(self, *args)

        # Hardware objects ----------------------------------------------------

        # Internal values -----------------------------------------------------
        self.motor_hwobj_list = []            # motor hardware objects
        self.motor_widget_list = []           # one spin-box brick per motor
        self.motor_widget_labels = []         # labels applied to the widgets
        self.predefined_positions_list = []
        self.positions = None

        # Properties ----------------------------------------------------------
        self.addProperty('mnemonic', 'string', '')
        self.addProperty('labels','string','')
        self.addProperty('predefinedPositions', 'string', '')

        # Signals -------------------------------------------------------------

        # Slots ---------------------------------------------------------------

        # Graphic elements ----------------------------------------------------
        self.main_group_box = QtGui.QGroupBox(self)

        # Layout --------------------------------------------------------------
        self.main_groupbox_hlayout = QtGui.QHBoxLayout(self.main_group_box)
        self.main_groupbox_hlayout.setSpacing(2)
        self.main_groupbox_hlayout.setContentsMargins(0, 0, 0, 0)
        self.main_hlayout = QtGui.QHBoxLayout(self)
        self.main_hlayout.addWidget(self.main_group_box)
        self.main_hlayout.setSpacing(2)
        self.main_hlayout.setContentsMargins(2, 2, 2, 2)

        # Size Policy ---------------------------------------------------------

        # Qt signal/slot connections ------------------------------------------

        # Other ---------------------------------------------------------------

    def propertyChanged(self, property_name, old_value, new_value):
        """React to framework property updates; builds the child widgets."""
        if property_name == 'mnemonic':
            hwobj_names_list = new_value.split()
            for hwobj_name in hwobj_names_list:
                temp_motor_hwobj = self.getHardwareObject(hwobj_name)
                temp_motor_widget = Qt4_MotorSpinBoxBrick(self)
                temp_motor_widget.set_motor(temp_motor_hwobj, hwobj_name)
                # Only the bare spin box is wanted in this brick.
                temp_motor_widget.move_left_button.hide()
                temp_motor_widget.move_right_button.hide()
                temp_motor_widget.step_button.hide()
                temp_motor_widget.set_line_step(10.0)
                temp_motor_widget.step_changed(None)
                self.main_groupbox_hlayout.addWidget(temp_motor_widget)
                self.motor_hwobj_list.append(temp_motor_hwobj)
                self.motor_widget_list.append(temp_motor_widget)
            if len(self.motor_widget_labels):
                for index, label in enumerate(self.motor_widget_labels):
                    self.motor_widget_list[index].setLabel(label)
        elif property_name == 'icons':
            # NOTE(review): parsed but never applied — icon support looks
            # unfinished.
            icons_list = new_value.split()
        elif property_name == 'labels':
            self.motor_widget_labels = new_value.split()
            if len(self.motor_widget_list):
                for index, label in enumerate(self.motor_widget_labels):
                    self.motor_widget_list[index].setLabel(label)
        elif property_name == 'predefinedPositions':
            self.predefined_positions_list = new_value.split()
            for predefined_position in self.predefined_positions_list:
                temp_position_button = QtGui.QPushButton(predefined_position, self.main_group_box)
                self.main_groupbox_hlayout.addWidget(temp_position_button)
                # BUGFIX: bind the loop variable through a default argument.
                # The original `lambda: ...predefined_position...` late-bound
                # the name, so EVERY button moved to the LAST position in
                # the list.  `checked` absorbs the bool emitted by clicked.
                temp_position_button.clicked.connect(
                    lambda checked=False, position=predefined_position:
                    self.predefined_position_clicked(position))
        else:
            BlissWidget.propertyChanged(self,property_name, old_value, new_value)

    def predefined_position_clicked(self, predefined_position):
        """Move every managed motor to the given predefined position."""
        for motor in self.motor_hwobj_list:
            motor.move_to_predefined_position(predefined_position.lower())
|
[
"ivars.karpics@gmail.com"
] |
ivars.karpics@gmail.com
|
d54f685c7714a72608acb26e0491b4540d60859f
|
220b79a0c02d43817a5fe4fb0d73e6061507f09d
|
/mlflow_tools/display/list_model_versions.py
|
d5bc037b0d944561b8595a72e0d5665b6990dffc
|
[] |
no_license
|
amesar/mlflow-tools
|
7ae5976297545417f5974f418028246e3d74da5f
|
2e7282397e9d3a29b4c30aae8ee5d26511d9ab15
|
refs/heads/master
| 2023-07-25T12:54:01.087785
| 2023-07-25T01:44:54
| 2023-07-25T01:44:54
| 232,914,433
| 34
| 7
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,768
|
py
|
"""
List all registered models versions.
"""
import click
from tabulate import tabulate
from mlflow_tools.common.click_options import opt_sort_attr, opt_sort_order, opt_columns, opt_output_csv_file
from mlflow_tools.api import api_factory
from mlflow_tools.display.display_utils import process_df
pandas_api = api_factory.get_pandas_api()
def to_pandas_dataframe(model_name=None, use_by_models=False):
    """Return a DataFrame of registered model versions.

    When *model_name* is given, restrict the search to that model.
    When *use_by_models* is true, use the 'by models' search variant.
    """
    query = f"name = '{model_name}'" if model_name else None
    search = (pandas_api.search_model_versions_by_models if use_by_models
              else pandas_api.search_model_versions)
    return search(filter=query)
def list(model_name, columns=None, csv_file=None, sort_attr="name", sort_order="asc", use_by_models=False):
    """Print model versions as a psql-style table plus a version count.

    NOTE(review): shadows the builtin ``list``; renaming would change the
    module's public API, so the name is kept as-is.
    """
    versions = process_df(
        to_pandas_dataframe(model_name, use_by_models),
        columns, sort_attr, sort_order, csv_file)
    print(tabulate(versions, headers="keys", tablefmt="psql", showindex=False))
    print(f"Versions: {versions.shape[0]}")
@click.command()
@click.option("--model",
    help="Registered model to filter by.",
    type=str,
    required=False,
    show_default=True
)
@opt_sort_attr
@opt_sort_order
@click.option("--use-by-models",
    help="Use 'by models' variant to search for versions.",
    type=bool,
    required=False
)
@opt_columns
@opt_output_csv_file
def main(model, sort_attr, sort_order, use_by_models, columns, csv_file):
    # CLI entry point: echo the parsed options, then delegate to list().
    # (Deliberately no docstring: click would surface it as --help text.)
    print("Options:")
    # At this point locals() holds exactly the six CLI parameters.
    for k,v in locals().items():
        print(f"  {k}: {v}")
    if columns:
        # --columns is a comma-separated string; split it into a list.
        columns = columns.split(",")
    list(model,
        sort_attr = sort_attr,
        sort_order = sort_order,
        use_by_models = use_by_models,
        columns = columns,
        csv_file = csv_file
    )


if __name__ == "__main__":
    main()
|
[
"amesar@users.noreply.github.co"
] |
amesar@users.noreply.github.co
|
1976cf0f7d94870552d7607100e8d9e5a7980f1f
|
a1c166a1ac4782f1f0792e0fd21741360373b376
|
/frontEnd/widgets/commodity_tree.py
|
f2d1ae1f2f4111d1292e37489fab3a0caa3674f6
|
[] |
no_license
|
xiaomapython/QlaskExplor
|
3c7b75866b8276a5c2de3fbfddf779e1a66691d0
|
c8b1757d08d06d350f7ca41897bbf4378fde3911
|
refs/heads/master
| 2020-06-23T08:08:45.169160
| 2019-05-15T02:05:17
| 2019-05-15T02:05:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,166
|
py
|
# _*_ coding:utf-8 _*_
# company: RuiDa Futures
# author: zizle
"""目录树组件"""
from PyQt5.QtWidgets import QTreeWidget, QTreeWidgetItem
from PyQt5.QtGui import QIcon
from PyQt5.QtCore import Qt, pyqtSignal
from settings import MEDIA_PATH
class Tree(QTreeWidget):
    """Left-hand commodity directory tree (fixed 270px wide)."""

    variety_click_signal = pyqtSignal(str)  # emitted when a variety item is clicked

    def __init__(self):
        super(Tree, self).__init__()
        self.__init_style()

    def __init_style(self):
        """
        Apply the fixed stylesheet and configure the header.
        :return: None
        """
        style_sheet = """
        QTreeWidget {
            outline: 0px;
            min-width: 270px;
            max-width: 270px;
            color: black;
            background: #F5F5F5;
            font-size: 13px;
            border: none;
        }
        QHeaderView {
            font-size: 14px;
        }
        QTreeWidget::Item {
            width: 100%;
            height: 30px;
            border-bottom: 1px solid #EEEEEE;
        }
        QTreeWidget::Item:selected {
            background: powderblue;
        }
        QTreeWidget::Item:hover {
            background: lightgrey;
        }
        """
        self.setHeaderLabels(["商品"])  # header label list ("commodity"; a list makes columns easy to add/remove)
        self.setStyleSheet(style_sheet)  # apply the stylesheet above
        self.setHeaderHidden(True)  # hide the tree header
        self.setContextMenuPolicy(Qt.CustomContextMenu)  # enables a custom right-click menu; bind `customContextMenuRequested` and implement the slot
class TreeItem(QTreeWidgetItem):
    """Tree item carrying a 'collected' (favourite) flag and icon."""

    def __init__(self, collected=False):
        super(TreeItem, self).__init__()
        self.is_collected = collected
        self.__init_style()

    def __init_style(self):
        """
        Load both icons and show the 'collected' one when applicable.
        :return:
        """
        self.no_collection_icon = QIcon(MEDIA_PATH + "no_collection.png")
        self.collected_icon = QIcon(MEDIA_PATH + "collected.png")
        # NOTE(review): when not collected no icon is set here, and
        # no_collection() clears the icon instead of using
        # no_collection_icon (loaded above but never shown) — confirm
        # whether the empty icon is intended.
        if self.is_collected:
            self.collected()

    def no_collection(self):
        # Mark as not collected: clears the column-0 icon entirely.
        self.setIcon(0, QIcon())

    def collected(self):
        # Mark as collected: show the collected icon in column 0.
        self.setIcon(0, self.collected_icon)
|
[
"zizle_lin@163.com"
] |
zizle_lin@163.com
|
1428a143a04dea36c997dcea9eae210a8267879d
|
ddd18c78b27b9c85629feeab6914fc925aea9099
|
/practice19c.py
|
084a5e674ea8d90e9b0c68ad3070cbca3f20330d
|
[] |
no_license
|
harmansehmbi/Project19
|
7e6331050599db2d33f1c20aef5ad13c5d7a17a8
|
6c000d05bbc397132e9fe13a48f89b57b48b70da
|
refs/heads/master
| 2020-06-13T04:38:55.514033
| 2019-06-30T16:28:13
| 2019-06-30T16:28:13
| 194,537,492
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 726
|
py
|
import numpy as np

# Slicing: rows 0-1 of column 1.
arr1 = np.array([[8, 9], [10, 12], [13, 14]])
print(arr1[0:2, 1])

# Aggregate reductions over a 1-D array.
arr2 = np.array([10, 20, 30])
print(arr2.min())
print(arr2.max())
print(arr2.sum())

# Column-wise sum (axis 0 collapses the rows).
arr3 = np.array([[1, 2, 3], [4, 5, 6]])
print(arr3.sum(axis=0))

# Element-wise square root and overall standard deviation.
arr4 = np.array([[4, 9, 16], [11, 13, 15]])
print(np.sqrt(arr4))
print(np.std(arr4))

# Element-wise arithmetic between two equal-shaped arrays.
arr5 = np.array([[1, 2, 3], [4, 5, 6]])
arr6 = np.array([[1, 2, 3], [4, 5, 6]])
print(arr5 + arr6)
print(arr5 - arr6)
print(arr5 * arr6)
print(arr5 / arr6)
print(arr5 // arr6)

print("===============================")

# Vertical and horizontal stacking.
X = np.array([[1, 2, 3], [4, 5, 6]])
Y = np.array([[1, 2, 3], [4, 5, 6]])
print(np.vstack((X,Y)))
print(np.hstack((X,Y)))

# Element-wise trig / log on a 1-D array.
Z = np.array((7, 21, 3))
print(np.sin(Z))
print(np.log10(Z))
|
[
"51370954+harmansehmbi@users.noreply.github.com"
] |
51370954+harmansehmbi@users.noreply.github.com
|
83a7906165e04bf8bb596c05be03851eaf23994e
|
8f205d31e8e5555d69e0a7db086a3c93de6d2806
|
/kube/task_generation/merge_ccs.py
|
0668da5478328e17e92f72b24c014457c97dc7cc
|
[
"MIT"
] |
permissive
|
torms3/Synaptor
|
94e0f04478118399db91d79a8a8b478858fd4138
|
5de74aa61b3d04e88e6bc4c336d543f89d64b9a4
|
refs/heads/master
| 2021-05-21T19:08:43.625841
| 2020-06-19T23:10:47
| 2020-06-19T23:10:47
| 252,764,824
| 0
| 0
|
NOASSERTION
| 2020-04-03T15:03:17
| 2020-04-03T15:03:16
| null |
UTF-8
|
Python
| false
| false
| 600
|
py
|
import argparse
from taskqueue import TaskQueue
import synaptor.cloud.kube.parser as parser
import synaptor.cloud.kube.task_creation as tc
def main(configfilename):
    """Parse the config file, build a single merge-ccs task from it, and
    push that task onto the configured queue."""
    config = parser.parse(configfilename)

    merge_task = tc.create_merge_ccs_task(config["storagestrs"][0],
                                          config["szthresh"],
                                          config["maxfaceshape"])

    queue = TaskQueue(config["queueurl"])
    queue.insert_all([merge_task])
if __name__ == "__main__":
    # Command line: one positional argument naming the config file.
    cli = argparse.ArgumentParser()
    cli.add_argument("configfilename")
    parsed = cli.parse_args()

    main(parsed.configfilename)
|
[
"nturner.stanford@gmail.com"
] |
nturner.stanford@gmail.com
|
ff086b9a60930c6eb551f1675b66986afed25460
|
8602a87e12fe19b28c2e85cfae0bbde27a62855d
|
/ingenico/connect/sdk/domain/errors/definitions/api_error.py
|
378aba23c91ae36081a6e7f6901cce6727671a09
|
[
"MIT"
] |
permissive
|
king1212/connect-sdk-python2
|
6a687de7967a411fa802477069c7fc7079d059c2
|
203982559c5c10e3dbbb9dfc71123c269908ed26
|
refs/heads/master
| 2021-07-04T03:10:55.061416
| 2017-09-27T09:29:49
| 2017-09-27T09:29:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,311
|
py
|
# -*- coding: utf-8 -*-
#
# This class was auto-generated from the API references found at
# https://epayments-api.developer-ingenico.com/s2sapi/v1/
#
from ingenico.connect.sdk.data_object import DataObject
class APIError(DataObject):
    """One error entry as returned by the Ingenico Connect API.

    Auto-generated data object: private attributes use class-name
    mangling (``_APIError__code`` etc.) and are exposed through plain
    properties; ``to_dictionary``/``from_dictionary`` convert to and
    from the camelCase JSON representation.
    """

    # Backing fields for the properties below (all default to None).
    __category = None
    __code = None
    __http_status_code = None
    __id = None
    __message = None
    __property_name = None
    __request_id = None

    @property
    def category(self):
        """
        | Category the error belongs to. The category should give an indication of the type of error you are dealing with.Possible values:

        * CONNECT_PLATFORM_ERROR - indicating that a functional error has occurred in the Connect platform.
        * PAYMENT_PLATFORM_ERROR - indicating that a functional error has occurred in the Payment platform.
        * IO_ERROR - indicating that a technical error has occurred within the Connect platform or between Connect and any of the payment platforms or third party systems.

        Type: str
        """
        return self.__category

    @category.setter
    def category(self, value):
        self.__category = value

    @property
    def code(self):
        """
        | Error code

        Type: str
        """
        return self.__code

    @code.setter
    def code(self, value):
        self.__code = value

    @property
    def http_status_code(self):
        """
        | HTTP status code for this error that can be used to determine the type of error

        Type: int
        """
        return self.__http_status_code

    @http_status_code.setter
    def http_status_code(self, value):
        self.__http_status_code = value

    @property
    def id(self):
        """
        | ID of the error. This is a short human-readable message that briefly describes the error.

        Type: str
        """
        return self.__id

    @id.setter
    def id(self, value):
        self.__id = value

    @property
    def message(self):
        """
        | Human-readable error message that is not meant to be relayed to consumer as it might tip off people who are trying to commit fraud

        Type: str
        """
        return self.__message

    @message.setter
    def message(self, value):
        self.__message = value

    @property
    def property_name(self):
        """
        | In case the error was in relation to a property that was missing or not correct the name of the property in question is returned

        Type: str
        """
        return self.__property_name

    @property_name.setter
    def property_name(self, value):
        self.__property_name = value

    @property
    def request_id(self):
        """
        | ID of the request that can be used for debugging purposes

        Type: str
        """
        return self.__request_id

    @request_id.setter
    def request_id(self, value):
        self.__request_id = value

    def to_dictionary(self):
        # Serialize to the API's camelCase dict form; None values are
        # handled by the inherited _add_to_dictionary helper.
        dictionary = super(APIError, self).to_dictionary()
        self._add_to_dictionary(dictionary, 'category', self.category)
        self._add_to_dictionary(dictionary, 'code', self.code)
        self._add_to_dictionary(dictionary, 'httpStatusCode', self.http_status_code)
        self._add_to_dictionary(dictionary, 'id', self.id)
        self._add_to_dictionary(dictionary, 'message', self.message)
        self._add_to_dictionary(dictionary, 'propertyName', self.property_name)
        self._add_to_dictionary(dictionary, 'requestId', self.request_id)
        return dictionary

    def from_dictionary(self, dictionary):
        # Populate this object from a camelCase dict; absent keys leave
        # the corresponding attribute untouched (None by default).
        super(APIError, self).from_dictionary(dictionary)
        if 'category' in dictionary:
            self.category = dictionary['category']
        if 'code' in dictionary:
            self.code = dictionary['code']
        if 'httpStatusCode' in dictionary:
            self.http_status_code = dictionary['httpStatusCode']
        if 'id' in dictionary:
            self.id = dictionary['id']
        if 'message' in dictionary:
            self.message = dictionary['message']
        if 'propertyName' in dictionary:
            self.property_name = dictionary['propertyName']
        if 'requestId' in dictionary:
            self.request_id = dictionary['requestId']
        return self
|
[
"jenkins@isaac.nl"
] |
jenkins@isaac.nl
|
416bdd664c7972904f6b0112f4f041b2123c7141
|
057a85e94d7a284cda900e6c89bd3da6402206e8
|
/tools.py
|
fcc935962c53518d1ff2d416c1eec10895975b9e
|
[
"MIT"
] |
permissive
|
nusratsharmin/AFQ-notebooks
|
9eb5662dc46a1b67d379f5a6227667272f195184
|
de4e2a9fce6f7f3134a2d362e55d3c6ba44fd3a0
|
refs/heads/master
| 2020-12-31T01:23:24.088489
| 2015-08-05T19:24:54
| 2015-08-05T19:24:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,994
|
py
|
import numpy as np
from dipy.align.metrics import CCMetric, EMMetric, SSDMetric
from dipy.align.imwarp import SymmetricDiffeomorphicRegistration
# Map CLI-style metric names to their dipy metric classes.
metric_dict = {'CC': CCMetric,
               'EM': EMMetric,
               'SSD': SSDMetric}


def syn_registration(moving, static, moving_grid2world=None, static_grid2world=None,
                     metric='CC', dim=3, level_iters=None, prealign=None):
    """
    Register a source image (moving) to a target image (static)

    Parameters
    ----------
    moving : ndarray
        The source image data to be registered
    moving_grid2world : array, shape (4,4)
        The affine matrix associated with the moving (source) data.
    static : ndarray
        The target image data for registration
    static_grid2world : array, shape (4,4)
        The affine matrix associated with the static (target) data
    metric : string, optional
        The metric to be optimized. One of `CC`, `EM`, `SSD`. Default: 'CC'.
    dim: int (either 2 or 3), optional
        The dimensions of the image domain. Default: 3
    level_iters : list of int, optional
        the number of iterations at each level of the Gaussian Pyramid (the
        length of the list defines the number of pyramid levels to be
        used). Default: [10, 10, 5].
    prealign : array, shape (4,4), optional
        An affine transformation used to initialize the registration.
        Default: None (identity).

    Returns
    -------
    warped_moving : ndarray
        The data in `moving`, warped towards the `static` data.
    mapping : DiffeomorphicMap
        The resulting mapping; use ``mapping.transform`` /
        ``mapping.transform_inverse`` to warp additional volumes.
        (BUGFIX: the old docstring advertised forward/backward vector
        fields, but the function has always returned the mapping object.)
    """
    # BUGFIX: avoid a mutable default argument for level_iters.
    if level_iters is None:
        level_iters = [10, 10, 5]
    use_metric = metric_dict[metric](dim)
    sdr = SymmetricDiffeomorphicRegistration(use_metric, level_iters)
    mapping = sdr.optimize(static, moving, static_grid2world=static_grid2world,
                           moving_grid2world=moving_grid2world, prealign=prealign)
    warped_moving = mapping.transform(moving)
    return warped_moving, mapping
|
[
"arokem@gmail.com"
] |
arokem@gmail.com
|
200340de9c7d3d668b43028bd73db5ca64648433
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03282/s933456835.py
|
554270dfd4662d9ae4de392ee9c913edc15fbb2a
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 153
|
py
|
# Read the digit string and the (1-based) position to report.
s = input()
k = int(input())
# c = length of the run of leading '1' characters.
c = len(s) - len(s.lstrip("1"))
# Positions inside the leading run stay '1'; every later position
# collapses to the first non-'1' digit.
print("1" if k <= c else s[c])
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
dd8c33559a0eae9643ac31ea199756615170cc53
|
985242058ea4cdc7f42dde4ff60ec96e19669ef0
|
/aleph/analyze/polyglot_entity.py
|
2547d9b2a9c6344486e03e65477aa4a67c7c4c1f
|
[
"MIT"
] |
permissive
|
KarrieK/aleph
|
4f4eff8892e145f1b0ad085bca45a19382854150
|
d8c1895339c1bfd3ad265237feb411cef9dd114b
|
refs/heads/master
| 2021-04-28T07:34:03.385829
| 2018-02-16T16:41:56
| 2018-02-16T16:41:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,727
|
py
|
from __future__ import absolute_import
import regex
import logging
from polyglot.text import Text
from polyglot.downloader import downloader
from normality import collapse_spaces
from aleph import settings
from aleph.analyze.analyzer import Analyzer
from aleph.model import Document, DocumentTag, DocumentTagCollector
log = logging.getLogger(__name__)
class PolyglotEntityAnalyzer(Analyzer):
    """Extract person and organization names from document text with
    polyglot NER and store them as document tags."""

    ORIGIN = 'polyglot'
    # Texts shorter than this carry too little context for useful NER.
    MIN_LENGTH = 100
    # Strips leading/trailing non-word characters from extracted labels.
    # (Raw string: avoids the invalid-escape warning for '\w'.)
    CLEAN = regex.compile(r'(^[^\w]*|[^\w]*$)')
    TYPES = {
        'I-PER': DocumentTag.TYPE_PERSON,
        'I-ORG': DocumentTag.TYPE_ORGANIZATION,
    }
    # Document schemata that never contain useful free text for NER.
    IGNORED = [
        Document.SCHEMA_PACKAGE,
        Document.SCHEMA_FOLDER,
        Document.SCHEMA_IMAGE,
        Document.SCHEMA_TABLE
    ]

    def __init__(self):
        self.active = settings.ANALYZE_POLYGLOT

    @property
    def languages(self):
        """Languages with an installed polyglot NER model (cached on the class)."""
        cls = type(self)
        if not hasattr(cls, '_languages'):
            try:
                packages = downloader.packages()
                packages = [p for p in packages if p.task == 'ner2']
                cls._languages = [p.language for p in packages]
            except Exception:
                # Bug fix: previously the attribute stayed unset here, so
                # the return below raised AttributeError after a download
                # failure. Cache an empty list instead.
                cls._languages = []
                log.info("Cannot load polyglot language list.")
        return cls._languages

    def tag_text(self, text, languages):
        """Yield (label, NER tag) pairs for entities found in *text*."""
        for language in languages:
            parsed = Text(text, hint_language_code=language)
            for entity in parsed.entities:
                # Locations are intentionally skipped.
                if entity.tag == 'I-LOC':
                    continue
                label = ' '.join(entity)
                label = self.CLEAN.sub(' ', label)
                label = collapse_spaces(label)
                # Only keep multi-word labels of a sane length.
                if ' ' not in label or len(label) < 4 or len(label) > 200:
                    continue
                yield label, entity.tag

    def analyze(self, document):
        """Run NER over all texts of *document* and persist collected tags."""
        if document.schema in self.IGNORED:
            return
        collector = DocumentTagCollector(document, self.ORIGIN)
        try:
            # Restrict to languages we have models for; fall back to the
            # configured default language when there is no overlap.
            languages = set(document.languages)
            if len(self.languages):
                languages = languages.intersection(self.languages)
            if not len(languages):
                languages = [settings.DEFAULT_LANGUAGE]
            for text in document.texts:
                if len(text) <= self.MIN_LENGTH:
                    continue
                for label, tag in self.tag_text(text, languages):
                    # log.info("Entity [%s]: %s", document.id, label)
                    collector.emit(label, self.TYPES[tag])
        except ValueError as ve:
            log.warning('NER value error: %r', ve)
        collector.save()
        if len(collector):
            log.info('Polyglot extracted %s entities.', len(collector))
|
[
"friedrich@pudo.org"
] |
friedrich@pudo.org
|
3cd48988c3167fcab3f06686d166211a8933beca
|
9791c7cd589c5c53aa7e1dbf929d69ba99f7c526
|
/myapp/migrations/0001_initial.py
|
ac89857986b89af875fefb96ade4f759a637aabe
|
[] |
no_license
|
okprogrammer/Calener-App
|
3d717ff9eac3e8ddebd6f0b5e95caf513e3fa429
|
a15e71eebed670b7203960e86f88e8da92dca1b7
|
refs/heads/master
| 2020-03-06T23:44:54.761954
| 2018-03-30T12:34:59
| 2018-03-30T12:34:59
| 127,140,184
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 658
|
py
|
# Generated by Django 2.0.2 on 2018-03-28 17:39
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the Entry table used by
    # the calendar app (name, event date, description, created timestamp).
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Entry',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('date', models.DateTimeField()),
                ('description', models.TextField()),
                # Set once on insert, never updated afterwards.
                ('created', models.DateTimeField(auto_now_add=True)),
            ],
        ),
    ]
|
[
"omk7912@gmail.com"
] |
omk7912@gmail.com
|
ac28828be9a051149f7f7ae8b2cefe16d974f496
|
a37c48267bfb8476476dad7219c4e3329f9e2991
|
/Packs/qualys/Scripts/QualysCreateIncidentFromReport/QualysCreateIncidentFromReport.py
|
c2f994a169a3651f86f8693d07c61805e6dc09f8
|
[
"MIT"
] |
permissive
|
adambaumeister/content
|
611ce9fba412a5eb28fbefa8a43282e98d3f9327
|
01b57f8c658c2faed047313d3034e8052ffa83ce
|
refs/heads/master
| 2023-03-09T18:16:18.623380
| 2022-07-13T18:11:09
| 2022-07-13T18:11:09
| 274,290,989
| 2
| 0
|
MIT
| 2023-03-06T12:22:17
| 2020-06-23T02:36:53
|
Python
|
UTF-8
|
Python
| false
| false
| 5,183
|
py
|
import json
import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
def get_asset_id_for_ip(ip):
    """Resolve the Qualys asset ID for *ip* via the qualys-host-list command.

    Exits the script (error result + sys.exit) when the command itself
    fails; returns None when the response contains no host ID.
    """
    resp = demisto.executeCommand("qualys-host-list", {"ips": ip})
    if isError(resp[0]):
        demisto.results(resp)
        sys.exit(0)
    # Some responses carry the raw XML payload as a string in 'Contents';
    # convert it to JSON first. Otherwise assume an already-parsed dict.
    if isinstance(resp_dict := resp[0], dict) and isinstance(xml_string := resp_dict['Contents'], str):
        json_string: str = xml2json(xml_string)
        asset_id = demisto.get(json.loads(json_string), 'HOST_LIST_OUTPUT.RESPONSE.HOST_LIST.HOST.ID')
    else:
        asset_id = demisto.get(resp[0], 'Contents.HOST_LIST_OUTPUT.RESPONSE.HOST_LIST.HOST.ID')
    return asset_id
def main():
    """Parse a Qualys asset report file and open one incident per
    (asset, QID) pair with severity >= minSeverity, unless a matching
    non-closed incident already exists."""
    incident_type = demisto.args().get("incidentType", "Vulnerability")
    max_file_size = int(demisto.args().get("maxFileSize", 1024 ** 2))
    min_severity = int(demisto.args().get("minSeverity", 1))
    file_entry = demisto.getFilePath(demisto.args().get("entryID"))
    with open(file_entry['path'], 'r') as f:
        # Read at most max_file_size bytes of the report.
        data = f.read(max_file_size)
    if data:
        report = json.loads(xml2json(data))
        generation_date = demisto.get(report, "ASSET_DATA_REPORT.HEADER.GENERATION_DATETIME")
        # Get asset list
        asset_list = demisto.get(report, "ASSET_DATA_REPORT.HOST_LIST.HOST")
        if not asset_list:
            demisto.results({
                "Type": entryTypes["note"],
                "ContentsFormat": formats["text"],
                "Contents": 'No vulnerable assets were found'
            })
            sys.exit(0)
        # A single host is returned as a dict, not a one-element list.
        if not isinstance(asset_list, list):
            asset_list = [asset_list]
        # Get QIDs only if over relevant severity
        general_vulnerabilities = argToList(
            demisto.get(report, "ASSET_DATA_REPORT.GLOSSARY.VULN_DETAILS_LIST.VULN_DETAILS"))
        if not isinstance(general_vulnerabilities, list):
            general_vulnerabilities = [general_vulnerabilities]
        # Get list of QID with severity >= min_severity
        qid_severity = [demisto.get(vulnerability, "QID.#text") for vulnerability in general_vulnerabilities if
                        demisto.get(vulnerability, 'SEVERITY')
                        and (int(demisto.get(vulnerability, 'SEVERITY')) >= min_severity)]
        for asset in asset_list:
            # Get Asset ID from Qualys
            ip = demisto.get(asset, "IP")
            if not ip:
                demisto.results({
                    "Type": entryTypes["error"],
                    "ContentsFormat": formats["text"],
                    "Contents": 'No IP was found for asset {0}'.format(str(asset))
                })
                sys.exit(0)
            asset_id = get_asset_id_for_ip(ip)
            if not asset_id:
                demisto.results({
                    "Type": entryTypes["error"],
                    "ContentsFormat": formats["text"],
                    "Contents": 'No ID was found for asset {0}'.format(str(asset))
                })
                sys.exit(0)
            # Get Asset vulnerability list
            vulnerabilities = argToList(demisto.get(asset, "VULN_INFO_LIST.VULN_INFO"))
            if not isinstance(vulnerabilities, list):
                vulnerabilities = [vulnerabilities]
            qids = map(lambda vulnerability: demisto.get(vulnerability, "QID.#text"), vulnerabilities)
            # Get only the QIDs that exists in asset and has severity >= min_severity
            qids = list(set(qids) & set(qid_severity))
            for qid in qids:
                # Search for existing open incidents with the same Vendor ID and Asset ID.
                # Will open a new incident only if such an incident not exists.
                resp = demisto.executeCommand(
                    "getIncidents",
                    {"query": "vendorid: {0} and assetid: {1} and --status:Closed".format(qid, asset_id)})
                if isError(resp[0]):
                    demisto.results(resp)
                    sys.exit(0)
                incident_number = demisto.get(resp[0], "Contents.total")
                try:
                    incident_number = int(incident_number)
                except Exception:
                    demisto.results({
                        "Type": entryTypes["error"],
                        "ContentsFormat": formats["text"],
                        "Contents": 'Error while searching the incident repository'
                    })
                    sys.exit(0)
                if incident_number == 0:
                    # Create incident
                    demisto.executeCommand("createNewIncident", {
                        "name": "Vulnerability - Asset {0} QID {1} - {2}".format(asset_id, qid, generation_date),
                        "vendorid": str(qid),
                        "type": incident_type,
                        "assetid": str(asset_id)
                    })
        demisto.results("Done.")
    else:
        demisto.results({
            "Type": entryTypes["error"],
            "ContentsFormat": formats["text"],
            "Contents": 'No data could be read.'
        })
|
[
"noreply@github.com"
] |
adambaumeister.noreply@github.com
|
c235d6570542073a628b108ff849420cd261fbff
|
19375a18719e44eee7c596e72ef8915d3fcbff92
|
/day02_spider/07_pymongo.py
|
5f075d08ba8928c549c17868f7535f1d3477baa2
|
[] |
no_license
|
J-shan0903/AID1912
|
6c617fa26751c31ff05a63050a320122e3ca044e
|
0797f3d8ef0e96b8eb6908dffbec8193c9614973
|
refs/heads/master
| 2021-03-23T12:21:32.480026
| 2020-05-23T08:36:21
| 2020-05-23T08:36:21
| 247,452,309
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 182
|
py
|
import pymongo
# Connect to the MongoDB server on the default local port.
conn = pymongo.MongoClient('localhost', 27017)
# Database and collection are created lazily on first insert.
db = conn['maoyandb']
myset = db['maoyanset']
# Insert a single movie document (name, star field, release date).
myset.insert_one({'name': '战狼', 'star': '123', 'time': '2017-2-15'})
|
[
"369618935@qq.com"
] |
369618935@qq.com
|
fd159e49eeca3c25d2f16637e57bd79297ec1b34
|
63428f4bec80630523355f8a05bcbdbb1cf31dbf
|
/lambda/pseudotest/upload_results.py
|
de10c76d32ed2589bc3cb081079f381d85a27488
|
[] |
no_license
|
pniedzwiedzinski/pseudotest
|
fe314222619ebae55467acf4def14fa5619ad2eb
|
011800879cac43ded439370fa9ed8539f8e98ae5
|
refs/heads/master
| 2021-07-05T09:30:36.203945
| 2019-12-02T10:03:19
| 2019-12-02T10:03:19
| 182,046,647
| 2
| 2
| null | 2020-10-01T07:08:10
| 2019-04-18T08:19:19
|
Python
|
UTF-8
|
Python
| false
| false
| 250
|
py
|
import os
import pymysql
from .db import execute
def upload_results(results, job_id):
    """Store the test score *results* for the upload identified by *job_id*.

    The query is parameterized, so the DB driver escapes the values
    itself. Pre-escaping them with pymysql.escape_string() on top of that
    double-escaped quotes and backslashes, corrupting the stored data —
    the values are therefore passed through unmodified.
    """
    execute(
        "UPDATE pseudo_test_score SET score = %s WHERE file_id = %s",
        (results, job_id),
    )
|
[
"pniedzwiedzinski19@gmail.com"
] |
pniedzwiedzinski19@gmail.com
|
a6918cdd6386b5e72a785cdeaabed8970021c91c
|
15240e0e187788f1114cac98c534004ab4793cbf
|
/influxdb_client/domain/telegraf_plugin_output_file_config_files.py
|
38f742691f37962daf3966ab2ea0118da675af41
|
[
"MIT"
] |
permissive
|
semyont/influxdb-client-python
|
dddb6a1309fd424c9b1e0bec7c67a442cfcfe0b5
|
7b685fda2030b22697b6096cc3161fbbc01e7bee
|
refs/heads/master
| 2020-09-19T18:39:52.132739
| 2019-11-27T07:11:23
| 2019-11-27T07:11:23
| 224,266,106
| 2
| 0
|
MIT
| 2019-11-26T19:06:17
| 2019-11-26T19:06:09
| null |
UTF-8
|
Python
| false
| false
| 3,738
|
py
|
# coding: utf-8
"""
Influx API Service
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: 0.1.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class TelegrafPluginOutputFileConfigFiles(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attribute name -> declared OpenAPI type.
    openapi_types = {
        'type': 'str',
        'path': 'str'
    }
    # Attribute name -> JSON key in the API payload.
    attribute_map = {
        'type': 'type',
        'path': 'path'
    }
    def __init__(self, type=None, path=None):  # noqa: E501
        """TelegrafPluginOutputFileConfigFiles - a model defined in OpenAPI"""  # noqa: E501
        self._type = None
        self._path = None
        self.discriminator = None
        # Only overwrite the None defaults when values were supplied.
        if type is not None:
            self.type = type
        if path is not None:
            self.path = path
    @property
    def type(self):
        """Gets the type of this TelegrafPluginOutputFileConfigFiles.  # noqa: E501
        :return: The type of this TelegrafPluginOutputFileConfigFiles.  # noqa: E501
        :rtype: str
        """
        return self._type
    @type.setter
    def type(self, type):
        """Sets the type of this TelegrafPluginOutputFileConfigFiles.
        :param type: The type of this TelegrafPluginOutputFileConfigFiles.  # noqa: E501
        :type: str
        """
        self._type = type
    @property
    def path(self):
        """Gets the path of this TelegrafPluginOutputFileConfigFiles.  # noqa: E501
        :return: The path of this TelegrafPluginOutputFileConfigFiles.  # noqa: E501
        :rtype: str
        """
        return self._path
    @path.setter
    def path(self, path):
        """Sets the path of this TelegrafPluginOutputFileConfigFiles.
        :param path: The path of this TelegrafPluginOutputFileConfigFiles.  # noqa: E501
        :type: str
        """
        self._path = path
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively serialize nested models, lists and dicts.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, TelegrafPluginOutputFileConfigFiles):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
[
"robert.hajek@gmail.com"
] |
robert.hajek@gmail.com
|
b484145d63d254f278fb2c36cbab484831c7e745
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02984/s206130983.py
|
f20b1996e5c69cf6026085eef48328162e7f21d4
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 186
|
py
|
# Recover the original sequence from its adjacent pair averages.
N = int(input())
A = list(map(int, input().split()))
# Alternating signed sum yields the first unknown value.
now = sum(A)
for idx in range(N // 2):
    now -= 2 * A[2 * idx + 1]
for a in A:
    print(now, end=' ')
    # Consecutive unknowns satisfy x_i + x_{i+1} = 2 * a_i.
    now = 2 * a - now
print()
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
9db46716b6f44e4912f2a47e91a80b23070e5d7f
|
e0980f704a573894350e285f66f4cf390837238e
|
/.history/streams/blocks_20201019102708.py
|
61f5e48858ce8dbe0af570ab942e18fa784b868b
|
[] |
no_license
|
rucpata/WagtailWebsite
|
28008474ec779d12ef43bceb61827168274a8b61
|
5aa44f51592f49c9a708fc5515ad877c6a29dfd9
|
refs/heads/main
| 2023-02-09T15:30:02.133415
| 2021-01-05T14:55:45
| 2021-01-05T14:55:45
| 303,961,094
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 381
|
py
|
from wagtail.core import blocks
class TitleBlock(blocks.StructBlock):
    """Stream block that renders a centred title on the page."""

    text = blocks.CharBlock(
        required=True,
        # Bug fix: was "elp_text", which is not a valid CharBlock keyword.
        help_text='Tekst do wyświetlenia',
    )

    class Meta:
        template = 'streams/title_block.html'
        # NOTE(review): "edycja" is not a standard Wagtail icon name — confirm.
        icon = 'edycja'
        label = 'Tytuł'
        help_text = 'Wyśrodkowany tekst do wyświetlenia na stronie.'
class Cards Block(bloc)
|
[
"rucinska.patrycja@gmail.com"
] |
rucinska.patrycja@gmail.com
|
b2792f1e2803d83a7bd248ee8ea357b94ed0badb
|
8c6816435093cb8e9e45593d3ffdd67028a011b6
|
/Tree/is_valid_bst.py
|
7a38d7d17976d7e01ce6e8b7a4be9e91ad0e5208
|
[] |
no_license
|
Keeady/daily-coding-challenge
|
6ee74a5fe639a1f5b4753dd4848d0696bef15c28
|
31eebbf4c1d0eb88a00f71bd5741adf5e07d0e94
|
refs/heads/master
| 2020-03-27T07:58:05.713290
| 2019-03-08T15:03:05
| 2019-03-08T15:03:05
| 146,210,027
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 572
|
py
|
import sys
class Solution:
    def isValidBST(self, root):
        """Return True if the tree rooted at *root* is a valid binary search tree."""
        if not root:
            return True
        # Start with the widest possible open interval.
        return self.isValidSubTree(root, -sys.maxsize - 1, sys.maxsize)

    def isValidSubTree(self, root, min, max):
        """Check that every value in the subtree lies strictly inside (min, max)."""
        if root is None:
            return True
        if not (min < root.val < max):
            return False
        # Left values are capped by this node; right values are floored by it.
        return (self.isValidSubTree(root.left, min, root.val)
                and self.isValidSubTree(root.right, root.val, max))
# left child > parent's min and < parent.val
# right child > parent.val and < max from parent
#
|
[
"cbevavy@datto.com"
] |
cbevavy@datto.com
|
c6dfc35a86ffd65768a932003f5e30de191624e6
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/YDgtdP69Mn9pC73xN_7.py
|
204a19d7df1cb840639dc9daeacb8f81fa7f26df
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,820
|
py
|
"""
This challenge is based on the game Minesweeper.
Create a function that takes a grid of `#` and `-`, where each hash (#)
represents a mine and each dash (-) represents a mine-free spot. Return a list
where each dash is replaced by a digit indicating the number of mines
immediately adjacent to the spot (horizontally, vertically, and diagonally).
### Examples
num_grid([
["-", "-", "-", "-", "-"],
["-", "-", "-", "-", "-"],
["-", "-", "#", "-", "-"],
["-", "-", "-", "-", "-"],
["-", "-", "-", "-", "-"]
]) ➞ [
["0", "0", "0", "0", "0"],
["0", "1", "1", "1", "0"],
["0", "1", "#", "1", "0"],
["0", "1", "1", "1", "0"],
["0", "0", "0", "0", "0"],
]
num_grid([
["-", "-", "-", "-", "#"],
["-", "-", "-", "-", "-"],
["-", "-", "#", "-", "-"],
["-", "-", "-", "-", "-"],
["#", "-", "-", "-", "-"]
]) ➞ [
["0", "0", "0", "1", "#"],
["0", "1", "1", "2", "1"],
["0", "1", "#", "1", "0"],
["1", "2", "1", "1", "0"],
["#", "1", "0", "0", "0"]
]
num_grid([
["-", "-", "-", "#", "#"],
["-", "#", "-", "-", "-"],
["-", "-", "#", "-", "-"],
["-", "#", "#", "-", "-"],
["-", "-", "-", "-", "-"]
]) ➞ [
["1", "1", "2", "#", "#"],
["1", "#", "3", "3", "2"],
["2", "4", "#", "2", "0"],
["1", "#", "#", "2", "0"],
["1", "2", "2", "1", "0"],
]
### Notes
N/A
"""
def num_grid(lst):
    """Replace each '-' cell with the count of adjacent '#' mines, in place.

    Adjacency is the 8-cell neighborhood (horizontal, vertical, diagonal).
    '#' cells are left untouched. Returns the (mutated) grid.

    The original implementation re-scanned the whole grid for every cell
    (O((n*m)^2)); this version inspects only the up-to-8 neighbors.
    """
    rows = len(lst)
    for i in range(rows):
        for j in range(len(lst[i])):
            if lst[i][j] == '#':
                continue
            count = 0
            for di in (-1, 0, 1):
                for dj in (-1, 0, 1):
                    ni, nj = i + di, j + dj
                    # Only mines still marked '#' are counted, so in-place
                    # replacement of '-' cells cannot skew later counts.
                    if 0 <= ni < rows and 0 <= nj < len(lst[ni]) and lst[ni][nj] == '#':
                        count += 1
            lst[i][j] = str(count)
    return lst
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
ede9675c9b6afbc4a54e4081f519e4ef3376ae81
|
e65d16ea1e8d412bac75a809be6d390126bdf528
|
/tests/components/remote/test_device_action.py
|
d652f4d869d060039e222b8a6d3d595b5bec83bd
|
[
"Apache-2.0"
] |
permissive
|
syssi/home-assistant
|
6347d57866cb16ab9d4499ad38e2be6f0399077f
|
fd43687833741b21221769d46b4d1ecef8a94711
|
refs/heads/dev
| 2023-08-17T09:31:52.680518
| 2023-06-11T14:22:12
| 2023-06-11T14:22:12
| 97,874,495
| 6
| 16
|
Apache-2.0
| 2023-09-13T06:31:21
| 2017-07-20T20:12:37
|
Python
|
UTF-8
|
Python
| false
| false
| 6,168
|
py
|
"""The test for remote device automation."""
import pytest
import homeassistant.components.automation as automation
from homeassistant.components.device_automation import DeviceAutomationType
from homeassistant.components.remote import DOMAIN
from homeassistant.const import CONF_PLATFORM, STATE_OFF, STATE_ON, EntityCategory
from homeassistant.core import HomeAssistant
from homeassistant.helpers import device_registry as dr, entity_registry as er
from homeassistant.helpers.entity_registry import RegistryEntryHider
from homeassistant.setup import async_setup_component
from tests.common import (
MockConfigEntry,
assert_lists_same,
async_get_device_automations,
async_mock_service,
)
# Applied automatically to every test in this module.
@pytest.fixture(autouse=True, name="stub_blueprint_populate")
def stub_blueprint_populate_autouse(stub_blueprint_populate: None) -> None:
    """Stub copying the blueprints to the config folder."""
@pytest.fixture
def calls(hass):
    """Track calls to a mock service."""
    # Returns the list that accumulates every call made to test.automation.
    return async_mock_service(hass, "test", "automation")
async def test_get_actions(
    hass: HomeAssistant,
    device_registry: dr.DeviceRegistry,
    entity_registry: er.EntityRegistry,
) -> None:
    """Test we get the expected actions from a remote."""
    # Register a device backed by a mock config entry, then attach a
    # remote entity to it so device automations can be resolved.
    config_entry = MockConfigEntry(domain="test", data={})
    config_entry.add_to_hass(hass)
    device_entry = device_registry.async_get_or_create(
        config_entry_id=config_entry.entry_id,
        connections={(dr.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
    )
    entity_registry.async_get_or_create(
        DOMAIN, "test", "5678", device_id=device_entry.id
    )
    # A visible remote entity exposes all three actions, not marked secondary.
    expected_actions = [
        {
            "domain": DOMAIN,
            "type": action,
            "device_id": device_entry.id,
            "entity_id": f"{DOMAIN}.test_5678",
            "metadata": {"secondary": False},
        }
        for action in ["turn_off", "turn_on", "toggle"]
    ]
    actions = await async_get_device_automations(
        hass, DeviceAutomationType.ACTION, device_entry.id
    )
    assert_lists_same(actions, expected_actions)
@pytest.mark.parametrize(
    ("hidden_by", "entity_category"),
    (
        (RegistryEntryHider.INTEGRATION, None),
        (RegistryEntryHider.USER, None),
        (None, EntityCategory.CONFIG),
        (None, EntityCategory.DIAGNOSTIC),
    ),
)
async def test_get_actions_hidden_auxiliary(
    hass: HomeAssistant,
    device_registry: dr.DeviceRegistry,
    entity_registry: er.EntityRegistry,
    hidden_by,
    entity_category,
) -> None:
    """Test we get the expected actions from a hidden or auxiliary entity."""
    config_entry = MockConfigEntry(domain="test", data={})
    config_entry.add_to_hass(hass)
    device_entry = device_registry.async_get_or_create(
        config_entry_id=config_entry.entry_id,
        connections={(dr.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
    )
    # Register the entity either hidden or with a non-primary category.
    entity_registry.async_get_or_create(
        DOMAIN,
        "test",
        "5678",
        device_id=device_entry.id,
        entity_category=entity_category,
        hidden_by=hidden_by,
    )
    # Hidden/auxiliary entities still expose all actions, but flagged secondary.
    expected_actions = []
    expected_actions += [
        {
            "domain": DOMAIN,
            "type": action,
            "device_id": device_entry.id,
            "entity_id": f"{DOMAIN}.test_5678",
            "metadata": {"secondary": True},
        }
        for action in ["turn_off", "turn_on", "toggle"]
    ]
    actions = await async_get_device_automations(
        hass, DeviceAutomationType.ACTION, device_entry.id
    )
    assert_lists_same(actions, expected_actions)
async def test_action(
    hass: HomeAssistant, calls, enable_custom_integrations: None
) -> None:
    """Test for turn_off, turn_on and toggle device actions."""
    # Set up the test remote platform with its mock entities.
    platform = getattr(hass.components, f"test.{DOMAIN}")
    platform.init()
    assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
    await hass.async_block_till_done()
    ent1, ent2, ent3 = platform.ENTITIES
    # Wire three automations — one per action type — to distinct events.
    assert await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: [
                {
                    "trigger": {"platform": "event", "event_type": "test_event1"},
                    "action": {
                        "domain": DOMAIN,
                        "device_id": "",
                        "entity_id": ent1.entity_id,
                        "type": "turn_off",
                    },
                },
                {
                    "trigger": {"platform": "event", "event_type": "test_event2"},
                    "action": {
                        "domain": DOMAIN,
                        "device_id": "",
                        "entity_id": ent1.entity_id,
                        "type": "turn_on",
                    },
                },
                {
                    "trigger": {"platform": "event", "event_type": "test_event3"},
                    "action": {
                        "domain": DOMAIN,
                        "device_id": "",
                        "entity_id": ent1.entity_id,
                        "type": "toggle",
                    },
                },
            ]
        },
    )
    await hass.async_block_till_done()
    assert hass.states.get(ent1.entity_id).state == STATE_ON
    assert len(calls) == 0
    # turn_off is idempotent: firing twice leaves the entity off.
    hass.bus.async_fire("test_event1")
    await hass.async_block_till_done()
    assert hass.states.get(ent1.entity_id).state == STATE_OFF
    hass.bus.async_fire("test_event1")
    await hass.async_block_till_done()
    assert hass.states.get(ent1.entity_id).state == STATE_OFF
    # turn_on is idempotent as well.
    hass.bus.async_fire("test_event2")
    await hass.async_block_till_done()
    assert hass.states.get(ent1.entity_id).state == STATE_ON
    hass.bus.async_fire("test_event2")
    await hass.async_block_till_done()
    assert hass.states.get(ent1.entity_id).state == STATE_ON
    # toggle flips the state on every invocation.
    hass.bus.async_fire("test_event3")
    await hass.async_block_till_done()
    assert hass.states.get(ent1.entity_id).state == STATE_OFF
    hass.bus.async_fire("test_event3")
    await hass.async_block_till_done()
    assert hass.states.get(ent1.entity_id).state == STATE_ON
|
[
"noreply@github.com"
] |
syssi.noreply@github.com
|
3a15a5b18a062442591358d0a5eb5d0c26f7290e
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/MojpPTZYQyN5L2i4a_5.py
|
82668008b83f18c70018cafee43b7e375d184f1a
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 775
|
py
|
"""
You work in a toy car workshop, and your job is to build toy cars from a
collection of parts. Each toy car needs 4 wheels, 1 car body, and 2 figures of
people to be placed inside. Given the total number of wheels, car bodies and
figures available, how many _complete_ toy cars can you make?
### Examples
cars(2, 48, 76) ➞ 0
# 2 wheels, 48 car bodies, 76 figures
cars(43, 15, 87) ➞ 10
cars(88, 37, 17) ➞ 8
### Notes
N/A
"""
def cars(wheels, bodies, figures):
    """Return how many complete toy cars can be assembled.

    Each car consumes 4 wheels, 1 car body and 2 figures, so the count is
    bounded by whichever part runs out first.

    :param wheels: number of wheels available
    :param bodies: number of car bodies available
    :param figures: number of figures available
    :return: number of complete cars (never negative)
    """
    # Closed-form count instead of simulating assembly car by car;
    # max(0, ...) keeps the result at zero for (nonsensical) negative inputs,
    # matching the original loop's behavior.
    return max(0, min(wheels // 4, bodies, figures // 2))
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
cdd263ea94ba391933c5aa44be7a4aad74ca9bdb
|
f854751c12afc48401ddcf0590ea70c72a2b7c58
|
/Canny_findContours.py
|
a1faaabc01f51f5dea466c49023c70c3d4fcf9d0
|
[] |
no_license
|
mu-777/image_processing_tests
|
4326ebe3b321f9b900a02b63d0a3189659439244
|
2b6ee72e10065fd2dfb1b7e430bf5ccfc26a0c95
|
refs/heads/master
| 2021-01-17T10:21:09.473142
| 2016-03-18T13:00:41
| 2016-03-18T13:00:41
| 42,449,268
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,260
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# アニメ顔分類器
# https://github.com/nagadomi/lbpcascade_animeface
# 動画で検出サンプル
# http://www.takunoko.com/blog/python%E3%81%A7%E9%81%8A%E3%82%93%E3%81%A7%E3%81%BF%E3%82%8B-part1-opencv%E3%81%A7%E9%A1%94%E8%AA%8D%E8%AD%98/
import cv2
import numpy as np
import time
# カスケード分類器の特徴量を取得する
CASCADE_PATH = "./cascade/lbpcascade_animeface.xml"
IN_IMG_PATHS = ["./test_imgs/face_detecting" + str(i + 1) + ".png" for i in range(9)]
OVERLAY_IMG_PATH = "./test_imgs/face_up5.jpg"
OUT_IMG_PATH = "./test_imgs/face_detecting_out.png"
overlay_color = (0, 187, 254)
rect_color = (0, 0, 0)
def check_img(img):
    # Debug helper: show *img* in a window and block until a key is pressed.
    cv2.imshow('a', img)
    cv2.waitKey(0)
def cutoff_hsv(src_img, diff_threshold=6):
    """Keep only pixels whose hue is close to the center pixel's hue.

    Returns a new BGR image in which pixels whose hue differs from the
    central pixel's hue by diff_threshold or more are left black.
    """
    (h, w) = src_img.shape[:2]
    hsv_img = cv2.cvtColor(src_img, cv2.COLOR_BGR2HSV)
    ret_img = np.zeros((h, w, 3), np.uint8)
    # Bug fix: integer floor-division — the original h / 2.0 produced float
    # indices, which numpy rejects with an IndexError.
    (c_h, c_s, c_v) = hsv_img[h // 2, w // 2]
    for i, j in [(i, j) for i in range(h) for j in range(w)]:
        # Renamed from (h, s, v): the original shadowed the image height.
        (hue, s, v) = hsv_img[i, j]
        # NOTE(review): uint8 subtraction wraps around; since hue is a
        # circular quantity the wrap may even be intended — confirm.
        if abs(c_h - hue) < diff_threshold:
            ret_img[i, j] = src_img[i, j]
    return ret_img
def cutoff_rgb(src_img, diff_threshold=20):
    """Keep only pixels whose BGR color is close to the center pixel's color.

    A pixel is kept when every channel differs from the corresponding
    channel of the central pixel by less than diff_threshold; all other
    pixels are left black.
    """
    (h, w) = src_img.shape[:2]
    ret_img = np.zeros((h, w, 3), np.uint8)
    # Bug fix: integer floor-division — the original h / 2.0 produced float
    # indices, which numpy rejects with an IndexError.
    # Cast to signed int: uint8 subtraction wraps around, which wrongly
    # rejected pixels brighter than the center.
    center_color = src_img[h // 2, w // 2].astype(int)
    for i, j in [(i, j) for i in range(h) for j in range(w)]:
        deltas = center_color - src_img[i, j].astype(int)
        if all(abs(int(d)) < diff_threshold for d in deltas):
            ret_img[i, j] = src_img[i, j]
    return ret_img
def main(in_img_path):
    """Detect anime faces in the image at *in_img_path*, experiment with
    Canny-edge contour filling on each face region, and draw a rectangle
    around every detected face. Shows intermediate images interactively."""
    rgb_img = cv2.imread(in_img_path)
    cascade = cv2.CascadeClassifier(CASCADE_PATH)
    # Low minNeighbors/minSize: favors recall over precision.
    faces = cascade.detectMultiScale(cv2.cvtColor(rgb_img, cv2.COLOR_BGR2GRAY),
                                     scaleFactor=1.1, minNeighbors=1, minSize=(1, 1))
    if len(faces) > 0:
        # Create a rectangle enclosing each detected face.
        for (x, y, w, h) in faces:
            print(w, h)
            # Crop the face region (a view into rgb_img, so draws below
            # also modify the full image).
            over_img_temp = rgb_img[y:y + h, x:x + w]
            gray = cv2.cvtColor(over_img_temp, cv2.COLOR_BGR2GRAY)
            gray_smooth = cv2.GaussianBlur(gray, (5, 5), 0)
            # edge_img = cv2.Canny(gray_smooth, 1000, 1500, apertureSize=5)
            edge_img = cv2.Canny(gray_smooth, 1600, 1600, apertureSize=5)
            check_img(edge_img)
            # Thicken edges so contours close into fillable regions.
            dilated_img = cv2.dilate(edge_img, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (2, 2)), iterations=3)
            check_img(dilated_img)
            # cv2.imwrite('./'+str(x)+'dilated_img.jpg', dilated_img)
            contours, hierarchy = cv2.findContours(dilated_img, cv2.RETR_TREE,
                                                   cv2.CHAIN_APPROX_SIMPLE)
            c_len = len(contours)
            contours.reverse()
            # Fill each contour with a green shade encoding its order.
            for i, contour in enumerate(contours):
                cv2.drawContours(over_img_temp, [contour], -1, (0, 255 * float(i) / c_len, 0), thickness=-1)
            check_img(over_img_temp)
            # cv2.imwrite('./'+str(x)+'over_img.jpg', over_img_temp)
            # contour_img = over_img_temp.copy()
            # for i, contour in enumerate(contours):
            # arclen = cv2.arcLength(contour, True)
            # approx = cv2.approxPolyDP(contour, 0.02 * arclen, True)
            # cv2.drawContours(contour_img, [approx], -1,
            # (0, 0, 255 * (1 - float(i) / len(contours))), 2)
            # check_img(contour_img)
            # contour = reduce(lambda c1, c2: np.r_[c1, c2], contours)
            # cv2.fillConvexPoly(over_img_temp, contour, (255, 0, 0))
            # for contour in contours:
            # if len(contour) > 10:
            # box = cv2.fitEllipse(contour)
            # cv2.ellipse(over_img_temp, box, (255, 255, 0), 2)
            # check_img(over_img_temp)
            # over_img_temp = cutoff_rgb(x, y, w, h)
            # over_img_temp = cutoff_hsv(x, y, w, h)
            # kernel_l = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (25, 25))
            # kernel_m = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
            # kernel_s = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (2, 2))
            # ret, binary_img = cv2.threshold(over_img_temp, 130, 255, cv2.THRESH_BINARY)
            # first = cv2.dilate(binary_img, kernel_l)
            # second = cv2.erode(first, kernel_s, iterations=5)
            # first = cv2.dilate(binary_img, kernel_l)
            # second = cv2.erode(first, kernel_s, iterations=5)
            # check_img(binary_img)
            # check_img(first)
            # check_img(second)
            # gray = cv2.cvtColor(over_img_temp, cv2.COLOR_BGR2GRAY)
            # gray_smooth = cv2.GaussianBlur(gray, (31, 31), 0)
            # ret, th1 = cv2.threshold(gray_smooth, 130, 255, cv2.THRESH_BINARY)
            # contours, hierarchy = cv2.findContours(th1, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
            # cv2.drawContours(over_img_temp, contours, 0, overlay_color, thickness=5)
            cv2.rectangle(rgb_img, (x, y), (x + w, y + h), (0, 187, 254), thickness=7)
    # cv2.imwrite(out_img_path, rgb_img)
# --------------------------------------------
if __name__ == '__main__':
    # Run the face-detection pipeline over every test image.
    for img_path in IN_IMG_PATHS:
        main(img_path)
|
[
"ryosuke.murata.7@gmail.com"
] |
ryosuke.murata.7@gmail.com
|
38345091f0f4fc6d048048415eef8af7eff32537
|
914ca4921c114c917267214e0987ebecf30b3510
|
/Programming_Practice/Python/Python_Scraping/Scraping_004/open_api_2.py
|
0729766be8fd65dbcdf4e6bca7e239c092a0c883
|
[] |
no_license
|
BurnFaithful/KW
|
52535030ea57f1489a0d108d599b66ffee50a1f4
|
15deb50449b8f902f623f20b97448c0f473a9342
|
refs/heads/master
| 2022-12-20T16:06:01.827398
| 2020-09-12T08:51:23
| 2020-09-12T08:51:23
| 294,897,186
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,353
|
py
|
from urllib.parse import quote
import requests
import bs4
# Korean public-data pharmacy listing API endpoint.
endpoint = "http://apis.data.go.kr/B552657/ErmctInsttInfoInqireService/getParmacyListInfoInqire?"
# NOTE(review): API service key hard-coded in source — should be moved to
# an environment variable / config file, and rotated if this was published.
serviceKey = "xqhT19uqLKmUuUxiUk6By%2FkUkZHlqQfalicqhc3oYnPy4KoA%2FK%2BM8EQVYGOaBBtfRMfqs6SQ1ei%2F8VPZgE6VlA%3D%3D"
Q0 = quote("서울특별시")  # URL-encode the Korean region name ("Seoul")
# Q1 = quote("강남구")
# QT = "1"
# QN = quote("삼성약국")
ORD = "NAME"
pageNo = "1"
startPage = "1"
numOfRows = "5000"
pageSize = "10"
# Build the query string by hand (the key is already percent-encoded,
# so requests' params= mechanism is not used here).
paramset = "serviceKey=" + serviceKey \
           + "&numOfRows=" + numOfRows \
           + "&pageSize=" + pageSize \
           + "&pageNo=" + pageNo \
           + "&startPage=" + startPage \
           + "&Q0=" + Q0 \
           + "&ORD=" + ORD #\
           # + "&Q1=" + Q1 \
           # + "&QT=" + QT \
           # + "&QN=" + QN \
           # + "&_type=json"
url = endpoint + paramset
print(url)
result = requests.get(url)
bs_obj = bs4.BeautifulSoup(result.content, "html.parser")
# print(bs_obj)
items = bs_obj.findAll("item")
# Count pharmacies whose Monday closing time ("dutytime1c") is after 21:00.
count = 0
for item in items:
    tagged_item = item.find("dutytime1c")
    if tagged_item != None:
        close_time = int(tagged_item.text)
        if close_time > 2100:
            count += 1
            print(item.find("dutyname").text)
            # print(tagged_item)
# print("서울특별시 내 월요일 9시 이후까지 하는 약국의 수 : " + str(count))
|
[
"burnfaithful@gmail.com"
] |
burnfaithful@gmail.com
|
b78217f75d6e278638d0e9c281e604eab9e625fd
|
a8750439f200e4efc11715df797489f30e9828c6
|
/LeetCodeContests/93/871_minimum_refill_required.py
|
8573ce433e9d77f48451ccfa85747b65c8a1c2b3
|
[] |
no_license
|
rajlath/rkl_codes
|
f657174305dc85c3fa07a6fff1c7c31cfe6e2f89
|
d4bcee3df2f501349feed7a26ef9828573aff873
|
refs/heads/master
| 2023-02-21T10:16:35.800612
| 2021-01-27T11:43:34
| 2021-01-27T11:43:34
| 110,989,354
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,408
|
py
|
'''
871. Minimum Number of Refueling Stops
User Accepted: 234
User Tried: 563
Total Accepted: 247
Total Submissions: 1546
Difficulty: Hard
A car travels from a starting position to a destination which is target miles east of the starting position.
Along the way, there are gas stations. Each station[i] represents a gas station that is station[i][0] miles east
of the starting position, and has station[i][1] liters of gas.
The car starts with an infinite tank of gas, which initially has startFuel liters of fuel in it.
It uses 1 liter of gas per 1 mile that it drives.
When the car reaches a gas station, it may stop and refuel, transferring all the gas from the station into the car.
What is the least number of refueling stops the car must make in order to reach its destination?
If it cannot reach the destination, return -1.
Note that if the car reaches a gas station with 0 fuel left, the car can still refuel there.
If the car reaches the destination with 0 fuel left, it is still considered to have arrived.
Example 1:
Input: target = 1, startFuel = 1, stations = []
Output: 0
Explanation: We can reach the target without refueling.
Example 2:
Input: target = 100, startFuel = 1, stations = [[10,100]]
Output: -1
Explanation: We can't reach the target (or even the first gas station).
Example 3:
Input: target = 100, startFuel = 10, stations = [[10,60],[20,30],[30,30],[60,40]]
Output: 2
Explanation:
We start with 10 liters of fuel.
We drive to position 10, expending 10 liters of fuel. We refuel from 0 liters to 60 liters of gas.
Then, we drive from position 10 to position 60 (expending 50 liters of fuel),
and refuel from 10 liters to 50 liters of gas. We then drive to and reach the target.
We made 2 refueling stops along the way, so we return 2.
Note:
1 <= target, startFuel, stations[i][1] <= 10^9
0 <= stations.length <= 500
0 < stations[0][0] < stations[1][0] < ... < stations[stations.length-1][0] < target
#solution by https://leetcode.com/yangzhenjian
class Solution:
def minRefuelStops(self, target, startFuel, stations):
"""
:type target: int
:type startFuel: int
:type stations: List[List[int]]
:rtype: int
"""
stations.append([target, 0])
n = len(stations)
MINF = - 10 ** 15
dp = [startFuel] + [MINF] * n
px = 0
for i, (x, f) in enumerate(stations, 1):
dp_next = [MINF] * (n + 1)
for j in range(i + 1):
if dp[j] >= x - px:
dp_next[j] = dp[j] - (x - px)
if j > 0 and dp[j-1] >= x - px:
dp_next[j] = max(dp_next[j], dp[j-1] - (x - px) + f)
px = x
dp = dp_next
for j in range(n):
if dp[j] >= 0:
return j
return -1
# cpp solutin by https://leetcode.com/shdut
#include <iostream>
#include <string>
#include <stdio.h>
#include <string.h>
#include <vector>
#include <ctime>
#include <set>
#include <map>
#include <unordered_map>
#include <queue>
#include <algorithm>
#include <cmath>
#include <assert.h>
using namespace std;
#define vi vector<int>
#define pii pair<int,int>
#define x first
#define y second
#define all(x) x.begin(),x.end()
#define pb push_back
#define mp make_pair
#define SZ(x) int(x.size())
#define rep(i,a,b) for(int i=a;i<b;i++)
#define per(i,a,b) for(int i=b-1;i>=a;i--)
#define pi acos(-1)
#define mod 998244353 //1000000007
#define inf 1000000007
#define ll long long
#define DBG(x) cerr<<(#x)<<"="<<x<<"\n";
#define N 100010
template <class U,class T> void Max(U &x, T y){if(x<y)x=y;}
template <class U,class T> void Min(U &x, T y){if(x>y)x=y;}
template <class T> void add(int &a,T b){a=(a+b)%mod;}
int pow(int a,int b){
int ans=1;
while(b){
if(b&1)ans=1LL*ans*a%mod;
a=1LL*a*a%mod;b>>=1;
}
return ans;
}
pii a[510];
ll dp[510][510];
class Solution {
public:
int minRefuelStops(int target, int startFuel, vector<vector<int>>& stations) {
int sz=0;
a[sz++]={0,startFuel};
for(auto &o:stations)a[sz++]={o[0],o[1]};
a[sz++]={target,0};
rep(i,0,sz)rep(j,0,i+1)dp[i][j]=-1;
dp[0][0]=0;
rep(i,0,sz-1){
rep(j,0,i+1){
if(dp[i][j]>=0){
ll w=dp[i][j]-(a[i+1].x-a[i].x);
if(w>=0)Max(dp[i+1][j],w);
w+=a[i].y;
if(w>=0)Max(dp[i+1][j+1],w);
}
}
}
rep(i,0,sz)if(dp[sz-1][i]>=0)return i-1;
return -1;
}
};
'''
# my version of https://leetcode.com/shdut solution in cpp
# TLE solution
class Solution(object):
def minRefuelStops(self, target, startFuel, stations):
"""
:type target: int
:type startFuel: int
:type stations: List[List[int]]
:rtype: int
"""
size = 0
a = []
a.append((0, startFuel))
for x in stations:a.append((x[0],x[1]))
a.append((target, 0))
size = len(a)
dp = [[-1 for x in range(size+1)] for y in range(size+1)]
dp[0][0] = 0
for i in range(size-1):
for j in range(i+1):
if dp[i][j] >= 0:
w = dp[i][j] - (a[i+1][0] - a[i][0])
if w >= 0:dp[i+1][j] = max(dp[i+1][j], w)
w += a[i][1]
if w >= 0:dp[i+1][j+1] = max(dp[i+1][j+1],w)
for i in range(size):
if dp[size-1][i] >=0:return i-1
return -1
#awice solution
#
class Solution(object):
def minRefuelStops(self, target, startFuel, stations):
dp = [startFuel] + [0] * len(stations)
for i, (location, capacity) in enumerate(stations):
for t in xrange(i, -1, -1):
if dp[t] >= location:
dp[t+1] = max(dp[t+1], dp[t] + capacity)
for i, d in enumerate(dp):
if d >= target: return i
return -1
class Solution(object):
def refuelStops(self, target, tank, stations):
pq = [] # A maxheap is simulated using negative values
stations.append((target, float('inf')))
ans = prev = 0
for location, capacity in stations:
tank -= location - prev
while pq and tank < 0: # must refuel in past
tank += -heapq.heappop(pq)
ans += 1
if tank < 0: return -1
heapq.heappush(pq, -capacity)
prev = location
return ans
print(Solution().minRefuelStops(1, 1, []))
|
[
"raj.lath@gmail.com"
] |
raj.lath@gmail.com
|
95223a9cb75f866ad335207924223e191993dc69
|
71e18daf9e567792a6ce1ae243ba793d1c3527f0
|
/ApplicationsAlgo/maze.py
|
67d5c62ac216e8e824085d0bc616c55b195917da
|
[] |
no_license
|
ohjooyeong/python_algorithm
|
67b18d92deba3abd94f9e239227acd40788140aa
|
d63d7087988e61bc72900014b0e72603d0150600
|
refs/heads/master
| 2020-06-22T18:10:41.613155
| 2020-05-27T13:12:03
| 2020-05-27T13:12:03
| 197,767,146
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,439
|
py
|
def solve_maze(g, start, end):
qu = [] # 기억 장소 1: 앞으로 처리해야할 이동 경로를 큐에 저장
done = set() # 기억 장소 2: 이미 큐에 추가한 꼭짓점들을 집합에 기록
qu.append(start)
done.add(start)
while qu: #큐에 처리할 경로가 남아있으면
p = qu.pop(0) # 큐에서 처리 대상을 꺼냄
v = p[-1] # 큐에 저장된 이동 경로의 마지막 문자가 현재 처리해야할 꼭짓점
print('---')
print('p :', p)
print('v :', v)
if v == end: # 처리해야할 꼭짓점이 도착점이면 종료
return p #지금까지의 전체 이동 경로를 돌려주고 종료
for x in g[v]: # 대상 꼭짓점에 연결된 꼭짓점들 중에
if x not in done: # 아직 큐에 추가된 적이 없는 꼭짓점을
qu.append(p + x) # 이동 경로에 새 꼭짓점으로 추가하여 큐에 저장하고
done.add(x) #집합에도 추가
print('qu: ', qu)
return "?"
maze = {
'a': ['e'],
'b': ['c', 'f'],
'c': ['b', 'd'],
'd': ['c'],
'e': ['a', 'i'],
'f': ['b', 'g', 'j'],
'g': ['f', 'h'],
'h': ['g', 'l'],
'i': ['e', 'm'],
'j': ['f', 'k', 'n'],
'k': ['j', 'o'],
'l': ['h', 'p'],
'm': ['i', 'n'],
'n': ['m', 'j'],
'o': ['k'],
'p': ['l']
}
print(solve_maze(maze, 'a', 'p'))
|
[
"brb1111@naver.com"
] |
brb1111@naver.com
|
e507584e2e9212c610c211af0fbe2ff4b93f932e
|
5173c3e3956387a3f2ae8fcf4aed7c7a600dac78
|
/Programmers/Programmers_카펫.py
|
3db67116d2c77a7a87b7fe9e13797f0a6d27976f
|
[] |
no_license
|
ma0723/Min_Algorithm
|
df75f53f6e89b7817d4b52d686effb8236a4ddac
|
b02d1043008cb32e22daa9d4207b9a45f111d66f
|
refs/heads/master
| 2023-07-25T11:00:15.397093
| 2021-08-30T02:08:05
| 2021-08-30T02:08:05
| 375,613,927
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,003
|
py
|
def solution(brown, yellow):
answer = []
# 노란색과 갈색으로 색칠된 격자의 개수
y_lst = []
for i in range(1, yellow+1):
# 1부터 yellow 개수까지
if yellow%i==0:
# 노란색 직사각형 경우의 약수들의 집합 (나누어 떨어지는 경우)
# 24 (1, 24) (2, 12) (3, 8) (4, 6) 등 중복 제거
row = yellow//i
col = i
if row >= col:
# 가로 길이는 세로 길이와 같거나, 세로 길이보다 깁니다
y_lst.append([row, col])
for i in y_lst:
b_row = (i[0] + 2)*2
# 가로 양쪽 가로+2만큼 2번씩
b_col = i[1]*2
# 세로 양쪽 노란색 세로만큼 2번씩
if b_row + b_col == brown:
# 테두리 1줄은 갈색으로 칠해져 있는 격자 모양 카펫
answer = [b_row//2, b_col//2+2]
# 카펫의 가로(2로 나누기), 세로 크기(2로 나누고 위아래 +1씩 총 +2)
|
[
"ma0723@naver.com"
] |
ma0723@naver.com
|
ecdd5dd43ced15b6ba50c76d6d12a296b7c3b2dc
|
529e713a78e82de2ae5d44cfb8ef209e0894d72a
|
/arcade-platformer/arcade_platformer/01_game_skeleton.py
|
862af40f1113621b43a6afd046e81429f8a5f7f8
|
[
"MIT"
] |
permissive
|
realpython/materials
|
cd2f548276be2c82f134ca03eadb1cd279e0f26e
|
d2d62756d3854f54a12a767f2bf9470486c0ceef
|
refs/heads/master
| 2023-09-05T22:12:29.806738
| 2023-08-31T20:56:28
| 2023-08-31T20:56:28
| 132,374,697
| 4,678
| 6,482
|
MIT
| 2023-09-12T22:22:06
| 2018-05-06T20:46:18
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,217
|
py
|
"""
Arcade Platformer
Demonstrating the capbilities of arcade in a platformer game
Supporting the Arcade Platformer article on https://realpython.com
All game artwork from www.kenney.nl
Game sounds and tile maps by author
"""
import arcade
class Platformer(arcade.Window):
def __init__(self):
pass
def setup(self):
"""Sets up the game for the current level"""
pass
def on_key_press(self, key: int, modifiers: int):
"""Processes key presses
Arguments:
key {int} -- Which key was pressed
modifiers {int} -- Which modifiers were down at the time
"""
def on_key_release(self, key: int, modifiers: int):
"""Processes key releases
Arguments:
key {int} -- Which key was released
modifiers {int} -- Which modifiers were down at the time
"""
def on_update(self, delta_time: float):
"""Updates the position of all game objects
Arguments:
delta_time {float} -- How much time since the last call
"""
pass
def on_draw(self):
pass
if __name__ == "__main__":
window = Platformer()
window.setup()
arcade.run()
|
[
"jfincher42@gmail.com"
] |
jfincher42@gmail.com
|
10460745daf408d4f3cb18983ec6bad8fdd4a296
|
88bf6991bc8f291e16b792df729d58d7eeee1b2b
|
/proteome_tools_data/prep.py
|
ff3521cb31d81fe8771efab91ef33a15cf6b2e0c
|
[] |
no_license
|
MatteoLacki/proteome_tools_data
|
7ac3c335831911adab116cf6ce0bb08e785e733c
|
84101923f95787d1ac48e47101b94b22b3301667
|
refs/heads/master
| 2020-08-05T02:21:06.805640
| 2019-10-29T12:16:01
| 2019-10-29T12:16:01
| 212,360,930
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,719
|
py
|
"""Prepartion of files done on Linux. Execution on Windows."""
from pathlib import Path, PureWindowsPath as PWP
import pandas as pd
import json
import re
pd.set_option('display.max_rows', 4)
pd.set_option('display.max_columns', 100)
pd.set_option('display.max_colwidth', -1)#display whole column without truncation
net = Path("/mnt/ms/restoredData/proteome_tools/net/")
ms_win = Path("//MSSERVER")
def iter_raw_folders(net):
for comp in ("idefix", "synapt"):
yield from net.glob('{}/WIRD_GESICHERT/*/*.raw'.format(comp))
res = pd.DataFrame({'Raw_File': p.stem,
'path': str(ms_win/"/".join(p.parts[3:]))
} for p in iter_raw_folders(net))
# project description
data_on_project = Path('/home/matteo/Projects/proteome_tools')
plates = pd.read_excel(data_on_project/"Sample_RAWFile_List.xlsx")
plates.columns = [p.replace(' ','_') for p in plates.columns]
plates = plates.iloc[:,0:4]
DDA = plates[plates.MS_Methode.str.contains('DDA')]
plates = plates[~plates.MS_Methode.str.contains('DDA')].copy()
def pad(s, k, v='0'):
"""Pad stringv s to the left with v to match length k."""
return v*(k-len(s)) + s
def get_fasta_file(s):
"""Get the name of the fasta file from the sample name."""
f = s.split('-')[-1]
return pad(f, 3) + '.fasta'
fastas_pool_1 = ms_win/"restoredData/proteome_tools/automation/db_jorg_pool1"
fastas_pool_2 = ms_win/"restoredData/proteome_tools/automation/db_jorg_pool2"
fasta_paths = {'Pools Plate 1': fastas_pool_1,
'Pools Plate 2': fastas_pool_2,
'missing first Plate 2': Path(''),
'Second Pool Plate 1': ms_win/"restoredData/proteome_tools/automation/db_jorg_pool2",
'Second Pool Plate 2': ms_win/"restoredData/proteome_tools/automation/db_jorg_pool2",
'Third Pool Plate 2': Path('')}
plates['parsed_name'] = [re.sub(' \d\d\d\d-\d\d\d-\d+','', sn).replace('TUM ','') for sn in plates.Sample_Name]
counts = Counter(plates.parsed_name)
plates['fasta_file'] = plates.Sample_Name.apply(get_fasta_file)
plates['fasta_fold'] = plates.parsed_name.map(fasta_paths)
plates['fasta_file'] = [ff/f for ff, f in zip(plates.fasta_fold, plates.fasta_file)]
plates = plates.merge(res, 'left', validate='one_to_one')
plates['top_fold'] = [Path(p).parent.name + '/' + Path(p).name for p in plates.path]
plates = plates.set_index('Raw_File')
pool1_bothplates = plates[plates.Sample_Name.str.contains('-054-')]
pool2_bothplates = plates[plates.Sample_Name.str.contains('-086-')]
db2 = set(p.name for p in Path("/mnt/ms/restoredData/proteome_tools/automation/db_jorg_pool2").glob("*.fasta"))
assert all(p.name in db2 for p in pool2_bothplates.fasta_file), "Some fasta files are missing."
# COMPARING WITH THE OLD LIST
# with (data_on_project/'plate1.json').open('r', encoding ="utf-8") as f:
# plate1 = json.load(f)
# analysed = {Path(p).stem for p,f in plate1}
# A = plates.loc[analysed]
# A_ok = A[A.Sample_Name.str.contains('-054-')]
# '127' in {Path(f).stem for f in A_ok.fasta_file}
# with (data_on_project/'good_files.json').open('w', encoding ="utf-8") as f:
# json.dump(list(A_ok.top_fold), f, indent=2)
pool1 = list(zip(pool1_bothplates.path, (str(f) for f in pool1_bothplates.fasta_file)))
pool2 = list(zip(pool2_bothplates.path, (str(f) for f in pool2_bothplates.fasta_file)))
with (data_on_project/'pool1.json').open('w', encoding ="utf-8") as f:
json.dump(pool1, f, indent=4)
with (data_on_project/'pool2.json').open('w', encoding ="utf-8") as f:
json.dump(pool2, f, indent=4)
net_folder = Path('/mnt/ms/users/Matteo/poligono')
# {Path(p).stem for p,f in pool2 if Path(p).stem[0] == 'S'}
# copy fasta files to the existing folders
|
[
"matteo.lacki@gmail.com"
] |
matteo.lacki@gmail.com
|
36452b274c24a8d85cfad2937c0a5943990eea13
|
32c56293475f49c6dd1b0f1334756b5ad8763da9
|
/google-cloud-sdk/lib/surface/access_context_manager/levels/update.py
|
f22234640a48085e7e67ec5bc155d8fda74563b6
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"MIT"
] |
permissive
|
bopopescu/socialliteapp
|
b9041f17f8724ee86f2ecc6e2e45b8ff6a44b494
|
85bb264e273568b5a0408f733b403c56373e2508
|
refs/heads/master
| 2022-11-20T03:01:47.654498
| 2020-02-01T20:29:43
| 2020-02-01T20:29:43
| 282,403,750
| 0
| 0
|
MIT
| 2020-07-25T08:31:59
| 2020-07-25T08:31:59
| null |
UTF-8
|
Python
| false
| false
| 2,453
|
py
|
# -*- coding: utf-8 -*- #
# Copyright 2018 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""`gcloud access-context-manager levels update` command."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.accesscontextmanager import levels as levels_api
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.accesscontextmanager import levels
from googlecloudsdk.command_lib.accesscontextmanager import policies
@base.ReleaseTracks(base.ReleaseTrack.GA)
class UpdateLevelsGA(base.UpdateCommand):
"""Update an existing access level."""
_API_VERSION = 'v1'
@staticmethod
def Args(parser):
UpdateLevelsGA.ArgsVersioned(parser, version='v1')
@staticmethod
def ArgsVersioned(parser, version='v1'):
levels.AddResourceArg(parser, 'to update')
levels.AddLevelArgs(parser, version=version)
levels.AddLevelSpecArgs(parser, version=version)
def Run(self, args):
client = levels_api.Client(version=self._API_VERSION)
level_ref = args.CONCEPTS.level.Parse()
policies.ValidateAccessPolicyArg(level_ref, args)
mapper = levels.GetCombineFunctionEnumMapper(version=self._API_VERSION)
combine_function = mapper.GetEnumForChoice(args.combine_function)
return client.Patch(
level_ref,
description=args.description,
title=args.title,
combine_function=combine_function,
basic_level_conditions=args.basic_level_spec)
@base.ReleaseTracks(base.ReleaseTrack.BETA)
class UpdateLevelsBeta(UpdateLevelsGA):
_API_VERSION = 'v1beta'
@staticmethod
def Args(parser):
UpdateLevelsGA.ArgsVersioned(parser, version='v1beta')
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class UpdateLevelsAlpha(UpdateLevelsGA):
_API_VERSION = 'v1alpha'
@staticmethod
def Args(parser):
UpdateLevelsGA.ArgsVersioned(parser, version='v1alpha')
|
[
"jonathang132298@gmail.com"
] |
jonathang132298@gmail.com
|
228600e7a43d9390eb83a4bc4a96de585a9e6f59
|
a7596165a29e5186bc6c4718e3b6e835939b105d
|
/desktop/libs/libsolr/src/libsolr/conf.py
|
dbc65d6d5d35fa9bcc38744608dcae449b9ad21d
|
[
"Apache-2.0"
] |
permissive
|
lockhart39/HueQualityAndIngestionApp
|
f0c778665f0fbe699ec30e0df5e9f3ed8a9c3384
|
c75e55a43a8bdeb7aa0f5bf2101ec72b01dcac1c
|
refs/heads/master
| 2021-08-20T00:31:29.481333
| 2017-11-27T19:22:16
| 2017-11-27T19:22:16
| 112,237,923
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,304
|
py
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.utils.translation import ugettext_lazy as _t
from desktop.lib.conf import Config, coerce_bool
from desktop.conf import default_ssl_validate
SSL_CERT_CA_VERIFY = Config(
key="ssl_cert_ca_verify",
help=_t("In secure mode (HTTPS), if Solr SSL certificates have to be verified against certificate authority"),
dynamic_default=default_ssl_validate,
type=coerce_bool
)
SOLR_ZK_PATH = Config(
key="solr_zk_path",
help=_t("Default path to Solr in ZooKeeper"),
default='/solr',
type=str
)
|
[
"cloudera@quickstart.cloudera"
] |
cloudera@quickstart.cloudera
|
c39fd1ca1f556f4ee4250827d5a78b0030d68abf
|
6da0547afcecb3444d0b429161e46bc5a38e14ab
|
/demo_project/urls.py
|
5e53e90908206cd1f6045e53f5f990be0f0c1e38
|
[] |
no_license
|
rashidhamid139/DjangoCustomAdmin
|
53e440071a31d3ab6cff308c404ba89cc40cd6a0
|
3b01f38201754ae1370684cafb58f4005e5c8b35
|
refs/heads/master
| 2022-06-17T10:15:34.213179
| 2020-05-11T15:12:09
| 2020-05-11T15:12:09
| 261,787,315
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 546
|
py
|
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('email/', include('sendemail.urls')),
path('posts/', include('posts.urls')),
path('', include('pages.urls')),
path('users/', include('users.urls')),
path('accounts/', include('allauth.urls')),
path('payment/', include('payment.urls'))
]
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"rashidhamid139@gmail.com"
] |
rashidhamid139@gmail.com
|
171cc337c57713480ffad953a757cb65ff9424ef
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03646/s301674634.py
|
c91b7b40947c4be6233524eaca5442719d6c84a0
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 317
|
py
|
import sys
def main():
input = sys.stdin.readline
K=int(input())
N=max(2,min(50,K))
ans=[i+K//N for i in range(N)]
m=K%N
for i in range(m):
for j in range(N):
if i==j: ans[j]+=N
else: ans[j]-=1
print(N)
print(*ans)
if __name__ == '__main__':
main()
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
e89f1bc3975c4e316d484945057a258863a59f6e
|
dd35833bead7de2f2ca7affd985ac0d345b2ab6e
|
/apps/useroperation/views.py
|
57349580d24bee20652e0332cd757dbe46a503ec
|
[] |
no_license
|
lhsheild/MxShop
|
df14c11aa7457f304194ff099a35869d83f0d9a7
|
811be4dad55284e737c80ebd4d00c079837393f2
|
refs/heads/master
| 2020-05-27T16:23:00.578686
| 2019-09-10T08:09:01
| 2019-09-10T08:09:01
| 188,130,934
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,877
|
py
|
from rest_framework.authentication import SessionAuthentication
from rest_framework.mixins import CreateModelMixin, DestroyModelMixin, ListModelMixin, RetrieveModelMixin
from rest_framework.permissions import IsAuthenticated
from rest_framework.viewsets import GenericViewSet, ModelViewSet
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
from utils.permissions import IsOwnerOrReadOnly
from .models import UserFav, UserLeavingMessage, UserAddress
from .serializers import UserFavSerializer, UserFavDetailSerializer, UserLeavingMessageSerializer, UserAddressSerializer
# Create your views here.
class UserFavViewset(CreateModelMixin, DestroyModelMixin, ListModelMixin, RetrieveModelMixin, GenericViewSet):
"""
list:获取用户收藏列表
retrieve:判断某个商品是否已经收藏
create:收藏商品
"""
# queryset = UserFav.objects.all()
permission_classes = (IsAuthenticated, IsOwnerOrReadOnly)
# serializer_class = UserFavSerializer
authentication_classes = (JSONWebTokenAuthentication, SessionAuthentication)
lookup_field = 'goods_id'
def perform_create(self, serializer): # 商品收藏数修改,也可以用信号量实现
instance = serializer.save()
goods = instance.goods
goods.fav_num += 1
goods.save()
def perform_destroy(self, instance): # 商品收藏数删减,也可以用信号量实现
goods = instance.goods
goods.fav_num -= 1
goods.save()
instance.delete()
def get_queryset(self):
return UserFav.objects.filter(user=self.request.user)
def get_serializer_class(self):
if self.action == 'list':
return UserFavDetailSerializer
elif self.action == 'create':
return UserFavSerializer
return UserFavSerializer
class LeavingMessageViewset(ListModelMixin, CreateModelMixin, RetrieveModelMixin, DestroyModelMixin, GenericViewSet):
"""
list:获取用户留言
create:添加留言
destroy:删除留言
"""
serializer_class = UserLeavingMessageSerializer
permission_classes = (IsAuthenticated, IsOwnerOrReadOnly)
authentication_classes = (JSONWebTokenAuthentication, SessionAuthentication)
def get_queryset(self):
return UserLeavingMessage.objects.filter(user=self.request.user)
class AddressViewset(ModelViewSet):
"""
收获地址管理
list:获取收货地址列表
create:新建收获地址
destroy:删除收货地址
update:更新收货地址
retrieve:获取详细收获地址
"""
serializer_class = UserAddressSerializer
permission_classes = (IsAuthenticated, IsOwnerOrReadOnly)
authentication_classes = (JSONWebTokenAuthentication, SessionAuthentication)
def get_queryset(self):
return UserAddress.objects.filter(user=self.request.user)
|
[
"lhsheild@yahoo.com"
] |
lhsheild@yahoo.com
|
56885001d9dcf999407005b8ba85693c10b09566
|
bd0dc9a8d24863f7353c4124ce7e3c6b25e94910
|
/test/test_sampling.py
|
e91fe37a33293f41c5d7049f2544d29df6d104b3
|
[
"BSD-3-Clause"
] |
permissive
|
qbektrix/profiling
|
6cdc7a07a10955e993988217a720509bd4b961c4
|
89d1bc572c2502e02aeb822134453fd8d228e526
|
refs/heads/master
| 2021-01-21T00:53:25.880636
| 2015-08-26T15:00:39
| 2015-08-26T15:00:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,110
|
py
|
# -*- coding: utf-8 -*-
from __future__ import division
import sys
import pytest
from profiling.sampling import SamplingProfiler
from profiling.sampling.samplers import ItimerSampler, TracingSampler
from utils import find_stats, spin
def spin_100ms():
spin(0.1)
def spin_500ms():
spin(0.5)
def _test_sampling_profiler(sampler):
profiler = SamplingProfiler(top_frame=sys._getframe(), sampler=sampler)
with profiler:
spin_100ms()
spin_500ms()
stat1 = find_stats(profiler.stats, 'spin_100ms')
stat2 = find_stats(profiler.stats, 'spin_500ms')
ratio = stat1.deep_hits / stat2.deep_hits
# 1:5 expaected, but tolerate (0.8~1.2):5
assert 0.8 <= ratio * 5 <= 1.2
@pytest.mark.flaky(reruns=10)
def test_itimer_sampler():
_test_sampling_profiler(ItimerSampler(0.0001))
@pytest.mark.flaky(reruns=10)
def test_tracing_sampler():
_test_sampling_profiler(TracingSampler(0.0001))
@pytest.mark.flaky(reruns=10)
def test_tracing_sampler_does_not_sample_too_often():
# pytest-cov cannot detect a callback function registered by
# :func:`sys.setprofile`.
class fake_profiler(object):
samples = []
@classmethod
def sample(cls, frame):
cls.samples.append(frame)
@classmethod
def count_and_clear_samples(cls):
count = len(cls.samples)
del cls.samples[:]
return count
sampler = TracingSampler(0.1)
sampler._profile(fake_profiler, None, None, None)
assert fake_profiler.count_and_clear_samples() == 1
sampler._profile(fake_profiler, None, None, None)
assert fake_profiler.count_and_clear_samples() == 0
spin(0.5)
sampler._profile(fake_profiler, None, None, None)
assert fake_profiler.count_and_clear_samples() == 1
def test_not_sampler():
with pytest.raises(TypeError):
SamplingProfiler(sampler=123)
def test_sample_1_depth():
frame = sys._getframe()
while frame.f_back is not None:
frame = frame.f_back
assert frame.f_back is None
profiler = SamplingProfiler()
profiler.sample(frame)
|
[
"sub@subl.ee"
] |
sub@subl.ee
|
0b7bab2ef4cad9a4121b6af03e00f73434566b2c
|
69be26f4fd44ed3bac1c9dd0941e435b2b2728af
|
/backend/products/urls.py
|
b93ad9e8922f0977eae8878c66380ab40a41d404
|
[] |
no_license
|
mahidulmoon/djreact-e-commerce
|
0078598ab2327e4ef5b992c1fd8f202aca4c705e
|
0fb7d50b408710d90af43db9326d9fff6b03804f
|
refs/heads/master
| 2023-02-15T10:55:14.830589
| 2020-05-20T16:45:27
| 2020-05-20T16:45:27
| 265,608,114
| 1
| 0
| null | 2021-01-06T02:55:27
| 2020-05-20T15:28:50
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 293
|
py
|
from django.urls import path,include
from rest_framework import routers
from .views import ShirtViewset,MobileViewset
router = routers.DefaultRouter()
router.register('shirtlist',ShirtViewset)
router.register('mobilelist',MobileViewset)
urlpatterns = [
path('',include(router.urls)),
]
|
[
"mahidulmoon@gmail.com"
] |
mahidulmoon@gmail.com
|
2c6e7507e4d055e36773baca642ec3c3dab8cc84
|
5a4d5ee624b375ece06fda1467afe18beb69c14b
|
/Algorithm/BOJ/12865.knapsack.py
|
2825339abbe9002cecb181cc88098d9982880547
|
[] |
no_license
|
Knightofcydonia51/TIL
|
cd10dab949659bc827118ee42b25d926336dce23
|
78d7e8617f4abed9932a557c12e68bd950f8230d
|
refs/heads/master
| 2022-12-26T00:10:06.262200
| 2022-05-26T01:12:32
| 2022-05-26T01:12:32
| 195,938,010
| 0
| 0
| null | 2022-12-16T01:03:09
| 2019-07-09T05:22:49
|
Python
|
UTF-8
|
Python
| false
| false
| 549
|
py
|
import sys
sys.stdin=open('12865.knapsack.txt')
N,K=map(int,input().split())
stuffs=[list(map(int,input().split())) for x in range(N)]
dp=[[0 for x in range(K+1)]for y in range(N+1)]
# N : 물건 개수 K : 배낭 크기
for k in range(1,N+1):
for j in range(K+1):
# j가 k번째 물건의 무게와 같아지기 시작할 때부터 끝까지
weight,value=stuffs[k-1]
if j>=stuffs[k-1][0]:
dp[k][j]=max(dp[k-1][j-weight]+value,dp[k-1][j])
else:
dp[k][j]=dp[k-1][j]
print(max(dp[-1]))
|
[
"leavingwill@gmail.com"
] |
leavingwill@gmail.com
|
f8191ca72ddf845194adef5e0ffa2088accb0580
|
a0784b1a66a6c1a89ee8a75e32cd48d2c168931b
|
/extras/tools/rst_tokens.py
|
9c3614127a376786a554d7277a8ff3cbca89df9c
|
[
"MIT"
] |
permissive
|
cltrudeau/purdy
|
ebe5d8b556dadc0a4eb04018826c066b83617f71
|
4ff2d5b33771266d46260ee9ba6503bb4895ab2f
|
refs/heads/master
| 2023-07-08T08:23:08.409053
| 2023-06-29T21:37:29
| 2023-06-29T21:37:29
| 210,162,520
| 10
| 3
|
MIT
| 2021-03-10T21:55:26
| 2019-09-22T14:40:17
|
Python
|
UTF-8
|
Python
| false
| false
| 645
|
py
|
#!/usr/bin/env python
import argparse
from pygments.lexers.markup import RstLexer
# =============================================================================
parser = argparse.ArgumentParser(description=('Prints out the tokens '
'generated by pygments.lexers.markup.RstLexer'))
parser.add_argument('files', type=str, nargs='+',
help='One or more file names to lex and parse')
args = parser.parse_args()
# --- Do the parsing
lexer = RstLexer()
with open(args.files[0]) as f:
contents = f.read()
for token, text in lexer.get_tokens(contents):
if text == '\n':
text = '\\n'
print('%s: %s' % (token, text))
|
[
"ctrudeau@arsensa.com"
] |
ctrudeau@arsensa.com
|
a30101b7f38ba1fddd661a493cdcfae8287a25d6
|
b6bcaae5169cf20a84edafae98ba649dab6fc67c
|
/crowdsourcing/migrations/0034_auto_20150817_2049.py
|
b473f2c71a406fed6d27fb1b2fe3e7e97180cfcc
|
[
"MIT"
] |
permissive
|
shriyanka/daemo-forum
|
b7eb84a46799d8c6bcb29a4f5c9996a3d2f40351
|
58c555f69208beedbb0c09f7b7d1e32ab741b2c5
|
refs/heads/master
| 2023-01-12T05:13:48.804930
| 2015-09-20T01:52:29
| 2015-09-20T01:52:29
| 40,193,653
| 1
| 0
|
MIT
| 2022-12-26T19:49:24
| 2015-08-04T15:42:57
|
Python
|
UTF-8
|
Python
| false
| false
| 409
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('crowdsourcing', '0033_templateitem_position'),
]
operations = [
migrations.AlterField(
model_name='templateitem',
name='position',
field=models.IntegerField(),
),
]
|
[
"vnarwal95@gmail.com"
] |
vnarwal95@gmail.com
|
0e39f0a5b1327cea53f1645eb0d644c1a1759991
|
64eb1cfb19e01c629c3ef7fc40fe4dd4dda078ff
|
/pyunitgrading/testrunner.py
|
3da1d1b15800f8a8b6123cde32e0ee8209a01f7d
|
[
"BSD-3-Clause"
] |
permissive
|
stevecassidy/pyunitgrading
|
2406942d6fb851a5576da0155cac7410687f5ff4
|
183677f89f385201dd54959b4bc84a6236cee59e
|
refs/heads/master
| 2023-03-15T17:21:13.564061
| 2022-03-03T23:38:03
| 2022-03-03T23:38:03
| 32,105,841
| 0
| 1
|
BSD-3-Clause
| 2023-03-14T01:00:56
| 2015-03-12T22:16:10
|
Python
|
UTF-8
|
Python
| false
| false
| 7,487
|
py
|
"""
run unit tests for a set of student submissions downloaded from iLearn (moodle)
"""
import unittest
import os
import traceback
import sys
import importlib
import imp
import multiprocessing
import re
import shutil
import csv
if sys.version_info < (3,0):
from ConfigParser import ConfigParser
else:
from configparser import ConfigParser
import subprocess
import datetime
from pyunitgrading.filehandling import scan_or_unpack_submissions
def report_error(pid, message):
    """Record a test-run failure for one student submission.

    Appends a fixed header line plus *message* to <pid>/test_errors.txt
    and echoes the message to stdout.

    pid -- the student's submission directory
    message -- human-readable description of the failure
    """
    out = os.path.join(pid, "test_errors.txt")
    # context manager guarantees the handle is closed even if a write
    # raises (the original leaked it between open() and close())
    with open(out, "a") as ostream:
        print("\tError running tests:", message)
        ostream.write("Error running tests\n")
        ostream.write(message + "\n")
def find_sourcedir(basedir, modulename):
    """Locate the directory under *basedir* that contains *modulename*.

    Walks the tree rooted at basedir and returns the first directory
    whose file list includes modulename; logs an error against basedir
    and returns None when it is nowhere in the tree.
    """
    hit = next(
        (path for path, _subdirs, files in os.walk(basedir)
         if modulename in files),
        None,
    )
    if hit is None:
        # not found anywhere: flag the submission as broken
        report_error(basedir, "Can't locate module %s" % modulename)
    return hit
class TestRunner(multiprocessing.Process):
    """Class to run a set of unit tests in a separate process"""
    def __init__(self, basedir, sid, testmodulename, targetname, modules, outputname, queue):
        """Initialise a test runner
        basedir - directory in which student submissions are unpacked
        sid - student id
        testmodulename - name of the test module to run
        targetname - name of a source module in the submission
        modules - list of python modules to be copied into project, including test module
        outputname - name for file to write test output to
        queue - message queue to send result back to parent process
        """
        multiprocessing.Process.__init__(self)
        self.sid = sid
        self.modules = modules
        self.testmodulename = testmodulename
        self.targetname = targetname
        self.queue = queue
        # default result tuple: (sid, tests_run, failures, errors, mark);
        # only the run() method replaces it with real numbers
        self.result = (self.sid, 0, 0, 0, 0)
        # remember the parent cwd so run() can chdir back in its finally block
        self.rootdir = os.getcwd()
        out = os.path.join(basedir, sid, outputname)
        # NOTE(review): opened in __init__ (parent process) and used in the
        # child — presumably relies on fork semantics; verify on Windows
        self.ostream = open(out,"w")
        # directory inside the submission that holds the target module
        # (None when the submission is missing it)
        self.sourcedir = find_sourcedir(os.path.join(basedir, sid), self.targetname)
    def __report_error(self, message=""):
        """Report an error, either an explicit message
        or just dump out crash info"""
        print("\tError running test: ",)
        self.ostream.write("Error running tests\n")
        if message != "":
            self.ostream.write(message + "\n")
            print(message)
        else:
            # no message given: we are inside an except block, dump the
            # current exception and traceback instead
            info = sys.exc_info()
            self.ostream.write(str(info))
            traceback.print_exc(None, self.ostream)
    def run(self):
        """Child-process entry point: import and run the test module
        inside the student's source directory, then push the result
        tuple onto the queue."""
        # if there is no source to load, we quit now
        if self.sourcedir == None:
            self.__report_error("Source file not found in submission")
            self.ostream.close()
            return
        print("DIR", self.sourcedir)
        # get the python script to test from the given directory: add it to the path
        sys.path.insert(0, '.')
        # any modules already in this dir should be reloaded
        reloadmods = []
        for modfile in os.listdir(self.sourcedir):
            if modfile.endswith('.py'):
                modname, ext = os.path.splitext(modfile)
                #print("add to reload queue: ", modname)
                reloadmods.append(modname)
        # copy the test module file into the target dir
        for m in self.modules:
            #print("COPYING: ", m, " to ", self.sourcedir)
            shutil.copy(m, self.sourcedir)
        try:
            os.chdir(self.sourcedir)
            # reload any user modules
            for modname in reloadmods:
                if modname in sys.modules:
                    #print('\treloading', sys.modules[modname])
                    target = imp.reload(sys.modules[modname])
            testmodule = importlib.import_module(self.testmodulename)
            # load all tests in the module
            suite = unittest.defaultTestLoader.loadTestsFromModule(testmodule)
            # run the tests
            result = unittest.TextTestRunner(stream=self.ostream, verbosity=2).run(suite)
            # mark = tests that neither errored nor failed
            totalmark = result.testsRun-len(result.errors)-len(result.failures)
            self.result = (self.sid, result.testsRun, len(result.failures), len(result.errors), totalmark)
            # put the result onto the queue to send back to the caller
            self.queue.put(self.result)
        except Exception:
            self.__report_error()
            # NOTE(review): this tuple has 6 elements, the success tuple has 5
            self.queue.put((self.sid, 0, 0, 0, 0, "Error running tests"))
        finally:
            # ensure we reset the path
            sys.path.pop(0)
            os.chdir(self.rootdir)
            self.ostream.close()
def read_config(configfile):
    """Parse *configfile* and return a dict of grading settings.

    Missing optional keys are filled with defaults; paths listed under
    ``modules`` are resolved relative to the config file's directory.
    """
    config = ConfigParser()
    config.read(configfile)
    # paths are resolved relative to the config file directory
    configdir = os.path.dirname(configfile)
    settings = {
        'basedir': config.get('default', 'basedir'),
        'targetname': config.get('default', 'targetname', fallback=None),
        'testmodule': config.get('default', 'testmodule'),
        'outputname': config.get('default', 'outputname', fallback='test_output.txt'),
        # any value other than the literal 'yes' means "no zip expected"
        'expectzip': config.get('default', 'expectzip', fallback='no') == 'yes',
        'csvname': config.get('default', 'csvname', fallback="results.csv"),
    }
    # 'modules' is a whitespace-separated list of support files
    settings['modules'] = [
        os.path.join(configdir, m)
        for m in config.get('default', 'modules').split()
    ]
    return settings
def run_tests_on_collection(dirlist, basedir, testmodule, targetname, modules, outputname):
    """Run unit tests for each student directory in an unpacked directory
    dirlist is a list of student submissions directories"""
    # 60 second timeout threshold
    threshold = datetime.timedelta(0, 60, 0)
    result = []
    queue = multiprocessing.Queue()
    for sid in dirlist:
        # one child process per submission, sequentially
        thr = TestRunner(basedir, sid, testmodule, targetname, modules, outputname, queue)
        thr.start()
        start = datetime.datetime.now()
        timeout = False
        # NOTE(review): busy-wait — spins at full CPU until the child exits
        # or 60s elapse; a timed-out child is never terminated or joined
        while not timeout and thr.is_alive():
            if datetime.datetime.now() - start > threshold:
                timeout = True
        # an empty queue means the child died or timed out: record zeros
        if not queue.empty():
            testresult = queue.get()
        else:
            testresult = (sid,0,0,0,0)
        print("RESULT: ", sid, testresult)
        result.append(testresult)
    return result
def process(zfile, configfile):
    """Unpack submissions and run the unit tests for each
    student"""
    c = read_config(configfile)
    # NOTE(review): results file is closed explicitly at the end; it stays
    # open (and partially written) if anything below raises
    h = open(c['csvname'], 'w')
    results = csv.writer(h)
    results.writerow(('SID', 'Tests', 'Failed', 'Errors', 'Total'))
    # 'problems' is whatever scan_or_unpack_submissions could not unpack
    unpacked, problems = scan_or_unpack_submissions(zfile, c['basedir'], c['targetname'], c['expectzip'])
    result = run_tests_on_collection(unpacked, c['basedir'], c['testmodule'], c['targetname'], c['modules'], c['outputname'])
    for row in result:
        results.writerow(row)
    print("Problem cases:\n")
    # problem submissions get a row with only the student id
    for sid in problems:
        results.writerow((sid,))
        print(sid)
    h.close()
    print("Grading complete")
    print("Results in ", c['csvname'])
|
[
"steve.cassidy@mq.edu.au"
] |
steve.cassidy@mq.edu.au
|
f7729591d5635a1da2b4fe884e44539a6aa15cd9
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/FEK7892zgj4nPJvkE_4.py
|
6ec575bba1998ca7103a7d9ed80a1478a9fb8052
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213
| 2021-04-06T20:17:44
| 2021-04-06T20:17:44
| 355,318,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 717
|
py
|
def primes2(n):
    """ Input n>=6, Returns a list of primes, 2 <= p < n """
    # 2,3-wheel sieve: only numbers of the form 6k±1 are represented, so the
    # sieve array has n//3 entries; index i maps to the candidate 3*i+1|1
    n, correction = n-n%6+6, 2-(n%6>1)
    sieve = [True] * (n//3)
    for i in range(1,int(n**0.5)//3+1):
        if sieve[i]:
            # candidate represented by index i (always odd, coprime to 6)
            k=3*i+1|1
            # strike out multiples of k starting at k*k, stepping 2k,
            # separately for each of the two wheel residues
            sieve[ k*k//3 ::2*k] = [False] * ((n//6-k*k//6-1)//k+1)
            sieve[k*(k-2*(i&1)+4)//3::2*k] = [False] * ((n//6-k*(k-2*(i&1)+4)//6-1)//k+1)
    # 2 and 3 are not on the wheel, prepend them; 'correction' trims the
    # overshoot introduced by rounding n up to a multiple of 6
    return [2,3] + [3*i+1|1 for i in range(1,n//3-correction) if sieve[i]]
primes = primes2(10**6)
def prime_gaps(g, a, b, prime_list=None):
    """Find the first pair of consecutive primes in [a, b] with gap *g*.

    g -- required difference p2 - p1
    a, b -- inclusive bounds; both primes must lie within them
    prime_list -- ascending primes to scan (defaults to the module-level
        ``primes`` table, preserving the original behaviour)

    Returns [p1, p2] for the first qualifying pair, or None.
    """
    if prime_list is None:
        prime_list = primes
    # walk consecutive pairs in ascending order
    for p1, p2 in zip(prime_list, prime_list[1:]):
        if p1 >= a and p2 <= b and p2 - p1 == g:
            return [p1, p2]
        if p1 + g > b:
            # every later pair starts even higher, so no gap of g can
            # still fit below b — stop scanning early
            break
    return None
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
276f391bee58142fcfb697ca4d76631818bcd5f4
|
f9d564f1aa83eca45872dab7fbaa26dd48210d08
|
/huaweicloud-sdk-roma/huaweicloudsdkroma/v2/model/list_topics_response.py
|
8df5bc3f28136681613fcee4eb2ec78edaee20f2
|
[
"Apache-2.0"
] |
permissive
|
huaweicloud/huaweicloud-sdk-python-v3
|
cde6d849ce5b1de05ac5ebfd6153f27803837d84
|
f69344c1dadb79067746ddf9bfde4bddc18d5ecf
|
refs/heads/master
| 2023-09-01T19:29:43.013318
| 2023-08-31T08:28:59
| 2023-08-31T08:28:59
| 262,207,814
| 103
| 44
|
NOASSERTION
| 2023-06-22T14:50:48
| 2020-05-08T02:28:43
|
Python
|
UTF-8
|
Python
| false
| false
| 4,583
|
py
|
# coding: utf-8
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ListTopicsResponse(SdkResponse):
    """Auto-generated SDK response model for the ListTopics call.

    Attributes:
        openapi_types (dict): The key is attribute name
            and the value is attribute type.
        attribute_map (dict): The key is attribute name
            and the value is json key in definition.
    """
    # attributes whose values are masked as "****" in to_dict()
    sensitive_list = []
    openapi_types = {
        'total': 'int',
        'size': 'int',
        'items': 'list[Topic]'
    }
    attribute_map = {
        'total': 'total',
        'size': 'size',
        'items': 'items'
    }
    def __init__(self, total=None, size=None, items=None):
        """ListTopicsResponse
        The model defined in huaweicloud sdk
        :param total: Total number of topics
        :type total: int
        :param size: Number of topics returned in this response
        :type size: int
        :param items: List of topics
        :type items: list[:class:`huaweicloudsdkroma.v2.Topic`]
        """
        super(ListTopicsResponse, self).__init__()
        self._total = None
        self._size = None
        self._items = None
        self.discriminator = None
        # only assign through the property setters when a value was given,
        # so unset fields stay None
        if total is not None:
            self.total = total
        if size is not None:
            self.size = size
        if items is not None:
            self.items = items
    @property
    def total(self):
        """Gets the total of this ListTopicsResponse.
        Total number of topics
        :return: The total of this ListTopicsResponse.
        :rtype: int
        """
        return self._total
    @total.setter
    def total(self, total):
        """Sets the total of this ListTopicsResponse.
        Total number of topics
        :param total: The total of this ListTopicsResponse.
        :type total: int
        """
        self._total = total
    @property
    def size(self):
        """Gets the size of this ListTopicsResponse.
        Number of topics returned in this response
        :return: The size of this ListTopicsResponse.
        :rtype: int
        """
        return self._size
    @size.setter
    def size(self, size):
        """Sets the size of this ListTopicsResponse.
        Number of topics returned in this response
        :param size: The size of this ListTopicsResponse.
        :type size: int
        """
        self._size = size
    @property
    def items(self):
        """Gets the items of this ListTopicsResponse.
        List of topics
        :return: The items of this ListTopicsResponse.
        :rtype: list[:class:`huaweicloudsdkroma.v2.Topic`]
        """
        return self._items
    @items.setter
    def items(self, items):
        """Sets the items of this ListTopicsResponse.
        List of topics
        :param items: The items of this ListTopicsResponse.
        :type items: list[:class:`huaweicloudsdkroma.v2.Topic`]
        """
        self._items = items
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # recurse into list elements that are themselves models
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # recurse into dict values that are themselves models
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
    def __repr__(self):
        """For `print`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ListTopicsResponse):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
a2d04d53f2ed1214f12315065171a637a70f1949
|
841c0df958129bef4ec456630203992a143c7dc7
|
/src/15/15726.py
|
ea66bee86e5bafd68fe9975425375b2326cc0ab2
|
[
"MIT"
] |
permissive
|
xCrypt0r/Baekjoon
|
da404d3e2385c3278a1acd33ae175c2c1eb82e5e
|
7d858d557dbbde6603fe4e8af2891c2b0e1940c0
|
refs/heads/master
| 2022-12-25T18:36:35.344896
| 2021-11-22T20:01:41
| 2021-11-22T20:01:41
| 287,291,199
| 16
| 25
|
MIT
| 2022-12-13T05:03:49
| 2020-08-13T13:42:32
|
C++
|
UTF-8
|
Python
| false
| false
| 291
|
py
|
"""
15726. 이칙연산
작성자: xCrypt0r
언어: Python 3
사용 메모리: 29,380 KB
소요 시간: 60 ms
해결 날짜: 2020년 9월 19일
"""
def main():
    """Read integers A, B, C from stdin and print int(A*B/C) when B > C,
    otherwise int(A/B*C)."""
    a, b, c = (int(tok) for tok in input().split())
    if b > c:
        print(int(a * b / c))
    else:
        print(int(a / b * c))
if __name__ == '__main__':
    main()
|
[
"fireintheholl@naver.com"
] |
fireintheholl@naver.com
|
b4eedfabafacd95da1f202e2227f0a1c1511d5b0
|
d8e662acf8aa90b5a76351712dfcb405bfebc01a
|
/webtelnet/django_webtelnet/tools/telnet.py
|
d45fb8934cda5ac1bca1438f8426ee2b5d2edc79
|
[
"MIT"
] |
permissive
|
crazyinstall/django-webtelnet
|
c86f35d6f22c7f06c2fad378907da0444b3f9fb6
|
840572c14792109025bf2a17bc481ae58b06b29e
|
refs/heads/master
| 2023-05-26T06:20:11.033401
| 2019-07-30T08:40:42
| 2019-07-30T08:40:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,198
|
py
|
import telnetlib
from threading import Thread
import json
import time
import traceback
class Telnet:
    """
    Bridge between a Django websocket and a telnet session.
    Because of limitations of the telnetlib library, the terminal cannot
    display colours or resize the terminal.
    """
    def __init__(self, websocker, message):
        # websocket used to push output back to the browser
        self.websocker = websocker
        # reusable message envelope dict ('status', 'message' keys)
        self.message = message
        # accumulated keystrokes and accumulated session output
        self.cmd = ''
        self.res = ''
        self.tn = telnetlib.Telnet()
    def connect(self, host, user, password, port=23, timeout=30):
        """Open the telnet session, perform the login dialogue and start
        the reader thread; closes the websocket with code 3001 on failure."""
        try:
            self.tn.open(host=host, port=port, timeout=timeout)
            self.tn.read_until(b'login: ', timeout=10)
            user = '{0}\n'.format(user).encode('utf-8')
            self.tn.write(user)
            self.tn.read_until(b'Password: ', timeout=10)
            password = '{0}\n'.format(password).encode('utf-8')
            self.tn.write(password)
            time.sleep(0.5)  # wait longer here if the server responds slowly
            command_result = self.tn.read_very_eager().decode('utf-8')
            self.message['status'] = 0
            self.message['message'] = command_result
            message = json.dumps(self.message)
            self.websocker.send(message)
            self.res += command_result
            if 'Login incorrect' in command_result:
                self.message['status'] = 2
                self.message['message'] = 'connection login faild...'
                message = json.dumps(self.message)
                self.websocker.send(message)
                self.websocker.close(3001)
            self.tn.write(b'export TERM=ansi\n')
            time.sleep(0.2)
            self.tn.read_very_eager().decode('utf-8')
            # use a single thread to forward server output to the Django
            # websocket; multiple threads easily garble the frontend display
            Thread(target=self.websocket_to_django).start()
        except:
            print(traceback.format_exc())
            self.message['status'] = 2
            self.message['message'] = 'connection faild...'
            message = json.dumps(self.message)
            self.websocker.send(message)
            self.websocker.close(3001)
    def django_to_ssh(self, data):
        """Forward one chunk of browser input to the telnet session."""
        try:
            self.tn.write(data.encode('utf-8'))
            if data == '\r':
                data = '\n'
            self.cmd += data
        except:
            self.close()
    def websocket_to_django(self):
        """Reader loop (runs in its own thread): relay telnet output to
        the websocket until the connection drops."""
        try:
            while True:
                # NOTE(review): busy-polls read_very_eager(); 'continue' on
                # empty data spins without sleeping
                data = self.tn.read_very_eager().decode('utf-8')
                if not len(data):
                    continue
                self.message['status'] = 0
                self.message['message'] = data
                self.res += data
                message = json.dumps(self.message)
                self.websocker.send(message)
        except:
            self.close()
    def close(self):
        """Notify the client, then close both the websocket and the
        telnet connection; errors during teardown are ignored."""
        try:
            self.message['status'] = 1
            self.message['message'] = 'connection closed...'
            message = json.dumps(self.message)
            self.websocker.send(message)
            self.websocker.close()
            self.tn.close()
        except:
            pass
    def shell(self, data):
        # thin alias kept for the websocket consumer interface
        self.django_to_ssh(data)
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
6d8fc1aa3874d6519fdfe7a3ce8bc07ba45332d9
|
7ee4e7e48da5390839fd91f561637267bc65c731
|
/examples/jupyter/merge.py
|
764fa19fd8b7942c1b4f21f310bb6d0a9ac975ce
|
[
"Apache-2.0"
] |
permissive
|
OliverEvans96/python-pachyderm
|
f82239d230e2346a677841c7e94079c7b4dabcbd
|
8a3755402b0e32048c89315c3e7754cf9836d310
|
refs/heads/master
| 2020-07-19T04:10:47.168478
| 2019-08-08T21:23:27
| 2019-08-08T21:23:27
| 206,371,596
| 0
| 0
|
Apache-2.0
| 2019-09-04T17:09:57
| 2019-09-04T17:09:57
| null |
UTF-8
|
Python
| false
| false
| 1,148
|
py
|
import os
import csv
import json
import datetime
PRICE = 5
def main():
    """Join daily weather and trip files from Pachyderm input repos into
    /pfs/out/data.csv rows of (date, precip probability, trips, revenue)."""
    try:
        weather_filenames = os.listdir("/pfs/weather")
    except:
        # input repo absent (e.g. first run): behave as if it were empty
        weather_filenames = []
    with open("/pfs/out/data.csv", "w") as out_file:
        writer = csv.writer(out_file)
        for weather_filename in weather_filenames:
            # weather files are named YYYY-MM-DD
            dt = datetime.datetime.strptime(weather_filename, "%Y-%m-%d")
            # trips repo uses M-DD-YY names (month not zero-padded)
            trip_filepath = "/pfs/trips/{}-{}".format(dt.month, dt.strftime("%d-%y"))
            if os.path.exists(trip_filepath):
                with open("/pfs/weather/{}".format(weather_filename), "r") as weather_file:
                    with open(trip_filepath, "r") as trip_file:
                        weather_json = json.load(weather_file)
                        # assumes Dark-Sky-style payload with daily.data[0] — TODO confirm
                        precip = weather_json["daily"]["data"][0]["precipProbability"]
                        trip_csv = csv.reader(trip_file)
                        next(trip_csv) # skip the header row
                        # trip count presumed to be column 1 of the first data row
                        trips = int(next(trip_csv)[1])
                        writer.writerow([dt.strftime("%Y-%m-%d"), precip, trips, trips * PRICE])
if __name__ == "__main__":
    main()
|
[
"simonson@gmail.com"
] |
simonson@gmail.com
|
69d7f955c39a3df010c6b0722c30cd0b852e3a78
|
230633f33aaf722b1ece605a81ee566e1060fa3a
|
/textattack/models/wrappers/pytorch_model_wrapper.py
|
e3d9bcd7e5b5ce7880a2f46e8bd9ef8d633497c5
|
[
"MIT"
] |
permissive
|
ashwani-bhat/TextAttack
|
bc9a428a68f0894db7d6404e91adf8e2891055c0
|
9f5c0794b95779f11bf2a120642db00da2bc4928
|
refs/heads/master
| 2022-12-08T17:09:31.272779
| 2020-08-04T16:35:57
| 2020-08-04T16:35:57
| 284,734,102
| 0
| 0
|
MIT
| 2020-08-03T15:17:18
| 2020-08-03T15:17:17
| null |
UTF-8
|
Python
| false
| false
| 1,111
|
py
|
import torch
import textattack
from .model_wrapper import ModelWrapper
class PyTorchModelWrapper(ModelWrapper):
    """Loads a PyTorch model (`nn.Module`) and tokenizer."""
    def __init__(self, model, tokenizer, batch_size=32):
        """Move *model* to the shared textattack device and remember the
        tokenizer and prediction batch size."""
        if not isinstance(model, torch.nn.Module):
            raise TypeError(
                f"PyTorch model must be torch.nn.Module, got type {type(model)}"
            )
        self.model = model.to(textattack.shared.utils.device)
        self.tokenizer = tokenizer
        self.batch_size = batch_size
    def tokenize(self, inputs):
        """Encode a list of strings; prefers the tokenizer's batch API
        when it exists, otherwise encodes one input at a time."""
        if hasattr(self.tokenizer, "batch_encode"):
            return self.tokenizer.batch_encode(inputs)
        else:
            return [self.tokenizer.encode(x) for x in inputs]
    def __call__(self, text_input_list):
        """Tokenize the inputs and run batched, gradient-free inference.

        NOTE(review): torch.tensor(ids) assumes every encoding has equal
        length (no padding is applied here) — confirm tokenizer pads.
        """
        ids = self.tokenize(text_input_list)
        ids = torch.tensor(ids).to(textattack.shared.utils.device)
        with torch.no_grad():
            outputs = textattack.shared.utils.batch_model_predict(
                self.model, ids, batch_size=self.batch_size
            )
        return outputs
|
[
"jxmorris12@gmail.com"
] |
jxmorris12@gmail.com
|
d02dee9370662c42f3808914bfe276ed8b71b720
|
733f1b8e8069ee11e4f4d56e57c8fdc4c901d080
|
/python/qa_pdu_round_robin.py
|
1541e08ed6ff97e53c5bf1abf1a9727c589646f0
|
[] |
no_license
|
arirubinstein/gr-iridium
|
3bbe8858968a4fb872a7da8abf621ce72a595fd1
|
3f7079bdf688f70acb43d12f049a405262982d78
|
refs/heads/master
| 2021-01-21T01:43:16.111168
| 2016-09-14T06:16:32
| 2016-09-14T06:16:32
| 68,176,992
| 2
| 0
| null | 2016-09-14T05:45:37
| 2016-09-14T05:45:37
| null |
UTF-8
|
Python
| false
| false
| 1,241
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2016 Free Software Foundation, Inc.
#
# This is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest
from gnuradio import blocks
import iridium_swig as iridium
class qa_pdu_round_robin (gr_unittest.TestCase):
    """GNU Radio QA skeleton for the pdu_round_robin block (no assertions yet)."""
    def setUp (self):
        # fresh top block per test
        self.tb = gr.top_block ()
    def tearDown (self):
        self.tb = None
    def test_001_t (self):
        # set up fg
        self.tb.run ()
        # check data
if __name__ == '__main__':
    gr_unittest.run(qa_pdu_round_robin, "qa_pdu_round_robin.xml")
|
[
"schneider@blinkenlichts.net"
] |
schneider@blinkenlichts.net
|
7ec6ea6355a4691c164a132310317a1a87d8f9a3
|
b2158bfa536ff4f0dd1359be383fe2331f3e9707
|
/1_Web_Server/Skeleton WebServer 2.py
|
1a21b0b12ec4e228bca8f52436c167fac03e0746
|
[] |
no_license
|
kristogj/TTM4100_KTN
|
63fc6f1840927b392fc2d140be73cd4a6ccfb6ec
|
a202b9d8cc6db7ea5936d550671e4076fc09dc89
|
refs/heads/master
| 2021-05-11T02:02:16.929177
| 2018-05-03T09:19:30
| 2018-05-03T09:19:30
| 118,347,765
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,705
|
py
|
# This skeleton is valid for both Python 2.7 and Python 3.
# You should be aware of your additional code for compatibility of the Python version of your choice.
# Import socket module
from socket import *
# Create a TCP server socket
#(AF_INET is used for IPv4 protocols)
#(SOCK_STREAM is used for TCP)
serverSocket = socket(AF_INET, SOCK_STREAM)
# Prepare a server socket
# FILL IN START
# Assign a port number
serverPort = 56400##
# Bind the socket to server address and server port
serverSocket.bind(('',serverPort)) ##
# Listen to at most 1 connection at a time
serverSocket.listen(1) ##
# FILL IN END
# Server should be up and running and listening to the incoming connections
while True:
    print('Ready to serve...')
    # Set up a new connection from the client
    connectionSocket, addr = serverSocket.accept()# FILL IN START # FILL IN END
    # If an exception occurs during the execution of try clause
    # the rest of the clause is skipped
    # If the exception type matches the word after except
    # the except clause is executed
    try:
        # Receives the request message from the client
        message = connectionSocket.recv(1024) # FILL IN START # FILL IN END
        # Extract the path of the requested object from the message
        # The path is the second part of HTTP header, identified by [1]
        filepath = message.split()[1]
        # Because the extracted path of the HTTP request includes
        # a character '\', we read the path from the second character
        f = open(filepath[1:],'r')
        # Read the file "f" and store the entire content of the requested file in a temporary buffer
        outputdata = f.readlines()# FILL IN START # FILL IN END
        print(outputdata)
        # Send the HTTP response header line to the connection socket
        # Format: "HTTP/1.1 *code-for-successful-request*\r\n\r\n"
        # FILL IN START
        connectionSocket.send(b"HTTP/1.1 200 OK\r\n\r\n")##
        # FILL IN END
        # Send the content of the requested file to the connection socket
        # (each line is a str from readlines(); encode before sending)
        for i in range(0, len(outputdata)):
            connectionSocket.send(outputdata[i].encode())
        connectionSocket.send(b"\r\n")
        # Close the client connection socket
        connectionSocket.close()
    except IOError:
        # Send HTTP response message for file not found
        # Same format as above, but with code for "Not Found" (see outputdata variable)
        # FILL IN START
        connectionSocket.send(b"HTTP/1.1 404 NOT FOUND\r\n\r\n")##
        # FILL IN END
        connectionSocket.send(b"<html><head></head><body><h1>404 Not Found</h1></body></html>\r\n")
        # Close the client connection socket
        # FILL IN START
        connectionSocket.close()##
        # FILL IN END
# NOTE(review): unreachable — the 'while True' loop above never exits
serverSocket.close()
|
[
"kristoffergjerde@gmail.com"
] |
kristoffergjerde@gmail.com
|
5fc6b4960040d94fb1e4a0cbd82e50746474d47a
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02821/s590731514.py
|
827448373b6a7d5f2313e8622bb2f870d0d97350
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,108
|
py
|
import cmath
pi = cmath.pi
exp = cmath.exp
N=2**18
def make_exp_t(N, base):
    """Twiddle-factor table: maps each power-of-two k (N, N/2, ..., 1)
    to exp(base / k); also contains the sentinel entry 0 -> 1."""
    sizes = []
    k = N
    while k:
        sizes.append(k)
        k >>= 1
    table = {k: exp(base / k) for k in sizes}
    table[0] = 1
    return table
fft_exp_t = make_exp_t(N, -2j*pi)
ifft_exp_t = make_exp_t(N, 2j*pi)
def fft_dfs(f, s, N, st, exp_t):
    """Recursive radix-2 decimation-in-time FFT.

    f     - input sequence, read at indices s, s+st, s+2*st, ...
    s     - start offset into f
    N     - transform length for this call (power of two, N >= 2)
    st    - stride between consecutive samples of this sub-transform
    exp_t - table mapping each length L to the twiddle base exp(+-2*pi*i/L)

    Returns a fresh list of N transformed values.
    """
    if N==2:
        # base case: a 2-point butterfly
        a = f[s]; b = f[s+st]
        return [a+b, a-b]
    N2 = N//2; st2 = st*2
    # even- and odd-indexed halves, each of length N/2
    F0 = fft_dfs(f, s , N2, st2, exp_t)
    F1 = fft_dfs(f, s+st, N2, st2, exp_t)
    w = exp_t[N]; wk = 1.0
    for k in range(N2):
        # butterfly: combine F0[k] with the twiddled F1[k] in place
        U = F0[k]; V = wk * F1[k]
        F0[k] = U + V
        F1[k] = U - V
        wk *= w
    F0.extend(F1)
    return F0
def fft(f, N):
    """Forward FFT of the length-N sequence f (N must be a power of two);
    a length-1 transform is the sequence itself."""
    return f if N == 1 else fft_dfs(f, 0, N, 1, fft_exp_t)
def ifft(F, N):
    """Inverse FFT of the length-N spectrum F, with the usual 1/N scaling;
    a length-1 transform is the sequence itself."""
    if N == 1:
        return F
    samples = fft_dfs(F, 0, N, 1, ifft_exp_t)
    # normalise: the forward/inverse pair must round-trip to the input
    return [v / N for v in samples]
# Read n (count of values), m (number of pair-picks) and the n values.
n,m,*a=map(int,open(0).read().split())
# histogram of the values; assumes every value fits below N — TODO confirm
b=[0]*N
for i in a:b[i]+=1
i=N
c=0
# Squaring the spectrum and inverting yields the self-convolution of the
# histogram: coefficient s counts ordered pairs whose values sum to s.
# Scan pair-sums from highest to lowest and greedily take the m largest.
for a in ifft([t*t for t in fft(b,N)],N)[::-1]:
    # round the float convolution coefficient back to an integer count
    a=int(a.real+.5)
    i-=1
    if a:
        t=min(m,a)
        c+=i*t
        m-=t
        if not m:break
print(c)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
d0d2fec898dd15fa114393b866c165481d23c57f
|
0547d1826e99eedb959a3463520d73985a3b844e
|
/Data Science for Everyone Track/19-Introduction to Shell/01- Manipulating files and directories/01-How does the shell compare to a desktop interface.py
|
5e8bc41d249261420a721998a9502cdcd75ba95c
|
[] |
no_license
|
abhaysinh/Data-Camp
|
18031f8fd4ee199c2eff54a408c52da7bdd7ec0f
|
782c712975e14e88da4f27505adf4e5f4b457cb1
|
refs/heads/master
| 2022-11-27T10:44:11.743038
| 2020-07-25T16:15:03
| 2020-07-25T16:15:03
| 282,444,344
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,720
|
py
|
'''
How does the shell compare to a desktop interface?
An operating system like Windows, Linux, or Mac OS is a special kind of program. It controls the computer's processor, hard drive, and network connection, but its most important job is to run other programs.
Since human beings aren't digital, they need an interface to interact with the operating system. The most common one these days is a graphical file explorer, which translates clicks and double-clicks into commands to open files and run programs. Before computers had graphical displays, though, people typed instructions into a program called a command-line shell. Each time a command is entered, the shell runs some other programs, prints their output in human-readable form, and then displays a prompt to signal that it's ready to accept the next command. (Its name comes from the notion that it's the "outer shell" of the computer.)
Typing commands instead of clicking and dragging may seem clumsy at first, but as you will see, once you start spelling out what you want the computer to do, you can combine old commands to create new ones and automate repetitive operations with just a few keystrokes.
What is the relationship between the graphical file explorer that most people use and the command-line shell?
Answer the question
50 XP
Possible Answers
The file explorer lets you view and edit files, while the shell lets you run programs.
The file explorer is built on top of the shell.
The shell is part of the operating system, while the file explorer is separate.
They are both interfaces for issuing commands to the operating system.
Answer : They are both interfaces for issuing commands to the operating system.
'''
|
[
"abhaysinh.surve@gmail.com"
] |
abhaysinh.surve@gmail.com
|
71b044f34e96ce148cec417fb41d88ef7818d82e
|
defe77f8cfb333f4c67c0f9cafb290cb337464aa
|
/sequencing_utilities/gdtools.py
|
6f2964959ff2d33c37b40c683d098e7a05003f7b
|
[
"MIT"
] |
permissive
|
dmccloskey/sequencing_utilities
|
8bd5c2c3ffe5d54a3c898db86bb65d6ae2af1394
|
3845cede661bc263a38cf8850148380e08c0e9ea
|
refs/heads/master
| 2020-04-06T07:01:42.013835
| 2016-09-15T04:24:31
| 2016-09-15T04:24:31
| 38,275,400
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,909
|
py
|
#!/usr/bin/python
# -*- coding: latin-1 -*-
"""
Implements the GDTools class that annotates and applies mutations to .gd and reference .gbk files
based on the gdtools utility program
"""
import os
class GDTools():
    """Thin wrapper around the breseq `gdtools` command-line utility."""
    def apply(self,gbk_filename_I,gd_filename_I,fastaOrGff3_filename_O,output_O='gff3',
        gdtools_I = 'gdtools'):
        '''apply mutational changes found in the gd file to the reference genome
        e.g. gdtools APPLY [ -o output.gff3 -f GFF3 ] -r reference.gbk input.gd
        INPUT:
        fastaOrGff3_filename_O = output filename
        output_O = 'fasta' or 'gff3' (default output: gff3)
        gbk_filename_I = reference genome
        gd_filename_I = gd filename
        gdtools_I = command for gdtools'''
        # NOTE(review): command is built by string interpolation and run
        # through os.system — filenames containing shell metacharacters
        # would be interpreted by the shell; consider subprocess.run([...])
        cmd = ("%s APPLY -o %s -f %s -r %s %s" %(gdtools_I,fastaOrGff3_filename_O,output_O,gbk_filename_I,gd_filename_I));
        print(cmd);
        os.system(cmd);
    def annotate(self,htmlOrGd_filename_O,gbk_filename_I,gd_filenames_I=[],output_O='html',
        gdtools_I = 'gdtools'):
        '''
        e.g. gdtools ANNOTATE [-o annotated.html] -r reference.gbk input.1.gd [input.2.gd ... ]
        INPUT:
        htmlOrGd_filename_O = filename for the .html or .gd file output
        output_O = 'html' or 'gd' (default output: html)
        gbk_filename_I = reference genome
        gd_filenames_I = list of gd files
        gdtools_I = command for gdtools
        OUTPUT:
        html or gd file based on input
        '''
        # NOTE(review): mutable default argument gd_filenames_I=[] — safe
        # here only because the list is never mutated
        gd_filename_str = ' '.join(gd_filenames_I);
        # only the html flavour needs the extra --html switch
        if output_O=='html':
            cmd = ("%s ANNOTATE -o %s --html -r %s %s" %(gdtools_I,
                htmlOrGd_filename_O,gbk_filename_I,gd_filename_str));
        else:
            cmd = ("%s ANNOTATE -o %s -r %s %s" %(gdtools_I,
                htmlOrGd_filename_O,gbk_filename_I,gd_filename_str));
        print(cmd);
        os.system(cmd);
|
[
"dmccloskey87@gmail.com"
] |
dmccloskey87@gmail.com
|
56198034f91bc59cb3faaffacf9f3a6f362d3f7a
|
05215b1f0f07eeb7266996c4d9a3f4cff78be7e1
|
/ai.py
|
2e5d593ca4af2608d3f1595dfe3b22e1fa50d533
|
[] |
no_license
|
ljte/TicTacToe
|
c54c259dc0106fddf9c814f9efac2285e5a89ae1
|
584d1f8900b9b9ee216587a247af7c97714ad3dd
|
refs/heads/master
| 2022-12-09T13:33:57.400026
| 2020-09-12T13:16:33
| 2020-09-12T13:16:33
| 294,947,724
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,633
|
py
|
import sys
from main import check_winner
scores = {
'X': 1,
'O': -1,
'Tie': 0
}
ai_player = 'O'
human_player = 'X'
def minimax(board, depth, is_maximizing):
    """Score *board* by exhaustive minimax search.

    board -- 3x3 grid of cell objects with .is_empty and .value
    depth -- recursion depth (passed through, not used for scoring here)
    is_maximizing -- True when it is the human's ('X', score +1) turn,
        False for the AI ('O', score -1)

    Returns +1 / -1 / 0 per the module-level `scores` table once
    check_winner() reports a terminal position.
    """
    result = check_winner(board)
    if result:
        # terminal position: 'X', 'O' or 'Tie'
        return scores[result]
    if is_maximizing:
        best_score = -sys.maxsize
        for i in range(3):
            for j in range(3):
                if board[i][j].is_empty:
                    # try the move, recurse, then undo it
                    board[i][j].value = human_player
                    score = minimax(board, depth + 1, False)
                    board[i][j].value = None
                    best_score = max(score, best_score)
        return best_score
    else:
        best_score = sys.maxsize
        for i in range(3):
            for j in range(3):
                if board[i][j].is_empty:
                    board[i][j].value = ai_player
                    score = minimax(board, depth + 1, True)
                    board[i][j].value = None
                    best_score = min(score, best_score)
        return best_score
def make_best_move(board):
    """Place the AI marker on the cell that minimises the minimax score
    (the AI plays 'O', whose winning score is -1 in `scores`)."""
    best_score = sys.maxsize
    best_move = ()
    for i in range(3):
        for j in range(3):
            if board[i][j].is_empty:
                # tentatively play here, evaluate the human's best reply
                board[i][j].value = ai_player
                score = minimax(board, 0, True)
                if score < best_score:
                    best_score = score
                    best_move = (i, j)
                # undo the tentative move before trying the next cell
                board[i][j].value = None
    # NOTE(review): raises IndexError if the board has no empty cell
    board[best_move[0]][best_move[1]].value = ai_player
def make_simple_turn(grid):
    """Naive fallback move: claim the grid's first empty cell for the AI,
    doing nothing when no cell is free."""
    cell = grid.get_empty_cell()
    if cell:
        cell.value = ai_player
|
[
"="
] |
=
|
83fa38f3c85d64b10ad5d7b0a64e7056c9159000
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2137/60698/252878.py
|
6263880d5378d64f8803902ccd10935998fcc738
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 491
|
py
|
from math import sqrt
def test():
n = int(input())
if n<=1:
print(False)
return
factor = [1]
for i in range(2, int(sqrt(n)) + 1):
if n % i == 0:
if i not in factor:
factor.append(i)
if int(n / i) not in factor:
factor.append(int(n / i))
sum = 0
for j in range(0, len(factor)):
sum = sum + factor[j]
if sum == n:
print(True)
else:
print(False)
test()
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
5ef92ca87e34e9dc5efdc49c10d106b68e01480f
|
2c3da6e0bddf55d64d650040bbf286c47b31811a
|
/learnpython100day/元类.py
|
6af39b9d5739481bdc65d335eb5cb68b0e9e6c1e
|
[
"MIT"
] |
permissive
|
Bngzifei/PythonNotes
|
76bd53db3033a9c51ab4bdd727842cd89607b584
|
01590e1b6c1bc0f04aa2d355fa2553c04cce27f2
|
refs/heads/master
| 2023-02-04T06:49:00.725463
| 2020-12-15T09:26:40
| 2020-12-15T09:26:40
| 155,154,662
| 1
| 2
|
MIT
| 2020-09-08T01:30:19
| 2018-10-29T05:02:48
|
Python
|
UTF-8
|
Python
| false
| false
| 1,108
|
py
|
# -*- coding: utf-8 -*-
# @Author: Marte
# @Date: 2019-05-27 17:46:35
# @Last Modified by: Marte
# @Last Modified time: 2019-05-27 20:20:18
class Foo(object):
    """Demo class: classes are themselves objects of type `type`."""
    def hello(self):
        print("hello world!")
        return
foo = Foo()
print(type(foo))    # <class '__main__.Foo'>
print(type(foo.hello))  # <class 'method'>
print(type(Foo))    # <class 'type'>
temp = Foo # assign the class itself to another variable
Foo.var = 11 # add an attribute to the class at runtime
print(Foo) # pass the class as a function argument
def init(self,name):
    # will serve as __init__ of the dynamically created class below
    self.name = name
    return
def hello(self):
    # will serve as a method of the dynamically created class below
    print("hello %s"%self.name)
    return
# create a class dynamically with the three-argument type() call
Foo = type("Foo", (object,), {"__init__": init, "hello": hello, "cls_var": 10})
foo = Foo("xianhu")
print(foo.hello())
print(Foo.cls_var)
print(foo.__class__)
print(Foo.__class__)
print(type.__class__)
class Author(type):
    """Metaclass that stamps an `author` attribute onto every class it creates."""
    def __new__(mcs, name, bases, dict):
        # add the author attribute
        dict["author"] = "xianhu"
        return super(Author, mcs).__new__(mcs, name, bases, dict)
class Foo(object, metaclass=Author):
    pass
foo = Foo()
print(foo.author)
|
[
"bngzifei@gmail.com"
] |
bngzifei@gmail.com
|
4a85a1b72cfea37cab5e95a542ca77b194c0997b
|
135cf3b73c4cd01970865b794260e195076875da
|
/scripts/r&d/testSend.py
|
985650bfe80997d6ea6994ed041ff01680387f20
|
[] |
no_license
|
njha7/elbalang_orchestration
|
1a4b7e6bb49dd7f7f735291949f52fdebed78c51
|
056f6305d43b24fedbf3eb4f6f26deaf5a0f57af
|
refs/heads/master
| 2021-04-28T18:41:43.603542
| 2018-04-19T19:25:59
| 2018-04-19T19:25:59
| 121,879,406
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 272
|
py
|
import pika
connection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
channel = connection.channel()
channel.queue_declare(queue='hello')
for x in range(1,100):
channel.basic_publish(exchange='', routing_key='hello', body='%d' % x)
connection.close()
|
[
"you@example.com"
] |
you@example.com
|
57995e50c4c2740675bad0ae2baf19562f0f7c26
|
eed7b5aa4861086d34e539e7bbfeff4286506692
|
/src/Server/Game/games.py
|
92eaa0fc62857153b72dd206ce6f503f660e8d55
|
[] |
no_license
|
dfwarden/DeckBuilding
|
0be2ccb68fc9a69c8eaa1d8acedeaa7cebef1a31
|
0b5a7573a3cf33430fe61e4ff8a8a7a0ae20b258
|
refs/heads/master
| 2021-01-18T09:52:51.880892
| 2015-02-03T03:21:17
| 2015-02-03T03:21:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 284
|
py
|
from game_wrapper import GameWrapper
id = 1
games = {}
def StartNewGame(game, players):
""" Start a New Game """
global games
global id
currentId = id
games[currentId] = GameWrapper(currentId, game, players)
id += 1
return currentId
|
[
"cloew123@gmail.com"
] |
cloew123@gmail.com
|
4f3036721abf197f97623f9c2e9af80af2d85d55
|
7792b03540784a0d28073899dd4ad78689e9a9fb
|
/VoiceAI/my_slice.py
|
1b58955d63d01e8ff062e6c608d1488fcab21885
|
[] |
no_license
|
ayiis/coding
|
3b1362f813a22a7246af3725162cfb53dea2f175
|
c73e4622e1811cc3fd8729a92df6537bd73dc802
|
refs/heads/master
| 2021-06-02T14:55:38.451288
| 2021-04-26T08:39:16
| 2021-04-26T08:39:16
| 134,660,001
| 0
| 0
| null | 2020-06-05T04:03:58
| 2018-05-24T04:14:14
|
CSS
|
UTF-8
|
Python
| false
| false
| 4,958
|
py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# __author__ = 'ayiis'
# create on 2018/11/07
"""
切分目录下所有子目录里的wav文件
用于切分喜马拉雅的儿童读英语的音频
"""
import sys
import numpy as np
import re
import ubelt
from pathlib import Path
import wave
reload(sys).setdefaultencoding("utf8")
class Slicer(object):
"""docstring for Slicer"""
def __init__(self, arg):
super(Slicer, self).__init__()
self.arg = arg
self.filename = arg["filename"]
self.save_dir = arg["save_dir"]
self.num_samples = 2048 # pyaudio内置缓冲大小
self.sampling_rate = 16000 # 取样频率
self.level = 1000 # 声音保存的阈值
self.count_num = 20 # count_num个取样之内出现COUNT_NUM个大于LEVEL的取样则记录声音
self.save_length = 10 # 声音记录的最小长度:save_length * num_samples 个取样
self.channels = 1 # 声道,单声道
self.sampwidth = 2 # 录音取样点
self.save_buffer = []
self.save_2_buffer = ""
self.save_num = 0
self.MAGIC_VALUE = 8
def save_2_file(self, content):
self.save_num += 1
ubelt.ensuredir(self.save_dir)
self.save_wav_path = "%s/%s.wav" % (self.save_dir, str(self.save_num).rjust(3, "0"))
print "save to: %s" % self.save_wav_path
wf = wave.open(self.save_wav_path, "wb")
wf.setnchannels(self.channels)
wf.setsampwidth(self.sampwidth)
wf.setframerate(self.sampling_rate)
wf.writeframes(content)
wf.close()
def do(self):
offset = []
with open(self.filename) as fr:
fr.seek(44)
while True:
string_audio_data = fr.read(self.num_samples)
if not string_audio_data:
break
self.save_2_buffer += string_audio_data
audio_data = np.fromstring(string_audio_data, dtype=np.short)
large_sample_count = np.sum(audio_data > self.level)
if large_sample_count > self.count_num:
offset.append(1)
else:
offset.append(0)
# print offset
# print len([x for x in offset if x == 1]), "/", len(offset)
# c_count = [0] * 24
cut_pos = [0]
c0 = 0
r_start = False
for pos, i in enumerate(offset):
if i == 0:
c0 += 1
else:
# for k in range(c0+1):
# if k >= 24:
# continue
# c_count[k] += 1
if c0 >= self.MAGIC_VALUE and r_start is True:
cut_pos.append(pos - c0 / 2)
c0 = 0
r_start = True
# print "------"
# print cut_pos[-1], len(offset)-1
cut_pos.append(len(offset))
# print "\t".join([str(x+1) for x in range(24)])
# print "\t".join([str(x) for x in c_count])
# print "cut at:"
print cut_pos
# print len(cut_pos)
# print "cut result:"
# print "end_pos:", cut_pos
for i, val in enumerate(cut_pos):
if i == 0:
continue
print offset[cut_pos[i-1]: val]
self.save_2_file(self.save_2_buffer[cut_pos[i-1]*self.num_samples:val*self.num_samples])
source_path = "/home/1109"
target_path = "/home/1109_done/"
def main():
for wav_dir in Path(source_path).glob("*"):
for wav_file in Path(wav_dir).glob("*.wav"):
wav_file_name = wav_file.name.lower().replace(".wav", "")
wav_file_name = re.sub(r"[\d]+[.][\d]+", "", wav_file_name)
wav_file_name = re.sub(r"raz[ -]?[a-z][ ]", "", wav_file_name)
# fixed \W
wav_file_name = re.sub(r"[\W]", "_", "%s" % wav_file_name)
wav_file_name = wav_file_name.strip()
new_file_path = "%s%s___%s" % (
target_path,
wav_dir.name.replace(" ", "").replace("-", "").lower().replace("raz", ""),
wav_file_name
)
# new_file_path = re.sub(r"[\W]", "_", new_file_path)
# print wav_dir, wav_file
# print new_file_path
ubelt.ensuredir(new_file_path + "/wav")
ubelt.ensuredir(new_file_path + "/etc")
# if "Fruit" not in "%s" % wav_file:
# continue
sc = Slicer({
"filename": "%s" % wav_file,
# "filename": "/home/data2/18988369 - Raz d/Raz d maria's halloween.wav",
# "filename": "/home/data/12338138 - RAZ-A/Fruit.wav",
"save_dir": new_file_path + "/wav",
"txt_path": new_file_path + "/etc/prompts-original",
})
sc.do()
# exit(1)
if __name__ == "__main__":
pass
# main()
|
[
"ayiis@126.com"
] |
ayiis@126.com
|
01921ca9f3fca49a540f52fee546503370155e01
|
2f083c3e5ebaf5803edb1c6329501d09dd7695db
|
/set 2h.py
|
746ef779a9a804d81c050b48c947ec3ab81238a3
|
[] |
no_license
|
Anjanaanjujsrr/Anju-code_kata-PLAYER
|
321a1097d1afe7da8916f193b35dfbd9ca440ec6
|
fadd1a2971843c5cf12cd63bcd062e96e093feb5
|
refs/heads/master
| 2020-05-26T08:59:53.795026
| 2019-06-09T08:05:27
| 2019-06-09T08:05:27
| 188,175,375
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 189
|
py
|
#anju
number=int(input())
r=[]
a=0
count=0
for i in range(number):
c=input()
r.append(c)
for i in r:
for j in i:
a+=ord(j)
if(a==612):
count+=1
a=0
print(count)
|
[
"noreply@github.com"
] |
Anjanaanjujsrr.noreply@github.com
|
f90b5b1fcbe9306e479747f6580ea0cbe3a5d1c7
|
35f069aad9f7040e20494dac11f826bba41d029e
|
/src/main/resources/qtools/lib/webservice/__init__.py
|
384fc467ac669b3197a860fb9c1a1dfa9b73fa32
|
[] |
no_license
|
v-makarenko/vtoolsmq
|
4be3bc3965aaeeee2d64c359a30f6f18617f354d
|
8a0dd75b196c0e641bb8b4b20124540aaaa2814b
|
refs/heads/master
| 2021-01-10T02:04:58.893206
| 2015-12-03T16:34:44
| 2015-12-03T16:34:44
| 47,275,758
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,042
|
py
|
"""
This package contains modules and classes used to interact with external web services.
"""
import urllib, urllib2, cookielib, os
class RequestProxy(object):
"""
Returns a proxied response object on request.
Not sure if this is the right thing to do yet.
"""
def __init__(self, proxy_class=None, opener=None):
self.opener = opener or None
if not proxy_class:
self.proxy_class = ResponseProxy
else:
self.proxy_class = proxy_class
def request(self, *args, **kwargs):
"""
Returns a
"""
if not self.opener:
response = urllib2.urlopen(*args, **kwargs)
else:
response = self.opener.open(*args, **kwargs)
return self.proxy_class(self, response)
class ResponseProxy(object):
"""
Proxy object that may edit the proxy object
when the response is read.
"""
def __init__(self, proxy, response):
self._proxy = proxy
self._response = response
@property
def proxy(self):
return self._proxy
def __getattribute__(self, name):
# note: @property decorator seems to muck with this
response = object.__getattribute__(self, '_response')
try:
self_method = object.__getattribute__(self, name)
except AttributeError, e:
self_method = None
try:
handler = object.__getattribute__(self, "_on_%s" % name)
except AttributeError, e:
handler = None
if name in response.__dict__ and not self_method:
if handler:
def func(*args, **kwargs):
retval = response.__dict__[name](*args, **kwargs)
handler(retval)
return retval
return func
else:
def func(*args, **kwargs):
return response.__dict__[name](*args, **kwargs)
return func
return self_method
def make_get_request_url(base_url, uri='/', param_dict=None):
"""
Constructs the get request URL.
(This might not be the right abstraction, let's see what happens with cookies)
"""
if param_dict is None:
param_dict = dict()
if not uri:
uri = '/'
if base_url.endswith('/') and uri.startswith('/'):
base_url = base_url[:-1]
elif (not base_url.endswith('/') and not uri.startswith('/')):
uri = "/%s" % uri
# note, may need to use a MultiDict in source implementation.
# or maybe let's use WebOb.Request.
param_str = urllib.urlencode(param_dict)
if param_str:
full_url = "%s%s?%s" % (base_url, uri, param_str)
else:
full_url = "%s%s" % (base_url, uri)
return full_url
def make_request_params(defaults, *args, **kwargs):
if not defaults:
defaults = dict()
defaults.update(kwargs)
return defaults
|
[
"vladimir@makarenko.io"
] |
vladimir@makarenko.io
|
221a8ffa70126e93462e6b05258ee3f72950aa1f
|
ce32ff8c0ad1ad9e42e6b59e201c70df754aa51e
|
/farmmonitor/manage.py
|
00e0f54506f9e616394b71d808fb99fe541ced96
|
[] |
no_license
|
wangxiaoying/farm-monitor
|
d7710559194c2771d09012b6dd204bae12669b6e
|
413ddaf1a23655f705da8b65978d06b704c81723
|
refs/heads/master
| 2021-01-10T21:33:35.643549
| 2015-06-16T09:07:14
| 2015-06-16T09:07:14
| 32,635,833
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 254
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "farmmonitor.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
[
"wangxiaoying0369@gmail.com"
] |
wangxiaoying0369@gmail.com
|
f264cc0288deced4fe0c271ca22be87310d2710d
|
d659fb0db310793b918640fdb673b9bd755578bc
|
/standard_lib/remove_elements_list.py
|
83be482832d01d31f152fe83cb07e5eb97fd4837
|
[
"MIT"
] |
permissive
|
astuk/python-snippets
|
562bdcdb23c537650a767fb0369388d9530a67ae
|
212f63f820b6f5842f74913ed08da18d41dfe7a4
|
refs/heads/master
| 2023-06-18T04:29:48.111537
| 2021-07-14T10:55:59
| 2021-07-14T10:55:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 147
|
py
|
data = [1, 2, 3, 4, 1, 2, 3, 4]
target = 1
print(f"Original: {data}")
data[:] = [elem for elem in data if elem != target]
print(f"New: {data}")
|
[
"f2dahlitz@freenet.de"
] |
f2dahlitz@freenet.de
|
9c515388fcd184ea1ae18872a428a79645351f33
|
5b6af599a2afb4db27b588cfc00831446ff8620f
|
/blog/urls.py
|
62c9faaba9b33b021a3ebd8bbd5d111d134fa5a9
|
[] |
no_license
|
dlatnrud/myblog
|
425acb68be9d3b672e2a84cb84e403cc31c61348
|
8e344e9eadaaf85fed530c25fefc87f916dbee0c
|
refs/heads/main
| 2023-08-26T17:12:52.097986
| 2021-10-22T03:55:18
| 2021-10-22T03:55:18
| 419,949,753
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 274
|
py
|
from django.urls import path
from . import views
app_name = 'blog'
urlpatterns = [
#127.0.0.1:8000/blog
path('', views.index, name='index'),
path('<int:post_id>/', views.detail, name='detail'),
path('post/create/', views.post_create, name='post_create'),
]
|
[
"dlatnrud2268@naver.com"
] |
dlatnrud2268@naver.com
|
2a6450417821ac75724ba063f20fd6289c9bb8b0
|
085ce75a507df6e755cabb7a65c4a2a8c98762ba
|
/dockerfiles/root/.pycharm_helpers/python_stubs/-252567642/_codecs.py
|
2232eace436d8b42e47cad46c20bbf725f5f9b60
|
[] |
no_license
|
Arhzi/habr-docker-article
|
d44302db1fe157d81fe0818e762e82218f50e31f
|
6fb094860b612e307beadaeb22981aa0ee64e964
|
refs/heads/master
| 2021-01-23T20:41:47.398025
| 2015-12-10T08:56:33
| 2015-12-10T08:56:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,216
|
py
|
# encoding: utf-8
# module _codecs
# from (built-in)
# by generator 1.137
# no doc
# no imports
# functions
def ascii_decode(*args, **kwargs): # real signature unknown
pass
def ascii_encode(*args, **kwargs): # real signature unknown
pass
def charbuffer_encode(*args, **kwargs): # real signature unknown
pass
def charmap_build(*args, **kwargs): # real signature unknown
pass
def charmap_decode(*args, **kwargs): # real signature unknown
pass
def charmap_encode(*args, **kwargs): # real signature unknown
pass
def decode(obj, encoding=None, errors=None): # real signature unknown; restored from __doc__
"""
decode(obj, [encoding[,errors]]) -> object
Decodes obj using the codec registered for encoding. encoding defaults
to the default encoding. errors may be given to set a different error
handling scheme. Default is 'strict' meaning that encoding errors raise
a ValueError. Other possible values are 'ignore' and 'replace'
as well as any other name registered with codecs.register_error that is
able to handle ValueErrors.
"""
return object()
def encode(obj, encoding=None, errors=None): # real signature unknown; restored from __doc__
"""
encode(obj, [encoding[,errors]]) -> object
Encodes obj using the codec registered for encoding. encoding defaults
to the default encoding. errors may be given to set a different error
handling scheme. Default is 'strict' meaning that encoding errors raise
a ValueError. Other possible values are 'ignore', 'replace' and
'xmlcharrefreplace' as well as any other name registered with
codecs.register_error that can handle ValueErrors.
"""
return object()
def escape_decode(*args, **kwargs): # real signature unknown
pass
def escape_encode(*args, **kwargs): # real signature unknown
pass
def latin_1_decode(*args, **kwargs): # real signature unknown
pass
def latin_1_encode(*args, **kwargs): # real signature unknown
pass
def lookup(encoding): # real signature unknown; restored from __doc__
"""
lookup(encoding) -> CodecInfo
Looks up a codec tuple in the Python codec registry and returns
a CodecInfo object.
"""
pass
def lookup_error(errors): # real signature unknown; restored from __doc__
"""
lookup_error(errors) -> handler
Return the error handler for the specified error handling name
or raise a LookupError, if no handler exists under this name.
"""
pass
def raw_unicode_escape_decode(*args, **kwargs): # real signature unknown
pass
def raw_unicode_escape_encode(*args, **kwargs): # real signature unknown
pass
def readbuffer_encode(*args, **kwargs): # real signature unknown
pass
def register(search_function): # real signature unknown; restored from __doc__
"""
register(search_function)
Register a codec search function. Search functions are expected to take
one argument, the encoding name in all lower case letters, and return
a tuple of functions (encoder, decoder, stream_reader, stream_writer)
(or a CodecInfo object).
"""
pass
def register_error(errors, handler): # real signature unknown; restored from __doc__
"""
register_error(errors, handler)
Register the specified error handler under the name
errors. handler must be a callable object, that
will be called with an exception instance containing
information about the location of the encoding/decoding
error and must return a (replacement, new position) tuple.
"""
pass
def unicode_escape_decode(*args, **kwargs): # real signature unknown
pass
def unicode_escape_encode(*args, **kwargs): # real signature unknown
pass
def unicode_internal_decode(*args, **kwargs): # real signature unknown
pass
def unicode_internal_encode(*args, **kwargs): # real signature unknown
pass
def utf_16_be_decode(*args, **kwargs): # real signature unknown
pass
def utf_16_be_encode(*args, **kwargs): # real signature unknown
pass
def utf_16_decode(*args, **kwargs): # real signature unknown
pass
def utf_16_encode(*args, **kwargs): # real signature unknown
pass
def utf_16_ex_decode(*args, **kwargs): # real signature unknown
pass
def utf_16_le_decode(*args, **kwargs): # real signature unknown
pass
def utf_16_le_encode(*args, **kwargs): # real signature unknown
pass
def utf_32_be_decode(*args, **kwargs): # real signature unknown
pass
def utf_32_be_encode(*args, **kwargs): # real signature unknown
pass
def utf_32_decode(*args, **kwargs): # real signature unknown
pass
def utf_32_encode(*args, **kwargs): # real signature unknown
pass
def utf_32_ex_decode(*args, **kwargs): # real signature unknown
pass
def utf_32_le_decode(*args, **kwargs): # real signature unknown
pass
def utf_32_le_encode(*args, **kwargs): # real signature unknown
pass
def utf_7_decode(*args, **kwargs): # real signature unknown
pass
def utf_7_encode(*args, **kwargs): # real signature unknown
pass
def utf_8_decode(*args, **kwargs): # real signature unknown
pass
def utf_8_encode(*args, **kwargs): # real signature unknown
pass
# no classes
|
[
"sirnikolasd@yandex.ru"
] |
sirnikolasd@yandex.ru
|
0ec3ee8ba21f62bdf895ad95895a6cecee3ea293
|
f07a42f652f46106dee4749277d41c302e2b7406
|
/Data Set/bug-fixing-4/7edd451a4e81080562d8e2daa1e962bf7fbe4a9b-<apply>-fix.py
|
0d2625c133798c97803281d73411bc9ac3823fff
|
[] |
no_license
|
wsgan001/PyFPattern
|
e0fe06341cc5d51b3ad0fe29b84098d140ed54d1
|
cc347e32745f99c0cd95e79a18ddacc4574d7faa
|
refs/heads/main
| 2023-08-25T23:48:26.112133
| 2021-10-23T14:11:22
| 2021-10-23T14:11:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 493
|
py
|
@staticmethod
def apply(module, name, n_power_iterations, eps):
fn = SpectralNorm(name, n_power_iterations, eps)
weight = module._parameters[name]
height = weight.size(0)
u = normalize(weight.new_empty(height).normal_(0, 1), dim=0, eps=fn.eps)
delattr(module, fn.name)
module.register_parameter((fn.name + '_org'), weight)
module.register_buffer(fn.name, weight)
module.register_buffer((fn.name + '_u'), u)
module.register_forward_pre_hook(fn)
return fn
|
[
"dg1732004@smail.nju.edu.cn"
] |
dg1732004@smail.nju.edu.cn
|
061faf3a7b71dc0672633c4edca636e4eae445de
|
ad9782856ec2f860fccbefa5e75a896691b8e1cc
|
/MonteCarlo/test/opt6s3l/MinBias_14TeV_pythia8_TuneCUETP8M1_cfi_GEN_SIM_OT613_200_IT4025_opt6s3l.py
|
cade5d39c8f174af6bc59396e837fa2023545939
|
[] |
no_license
|
OSU-CMS/VFPix
|
7fe092fc5a973b4f9edc29dbfdf44907664683e5
|
4c9fd903219742a4eba1321dc4181da125616e4c
|
refs/heads/master
| 2020-04-09T05:52:05.644653
| 2019-01-09T13:44:22
| 2019-01-09T13:44:22
| 30,070,948
| 0
| 0
| null | 2018-11-30T13:15:54
| 2015-01-30T12:26:20
|
Python
|
UTF-8
|
Python
| false
| false
| 5,781
|
py
|
# Auto generated configuration file
# using:
# Revision: 1.19
# Source: /local/reps/CMSSW/CMSSW/Configuration/Applications/python/ConfigBuilder.py,v
# with command line options: MinBias_14TeV_pythia8_TuneCUETP8M1_cfi --conditions auto:phase2_realistic -n 10 --era Phase2C2 --eventcontent FEVTDEBUG --relval 90000,100 -s GEN,SIM --datatier GEN-SIM --beamspot HLLHC14TeV --geometry Extended2023D17 --fileout file:step1.root
import FWCore.ParameterSet.Config as cms
from Configuration.StandardSequences.Eras import eras
process = cms.Process('SIM',eras.Phase2C2)
# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('SimGeneral.MixingModule.mixNoPU_cfi')
process.load('Configuration.Geometry.GeometryExtended2023D17Reco_cff')
process.load('Configuration.Geometry.GeometryExtended2023D17_cff')
process.load('Configuration.StandardSequences.MagneticField_cff')
process.load('Configuration.StandardSequences.Generator_cff')
process.load('IOMC.EventVertexGenerators.VtxSmearedHLLHC14TeV_cfi')
process.load('GeneratorInterface.Core.genFilterSummary_cff')
process.load('Configuration.StandardSequences.SimIdeal_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(10)
)
# Input source
process.source = cms.Source("EmptySource")
process.options = cms.untracked.PSet(
)
# Production Info
process.configurationMetadata = cms.untracked.PSet(
annotation = cms.untracked.string('MinBias_14TeV_pythia8_TuneCUETP8M1_cfi nevts:10'),
name = cms.untracked.string('Applications'),
version = cms.untracked.string('$Revision: 1.19 $')
)
# Output definition
process.FEVTDEBUGoutput = cms.OutputModule("PoolOutputModule",
SelectEvents = cms.untracked.PSet(
SelectEvents = cms.vstring('generation_step')
),
dataset = cms.untracked.PSet(
dataTier = cms.untracked.string('GEN-SIM'),
filterName = cms.untracked.string('')
),
eventAutoFlushCompressedSize = cms.untracked.int32(5242880),
fileName = cms.untracked.string('file:step1.root'),
outputCommands = process.FEVTDEBUGEventContent.outputCommands,
splitLevel = cms.untracked.int32(0)
)
# Additional output definition
# Other statements
process.genstepfilter.triggerConditions=cms.vstring("generation_step")
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, 'auto:phase2_realistic', '')
process.generator = cms.EDFilter("Pythia8GeneratorFilter",
PythiaParameters = cms.PSet(
parameterSets = cms.vstring('pythia8CommonSettings',
'pythia8CUEP8M1Settings',
'processParameters'),
processParameters = cms.vstring('SoftQCD:nonDiffractive = on',
'SoftQCD:singleDiffractive = on',
'SoftQCD:doubleDiffractive = on'),
pythia8CUEP8M1Settings = cms.vstring('Tune:pp 14',
'Tune:ee 7',
'MultipartonInteractions:pT0Ref=2.4024',
'MultipartonInteractions:ecmPow=0.25208',
'MultipartonInteractions:expPow=1.6'),
pythia8CommonSettings = cms.vstring('Tune:preferLHAPDF = 2',
'Main:timesAllowErrors = 10000',
'Check:epTolErr = 0.01',
'Beams:setProductionScalesFromLHEF = off',
'SLHA:keepSM = on',
'SLHA:minMassSM = 1000.',
'ParticleDecays:limitTau0 = on',
'ParticleDecays:tau0Max = 10',
'ParticleDecays:allowPhotonRadiation = on')
),
comEnergy = cms.double(14000.0),
crossSection = cms.untracked.double(71390000000.0),
filterEfficiency = cms.untracked.double(1.0),
maxEventsToPrint = cms.untracked.int32(0),
pythiaHepMCVerbosity = cms.untracked.bool(False),
pythiaPylistVerbosity = cms.untracked.int32(1)
)
process.ProductionFilterSequence = cms.Sequence(process.generator)
# Path and EndPath definitions
process.generation_step = cms.Path(process.pgen)
process.simulation_step = cms.Path(process.psim)
process.genfiltersummary_step = cms.EndPath(process.genFilterSummary)
process.endjob_step = cms.EndPath(process.endOfProcess)
process.FEVTDEBUGoutput_step = cms.EndPath(process.FEVTDEBUGoutput)
# Schedule definition
process.schedule = cms.Schedule(process.generation_step,process.genfiltersummary_step,process.simulation_step,process.endjob_step,process.FEVTDEBUGoutput_step)
from PhysicsTools.PatAlgos.tools.helpers import associatePatAlgosToolsTask
associatePatAlgosToolsTask(process)
# filter all path with the production filter sequence
for path in process.paths:
getattr(process,path)._seq = process.ProductionFilterSequence * getattr(process,path)._seq
# Customisation from command line
# Add early deletion of temporary data products to reduce peak memory need
from Configuration.StandardSequences.earlyDeleteSettings_cff import customiseEarlyDelete
process = customiseEarlyDelete(process)
# End adding early deletion
inputDir = "VFPix/MonteCarlo/data/OT613_200_IT4025_opt6s3l/"
fileNames =["pixbar.xml","pixel.xml","pixelProdCuts.xml","pixelStructureTopology.xml","pixelsens.xml","pixfwd.xml","tracker.xml","trackerProdCuts.xml","trackerRecoMaterial.xml","trackerStructureTopology.xml","trackersens.xml"]
for i in range (0, len (process.XMLIdealGeometryESSource.geomXMLFiles)):
xmlFile = process.XMLIdealGeometryESSource.geomXMLFiles[i]
fileName = xmlFile.split("/")[-1]
if fileName in fileNames:
process.XMLIdealGeometryESSource.geomXMLFiles[i] = inputDir + fileName
|
[
"juliette.alimena@cern.ch"
] |
juliette.alimena@cern.ch
|
918d35988abeede9ff21191733d53fc0101d01a2
|
cc734ab47096dfd38b8cb554ced88c0689c450e8
|
/geraGrafico.py
|
d6fe5e35797531bdbc4ae8026746d5742448f779
|
[] |
no_license
|
Claudiocfls/ELE32-lab3
|
efaa46bd73ead3e6030d3e231ce577b53a36027b
|
366b34d5d66d0555451206697ce6116f363c60cf
|
refs/heads/master
| 2020-04-01T18:50:44.395937
| 2018-11-28T15:23:37
| 2018-11-28T15:23:37
| 153,518,610
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 678
|
py
|
import DadosGrafico
import Grafico
grafico = Grafico.Grafico()
dados1 = DadosGrafico.DadosGrafico()
dados1.dadosDeArquivo("hamming.txt")
grafico.adicionaDados(dados1, "ro" )
dados1 = DadosGrafico.DadosGrafico()
dados1.dadosDeArquivo("3, 13, 15, 17.txt")
grafico.adicionaDados(dados1, "ro" )
dados1 = DadosGrafico.DadosGrafico()
dados1.dadosDeArquivo("4, 25, 33, 37.txt")
grafico.adicionaDados(dados1, "ro" )
dados1 = DadosGrafico.DadosGrafico()
dados1.dadosDeArquivo("6, 117, 127, 155.txt")
grafico.adicionaDados(dados1, "ro" )
dados1 = DadosGrafico.DadosGrafico()
dados1.dadosDeArquivo("semcodificacao.txt")
grafico.adicionaDados(dados1, "ro" )
grafico.mostraGrafico()
|
[
"claudiosilva.cfls@gmail.com"
] |
claudiosilva.cfls@gmail.com
|
2146a1abe1e044c23653b6572a99383187acf3c3
|
1de7512183974adfc5dbdd30b3bf7e042ea194d9
|
/poetries_server_beta.py
|
b34d1625a90aa1b8b66c3050158c73b812ec84c9
|
[] |
no_license
|
articuly/Self-study
|
d06b0bdcead38282701e4d5118cefd8b83e80241
|
878ed68fc31dc1f5c2f13bcb5d98539264985c17
|
refs/heads/master
| 2023-05-13T18:34:33.904864
| 2023-05-09T11:03:29
| 2023-05-09T11:03:29
| 223,714,760
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,762
|
py
|
# coding:utf-8
import socket, time
from poetries import POUETRIES # 此处导入诗词库(poetries.py)
def poetries_server():
"""古诗词服务器"""
delay = 0.1 # 诗词显示速度(字间隔时间)
subjects = [item.split()[0] for item in POUETRIES] # 诗词目录
welcome = '欢迎来到风花雪月古诗词库, 请输入序号后回车以选择你喜欢的诗词\r\n'
welcome += '输入fast加速,输入slow减速,输入bye退出\r\n\r\n' # 输入quit或exit,退出并同时关闭诗词服务
for index, subject in enumerate(subjects):
welcome += '%d %s\r\n' % (index + 1, subject)
welcome += '\r\n'
welcome = welcome.encode('gbk')
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(('127.0.0.1', 56789))
sock.listen(2)
runing = True
while runing:
c_sock, c_addr = sock.accept()
c_sock.sendall(welcome)
while True:
cmd = b''
while not cmd.endswith(b'\r\n'):
cmd += c_sock.recv(1024)
cmd = cmd.strip()
if cmd in [b'bye', b'quit', b'exit']:
c_sock.sendall('再见\r\n'.encode('gbk'))
c_sock.close()
runing = cmd == b'bye'
break
elif cmd == b'help':
c_sock.sendall(welcome)
elif cmd == b'fast':
delay /= 2
c_sock.sendall('加速设置已完成\r\n'.encode('gbk'))
c_sock.sendall('请选择诗词序号,输入help显示诗词目录:\r\n\r\n'.encode('gbk'))
elif cmd == b'slow':
delay *= 2
c_sock.sendall('减速设置已完成\r\n'.encode('gbk'))
c_sock.sendall('请选择诗词序号,输入help显示诗词目录:\r\n\r\n'.encode('gbk'))
else:
try:
index = int(cmd) - 1
assert -1 < index < len(POUETRIES)
except:
c_sock.sendall('请输入有效的诗词序号,输入help显示诗词目录:\r\n\r\n'.encode('gbk'))
continue
c_sock.sendall(b'--------------------------\r\n')
for line in POUETRIES[index].split('\n'):
for word in line:
c_sock.sendall(word.encode('gbk'))
time.sleep(delay)
c_sock.sendall(b'\r\n')
c_sock.sendall(b'--------------------------\r\n')
c_sock.sendall('请选择诗词序号,输入help显示诗词目录:\r\n\r\n'.encode('gbk'))
if __name__ == '__main__':
poetries_server()
|
[
"articuly@gmail.com"
] |
articuly@gmail.com
|
73bee41645a2a5d29d2307248dc9dd4042c9cb15
|
9c315e3762961668a1fe58ad811ae87c5fbf7539
|
/apertium-tools/scrapers-misc/bibleScraper-ibt.py
|
5e2698746ef1e1f835a59b51ad687209b7d5c5ee
|
[] |
no_license
|
frankier/apertium
|
f2b893115c413203b1194e5c0d4feb0adf2b1b3e
|
d3f5515bf2455f3046314a62ea564457bcf504b8
|
refs/heads/gnulib
| 2021-01-20T21:00:53.139135
| 2016-05-27T17:30:01
| 2016-05-27T17:30:01
| 59,847,975
| 0
| 1
| null | 2016-07-07T12:39:01
| 2016-05-27T16:21:14
|
HTML
|
UTF-8
|
Python
| false
| false
| 6,867
|
py
|
#!/usr/bin/env python3
# WARNING
# ONLY USE THIS SCRIPT WITH PERMESSION FROM ibt.org.ru ADMINISTRATORS
# UNAUTHORIZED ACCESS OF ibt.org.ru IS ILLEAGL IN MOST COUNTRIES!!!
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
from bs4 import BeautifulSoup
import urllib.request, re, time, argparse, sys, os
import romanclass as roman
if sys.version_info < (3, 3, 0): fileError = IOError
else: fileError = FileNotFoundError
parser = argparse.ArgumentParser(description = 'Scrape ibt.org')
parser.add_argument('-l', action = 'store', nargs = '*', help = 'Scrape the bibles with these codes')
parser.add_argument('-x', action = 'store', nargs = '*', help = 'Skip scraping certain book(s); OT to get just New Testament')
parser.add_argument('-a', action = 'store_const', const = 2, help = 'List all the valid language codes')
parser.add_argument('-s', action = 'store_const', const = 2, help = 'Parse titles within each chapter')
parser.add_argument('-q', action = 'store_false', help = 'Suppress progress messages')
parser.add_argument('-u', action = 'store_true', help = 'Add to file, don\'t overwrite')
args = parser.parse_args()
urls = args.l
if args.x:
toSkip = args.x
OT = ['Genesis', 'Exodus', 'Leviticus', 'Numbers', 'Deuteronomy', 'Joshua', 'Judges', 'Ruth', '1 Samuel', '2 Samuel', '1 Kings', '2 Kings', '1 Chronicles', '2 Chronicles', 'Ezra', 'Nehemiah', 'Esther', 'Job', 'Psalms', 'Proverbs', 'Ecclesiastes', 'Song of Songs', 'Isaiah', 'Jeremiah', 'Lamentations', 'Ezekiel', 'Daniel', 'Hosea', 'Joel', 'Amos', 'Obadiah', 'Jonah', 'Micah', 'Nahum', 'Habakkuk', 'Zechariah', 'Zephaniah', 'Haggai', 'Malachi']
if "OT" in args.x:
toSkip = OT
else:
toSkip = []
def firstPage(url):
results = re.search('m=(.*)', url)
filename = results.group(1) + ".out"
prefix = url.split('l=')[0]
text = urllib.request.urlopen(url)
soup = BeautifulSoup(text)
selbook = soup.find('select', {'id':'selbook'})
books = [(option['value'], option.text) for option in selbook.find_all('option')]
if args.u:
mode = 'a'
else:
mode = 'w'
with open(filename, mode, encoding = 'utf-8') as outfile:
if not os.path.isdir('.cache'): os.mkdir('.cache')
for urlB, fullB in books:
print(fullB, end='')
if fullB in toSkip:
print(" [skipping]")
else:
sys.stdout.flush()
firstUrl = prefix + '&l=' + urlB
#print(firstUrl)
soup = BeautifulSoup(urllib.request.urlopen(firstUrl).read())
selchap = soup.find('select', {'id':'selchap'})
chap = [(option['value'], option.text) for option in selchap.find_all('option')]
print(": ", end='')
for urlC, fullC in chap:
outfile.write(fullB + ' ' + str(roman.Roman(urlC)) + '\n')
print(fullC, end='')
sys.stdout.flush()
u = 'http://ibt.org.ru/en/text.htm?m=' + results.group(1) + '&l=' + urlB + '.' + str(urlC) + '&g=0'
s = allPages(u, results.group(1))
print(".", end='')
sys.stdout.flush()
outfile.write(s + '\n')
print(" ", end='')
sys.stdout.flush()
print()
def allPages(url, bible):
    """Return the numbered verse text of one chapter, caching raw HTML on disk.

    url   -- full ibt.org.ru URL for one chapter of one book
    bible -- translation code (e.g. 'KJV'); verses carry CSS class 'cs-<code>'
    """
    urlparts = url.split('?')
    filepath = os.path.join(os.path.curdir, '.cache', urlparts[1] + '.html')
    try:
        with open(filepath, encoding='utf-8') as infile:
            text = infile.read()
    except OSError:
        # BUG FIX: the original caught the undefined name `fileError`, which
        # raised NameError on every cache miss.  OSError covers the missing
        # cache file (FileNotFoundError) and other read failures.
        text = urllib.request.urlopen(url).read().decode('utf-8')
        with open(filepath, 'w', encoding='utf-8') as outfile:
            outfile.write(text)
        time.sleep(0.5)  # be polite to the server between downloads
    soup = BeautifulSoup(text)
    flowcolumn = soup.find('div', {'id': 'flowcolumn'})
    s = ''
    i = 1
    for verse in flowcolumn.find_all('span', {'class': 'cs-' + bible}):
        if verse.sup is not None:
            verse.sup.clear()  # drop footnote markers from the verse text
        if verse.previous_sibling is not None:
            try:
                # Section headings appear as preceding <div>s; keep them only
                # when style level 2 (-s 2) was requested.
                if verse.previous_sibling.name == 'div' and args.s == 2:
                    s += verse.previous_sibling.text.strip() + '\n'
            except AttributeError:
                # Previous sibling was a plain navigable string; skip it.
                pass
        s += str(i) + '. ' + verse.text.strip() + '\n'
        i += 1
    return s
# Translation code -> human-readable language name, as used on ibt.org.ru.
# Several codes map to the same language (alternate site spellings/editions).
CODES = { 'ADG' : 'Adygei',
          'AGL' : 'Agul',
          'AVR' : 'Avar',
          'CHV' : 'Chuvash',
          'CRT' : 'Crimean Tatar',
          'KHK' : 'Khakas',
          'XKS' : 'Khakas',
          'KJV' : 'English',
          'WEB' : 'English',
          'KUMYK' : 'Kumyk',
          'KYLSC' : 'Kyrgyz',
          'KYROHC': 'Kyrgyz',
          'KYLSA' : 'Kyrgyz Arabic',
          'KYROHA': 'Kyrgyz Arabic',
          'OSS' : 'Ossetic',
          'TTR' : 'Tatar',
          'TKL' : 'Turkmen',
          'TKLI' : 'Turkmen',
          'TKCI' : 'Turkmen Cyrillic',
          'TYV' : 'Tuvan',
          'TVN' : 'Tuvan',
          'RSP' : 'Russian',
          'UZVL' : 'Uzbek',
          'UZIBTL': 'Uzbek',
          'UZV' : 'Uzbek Cyrillic',
          'UZIBT' : 'Uzbek Cyrillic',
          'LXX' : 'Greek',
          'TR' : 'Greek',
          'OSMHB' : 'Hebrew',
          'KRK' : 'Qaraqalpaq Latin',
          'KRKL' : 'Qaraqalpaq Cyrillic',
          'SHR' : 'Shor',
          'BUR' : 'Buryat',
          }
if __name__ == '__main__':
    if args.a == 2:
        # -a 2: just list the known translation codes and exit.
        for x in sorted(CODES):
            print(x, '\t', CODES[x])
    elif urls != None:
        for url in urls:
            url = url.upper()
            if url not in CODES:
                print(url, 'is not a valid code. It will be skipped.')
            else:
                print('Will begin scraping', url)
                firstPage('http://ibt.org.ru/en/text.htm?m=' + url)
    else:
        # NOTE(review): parse_args(['-h']) prints help and exits the process,
        # so the message below is unreachable -- confirm intended order.
        parser.parse_args(['-h'])
        print('No argument selected.')
|
[
"unhammer@72bbbca6-d526-0410-a7d9-f06f51895060"
] |
unhammer@72bbbca6-d526-0410-a7d9-f06f51895060
|
28eb4f066f9dbe9f85d53858545bd15c3df79d6b
|
6f1d57238f3b395b04696a16768bcc507f00630c
|
/A_GCD_Sum.py
|
3a137dfc4dc2318e724874ceedd601f7481d3f84
|
[] |
no_license
|
FazleRabbbiferdaus172/Codeforces_Atcoder_Lightoj_Spoj
|
024a4a2a627de02e4698709d6ab86179b8301287
|
6465e693337777e7bd78ef473b4d270ce757a3a2
|
refs/heads/master
| 2023-07-01T06:32:14.775294
| 2021-07-27T17:07:37
| 2021-07-27T17:07:37
| 271,202,781
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 226
|
py
|
import math
def digi_sum(n):
    """Return the sum of the decimal-digit values of n (ASCII code - 48)."""
    return sum(ord(ch) - 48 for ch in str(n))
# For each test case, advance n upward until gcd(n, digit_sum(n)) > 1,
# then print the first such n.
for _ in range(int(input())):
    n = int(input())
    while math.gcd(n, digi_sum(n)) == 1:
        n += 1
    print(n)
|
[
"fazle.ferdaus1416@gmail.com"
] |
fazle.ferdaus1416@gmail.com
|
310fa56760bac3e6cdd5a4d0331475d6fa83a2ef
|
ef54d37f8a3303013ca7469871a320d303957ed7
|
/robo4.2/fusion/tests/RIST/API/Deprecated/F172/Regression_Data.py
|
d6a5825dd029264612a58aa0b8b5ac7f4264621a
|
[] |
no_license
|
richa92/Jenkin_Regression_Testing
|
d18badfcf16bda682dfe7bcbbd66f54a9a27a58d
|
24a74926170cbdfafa47e972644e2fe5b627d8ff
|
refs/heads/master
| 2020-07-12T10:01:59.099137
| 2019-08-27T12:14:53
| 2019-08-27T12:14:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 209
|
py
|
# Fixture data for the F172 regression run against the test enclosure.
admin_credentials = {'userName': 'Administrator', 'password': 'wpsthpvse1'}
enclosure_name = "CN754406XL"
# Drive enclosures are addressed as "<enclosure serial>, bay <n>".
drive_enclosure_name = "CN754406XL, bay 1"
expected_number_of_DE = 1
expected_number_of_drives = 8
|
[
"akul@SAC0MKUVCQ.asiapacific.hpqcorp.net"
] |
akul@SAC0MKUVCQ.asiapacific.hpqcorp.net
|
977cd26e8a9ce77960696f23c7267eaec7fa0dd3
|
584db1be8b6bdedaa56d186692ad72da5ee07164
|
/patron/tests/functional/v3/test_instance_usage_audit_log.py
|
6a882a19ad76b2c75966c710359475c274d32493
|
[
"Apache-2.0"
] |
permissive
|
casbin/openstack-patron
|
66006f57725cf1c3d735cd5529d3459fd77384c8
|
b41b1262f3a52c8cc9f6b6bdf87be5a1abcf6d25
|
refs/heads/master
| 2023-05-31T05:23:37.721768
| 2015-12-31T12:18:17
| 2015-12-31T12:18:17
| 382,054,546
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,250
|
py
|
# Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import urllib
from oslo_config import cfg
from patron.tests.functional.v3 import api_sample_base
CONF = cfg.CONF
CONF.import_opt('osapi_compute_extension',
'patron.api.openstack.compute.extensions')
class InstanceUsageAuditLogJsonTest(api_sample_base.ApiSampleTestBaseV3):
    """API sample tests for the os-instance-usage-audit-log extension."""
    ADMIN_API = True
    extension_name = "os-instance-usage-audit-log"
    # TODO(gmann): Overriding '_api_version' till all functional tests
    # are merged between v2 and v2.1. After that base class variable
    # itself can be changed to 'v2'
    _api_version = 'v2'

    def _get_flags(self):
        # Enable the legacy v2 contrib extension so the endpoints exist.
        f = super(InstanceUsageAuditLogJsonTest, self)._get_flags()
        f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
        f['osapi_compute_extension'].append('patron.api.openstack.compute.'
                      'contrib.instance_usage_audit_log.'
                      'Instance_usage_audit_log')
        return f

    def test_show_instance_usage_audit_log(self):
        # NOTE(review): urllib.quote is the Python 2 API (urllib.parse.quote
        # in Python 3) -- this suite targets Python 2.
        response = self._do_get('os-instance_usage_audit_log/%s' %
                                urllib.quote('2012-07-05 10:00:00'))
        subs = self._get_regexes()
        subs['hostid'] = '[a-f0-9]+'
        self._verify_response('inst-usage-audit-log-show-get-resp',
                              subs, response, 200)

    def test_index_instance_usage_audit_log(self):
        response = self._do_get('os-instance_usage_audit_log')
        subs = self._get_regexes()
        subs['hostid'] = '[a-f0-9]+'
        self._verify_response('inst-usage-audit-log-index-get-resp',
                              subs, response, 200)
|
[
"hsluoyz@qq.com"
] |
hsluoyz@qq.com
|
306694ae20d0761a8ceb55a2d83fc0495e190317
|
ff182eeaf59b16f79b7d306eef72ddaadf0f4e71
|
/Vaffle_interface/testcase/MessageModule/Message_test21_question_answerquestionlist.py
|
cca8da534dbe3a9cae3189adbf5e7b7bdc28b965
|
[] |
no_license
|
heyu1229/vaffle
|
04d6f8b0d3bd0882ff1cdea54d18d5fdde7933b9
|
2c1c040f78094cf3cfc68f08627a958c4aa5e1d5
|
refs/heads/master
| 2023-06-05T09:55:21.894344
| 2021-03-12T07:26:45
| 2021-03-12T07:26:45
| 381,248,658
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 768
|
py
|
# -*- coding:UTF-8 -*-
import unittest,time,json
from Vaffle_interface.public_1.func_requests import FuncRequests
#---------------QA答题列表---------------------
class Message(unittest.TestCase):
    """Interface tests for the Q&A answer-question-list endpoint."""

    def setUp(self):
        # Shared helper that wraps request building, auth and dispatch.
        self.r = FuncRequests()

    #----------------- Q&A answer-question list ---------------------------------
    def testcase_001(self):
        # sheet_index/row locate this endpoint's definition in the test
        # data workbook used by FuncRequests.
        sheet_index = 5
        row = 22
        print("testcase_001 QA答题列表:")
        member_id = 'b9f73f23-7bc6-4de6-9f9b-df2c98076221'
        payload = {'type':'recommend','page':1}
        result=self.r.interface_requests_payload(member_id,sheet_index,row,payload)
        # 10000 is the API's success code.
        self.assertEqual(10000, result["code"])
        print("code返回值:10000")
if __name__=="__main__":
unittest.main()
|
[
"921467314@qq.com"
] |
921467314@qq.com
|
dd53c3727ad67dbcc8f4cc1ff5e0b523226dd686
|
89dedd7f3c7acc81d12e2bcb2e716f9af9e5fa04
|
/build/android/developer_recommended_flags.gypi
|
79c201deccb3b99bf444200fbc6d40cbfa1cec79
|
[
"BSD-3-Clause"
] |
permissive
|
bino7/chromium
|
8d26f84a1b6e38a73d1b97fea6057c634eff68cb
|
4666a6bb6fdcb1114afecf77bdaa239d9787b752
|
refs/heads/master
| 2022-12-22T14:31:53.913081
| 2016-09-06T10:05:11
| 2016-09-06T10:05:11
| 67,410,510
| 1
| 3
|
BSD-3-Clause
| 2022-12-17T03:08:52
| 2016-09-05T10:11:59
| null |
UTF-8
|
Python
| false
| false
| 2,722
|
gypi
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This is the set of recommended gyp variable settings for Chrome for Android development.
#
# These can be used by copying this file to $CHROME_SRC/chrome/supplement.gypi.
#
# Even better, create chrome/supplement.gypi containing the following:
# {
# 'includes': [ '../build/android/developer_recommended_flags.gypi' ]
# }
# and you'll get new settings automatically.
# When using this method, you can override individual settings by setting them unconditionally (with
# no %) in chrome/supplement.gypi.
# I.e. to disable gyp_managed_install but use everything else:
# {
# 'variables': {
# 'gyp_managed_install': 0,
# },
# 'includes': [ '../build/android/developer_recommended_flags.gypi' ]
# }
{
'variables': {
'variables': {
# Set component to 'shared_library' to enable the component build. This builds native code as
# many small shared libraries instead of one monolithic library. This slightly reduces the time
# required for incremental builds.
'component%': 'shared_library',
},
'component%': '<(component)',
# When gyp_managed_install is set to 1, building an APK will install that APK on the connected
# device(/emulator). To install on multiple devices (or onto a new device), build the APK once
# with each device attached. This greatly reduces the time required for incremental builds.
#
# This comes with some caveats:
# Only works with a single device connected (it will print a warning if
# zero or multiple devices are attached).
# Device must be flashed with a user-debug unsigned Android build.
# Some actions are always run (i.e. ninja will never say "no work to do").
'gyp_managed_install%': 1,
# With gyp_managed_install, we do not necessarily need a standalone APK.
# When create_standalone_apk is set to 1, we will build a standalone APK
# anyway. For even faster builds, you can set create_standalone_apk to 0.
'create_standalone_apk%': 1,
# Set clang to 1 to use the clang compiler. Clang has much (much, much) better warning/error
# messages than gcc.
# TODO(cjhopman): Enable this when http://crbug.com/156420 is addressed. Until then, users can
# set clang to 1, but Android stack traces will sometimes be incomplete.
#'clang%': 1,
# Set fastbuild to 1 to build with less debugging information. This can greatly decrease linking
# time. The downside is that stack traces will be missing useful information (like line
# numbers).
#'fastbuild%': 1,
},
}
|
[
"bino.zh@gmail.com"
] |
bino.zh@gmail.com
|
5f9b9da363ca1b7046a85b9ca50ffd5a17546d0a
|
6a1e30c699125d05caf4b3443b83c47d1a733851
|
/EMS/migrations/0002_auto_20200623_1446.py
|
2cd6aa0e0db237346a0fe2c980b78533bd1e4711
|
[] |
no_license
|
Subhrans/Keekers
|
373b259c04bcd277a91e6e7679da5da2b799e184
|
b971b1a1f5c2436b3068b5003d220ca28fdced0f
|
refs/heads/main
| 2023-04-16T17:26:43.207550
| 2021-04-07T18:10:49
| 2021-04-07T18:10:49
| 355,639,412
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 414
|
py
|
# Generated by Django 3.0.1 on 2020-06-23 09:16
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: change the default/length of Employee.e_experience.

    dependencies = [
        ('EMS', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='employee',
            name='e_experience',
            # Experience is stored as a preformatted "Year:.. Months:.. Days:.." string.
            field=models.CharField(default='Year:0 Months:0 Days:0 ', max_length=22),
        ),
    ]
|
[
"subhransud525@gmail.com"
] |
subhransud525@gmail.com
|
b90768d806834c51f4937d283dcb46b436f8db8a
|
4e7db10524c938c8c6e687521def2889e20ec646
|
/P16/1-16.py
|
a1030b875a6397384fd635bad46dc8338e9e1d13
|
[] |
no_license
|
mpigrobot/python
|
e5cf60ca438e0d5e63a1e87a266a9e255bc07271
|
bf9262657a7401f37de38318db768e630fab97a9
|
refs/heads/master
| 2020-03-15T15:37:03.228080
| 2018-03-31T07:33:07
| 2018-03-31T07:33:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 361
|
py
|
#! /usr/bin/env python
#coding:utf-8
def del_space(string):
    """Collapse runs of space characters to single spaces.

    Only the space character is treated as a separator; leading and
    trailing spaces are removed as a side effect of the re-join.
    """
    tokens = [tok for tok in string.split(" ") if tok != ""]
    return " ".join(tokens)
if __name__=="__main__":
    # Demo (Python 2 print statements): show the string before and after cleanup.
    one_str = "Hello, I am Qiwsir."
    string = del_space(one_str)
    print one_str
    print string
|
[
"noreply@github.com"
] |
mpigrobot.noreply@github.com
|
91f4d3ccee46519af255d5bac95200dd39e71f50
|
dc86022781700c04d143736d401138881d98fe14
|
/student_profile/urls.py
|
ccd89f5b0862df465e50872ee2d29c60807607f2
|
[] |
no_license
|
SymaxxDigital/college-journal
|
d4b25901788221793e8c3ce25f7fefd6e150df4b
|
c188511ad38fc2fe1b3d5cf907b2ce7be46aad5e
|
refs/heads/master
| 2023-06-17T00:00:16.057099
| 2021-07-12T16:45:21
| 2021-07-12T16:45:21
| 372,991,491
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,781
|
py
|
from django.urls import path
from .views import (
ProfileView,
ProfileCreateView,
ProfileUpdateView,
DemographicListView,
DemographicCreateView,
DemographicUpdateView,
FamilyListView,
FamilyCreateView,
FamilyUpdateView,
EducationListView,
EducationCreateView,
EducationUpdateView,
PersonalessayListView,
PersonalEssayCreateView,
PersonalEssayUpdateView,
ProfileLinkView,
)
app_name = 'student_profile'

# Profile / demographics / family / education / essay CRUD routes.
# NOTE(review): trailing-slash usage is inconsistent ("demographics/" vs
# "demographic/add") -- confirm this matches the project convention.
urlpatterns = [
    path("", ProfileView.as_view(), name="profile"),
    path("add/", ProfileCreateView.as_view(), name="profile_create"),
    path("edit/<uuid:pk>/", ProfileUpdateView.as_view(), name="profile_update"),
    path("demographics/", DemographicListView.as_view(), name="demographics"),
    path("demographic/add", DemographicCreateView.as_view(), name="demographic_create"),
    path("demographic/edit/<uuid:pk>", DemographicUpdateView.as_view(), name="demographic_update"),
    path("family", FamilyListView.as_view(), name="family"),
    path("family/add", FamilyCreateView.as_view(), name="family_create"),
    path("family/edit/<uuid:pk>", FamilyUpdateView.as_view(), name="family_update"),
    path("education", EducationListView.as_view(), name="education"),
    path("education/add", EducationCreateView.as_view(), name="education_create"),
    path("education/edit/<uuid:pk>", EducationUpdateView.as_view(), name="education_update"),
    path("essays/", PersonalessayListView.as_view(), name="personalessays"),
    path("personal-essay/add", PersonalEssayCreateView.as_view(), name="personal_essay_create"),
    path("personal-essay/edit/<uuid:pk>", PersonalEssayUpdateView.as_view(), name="personal_essay_update"),
    path("links/", ProfileLinkView.as_view(), name="profile_links")
]
|
[
"bukhosizimcode@gmail.com"
] |
bukhosizimcode@gmail.com
|
36674f77a3891e30e9a9320ef731c81cc8fc7b77
|
ec7591c3f478c43e76257aaa500d8f6a2e763d74
|
/stanza/utils/datasets/sentiment/process_airline.py
|
edacb874ed99653b6fc0f5adde34b14c3f9fb636
|
[
"Apache-2.0"
] |
permissive
|
stanfordnlp/stanza
|
5cc3dbe70a96dd565639b7dae1efde6b4fa76985
|
c530c9af647d521262b56b717bcc38b0cfc5f1b8
|
refs/heads/main
| 2023-09-01T12:01:38.980322
| 2023-03-14T16:10:05
| 2023-03-14T16:10:05
| 104,854,615
| 4,281
| 599
|
NOASSERTION
| 2023-09-10T00:31:36
| 2017-09-26T08:00:56
|
Python
|
UTF-8
|
Python
| false
| false
| 2,631
|
py
|
"""
Airline tweets from Kaggle
from https://www.kaggle.com/crowdflower/twitter-airline-sentiment/data#
Some ratings seem questionable, but it doesn't hurt performance much, if at all
Files in the airline repo are csv, with quotes in "..." if they contained commas themselves.
Accordingly, we use the csv module to read the files and output them in the format
<class> <sentence>
Run using
python3 convert_airline.py Tweets.csv train.json
If the first word is an @, it is removed, and after that, leading @ or # are removed.
For example:
@AngledLuffa you must hate having Mox Opal #banned
->
you must hate having Mox Opal banned
"""
import csv
import os
import sys
from stanza.models.classifiers.data import SentimentDatum
import stanza.utils.datasets.sentiment.process_utils as process_utils
def get_phrases(in_directory):
    """Read Tweets.csv from *in_directory* and return raw SentimentDatum items.

    Sentiment labels are mapped negative/neutral/positive -> '0'/'1'/'2';
    any other label raises ValueError.
    """
    in_filename = os.path.join(in_directory, "Tweets.csv")
    with open(in_filename, newline='') as fin:
        cin = csv.reader(fin, delimiter=',', quotechar='"')
        lines = list(cin)

    phrases = []
    # Skip the header row; column 1 is the label, column 10 the tweet text.
    for line in lines[1:]:
        sentiment = line[1]
        if sentiment == 'negative':
            sentiment = '0'
        elif sentiment == 'neutral':
            sentiment = '1'
        elif sentiment == 'positive':
            sentiment = '2'
        else:
            raise ValueError("Unknown sentiment: {}".format(sentiment))
        # some of the tweets have \n in them
        utterance = line[10].replace("\n", " ")
        phrases.append(SentimentDatum(sentiment, utterance))
    return phrases
def get_tokenized_phrases(in_directory):
    """PTB-tokenize the raw phrases and strip @mentions / leading # markers."""
    phrases = get_phrases(in_directory)
    phrases = process_utils.get_ptb_tokenized_phrases(phrases)
    phrases = [SentimentDatum(x.sentiment, process_utils.clean_tokenized_tweet(x.text)) for x in phrases]
    print("Found {} phrases in the airline corpus".format(len(phrases)))
    return phrases
def main(in_directory, out_directory, short_name):
    """Convert the airline corpus to <short_name>.train.json in *out_directory*."""
    phrases = get_tokenized_phrases(in_directory)
    os.makedirs(out_directory, exist_ok=True)
    out_filename = os.path.join(out_directory, "%s.train.json" % short_name)
    # filter leading @United, @American, etc from the tweets
    process_utils.write_list(out_filename, phrases)

    # something like this would count @s if you cared enough to count
    # would need to update for SentimentDatum()
    #ats = Counter()
    #for line in lines:
    #    ats.update([x for x in line.split() if x[0] == '@'])

if __name__ == '__main__':
    # Usage: process_airline.py <in_dir> <out_dir> <short_name>
    in_directory = sys.argv[1]
    out_directory = sys.argv[2]
    short_name = sys.argv[3]
    main(in_directory, out_directory, short_name)
|
[
"horatio@gmail.com"
] |
horatio@gmail.com
|
c959374bd1150ba5cf9665a077bc81075a46cbde
|
933e89e2d45803ff021cc684b4b78cd2e14a5b15
|
/AllBehaviours/agent_util.py
|
289676eb5c22b11ea1fdb417fd764da9c32b681b
|
[] |
no_license
|
Occy88/MultiAgents
|
8123d2718df5f3ad202410097f821c6d69eadd7b
|
c49d89caf2db49dcc3571d87fa4fb4bade516226
|
refs/heads/master
| 2022-12-14T18:36:02.343056
| 2021-01-01T13:38:10
| 2021-01-01T13:38:10
| 214,148,130
| 0
| 0
| null | 2022-11-22T04:36:34
| 2019-10-10T10:00:00
|
Python
|
UTF-8
|
Python
| false
| false
| 5,716
|
py
|
from enum import Enum
from vacuumworld.vwc import action, direction
import math
# RESOLVED AS SINGLE CHARACTERS TO SAVE COMMUNICATION BANDWIDTH
class AgentPercepts(Enum):
    # Camera percept slots, one per cell visible around the agent.
    LEFT = '1'
    RIGHT = '2'
    TOP = '3'
    FRONT_LEFT = '4'
    FRONT_RIGHT = '5'

class AgentActions(Enum):
    # Actions an agent can take/announce.
    TURN_LEFT = '6'
    TURN_RIGHT = '7'
    FORWARD = '8'

class CommunicationKeys(Enum):
    # Message-payload keys (single characters to keep messages small).
    GRID_SIZE = 'a'
    POSITION = 'b'
    NEXT_ACTION = 'c'
    OBSERVATIONS = 'd'

class GridDirections(Enum):
    """
    Cannot be modified, (used to find orientation)
    """
    # Compass names used by the VacuumWorld environment for orientation.
    TOP = 'north'
    RIGHT = 'east'
    LEFT = 'west'
    BOTTOM = 'south'
class GridLocation:
    """State of one grid cell: optional dirt, optional agent, freshness age."""

    def __init__(self):
        self.dirt = None                  # e.g. ['D-1', colour] or None
        self.agent = None                 # e.g. ['A-1', colour] or None
        self.currently_observed = False   # True while in some agent's view
        self.age = 0                      # ticks since the last observation

    def encode(self):
        # Strip the 'A-'/'D-' prefixes so only the numeric id travels over
        # the bandwidth-limited channel.
        agent_details_to_send = self.agent
        dirt_details_to_send = self.dirt
        if self.agent is not None:
            agent_details_to_send = [self.agent[0].split('-')[1], self.agent[1]]
        if self.dirt is not None:
            dirt_details_to_send = [self.dirt[0].split('-')[1], self.dirt[1]]
        return [agent_details_to_send, dirt_details_to_send]

    def decode(self, observation):
        # observation = [[x, y], agent_or_None, dirt_or_None]; re-attach the
        # 'A-'/'D-' prefixes stripped by encode() and mark the cell fresh.
        self.agent = observation[1]
        self.dirt = observation[2]
        if observation[1] is not None:
            self.agent = ['A-' + str(observation[1][0]), observation[1][1]]
        if observation[2] is not None:
            self.dirt = ['D-' + str(observation[2][0]), observation[2][1]]
        self.currently_observed = True
        self.age = 0

    def update(self):
        # One tick without a fresh observation.
        self.age += 1

    def draw(self):
        # Render the cell as "|<D?><agent-id?><X?> <age>" without a newline.
        string = '|'
        if self.dirt is not None:
            string += 'D'
        else:
            string += ' '
        if self.agent is not None:
            string += self.agent[0].split('-')[1]
        else:
            string += ' '
        if self.currently_observed:
            string += 'X'
        else:
            string += ' '
        age = str(self.age)
        age = age.ljust(3)
        print(string + ' ' + age, end='')
class GridState:
    """Agent's world model: an n x n grid of GridLocation cells."""

    def __init__(self):
        self.size = 0
        self.locations = []   # locations[y][x] -> GridLocation

    def set_size(self, n):
        # (Re)allocate an n x n grid of fresh cells.
        self.size = n
        self.locations = []
        for y in range(n):
            self.locations.append([])
            for x in range(n):
                self.locations[y].append(GridLocation())

    def update(self):
        # Age every cell by one tick.
        for y in self.locations:
            for l in y:
                l.update()

    def decode(self, observations):
        """
        [[7, 3], ['a-1', 'orange', 'north'], None], [[6, 3], None, None], [[7, 2], ['a-2', 'orange', 'north'], None], [[6, 2], None, None]]
        :param d:
        :return:
        """
        # NOTE(review): the broad `except Exception` below swallows malformed
        # observations and merely prints the error -- confirm intentional.
        try:
            for obs in observations:
                coords = obs[0]
                x = int(coords[0])
                y = int(coords[1])
                cell = self.locations[y][x]
                cell.decode(obs)
        except Exception as e:
            print(e)

    def encode_location(self, x, y):
        """
        returns position in the format of an observation
        :param x:
        :param y:
        :return:
        """
        l = self.locations[y][x]
        return [[x, y]] + l.encode()

    def draw(self):
        # Print the grid row by row using each cell's draw().
        for l in self.locations:
            for location in l:
                location.draw()
            print('')
def split_grid(grid_size):
    """
    Splits grid into points that need to be explored to cover the
    whole grid.
    :param grid_size:
    :return:
    """
    # NOTE(review): this function looks unfinished/broken:
    #  * the outer loop starts at y=1 while only one row has been appended,
    #    so `points_to_explore[y]` raises IndexError for any grid_size >= 2;
    #  * `y += 3` is dead code -- the for statement reassigns y each pass
    #    (presumably a row stride of 3 was intended -- confirm).
    points_to_explore = []
    for y in range(1, grid_size):
        points_to_explore.append([])
        for x in range(grid_size):
            points_to_explore[y].append(GridLocation())
            pass
        y += 3
    return points_to_explore
def get_cam_detections(observation):
    """
    returns all squares seen by the camera
    :param observation:
    :return:[(obs,observation.direction)]
    """
    # Map each non-empty camera slot to its single-character percept key
    # (AgentPercepts values keep the messages small).
    # NOTE(review): despite the docstring, the return value is a dict of
    # percept-key -> observed cell, not a list of tuples.
    obs_dir_list = {}
    if observation.left is not None:
        obs_dir_list[AgentPercepts.LEFT.value] = observation.left
    if observation.forwardleft is not None:
        obs_dir_list[AgentPercepts.FRONT_LEFT.value] = observation.forwardleft
    if observation.forward is not None:
        obs_dir_list[AgentPercepts.TOP.value] = observation.forward
    if observation.forwardright is not None:
        obs_dir_list[AgentPercepts.FRONT_RIGHT.value] = observation.forwardright
    if observation.right is not None:
        obs_dir_list[AgentPercepts.RIGHT.value] = observation.right
    return obs_dir_list
def get_closest_agent(agent_list, point):
    """Return the agent from *agent_list* nearest to *point*.

    Distance ties are broken in favour of the numerically HIGHER agent id.
    Returns None when agent_list is None or empty.  Each entry looks like
    [[<'A-n' id>, ...], <position>].
    """
    if not agent_list:
        return None

    def rank(agent):
        # Sort key: smallest distance first; among equal distances,
        # the largest numeric id (hence the negation) wins.
        agent_id = int(agent[0][0].split('-')[1])
        return (dist(agent[1], point), -agent_id)

    return min(agent_list, key=rank)
def get_agents(observation):
    """
    Gets all agents in the observation
    returns a list
    :param observation:
    :return: [(agent,observation.direction)...]
    """
    # NOTE(review): mirrors get_cam_detections -- the actual return value is
    # a dict of percept-key -> agent, not a list as the docstring says.
    directions = get_cam_detections(observation)
    agents = {}
    for key, val in directions.items():
        if val.agent is not None:
            agents[key] = val.agent
    return agents
def dist(p1, p2):
    """Euclidean distance between coordinate sequences p1 and p2."""
    squared = sum((coord - p2[axis]) ** 2 for axis, coord in enumerate(p1))
    return math.sqrt(squared)
|
[
"octavio.delser@gmail.com"
] |
octavio.delser@gmail.com
|
6d33d07aa654f0785f0b7306fa9ba31354982ae1
|
028d788c0fa48a8cb0cc6990a471e8cd46f6ec50
|
/Python-Fundamentals/Final-Exam/Practice-Exams/Exam-04-04-2020/01_password_reset.py
|
9a2e5e197669e3de86bca07fdc4681b322e7edb2
|
[] |
no_license
|
Sheko1/SoftUni
|
d6b8e79ae545116f4c0e5705ad842f12d77a9c9d
|
a9fbeec13a30231b6a97c2b22bb35257ac1481c0
|
refs/heads/main
| 2023-07-13T15:39:48.826925
| 2021-08-21T12:51:02
| 2021-08-21T12:51:02
| 317,266,200
| 2
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,255
|
py
|
def take_odd(password):
    """Return only the characters at odd indices of *password*."""
    return password[1::2]
def cut(index_1, length_1, password):
    """Remove length_1 characters from *password* starting at index_1.

    BUG FIX: the original removed the FIRST occurrence of the extracted
    substring via str.replace(..., 1), which deletes the wrong characters
    whenever an identical substring appears earlier in the password
    (e.g. cut(3, 2, "abcab") returned "cab" instead of "abc").
    Slicing removes exactly the requested range.
    """
    return password[:index_1] + password[index_1 + length_1:]
def substitute(old, new, password):
    """Replace every *old* with *new*, or report that nothing matched."""
    if old not in password:
        return "Nothing to replace!"
    return password.replace(old, new)
# Command loop: apply TakeOdd / Cut / Substitute to the password until "Done".
string = input()
command = input()
while command != "Done":
    data = command.split(maxsplit=1)
    if data[0] == "TakeOdd":
        string = take_odd(string)
        print(string)
    elif data[0] == "Cut":
        index, length = data[1].split()
        index = int(index)
        length = int(length)
        string = cut(index, length, string)
        print(string)
    elif data[0] == "Substitute":
        sub_str, replace_str = data[1].split()
        # Only reassign the password when a replacement actually happened;
        # otherwise just echo the "Nothing to replace!" message.
        if substitute(sub_str, replace_str, string) == "Nothing to replace!":
            print(substitute(sub_str, replace_str, string))
        else:
            string = substitute(sub_str, replace_str, string)
            print(string)
    command = input()
print(f"Your password is: {string}")
|
[
"martinkypar@gmail.com"
] |
martinkypar@gmail.com
|
a389ef6945b30b5b0a5c5c6f45ca52d6239e2db7
|
f1ddb0bb268b3b1c3fe512b4753ff60fab5c78cb
|
/datapipe/image/kill_isolated_pixels.py
|
b31140cf0a14e5185b5940ec1561c30ed30fc096
|
[
"MIT"
] |
permissive
|
jjlk/ctapipe-wavelet-cleaning
|
18ef70f2a68414974b0e5a187855694631f78121
|
091a657598a0cd493e8494319854bfc1dba3cea0
|
refs/heads/master
| 2021-05-11T10:43:55.292322
| 2018-01-17T14:24:29
| 2018-01-17T14:24:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,928
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2016 Jérémie DECOCK (http://www.jdhp.org)
# This script is provided under the terms and conditions of the MIT license:
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
__all__ = ['get_islands',
'kill_isolated_pixels',
'kill_isolated_pixels_stats',
'number_of_islands']
import numpy as np
import scipy.ndimage as ndimage
# See: https://docs.scipy.org/doc/scipy-0.16.0/reference/generated/scipy.ndimage.measurements.label.html
def get_islands(array, threshold=0.2):
    """
    Threshold an image and label its connected components ("islands").

    Parameters
    ----------
    array : Numpy array
        The input image to clean.
    threshold : float
        The "level of the sea" before island cleaning.

    Returns
    -------
    Numpy array
        ``filtered_array`` the input image with all pixels below ``threshold`` put to 0 (may contain NaN values).
    Numpy array
        ``label_array`` define the island id each pixel belongs to (doesn't contain NaN values).
    Integer
        ``num_labels`` the number of islands.
    """
    source = array.astype('float64', copy=True)
    filtered_array = source.copy()

    # Temporarily replace NaN with 0 so scipy.ndimage can process the array.
    nan_mask = np.isnan(filtered_array)
    filtered_array[nan_mask] = 0.

    # Drown everything below the threshold.
    if threshold is not None:
        filtered_array[filtered_array < threshold] = 0.

    # Label the connected components of the remaining positive pixels.
    label_array, num_labels = ndimage.label(filtered_array > 0)

    # Restore NaN so downstream code keeps coherent pixel positions.
    filtered_array[nan_mask] = np.nan

    return filtered_array, label_array, num_labels
def kill_isolated_pixels(array, threshold=0.2):
    """
    ...

    Parameters
    ----------
    array : Numpy array
        The input image to clean.
    threshold : float
        The "level of the sea" before island cleaning.

    Returns
    -------
    Numpy array
        The input image ``array`` with isolated islands removed.
        Only keeping the biggest islands (the largest surface).
    """
    array = array.astype('float64', copy=True)

    filtered_array, label_array, num_labels = get_islands(array, threshold)

    # Put NaN pixels to 0
    # This is OK as long as it is made temporary and internally to avoid issues
    # with scipy
    filtered_array[np.isnan(filtered_array)] = 0.

    # Count the number of pixels for each island
    # NOTE: ndimage.sum over the pixel VALUES, so "surface" here is actually
    # the summed intensity per island, not the pixel count.
    num_pixels_per_island = ndimage.sum(filtered_array, label_array, range(num_labels + 1))

    # Only keep the biggest island
    mask_biggest_island = num_pixels_per_island < np.max(num_pixels_per_island)
    remove_pixel = mask_biggest_island[label_array]

    filtered_array[remove_pixel] = 0

    # Put back NaN in filtered_array (required to avoid bugs in others
    # functions (e.g. uncoherent dimensions with pixels_positions).
    filtered_array[np.isnan(array)] = np.nan

    return filtered_array
def kill_isolated_pixels_stats(array, threshold=0.2):
    """Return (delta_pe, delta_abs_pe, delta_num_pixels) removed by cleaning.

    delta_pe     -- net photo-electron sum removed by kill_isolated_pixels
    delta_abs_pe -- absolute photo-electron sum removed
    delta_num_pixels -- number of non-zero pixels removed
    """
    array = array.astype('float64', copy=True)

    filtered_array = kill_isolated_pixels(array, threshold=threshold)

    delta_pe = np.nansum(array - filtered_array)
    delta_abs_pe = np.nansum(np.abs(array - filtered_array))

    # Binarize both images to count removed pixels rather than intensity.
    array[np.isfinite(array) & (array != 0)] = 1 # May genereate warnings on NaN values
    filtered_array[np.isfinite(filtered_array) & (filtered_array != 0)] = 1 # May genereate warnings on NaN values

    delta_num_pixels = np.nansum(array - filtered_array)

    return float(delta_pe), float(delta_abs_pe), float(delta_num_pixels)
def number_of_islands(array, threshold=0.2):
    """Return the number of connected islands above *threshold* in *array*."""
    filtered_array, label_array, num_labels = get_islands(array, threshold)
    return num_labels
|
[
"jd.jdhp@gmail.com"
] |
jd.jdhp@gmail.com
|
94b87efb8bbc64ecfe8f2e4dc896d8dd597d68d0
|
64a646a8c72c5a0a6c981a1864a2021c6526f025
|
/tests/test_printer.py
|
e95cf30018b49e2a9efed0993940e56173a451f5
|
[
"BSD-3-Clause"
] |
permissive
|
vmagamedov/kinko
|
6b11d8bf38196a4100ed5c7679204f55461b7fd1
|
b65f8ae97bbf5d056781e90e22d2a369f440ed4c
|
refs/heads/master
| 2020-04-06T06:58:54.300766
| 2016-06-29T14:51:32
| 2016-06-29T14:51:32
| 38,629,823
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,100
|
py
|
import difflib
from textwrap import dedent
from kinko.nodes import Tuple, Symbol, Keyword, String, List, Placeholder
from kinko.printer import Printer
from .base import TestCase
class TestPrinter(TestCase):
    """Round-trip tests: kinko AST nodes printed back to source text."""

    def assertPrints(self, node, output):
        # Compare the printed node against the dedented expectation and show
        # an ndiff on mismatch for readable failures.
        first = Printer.dumps(node)
        second = dedent(output).strip() + '\n'
        if first != second:
            msg = ('Printed code is not equal:\n\n{}'
                   .format('\n'.join(difflib.ndiff(first.splitlines(),
                                                   second.splitlines()))))
            raise self.failureException(msg)

    def testSimple(self):
        # A flat tuple prints as a single line.
        self.assertPrints(
            Tuple([Symbol('html'),
                   Keyword('foo'), String('bar'), Symbol('baz')]),
            """
            html :foo "bar" baz
            """,
        )

    def testNested(self):
        # A trailing tuple argument becomes an indented child line.
        self.assertPrints(
            Tuple([Symbol('html'),
                   Keyword('foo'), String('bar'),
                   Tuple([Symbol('head')])]),
            """
            html :foo "bar"
              head
            """,
        )

    def testJoin(self):
        # A (join [...]) wrapper is flattened into sibling child lines.
        self.assertPrints(
            Tuple([Symbol('html'),
                   Keyword('foo'), String('bar'),
                   Tuple([Symbol('join'), List([
                       Tuple([Symbol('head')]),
                       Tuple([Symbol('body')]),
                   ])])]),
            """
            html :foo "bar"
              head
              body
            """,
        )

    def testGet(self):
        # (get x y) prints as dotted access; placeholders keep their '#'.
        self.assertPrints(
            Tuple([Symbol('html'),
                   Keyword('foo'), Tuple([Symbol('get'), Symbol('bar'),
                                          Symbol('baz')])]),
            """
            html :foo bar.baz
            """,
        )
        self.assertPrints(
            Tuple([Symbol('html'),
                   Keyword('foo'), Tuple([Symbol('get'), Placeholder('bar'),
                                          Symbol('baz')])]),
            """
            html :foo #bar.baz
            """,
        )
|
[
"vladimir@magamedov.com"
] |
vladimir@magamedov.com
|
c080be714d98599463d46bacf36758234d7faf91
|
0e9e2bfc8d4b5c297ceb4908eb4ca55c4101ea5b
|
/function_views/migrations/0003_auto_20200420_2102.py
|
e59be5a859cb9efc08590d349ec9d9af9543c697
|
[
"MIT"
] |
permissive
|
cyrilleAdelphe/django-views-tutorial
|
776ee1b08f0c6fc68fb56cf64f57bf20c557b9d0
|
71cd7fdb0846a61fcff7f1822d58b0e862be352a
|
refs/heads/master
| 2023-06-30T10:09:47.021727
| 2020-12-08T03:50:14
| 2020-12-08T03:50:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 384
|
py
|
# Generated by Django 3.0.5 on 2020-04-21 02:02
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: allow NULL values on ``User.bio``."""
    # Must be applied on top of the app's previous migration.
    dependencies = [
        ("function_views", "0002_auto_20200420_2052"),
    ]
    operations = [
        migrations.AlterField(
            model_name="user",
            name="bio",
            # null=True lets the database column store NULL.
            field=models.TextField(null=True),
        ),
    ]
|
[
"toddbirchard@gmail.com"
] |
toddbirchard@gmail.com
|
824a8bfdd632b45637dadcb4532b085dc7756e6e
|
2d276785c3663d4798be462115291c4706dbd255
|
/Python从菜鸟到高手/chapter4/demo4.20.py
|
f4c7eb45f4e343ffc86089f644684daa5542aa05
|
[] |
no_license
|
bupthl/Python
|
81c92433bd955663e6cda5fe7cab5ea3d067c3de
|
bdb33aeeb179a43100b9ef7129a925c63a133fd3
|
refs/heads/master
| 2022-02-21T11:02:40.195265
| 2019-08-16T05:49:18
| 2019-08-16T05:49:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 927
|
py
|
'''
--------《Python从菜鸟到高手》源代码------------
欧瑞科技版权所有
作者:李宁
如有任何技术问题,请加QQ技术讨论群:264268059
或关注“极客起源”订阅号或“欧瑞科技”服务号或扫码关注订阅号和服务号,二维码在源代码根目录
如果QQ群已满,请访问https://geekori.com,在右侧查看最新的QQ群,同时可以扫码关注公众号
“欧瑞学院”是欧瑞科技旗下在线IT教育学院,包含大量IT前沿视频课程,
请访问http://geekori.com/edu或关注前面提到的订阅号和服务号,进入移动版的欧瑞学院
“极客题库”是欧瑞科技旗下在线题库,请扫描源代码根目录中的小程序码安装“极客题库”小程序
关于更多信息,请访问下面的页面
https://geekori.com/help/videocourse/readme.html
'''
# Demonstrate the sequence-repetition operator on a string and two lists.
for seq, times in (('hello', 5), ([20], 10), ([None], 6)):
    print(seq * times)
|
[
"registercn@outlook.com"
] |
registercn@outlook.com
|
15729ac399f50b317aa8bb686eb760418a3e65cf
|
1b5f653955779f45e78ca6dda925518779d09e8f
|
/submissions/2779.py
|
ae704befd917231ea60204a5bca5bfbc3b93854e
|
[] |
no_license
|
LeonardoSaid/uri-py-solutions
|
ad285f552934ead54ad2410e23113e84b0724f72
|
43c10c0e99e99d22b4b5ae2871e5d897f8823b42
|
refs/heads/master
| 2020-08-11T00:28:48.661578
| 2020-04-23T20:21:39
| 2020-04-23T20:21:39
| 214,453,627
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 180
|
py
|
# Read the total count n and the number of marked values c, then report how
# many of the n values were never marked.  Duplicates among the c inputs must
# count only once, so collect them in a set (O(1) membership) instead of
# scanning a list for every input value.
n = int(input())
c = int(input())
seen = set()
for _ in range(c):
    seen.add(int(input()))
# Distinct marked values are exactly len(seen).
print(n - len(seen))
|
[
"noreply@github.com"
] |
LeonardoSaid.noreply@github.com
|
7694ec591f3fb364781ff62e28267ea8d3b26a4e
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/pAFxfge35bT3zj4Bs_19.py
|
d59e6f7d09df6fd8ccc31e7672ecb60bc30a3dd0
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 662
|
py
|
"""
Write a function that accepts `base` (decimal), `height` (decimal) and `shape`
("triangle", "parallelogram") as input and calculates the area of that shape.
### Examples
area_shape(2, 3, "triangle") ➞ 3
area_shape(8, 6, "parallelogram") ➞ 48
area_shape(2.9, 1.3, "parallelogram") ➞ 3.77
### Notes
* Area of a triangle is `0.5 * b * h`
* Area of a parallelogram is `b * h`
* Assume triangle and parallelogram are the only inputs for `shape`.
"""
def area_shape(base, height, shape):
    """Return the area of *shape* for the given base and height.

    Parameters:
        base: base length (number).
        height: height (number).
        shape: either "triangle" (area = 0.5 * b * h) or
            "parallelogram" (area = b * h).

    Raises:
        ValueError: for any other *shape* (the original code raised an
            accidental UnboundLocalError in that case).
    """
    if shape == "triangle":
        return 0.5 * base * height
    if shape == "parallelogram":
        return base * height
    raise ValueError(f"unsupported shape: {shape!r}")
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
432eda245cde6554464c98cca73dea1dbe401983
|
2c5b25d0b5d6ba66d013251f93ebf4c642fd787b
|
/wrong_answer_codes/Next_Permutation/Next Permutation_279803526.py
|
0f2f44ba7c182ed06e0f91406f928fa3d8ab7085
|
[] |
no_license
|
abhinay-b/Leetcode-Submissions
|
da8099ac54b5d36ae23db42580064d0f9d9bc63b
|
d034705813f3f908f555f1d1677b827af751bf42
|
refs/heads/master
| 2022-10-15T22:09:36.328967
| 2020-06-14T15:39:17
| 2020-06-14T15:39:17
| 259,984,100
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 526
|
py
|
class Solution:
    def nextPermutation(self, nums: List[int]) -> None:
        """Rearrange nums into its next lexicographic permutation, in place.

        The original (archived as a wrong answer) merely swapped the first
        adjacent ascending pair from the right — e.g. [1,3,2] became [3,1,2]
        instead of [2,1,3] — and its fallback `nums = nums[::-1]` rebound the
        local name without mutating the list.  This is the standard
        three-step algorithm: find the pivot, swap it with its successor,
        then reverse the suffix.
        """
        # 1. Rightmost index i with nums[i] < nums[i+1] (the "pivot").
        i = len(nums) - 2
        while i >= 0 and nums[i] >= nums[i + 1]:
            i -= 1
        if i >= 0:
            # 2. Swap the pivot with the smallest larger element to its right.
            j = len(nums) - 1
            while nums[j] <= nums[i]:
                j -= 1
            nums[i], nums[j] = nums[j], nums[i]
        # 3. The suffix is descending; reversing it (in place, via slice
        # assignment) yields the smallest tail.  When i == -1 the whole
        # array was descending and this wraps around to the first permutation.
        nums[i + 1:] = reversed(nums[i + 1:])
|
[
"abhinayb.sssihl@gmail.com"
] |
abhinayb.sssihl@gmail.com
|
292f6b296954e7cea39ad9a0858abbaba0e99edc
|
5f86944bdf1b810a84c63adc6ed01bbb48d2c59a
|
/kubernetes/test/test_v1beta1_certificate_signing_request_status.py
|
c35e4425ee58df047c2b093e26ce28ab6d3998da
|
[
"Apache-2.0"
] |
permissive
|
m4ttshaw/client-python
|
384c721ba57b7ccc824d5eca25834d0288b211e2
|
4eac56a8b65d56eb23d738ceb90d3afb6dbd96c1
|
refs/heads/master
| 2021-01-13T06:05:51.564765
| 2017-06-21T08:31:03
| 2017-06-21T08:31:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,047
|
py
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.6.5
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1beta1_certificate_signing_request_status import V1beta1CertificateSigningRequestStatus
class TestV1beta1CertificateSigningRequestStatus(unittest.TestCase):
    """Unit-test stubs for the V1beta1CertificateSigningRequestStatus model."""

    def setUp(self):
        """No fixtures are needed for these stubs."""

    def tearDown(self):
        """Nothing to clean up."""

    def testV1beta1CertificateSigningRequestStatus(self):
        """Smoke test: the model can be constructed with default attributes."""
        model = kubernetes.client.models.v1beta1_certificate_signing_request_status.V1beta1CertificateSigningRequestStatus()


if __name__ == '__main__':
    unittest.main()
|
[
"mehdy@google.com"
] |
mehdy@google.com
|
720356dcaeadc6446daf29ea65dc5897ed2614cf
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_227/ch25_2020_03_09_19_08_42_543739.py
|
28620a9b8ede3883991958f34592cf33371e7b28
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 338
|
py
|
import math
# Read the jackfruit's launch speed and angle, then classify how far it flies.
speed = float(input("Qual a velocidade da jaca? "))
angle = float(input("Qual o ângulo de lançamento da jaca? "))
# Ideal projectile range: v**2 * sin(2*theta) / g, with g = 9.8 m/s**2.
reach = speed ** 2 * math.sin(math.radians(2 * angle)) / 9.8
if reach < 98:
    print('Muito perto')
elif reach <= 102:  # first branch already excluded reach < 98
    print('Acertou!')
else:
    print('Muito longe')
|
[
"you@example.com"
] |
you@example.com
|
f8f31a1459502f000a301088ec73cfa9af65dd6d
|
673517e68db4b2540ac3a908a6374aaaa72e0f27
|
/tests/src/event/MakeBeansProfitEvent.py
|
06350a8843655b3b21df11747f98205409c96e13
|
[
"Apache-2.0"
] |
permissive
|
buxx/synergine
|
3a977b69bc35c1a5af1056f98028f8b7412795d2
|
da05d762cdbc993362807d4851e1ca74784438ae
|
refs/heads/master
| 2021-07-03T19:57:24.486164
| 2017-09-04T09:19:45
| 2017-09-04T09:19:45
| 23,734,878
| 6
| 2
|
Apache-2.0
| 2021-06-10T14:15:26
| 2014-09-06T13:15:07
|
Python
|
UTF-8
|
Python
| false
| false
| 232
|
py
|
from tests.src.event.TestEvent import TestEvent
from tests.src.TestSynergyObject import TestSynergyObject
class MakeBeansProfitEvent(TestEvent):
    """Test event whose preparation step simply echoes its parameters."""

    def _prepare(self, object_id, context, parameters=None):
        """Return the event parameters unchanged.

        The original signature used a mutable default (``parameters={}``),
        which Python shares across all calls; a ``None`` sentinel plus a
        fresh dict preserves the behaviour without that hazard.
        """
        return {} if parameters is None else parameters
|
[
"sevajol.bastien@gmail.com"
] |
sevajol.bastien@gmail.com
|
4b514a998a289f87349c4cdfda7c3e9cb179742c
|
f5b4d2f294085565395cb060d0508cc60b331526
|
/vi/vn_express/spider.py
|
da6e99ead84c2fcf983dd2bcea95a235f8a1ba71
|
[] |
no_license
|
HaManhDong/crawler_news
|
72fd8a18f0fe3358d1033667ee9857755dd07ac8
|
56171952a7bffef26ccfd397b08936ee259d52aa
|
refs/heads/master
| 2021-01-22T20:26:14.036281
| 2016-11-24T03:33:39
| 2016-11-24T03:33:39
| 85,321,142
| 0
| 0
| null | 2017-03-17T14:33:45
| 2017-03-17T14:33:45
| null |
UTF-8
|
Python
| false
| false
| 5,742
|
py
|
import datetime
import scrapy
from scrapy.exceptions import CloseSpider
from database import database_connection
from database.database_connection import NewsData
class NewspaperItem(scrapy.Item):
    """Scrapy item holding the fields extracted for one article."""
    url = scrapy.Field()  # canonical article URL
    title = scrapy.Field()  # headline text
    summary = scrapy.Field()  # short intro / lead paragraph
    content = scrapy.Field()  # full article body text
class NewsItem:
    """Plain data holder for a single crawled article."""

    def __init__(self, url, title, summary, content):
        """Remember the article's URL, headline, intro and body text."""
        self.url, self.title = url, title
        self.summary, self.content = summary, content
class VNExpressNewspaperSpider(scrapy.Spider):
    """Crawl vnexpress.net, persisting each fully-parsed article to the DB.

    The crawl stops (CloseSpider) once more than 500 articles are stored.
    NOTE(review): the DB session is opened at class-definition time (an
    import side effect) and shared by all spider instances.
    """
    db_session = database_connection.connect_to_database()
    name = "VnExpress"
    # start_urls = ['http://vnexpress.net/', ]
    start_urls = \
        [
            'http://vnexpress.net/', ]
    # Every URL ever queued, so each page is requested at most once.
    url_set = set(start_urls)
    # Class-wide count of articles stored so far.
    crawled_page = 0
    def parse(self, response):
        """Extract an article from *response*, store it, then follow on-site links."""
        # title = response.xpath()
        news_title = VNExpressNewspaperSpider.get_title(response)
        news_time = VNExpressNewspaperSpider.get_time(response)
        summary = VNExpressNewspaperSpider.get_summary(response)
        content = VNExpressNewspaperSpider.get_content(response)
        url = response.url
        # Persist only pages where every article field could be extracted.
        if news_title is not None and summary is not None and content is not None and news_time is not None:
            news_vn_express_data = NewsData(url=url, title=news_title, summary=summary, content=content,
                                            time=news_time)
            VNExpressNewspaperSpider.db_session.add(news_vn_express_data)
            VNExpressNewspaperSpider.db_session.commit()
            # filename = "result.txt"
            # with open(filename, 'ab') as outfile:
            #     outfile.write(json.dumps(NewsItem(url, news_title, summary, content).__dict__).encode("UTF-8"))
            #     outfile.write("\n")
            VNExpressNewspaperSpider.crawled_page += 1
            if VNExpressNewspaperSpider.crawled_page > 500:
                raise CloseSpider('Search Exceeded 500')
        # Collect unseen vnexpress.net links from href, data-mobile-href and
        # data-href attributes, then schedule them for crawling.
        next_link_list = []
        href_element = response.xpath("//*[contains(@href,'vnexpress.net')]")
        mobile_href_element = response.xpath("//*[contains(@data-mobile-href,'vnexpress.net')]")
        data_href_element = response.xpath("//*[contains(@data-href,'vnexpress.net')]")
        for link in href_element:
            link_url = link.xpath("./@href").extract_first()
            if link_url not in VNExpressNewspaperSpider.url_set:
                VNExpressNewspaperSpider.url_set.add(link_url)
                next_link_list.append(link_url)
        for link in mobile_href_element:
            link_url = link.xpath("./@data-mobile-href").extract_first()
            if link_url not in VNExpressNewspaperSpider.url_set:
                VNExpressNewspaperSpider.url_set.add(link_url)
                next_link_list.append(link_url)
        for link in data_href_element:
            link_url = link.xpath("./@data-href").extract_first()
            if link_url not in VNExpressNewspaperSpider.url_set:
                VNExpressNewspaperSpider.url_set.add(link_url)
                next_link_list.append(link_url)
        for next_link in next_link_list:
            yield scrapy.Request(next_link, callback=self.parse)
        # yield scrapy.Request(next_page, callback=self.parse)
        #
        # with open(filename, 'ab') as f:
        #     f.write()
    @staticmethod
    def get_title(response):
        """Return the article headline, or None if the page has none."""
        news_title_element = response.xpath('//div[@class="title_news"]/h1/text()')
        if len(news_title_element) > 0:
            return news_title_element.extract_first()
        return None
    @staticmethod
    def get_summary(response):
        """Return the article's short intro text, or None if absent."""
        summary_element = response.xpath('//*[contains(@class,"short_intro")]/text()')
        if len(summary_element) > 0:
            return summary_element.extract_first()
        return None
    @staticmethod
    def get_content(response):
        """Return the concatenated body text, or None if no content block exists."""
        content_block_element = response.xpath('//*[contains(@class,"fck_detail")]')
        if len(content_block_element) <= 0:
            # Fallback layout used by slideshow-style articles.
            content_block_element = response.xpath('//*[contains(@class,"block_content_slide_showdetail")]')
        if len(content_block_element) > 0:
            return_text = ''
            text_nodes = content_block_element[0].xpath(".//*[text()]")
            for text_node in text_nodes:
                return_text += text_node.xpath("./text()").extract_first()
            return return_text
        return None
    @staticmethod
    def get_time(response):
        """Return the publication time as 'Y/m/d/H/M', or None when unparsable."""
        # content_block_element =
        # response.xpath("//div[contains(@class, 'block_timer_share') and contains(@class, 'class2')]")
        content_block_element = response.xpath("//div[contains(@class, 'block_timer_share')]" +
                                               "/div[contains(@class, 'block_timer')]")
        if len(content_block_element) > 0:
            try:
                # Assumes text shape ['<weekday>,D/M/Y', 'HH:MM ...'] — TODO confirm
                # against the live page markup.
                datetime_data = content_block_element.xpath("./text()").extract()
                date_data = datetime_data[0].split(",")[1].split("/")
                time_data = datetime_data[1].split(" ")[0].split(":")
                if len(date_data) == 3 and len(time_data) == 2:
                    try:
                        # Building a real datetime validates the components.
                        check_date = datetime.datetime(
                            int(date_data[2]), int(date_data[1]), int(date_data[0]),
                            int(time_data[0]), int(time_data[1]))
                        return str(check_date.year) + '/' + str(check_date.month) + '/' + str(check_date.day) +\
                               '/' + str(check_date.hour) + '/' + str(check_date.minute)
                    except ValueError:
                        return None
            except Exception:
                return None
        return None
|
[
"haminhcongbkhn@gmail.com"
] |
haminhcongbkhn@gmail.com
|
a2ac3234cd4def8f830856f03044bd7161fbd180
|
8ff60c41c1cb387271a19b839ddb4ee951b41057
|
/message/migrations/0001_initial.py
|
3c0cfe80cb066405982f5f16be2f4526e2d184cb
|
[] |
no_license
|
ajaypythonmate/codecrux
|
579306b7691898fe6bce43ac6ba385bb38c89d90
|
31a0e3b8b92ab3693efe7f053ba3ba94ef614b39
|
refs/heads/main
| 2023-04-02T09:51:35.834632
| 2021-04-06T04:52:27
| 2021-04-06T04:52:27
| 355,058,286
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,028
|
py
|
# Generated by Django 3.1.3 on 2021-02-01 10:29
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the message app: Conversation and Message models.

    NOTE(review): the DateTimeField defaults below are datetime *values*
    frozen at generation time (2021-02-01), not callables — confirm this is
    intended rather than ``django.utils.timezone.now``.
    """
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Conversation',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('last_message_datetime', models.DateTimeField(blank=True, default=datetime.datetime(2021, 2, 1, 10, 29, 54, 688446), null=True)),
                ('subject', models.CharField(max_length=1200)),
                ('start_date_time', models.DateTimeField(blank=True, default=datetime.datetime(2021, 2, 1, 10, 29, 54, 688602), null=True)),
                ('end_date_time', models.DateTimeField(blank=True, default=datetime.datetime(2021, 2, 1, 10, 29, 54, 688628), null=True)),
                ('participant_web_link', models.CharField(blank=True, max_length=1200, null=True)),
                ('teamviewr_id', models.CharField(blank=True, max_length=1200, null=True)),
                ('access_token', models.CharField(blank=True, max_length=1200, null=True)),
                ('refresh_token', models.CharField(blank=True, max_length=1200, null=True)),
                ('conference_call_information', models.CharField(blank=True, max_length=1200, null=True)),
                ('password', models.CharField(blank=True, max_length=1200, null=True)),
                ('archived_by', models.ManyToManyField(blank=True, null=True, related_name='conversation_archived', to=settings.AUTH_USER_MODEL)),
                ('includes', models.ManyToManyField(blank=True, null=True, related_name='conversation_includes', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Message',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.DateTimeField(blank=True, default=datetime.datetime(2021, 2, 1, 10, 29, 54, 691297), null=True)),
                ('message', models.CharField(max_length=1200)),
                ('is_read', models.BooleanField(default=False)),
                ('attachment', models.FileField(blank=True, null=True, upload_to='uploads/')),
                ('conversation', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='conversation_message', to='message.conversation')),
                ('read_by', models.ManyToManyField(to=settings.AUTH_USER_MODEL)),
                ('sent_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='sent_message', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                # Messages are ordered chronologically by default.
                'ordering': ('date',),
            },
        ),
    ]
|
[
"ajay.khuswaha@pythonmate.com"
] |
ajay.khuswaha@pythonmate.com
|
184a87884146cb9b01e533a853cb3669cca251cf
|
50a8942417f2124ffeeedb527bb036d3227e4f52
|
/src/briefcase/__init__.py
|
531000413b17257869aed573d1eded5004db2a08
|
[
"BSD-3-Clause"
] |
permissive
|
Viridescence/briefcase
|
b711b3334ef8e5fad7cded9eb60076437c94972d
|
a04adcc0091cbcc83d9c12d57bb6cc9fa705de42
|
refs/heads/master
| 2023-03-09T22:52:42.711239
| 2021-03-01T02:09:28
| 2021-03-01T02:09:28
| 342,579,930
| 0
| 0
|
BSD-3-Clause
| 2021-02-26T13:12:43
| 2021-02-26T13:12:42
| null |
UTF-8
|
Python
| false
| false
| 380
|
py
|
# The package's public surface is intentionally just its version string.
__all__ = [
    '__version__',
]
# Examples of valid version strings (PEP 440):
# __version__ = '1.2.3.dev1'       # Development release 1
# __version__ = '1.2.3a1'          # Alpha Release 1
# __version__ = '1.2.3b1'          # Beta Release 1
# __version__ = '1.2.3rc1'         # RC Release 1
# __version__ = '1.2.3'            # Final Release
# __version__ = '1.2.3.post1'      # Post Release 1
__version__ = '0.3.4'
|
[
"russell@keith-magee.com"
] |
russell@keith-magee.com
|
2ef807073708068bda7360e6552b4bd687dc2821
|
b38264a28f59248454972c54d19e4136aa3f181b
|
/tp3/algorithm_em.py
|
c1f399bc42fd5dfca969b15fb951c6e87392bae7
|
[] |
no_license
|
icannos/machine-learning-medical-data
|
0ed8e4c47712daae081c2a8536766415b15505e2
|
8d79db4965a0297e1094f49780e920a75071e4bb
|
refs/heads/master
| 2020-12-15T10:01:46.479424
| 2020-03-18T14:10:34
| 2020-03-18T14:10:34
| 235,068,816
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,465
|
py
|
import numpy as np
import random
from scipy.stats import multivariate_normal, norm
class myEM:
    """Gaussian-mixture fitting via Expectation-Maximisation (NumPy/SciPy)."""
    def __init__(self, n_components=1, dim=1):
        # n_components: number of Gaussian components; dim: data dimensionality.
        self.dim = dim
        self.n_components = n_components
        self.mu = None      # component means, shape (n_components, dim)
        self.sigma = None   # component covariances, shape (n_components, dim, dim)
        self.reset()
    def reset(self):
        """Re-initialise the component means and covariances at random."""
        self.sigma = np.random.uniform(-1, 1, size=(self.n_components, self.dim, self.dim))
        for i in range(self.n_components):
            # A @ A.T yields a symmetric positive semi-definite covariance.
            self.sigma[i] = np.matmul(self.sigma[i], np.transpose(self.sigma[i]))
        self.mu = np.random.uniform(-3, 3, size=(self.n_components, self.dim))
    def fit(self, data, nb_iteration=100):
        """Run nb_iteration-1 alternating mean/covariance update rounds."""
        # Learning procedure (optimization)
        for iter in range(1, nb_iteration):
            hat_mu = self.update_mu(data)
            hat_sigma = self.update_sigma(data)
            self.mu = hat_mu
            # Tiny jitter keeps the covariances from becoming exactly singular.
            self.sigma = hat_sigma + 1e-13
    def fit_predict(self, X):
        """Fit the mixture to X, then return hard component assignments."""
        self.fit(X)
        return self.predict(X)
    def predict(self, X):
        """Return the index of the highest-density component per sample."""
        return np.argmax(self.predict_proba(X), axis=1)
    def predict_proba(self, X):
        """Return per-sample component densities, shape (len(X), n_components)."""
        y = []
        for i in range(X.shape[0]):
            y.append([multivariate_normal(mean=self.mu[j], cov=self.sigma[j]).pdf(X[i])
                      for j in range(self.n_components)])
        return np.array(y)
    def update_mu(self, X):
        """M-step: responsibility-weighted component means."""
        pnk = self.proba_nk(X)
        mu = np.zeros((self.n_components, *X.shape[1:]))
        for k in range(self.n_components):
            # Epsilon guards against division by a zero responsibility mass.
            mu[k] = np.sum(pnk[:, k].reshape(-1,1)*X, axis=0) / (np.sum(pnk[:, k]).reshape(-1,1)+1E-10)
        return mu
    def update_sigma(self, X):
        """M-step: responsibility-weighted component covariances."""
        sigma = np.zeros((self.n_components, self.dim, self.dim))
        pnk = self.proba_nk(X)
        for k in range(self.n_components):
            sigma[k] = np.cov(np.transpose(X), aweights=pnk[:, k]+1E-10)
        return sigma
    def proba_x(self, X):
        """Marginal density p(x) under the current mixture."""
        probs = self.predict_proba(X)
        probk = self.proba_k(X)
        p = np.zeros(X.shape[0])
        for k in range(self.n_components):
            p += probs[:, k] * probk[k]
        return p
    def proba_nk(self, X):
        """E-step: responsibilities p(k | x_n) for every sample and component."""
        px = self.proba_x(X)
        pk = self.proba_k(X)
        p = self.predict_proba(X)
        p = p * pk
        pnk = p / px.reshape((-1,1))
        return pnk
    def proba_k(self, X):
        """Mixture weights estimated from normalised component densities."""
        probs = self.predict_proba(X)
        normalization = np.sum(probs, axis=0)
        return normalization / np.sum(normalization)
|
[
"maxime.darrin@ens-lyon.fr"
] |
maxime.darrin@ens-lyon.fr
|
23a3ae57a468e1f126e12d677509104c6dd7bca9
|
78d23de227a4c9f2ee6eb422e379b913c06dfcb8
|
/Interview-prep/merge_list.py
|
5261ca9770974f95bb58bed058d70e103a38271e
|
[] |
no_license
|
siddharthcurious/Pythonic3-Feel
|
df145293a3f1a7627d08c4bedd7e22dfed9892c0
|
898b402b7a65073d58c280589342fc8c156a5cb1
|
refs/heads/master
| 2020-03-25T05:07:42.372477
| 2019-09-12T06:26:45
| 2019-09-12T06:26:45
| 143,430,534
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 132
|
py
|
import itertools
# Flatten a list of lists lazily and print each element in order.
L = [[1, 2, 3], [4, 5], [6, 7, 8], [9, 10, 11]]
for element in itertools.chain.from_iterable(L):
    print(element)
|
[
"sandhyalalkumar@gmail.com"
] |
sandhyalalkumar@gmail.com
|
b5a23212cb338feb0206ac68fd0138470c6b4ef2
|
08cfc4fb5f0d2f11e4e226f12520a17c5160f0a2
|
/kubernetes/test/test_v2beta1_object_metric_status.py
|
dbd605d082a20f5aee5371947ac2a7b38d3ef172
|
[
"Apache-2.0"
] |
permissive
|
ex3cv/client-python
|
5c6ee93dff2424828d064b5a2cdbed3f80b74868
|
2c0bed9c4f653472289324914a8f0ad4cbb3a1cb
|
refs/heads/master
| 2021-07-12T13:37:26.049372
| 2017-10-16T20:19:01
| 2017-10-16T20:19:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,035
|
py
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.8.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v2beta1_object_metric_status import V2beta1ObjectMetricStatus
class TestV2beta1ObjectMetricStatus(unittest.TestCase):
    """Unit-test stubs for the V2beta1ObjectMetricStatus model."""

    def setUp(self):
        """No fixtures are needed for these stubs."""

    def tearDown(self):
        """Nothing to clean up."""

    def testV2beta1ObjectMetricStatus(self):
        """Placeholder until mandatory attributes get example values."""
        # FIXME: construct object with mandatory attributes with example values
        # model = kubernetes.client.models.v2beta1_object_metric_status.V2beta1ObjectMetricStatus()


if __name__ == '__main__':
    unittest.main()
|
[
"mehdy@google.com"
] |
mehdy@google.com
|
d07c88d41d056a346ca71e8dd0c968788a908880
|
64b33c5c68b2b668f086fd218a1ce0e381f7fb40
|
/binary_image/PASCAL_VOC/NDUDE.py
|
59336883d98fe5377da828aee95b6b35171f3a2a
|
[] |
no_license
|
hongjoon0805/ICE-N-DUDE
|
ce1f9c498ae490a76259be4d796e252a1ea6dedd
|
138e76ca56d39bc88a90a70e9c6d08d1c3a24b39
|
refs/heads/master
| 2020-04-20T11:43:55.646416
| 2019-02-02T11:50:54
| 2019-02-02T11:50:54
| 148,457,012
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,724
|
py
|
# coding: utf-8
# In[1]:
from core import *
from tools import *
import numpy as np
from numpy import *
import os
import tensorflow as tf
import keras as K
import sys
import argparse
# In[4]:
# ICML_2019/image
class State_Estimation_Process:
    """State estimation (denoising) with a pre-trained ICE/N-DUDE network."""
    def __init__(self, n, k, nb_x_classes, nb_z_classes, x, z, param_name = 'test'):
        # n: sequence length; k: context half-width; x: clean sequence;
        # z: noisy sequence; param_name: basename of the saved weights file.
        self.n, self.k, self.x, self.z, self.nb_x_classes, self.nb_z_classes = n, k, x, z, nb_x_classes, nb_z_classes
        self.param_name = param_name
        self.raw_error = error_rate(x,z)
        # Context windows around every interior position of z.
        self.C = make_context(z, k, nb_z_classes, n)
        self.train_batch_size = 100 + 200 * (nb_x_classes - 2)
        self.test_batch_size = 3000
        self.epochs = nb_z_classes * 5
    def denoise(self, pred_prob): # Estimate latent variables using softmax output
        """Map softmax outputs to a denoised sequence and its normalized error."""
        n, k, x, z = self.n, self.k, self.x, self.z
        """
        pred_class[0] = Say What You See(s[0]=z[i]) = -1
        pred_class[i+1] = Always Say i(s[i+1]=i) = i
        """
        # s(z) = z
        pred_class = np.argmax(pred_prob, axis = -1) - 1
        # mask Say What You see
        mask = pred_class == -1
        # mask-> Say What You see || others-> 0,1,2,3
        x_hat = z[k:n-k] * mask + (mask^1)*pred_class
        # The first/last k symbols have no full context and are copied through.
        x_hat = np.hstack((z[:k], x_hat, z[n-k:n]))
        error = normalized_error_rate(x,x_hat,self.raw_error)
        return error, x_hat
    def N_DUDE(self, PI): # Denoising process
        """Fine-tune the saved network under channel PI, then denoise z."""
        n, k, nb_x_classes, nb_z_classes, z, param_name, C = self.n, self.k, self.nb_x_classes, self.nb_z_classes, self.z, self.param_name, self.C
        epochs, train_batch_size, test_batch_size = self.epochs, self.train_batch_size, self.test_batch_size
        iteration = 3
        # fine-tuning the weights from ICE process
        L_new = L_NEW(PI, nb_x_classes, nb_z_classes)
        Y = make_pseudo_label(z, k, L_new, nb_z_classes, n)
        model = ICE_N_DUDE_model(nb_x_classes, nb_z_classes, k, lr = 0.0001)
        model.load_weights("weights/"+param_name+".hd5")
        # model training...
        hist = model.fit(C, Y, epochs=epochs // 2, batch_size=train_batch_size*4, verbose=1, validation_data=(C, Y))
        # NOTE(review): weights are reloaded here, discarding the fit above —
        # presumably intentional (prediction with the pre-trained weights);
        # confirm against the training pipeline.
        model.load_weights("weights/"+param_name+".hd5")
        pred_prob = model.predict(C, batch_size = test_batch_size*4, verbose = 0)
        return self.denoise(pred_prob)
# In[ ]:
# PASCAL VOC image ids that form the denoising benchmark set.
img_arr = ['2012_000003.jpg', '2012_000004.jpg', '2012_000007.jpg', '2012_000010.jpg', '2012_000014.jpg', '2012_000015.jpg', '2012_000016.jpg', '2012_000019.jpg', '2012_000025.jpg', '2012_000027.jpg', '2012_000028.jpg', '2012_000029.jpg', '2012_000030.jpg', '2012_000031.jpg', '2012_000032.jpg', '2012_000035.jpg', '2012_000036.jpg', '2012_000040.jpg', '2012_000042.jpg', '2012_000044.jpg', '2012_000045.jpg', '2012_000049.jpg', '2012_000050.jpg', '2012_000051.jpg', '2012_000055.jpg', '2012_000056.jpg', '2012_000058.jpg', '2012_000059.jpg', '2012_000060.jpg', '2012_000065.jpg', '2012_000067.jpg', '2012_000069.jpg', '2012_000070.jpg', '2012_000071.jpg', '2012_000072.jpg', '2012_000074.jpg', '2012_000078.jpg', '2012_000083.jpg', '2012_000084.jpg', '2012_000085.jpg', '2012_000086.jpg', '2012_000087.jpg', '2012_000089.jpg', '2012_000100.jpg', '2012_000102.jpg', '2012_000104.jpg', '2012_000105.jpg', '2012_000106.jpg', '2012_000108.jpg', '2012_000113.jpg']
# Command-line mode: read channel type (--t) and image index (--i).
try:
    parser = argparse.ArgumentParser()
    parser.add_argument("--t", help="PI type", type=int)
    parser.add_argument("--i", help="image number: 0~67", type=int)
    args = parser.parse_args()
    result_name = sys.argv[0]
    type_num = args.t
    nb_x_classes = 2
    nb_z_classes = nb_x_classes
    img_num = args.i
    k = 50
# NOTE(review): bare `except:` silently falls back to the defaults below
# (e.g. when argparse raises SystemExit inside a notebook); catching
# SystemExit explicitly would be safer.
except:
    result_name = "test"
    type_num = 1
    nb_x_classes = 2
    nb_z_classes = nb_x_classes
    img_num = 0
    k = 50
# Noise-level labels indexed by type_num.
PI_type_arr = ['20%', '30%', '10%']
# In[ ]:
# Configure TensorFlow/Keras: pin GPU ordering and grow memory on demand.
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]='0, 1, 2, 3'
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
session = tf.Session(config=config)
K.backend.set_session(session)
# In[ ]:
# Load the true channel matrix and the chosen (clean, noisy) image pair.
PI_type = PI_type_arr[type_num]
PI = load_channel('true', nb_x_classes, 1, type_num)
print(PI)
f = open('results/' + '%d_'%nb_x_classes + PI_type + '_' + result_name,'a')
x,z = load_img(PI, [img_arr[img_num]])
param_name = "NDUDE_%d"%(type_num)
n = len(x)
print(x[:20])
print(z[:20])
print(n)
print(error_rate(x,z))
# In[ ]:
# State Estimation Process
SE = State_Estimation_Process(n, k, nb_x_classes, nb_z_classes, x, z, param_name = param_name)
error, x_hat = SE.N_DUDE(PI)
# Append "<image index> <normalized error>" to the results file.
f.write("%d %.5f\n"%(img_num, error))
print('%d %.5f'%(img_num, error))
|
[
"hongjoon0805@gmail.com"
] |
hongjoon0805@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.