blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
93d1c4b038d428ed57ee5e22dfd6aa42a7abb5be
|
d0168d08221da5bf95c0dd511efeecddc9b0a73d
|
/profiles/migrations/0001_initial.py
|
cdaa8070d22ad1710c0d0041619d3e087f6b3285
|
[] |
no_license
|
alexarirok/roret-farm-software
|
900b5842c7b39c4a19543e138a719e4b496531a9
|
aa23fd729351f0d045b2e310dc839a8b4d639c6d
|
refs/heads/master
| 2021-04-08T21:03:59.709224
| 2020-05-01T00:07:53
| 2020-05-01T00:07:53
| 248,808,990
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 853
|
py
|
# Generated by Django 3.0.5 on 2020-04-23 21:43
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the Profile table."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Profile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('firstName', models.CharField(max_length=50, null=True)),
                ('lastName', models.CharField(max_length=50)),
                ('email', models.EmailField(blank=True, max_length=254)),
                # NOTE(review): IntegerField drops leading zeros and '+' prefixes;
                # phone numbers are usually better stored as CharField.
                ('phoneNumber', models.IntegerField(blank=True)),
                ('department', models.CharField(blank=True, max_length=30)),
                ('bio', models.TextField(blank=True, max_length=500)),
            ],
        ),
    ]
|
[
"akorir233@gmail.com"
] |
akorir233@gmail.com
|
22f3c9cd4a9a1004dd7c7bb512643d2bbf2cbdb2
|
048405bfa0b48eaf78dd2298bdfe61472bd74eef
|
/scripts/multiproc.py
|
d0bf77fe6d0ed47a785ac752a9bddf3529d5e1ed
|
[] |
no_license
|
sousa-edvan/greedy_grasp_ant
|
2218ae20f707baa8d5428db76129e5c758a21d07
|
12f5ac99b4d0e9599a2ecd138f8f6a3551fe2473
|
refs/heads/master
| 2022-01-27T02:30:35.977782
| 2019-07-19T16:39:20
| 2019-07-19T16:39:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 654
|
py
|
import os
import pandas as pd
import subprocess
from multiprocessing import cpu_count, Pool
from auto_tqdm import tqdm
from notipy_me import Notipy
def score(data):
    """Run the greedy_grasp_ant binary on one data file and return its
    "mean" column as a single-row DataFrame (row label = the file name)."""
    csv = "scores/{data}.csv".format(data=data)
    # The binary logs per-run results to `csv`; --all presumably runs every
    # variant — confirm against the gga binary's CLI.
    subprocess.run([
        "./gga/greedy_grasp_ant",
        "--data=data/{data}".format(data=data),
        "--log={csv}".format(csv=csv),
        "--all"
    ])
    df = pd.read_csv(csv, index_col=0)[["mean"]]
    df.columns = [data]
    return df.T
# One task per input file in data/; scored in parallel across all cores.
data = os.listdir("data")
with Notipy():  # presumably sends a completion notification — see notipy_me docs
    with Pool(cpu_count()) as p:
        df = pd.concat(list(tqdm(p.imap(score, data), total=len(data))))
    df.to_csv("scores/all_scores.csv")
|
[
"cappelletti.luca94@gmail.com"
] |
cappelletti.luca94@gmail.com
|
46716e05f494d85df10a692e589e37f999ee1bdd
|
487ce91881032c1de16e35ed8bc187d6034205f7
|
/codes/CodeJamCrawler/CJ/16_0_2_anrieff_b.py
|
9217d8d49f04d08baad00e10f7695015df8cedd7
|
[] |
no_license
|
DaHuO/Supergraph
|
9cd26d8c5a081803015d93cf5f2674009e92ef7e
|
c88059dc66297af577ad2b8afa4e0ac0ad622915
|
refs/heads/master
| 2021-06-14T16:07:52.405091
| 2016-08-21T13:39:13
| 2016-08-21T13:39:13
| 49,829,508
| 2
| 0
| null | 2021-03-19T21:55:46
| 2016-01-17T18:23:00
|
Python
|
UTF-8
|
Python
| false
| false
| 570
|
py
|
#!/usr/bin/env python
# Contestant: Veselin 'anrieff' Georgiev
# Round: Google Code Jam Qualification 2016
# Task: B. Revenge of the pancakes
# Solution: Greedy. At each step, find the largest single-colored block at the top, and flip it, until we finish.
# Python 2 script (raw_input, print statement, xrange) — do not run under py3.
TC = int(raw_input().strip())  # number of test cases
for tc in xrange(1, TC + 1):
    print "Case #%d:" % tc,  # trailing comma: the step count prints on the same line
    a = list(raw_input().strip())  # pancake stack as '+'/'-' characters, top first
    n = len(a)
    steps = 0
    # Greedy: while any '-' remains, flip the maximal single-character prefix.
    # Each flip merges the top run into the run below it, so the number of
    # runs strictly decreases and the loop terminates.
    while a.count('-') != 0:
        steps += 1
        i = 0
        # i = length of the top single-character run
        while i < n and a[i] == a[0]:
            i += 1
        for j in xrange(i):
            a[j] = '-' if a[j] == '+' else '+' # reverse
    print steps
|
[
"[dhuo@tcd.ie]"
] |
[dhuo@tcd.ie]
|
e82f2bd71cc0846186353d8c20817723d286fc4f
|
4d4fcde3efaa334f7aa56beabd2aa26fbcc43650
|
/server/src/uds/reports/lists/__init__.py
|
2cb963d21ee727e9b5b0bcc891ec8e5716d7db72
|
[] |
no_license
|
xezpeleta/openuds
|
a8b11cb34eb0ef7bb2da80f67586a81b2de229ef
|
840a7a02bd7c9894e8863a8a50874cdfdbf30fcd
|
refs/heads/master
| 2023-08-21T17:55:48.914631
| 2021-10-06T10:39:06
| 2021-10-06T10:39:06
| 414,489,331
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,712
|
py
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015-2020 Virtual Cable S.L.U.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of Virtual Cable S.L. nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
@author: Adolfo Gómez, dkmaster at dkmon dot com
"""
# Make reports visible to autoloader
from . import users
|
[
"dkmaster@dkmon.com"
] |
dkmaster@dkmon.com
|
89dffaba38711b93fdcb658ebbf0b28432889f78
|
113b962bd5e2eb770067bd374a15dfe8a1c2d09f
|
/py_scripts/get_mappedcount_byLibID.py
|
8a5cee444afcf4ef2e1d1d2b47beaaa11f6be665
|
[] |
no_license
|
aungthurhahein/biotech_script
|
ecce51950bcef69405843da12ece2f84ea5541d6
|
2fda699343e6c46543fa1df2412c8ca2f2622cda
|
refs/heads/master
| 2020-12-24T06:20:13.028141
| 2016-07-06T15:23:34
| 2016-07-06T15:23:34
| 25,574,741
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 827
|
py
|
"""
# get occurences of ref_ids by Lib_IDs
# modification tips: file type, column of file ids
# __author__ = 'atrx'
# Date: 22012015
"""
import sys
from Bio import SeqIO
# Python 2 script: prints the mapping-file line for each FASTA contig id,
# or "0 <id>" when the contig has no mapping entry.
usage = "Usage %s infile" % sys.argv[0]  # usage message for missing arguments
try:
    fastafile = sys.argv[1]  # FASTA file of contigs
    contigid = sys.argv[2]   # whitespace-separated mapping file (key in column 2)
except:
    # NOTE(review): bare except hides real errors, and sys.exit(1) is
    # evaluated as a print argument, so the process exits mid-print.
    print usage, sys.exit(1)
fasta_file = open(fastafile, 'r')
ref_file = open(contigid, 'r')
id_list = []      # raw lines of the mapping file
contig_list = []  # sequence ids parsed from the FASTA file
id_key = []       # column 2 of each mapping line, used as the lookup key
for l in ref_file:
    id = l.split()
    id_list.append(l)
    id_key.append(id[1].strip())
for seq in SeqIO.parse(fasta_file, "fasta"):
    contig_list.append(seq.id)
for seq_record in contig_list:
    contigid = seq_record.strip()  # NOTE(review): shadows the argv-derived name above
    if contigid in id_key:
        lo = id_key.index(contigid)
        print id_list[lo].strip()
    else:
        print "0 " + seq_record
|
[
"aungthurhahein@gmail.com"
] |
aungthurhahein@gmail.com
|
fa568dcd357b037a884e720bb3f4b2961b3d5e46
|
343413e76c09d2bd3d009f382d9dcd19c984d58f
|
/.history/main_20201229180214.py
|
e7c1f84e7a234bab2a2ddd0a968647204387eebe
|
[] |
no_license
|
rozbeh1212/cipher
|
7b81e640501639cefb0fe6bf100647dd2602291e
|
abdebdd7d1e155ffab78ce38be8bf28074366c42
|
refs/heads/master
| 2023-02-04T13:44:36.892470
| 2020-12-29T14:44:10
| 2020-12-29T14:44:10
| 325,314,022
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 991
|
py
|
# The 26 lowercase letters, doubled so an index plus a shift of up to 26
# stays in range without explicit wrap-around logic.
# Bug fix: the original was missing the comma after the first 'z', which
# silently concatenated 'z' 'a' into a single 'za' element (51 entries,
# and no plain 'z' in the first half), breaking every shift past 'y'.
alphabet = [
    'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
    'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
    'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
    'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
]
# Interactive inputs: mode ('encode'/'decode'), the message, and the shift.
direction = input("Type 'encode' to encrypt, type 'decode' to decrypt:\n")
text = input("Type your message:\n").lower()
shift = int(input("Type the shift number:\n"))
def caeser(start_text, shift_amount, cipher_direction):
    """Caesar-shift *start_text* by *shift_amount*, print and return the result.

    Parameters:
        start_text: lowercase message consisting of letters a-z.
        shift_amount: positive shift (expected 1..26).
        cipher_direction: 'encode' shifts forward, 'decode' shifts back.

    Bug fixes vs. the original (an in-progress editor snapshot):
      * parameter was misspelled `shipft_amount`, so the call site using
        `shift_amount=` raised TypeError;
      * `alphabet.index(position)` referenced `position` before assignment
        (NameError) — it must look up `letter`;
      * the decode sign flip was inside the loop, alternating direction on
        every letter; it is applied once, up front;
      * the final print referenced undefined names `c` and `plane_text`.

    Returns the transformed text (the original returned None).
    """
    from string import ascii_lowercase
    # Built locally (doubled) so index + shift stays in range without
    # wrap-around logic, independent of the module-level list.
    letters = list(ascii_lowercase) * 2
    if cipher_direction == "decode":
        shift_amount *= -1  # flip once, not once per letter
    end_text = ""
    for letter in start_text:
        position = letters.index(letter)
        # Negative indices wrap naturally for decode shifts.
        end_text += letters[position + shift_amount]
    print(f"the {cipher_direction}d text is {end_text}")
    return end_text
# NOTE(review): `art` is imported but never used here — an ASCII-art banner
# was presumably planned (this file is an editor .history snapshot).
import art
caeser(start_text=text, shift_amount=shift, cipher_direction=direction)
|
[
"eb.mehrdad@gmail.com"
] |
eb.mehrdad@gmail.com
|
84a2a9db3cd847433912ae84459035f42045f6bc
|
da3e36172daaf863ef73372f8c36cc2629ec1769
|
/UMDC/03/17g.py
|
ce55196990dd77c97e38c5ebc70122baad56ce1d
|
[] |
no_license
|
mentecatoDev/python
|
08eef1cb5a6ca2f16b01ee98192ccf1a65b9380a
|
80ddf541d3d1316ba8375db8f6ec170580e7831b
|
refs/heads/master
| 2021-06-30T07:03:51.957376
| 2021-02-22T09:40:46
| 2021-02-22T09:40:46
| 222,322,503
| 3
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,333
|
py
|
"""
Ejercicio 17g
Escribir funciones que resuelvan los siguientes problemas:
g) Dadas dos fechas (dia1, mes1, año1, dia2, mes2, año2), indicar el tiempo
transcurrido entre ambas, en años, meses y dias. Nota: en todos los casos,
involucrar las funciones escritas previamente cuando sea posible.
"""
def bisiesto(anio):
    """Return True if *anio* is a Gregorian leap year."""
    # Every 400th year is a leap year; other century years are not;
    # otherwise divisibility by 4 decides.
    if anio % 400 == 0:
        return True
    if anio % 100 == 0:
        return False
    return anio % 4 == 0
def dias_mes(mes, anio):
    """Return the day count of month *mes* in year *anio*, or -1 if the
    month number is invalid."""
    if mes == 2:
        return 29 if bisiesto(anio) else 28
    if mes in (4, 6, 9, 11):
        return 30
    if mes in (1, 3, 5, 7, 8, 10, 12):
        return 31
    return -1
def validar_fecha(dia, mes, anio):
    """Return True if dia/mes/anio is a valid date, False otherwise.

    Bug fix: the original returned -1 for an invalid month, and -1 is
    truthy, so callers testing `if validar_fecha(...)` treated invalid
    months as valid dates.  It now always returns a bool (callers here
    only use it in boolean context, so this is backward compatible).
    Also rejects dia < 1, which the original accepted.
    """
    dm = dias_mes(mes, anio)
    if dm == -1:
        # Invalid month (dias_mes sentinel); covers mes > 12 and mes < 1.
        return False
    if dia < 1 or dia > dm:
        return False
    return True
def dias_faltan(dia, mes, anio):
    """Days remaining until the end of the month, or -1 for an invalid date."""
    if not validar_fecha(dia, mes, anio):
        return -1
    return dias_mes(mes, anio) - dia
print(dias_faltan(1, 1, 2000))
def dias_fin_anio(dia, mes, anio):
    """Days from the given date to December 31 of the same year, or -1 for
    an invalid date."""
    if not validar_fecha(dia, mes, anio):
        return -1
    # Rest of the current month, plus every full month after it.
    total = dias_faltan(dia, mes, anio)
    for mes_restante in range(mes + 1, 13):
        total += dias_mes(mes_restante, anio)
    return total
def dias_principio(dia, mes, anio):
    """Days elapsed since January 1 of the year (Jan 1 -> 0), or -1 for an
    invalid date."""
    if not validar_fecha(dia, mes, anio):
        return -1
    # (days in year - 1) minus days left until year end.
    ultimo_dia = 365 if bisiesto(anio) else 364
    return ultimo_dia - dias_fin_anio(dia, mes, anio)
def dias_transcurridos(dia1, mes1, anio1, dia2, mes2, anio2):
    """Days elapsed between two dates (the second assumed not earlier)."""
    if anio1 == anio2:
        # Same year: difference of the day-of-year offsets.
        total = -dias_principio(dia1, mes1, anio1) + \
            dias_principio(dia2, mes2, anio2)
    else:
        # Different years: remainder of year 1, head of year 2, and the
        # +1 bridges Dec 31 -> Jan 1.
        total = dias_fin_anio(dia1, mes1, anio1) + \
            dias_principio(dia2, mes2, anio2)+1
    # Full years strictly between the two dates (empty range if adjacent
    # or equal years, so this is safe outside the else branch).
    for a in range(anio1+1, anio2):
        if bisiesto(a):
            total += 366
        else:
            total += 365
    return total
print(dias_transcurridos(1, 1, 2001, 31, 12, 2002))
|
[
"favila@iesromerovargas.com"
] |
favila@iesromerovargas.com
|
eee6c26c594ab5b9fa6e26288db0e7e9dee3d498
|
ff886f5f947460576feaec2a049f6a9f78f2a63f
|
/core/management/commands/wait_for_db.py
|
460989d579a419bc219cba5e76cc9fcb204aa701
|
[
"MIT"
] |
permissive
|
devendraprasad1984/loan_payment_app
|
2bc927afbc084504bb10a959105d72f6f419e2c8
|
1a4c31d03a8c5ecf4dae2a981373649f4f699aa3
|
refs/heads/main
| 2023-07-21T19:00:40.692978
| 2021-09-09T03:36:04
| 2021-09-09T03:36:04
| 400,111,124
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 707
|
py
|
import time
from django.db import connections
from django.db.utils import OperationalError
from django.core.management.base import BaseCommand
class Command(BaseCommand):
    """django overriding default app run until database is made available"""
    def handle(self, *args, **options):
        # Poll once per second until Django hands out the 'default' connection.
        # NOTE(review): connections['default'] returns Django's lazy connection
        # wrapper — confirm that merely accessing it raises OperationalError
        # when the DB is down, or the loop may succeed on the first pass.
        self.stdout.write('waiting for db connection...')
        db_conn = None
        while not db_conn:
            try:
                db_conn = connections['default']
            except OperationalError:
                self.stdout.write(self.style.ERROR('database is not available, re-checking in 1sec'))
                time.sleep(1)
        self.stdout.write(self.style.SUCCESS('database is available'))
|
[
"devendraprasad1984@gmail.com"
] |
devendraprasad1984@gmail.com
|
95414f7ed3b48f7baf6bd13799ea4698d7f6093f
|
199522cb43b4e2c7e3bf034a0e604794258562b1
|
/0x0F-python-object_relational_mapping/3-my_safe_filter_states.py
|
3659d402edd14791ff0d3dce555884770499752c
|
[] |
no_license
|
jormao/holbertonschool-higher_level_programming
|
a0fd92f2332f678e6fe496057c04f2995d24a4ac
|
360b3a7294e9e0eadcadb57d4c48c22369c05111
|
refs/heads/master
| 2020-09-29T01:36:20.094209
| 2020-05-15T03:27:06
| 2020-05-15T03:27:06
| 226,915,744
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 607
|
py
|
#!/usr/bin/python3
"""
script that takes in arguments and displays all values in the
states table of hbtn_0e_0_usa where name matches the argument.
But this time, write one that is safe from MySQL injections!
"""
import MySQLdb
from sys import argv
if __name__ == "__main__":
    # CLI arguments: argv[1] = user, argv[2] = password, argv[3] = database,
    # argv[4] = state name to match.
    db = MySQLdb.connect(host="localhost", port=3306, user=argv[1],
                         passwd=argv[2], db=argv[3])
    cur = db.cursor()
    # Parameterized query (%s placeholder + args tuple): the driver escapes
    # the value, which is what makes this safe from SQL injection.
    cur.execute("SELECT * FROM states\
        WHERE name = %s\
        ORDER BY id", (argv[4],))
    rows = cur.fetchall()
    for row in rows:
        print(row)
    cur.close()
    db.close()
|
[
"jormao@gmail.com"
] |
jormao@gmail.com
|
041350efe6b160a115e9e22c301c74a34ff53193
|
71257430418ed7410ddffb6df692a5e816eb53b7
|
/61hunter.py
|
ed47aa6e3c043824bfe5d9b810408fd96bd965c2
|
[] |
no_license
|
aarthisandhiya/aarthi
|
917283541b9aa133db5d50a3b68eda2a10c38af7
|
00b31831832ea573dfd886eb0001ad824325136d
|
refs/heads/master
| 2020-04-15T05:10:42.585357
| 2019-07-21T13:57:58
| 2019-07-21T13:57:58
| 164,411,781
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 199
|
py
|
# Reads: a count, a list of ints, then two ints u and v.
# Appears to count steps from the first occurrence of u until a value >= v
# is reached — intent unverifiable from here (competitive-programming stub).
a=int(input())  # read but otherwise unused
c=0
b=[int(a) for a in input().split()]  # NOTE: the comprehension's `a` shadows the int above
u,v=map(int,input().split())
for i in range(0,len(b)):
    if b[i]==u:
        # NOTE(review): if no element from position i onward reaches v, the
        # inner loop walks past the end and b[i] raises IndexError; the outer
        # for-loop also resets i on its next iteration.
        while b[i]<int(v):
            c=c+1
            i=i+1
print(c)
|
[
"noreply@github.com"
] |
aarthisandhiya.noreply@github.com
|
0ac0305052893eb0942f039d2bc543f72d5454e5
|
cf09d6430e37b5460d7208d6cae6d3af0fa15925
|
/jsonbot/jsb/lib/reboot.py
|
9209bd73573898a076abeb23cc8bd8fa26b3fd6a
|
[
"MIT"
] |
permissive
|
Lujeni/old-projects
|
2bbf0ff89852a3e4a9677475a615d2ee4b07d635
|
657304c8b017a98935de9728fc695abe8be7cc4f
|
refs/heads/master
| 2021-03-12T23:08:34.054777
| 2014-10-16T23:10:15
| 2014-10-16T23:10:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,632
|
py
|
# jsb/reboot.py
#
#
""" reboot code. """
## jsb imports
from jsb.lib.fleet import getfleet
from jsb.imports import getjson
json = getjson()
## basic imports
import os
import sys
import pickle
import tempfile
import logging
import time
## reboot function
def reboot():
    """ reboot the bot. """
    # Replace the current process with a fresh run of the same command line
    # (argv[0] re-executed with the original arguments); never returns.
    logging.warn("reboot - rebooting")
    os.execl(sys.argv[0], *sys.argv)
## reboot_stateful function
def reboot_stateful(bot, ievent, fleet, partyline):
    """ reboot the bot, but keep the connections (IRC only). """
    # Strategy: collect resume data from every fleet bot into `session`,
    # dump it to a temp file, then exec a new process with -r <sessionfile>.
    logging.warn("reboot - doing statefull reboot")
    session = {'bots': {}, 'name': bot.cfg.name, 'channel': ievent.channel, 'partyline': []}
    fleet = getfleet()  # NOTE(review): shadows the `fleet` parameter
    for i in fleet.bots:
        logging.warn("reboot - updating %s" % i.cfg.name)
        data = i._resumedata()
        if not data: continue  # bots with nothing to resume are skipped
        session['bots'].update(data)
        # sxmpp/convore bots cannot carry their connection across exec.
        if i.type == "sxmpp": i.exit() ; continue
        if i.type == "convore": i.exit() ; continue
        if i.type == "tornado":
            i.exit()
            time.sleep(0.1)  # give tornado a moment to wind down
            for socketlist in i.websockets.values():
                for sock in socketlist: sock.stream.close()
    session['partyline'] = partyline._resumedata()
    sfile, sessionfile = tempfile.mkstemp('-session', 'jsb-', text=True)
    logging.warn("writing session file %s" % sessionfile)
    # NOTE(review): open() handle is never closed; exec below makes it moot.
    json.dump(session, open(sessionfile, "w"))
    # Strip any previous "-r <file>" pair from argv before re-adding ours.
    args = []
    skip = False
    for a in sys.argv[1:]:
        if skip: skip = False ; continue
        if a == "-r": skip = True ; continue
        args.append(a)
    os.execl(sys.argv[0], sys.argv[0], '-r', sessionfile, *args)
|
[
"julien@thebault.co"
] |
julien@thebault.co
|
e25ee8942b20a0704262265705ad3ad2b5b7b407
|
f99f30752e9bb9e023b37c731f64fb2155ac3daf
|
/03/zip.py
|
69160f8e55ebbe1da4acd417e0cd571fe8488b3e
|
[] |
no_license
|
chu83/python-basics
|
148ff6977f5ca04775951d90ed1f5f763c51a9ff
|
19fe0937842c668f604876be0aeb0962a2630dd2
|
refs/heads/master
| 2023-01-19T01:29:25.203738
| 2020-11-29T18:34:33
| 2020-11-29T18:34:33
| 311,258,549
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 335
|
py
|
# Demonstrates zip(): pairs elements until the shorter input is exhausted.
print('========== zip() 함수 사용 예 ===========')
s1 = ['foo', 'bar', 'baz']
s2 = ['one', 'two', 'three', 'four']  # 'four' is dropped: zip stops at the shorter list
z = zip(s1, s2)
print(z, type(z))  # a lazy zip object, not a list
print('========== 순회1 ===========')
for t in z:
    print(t, type(t))  # each item is a tuple
# The zip iterator above is now exhausted; rebuild before iterating again.
z = zip(s1, s2)
for a, b in z:
    print(a, b)  # tuple unpacking directly in the for target
print('========== 순회2 ===========')
z = zip(s1, s2)
|
[
"59534807+chu83@users.noreply.github.com"
] |
59534807+chu83@users.noreply.github.com
|
318d59a2c7fd3d07c465da350c7d3b65dd8f4934
|
91d1a6968b90d9d461e9a2ece12b465486e3ccc2
|
/ivs_write_f/playback-key-pair_import.py
|
e6227c4b54f4786793a1d735b01d2cf516e72ad9
|
[] |
no_license
|
lxtxl/aws_cli
|
c31fc994c9a4296d6bac851e680d5adbf7e93481
|
aaf35df1b7509abf5601d3f09ff1fece482facda
|
refs/heads/master
| 2023-02-06T09:00:33.088379
| 2020-12-27T13:38:45
| 2020-12-27T13:38:45
| 318,686,394
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 773
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Thin wrapper: prompts for/writes parameters for the AWS IVS
# import-playback-key-pair CLI call via the shared helper.
import os
import sys
# Make the repository root importable so the `common` package resolves.
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import write_parameter
# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/describe-instances.html
if __name__ == '__main__':
    """
    delete-playback-key-pair : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ivs/delete-playback-key-pair.html
    get-playback-key-pair : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ivs/get-playback-key-pair.html
    list-playback-key-pairs : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ivs/list-playback-key-pairs.html
    """
    write_parameter("ivs", "import-playback-key-pair")
|
[
"hcseo77@gmail.com"
] |
hcseo77@gmail.com
|
4cdb8d4ce152583225c607c387d527a82eced8d3
|
7c9707f0f1cb8e633ac605934f3dbd8036790868
|
/projet/rpi_manager/models.py
|
f61c7c196096c5f2351a5ccd5919b2269e0b3f2e
|
[] |
no_license
|
ometeore/hydropo
|
891e1abd4c1b8ccd0a3b27a043abf894b70ceb5b
|
324076d4b7ddbd14e718c424eb24d129c2a2243c
|
refs/heads/master
| 2023-06-14T08:35:55.838469
| 2021-07-04T16:28:09
| 2021-07-04T16:28:09
| 290,198,666
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,146
|
py
|
from django.db import models
from django import forms
from datetime import datetime
from channels.layers import get_channel_layer
from asgiref.sync import async_to_sync
class Rpi(models.Model):
    """A Raspberry Pi unit: identity, connection state, and websocket
    broadcast helpers (uid_name doubles as the channels group name)."""
    name = models.CharField(max_length=200)
    uid_name = models.CharField(max_length=200)  # unique id; channels group name
    last_connect = models.DateTimeField()
    is_conected = models.BooleanField()  # (sic) renaming would need a migration
    # Rather than comparing strings (a source of bugs):
    # import datetime
    # also handle the midnight rollover
    # date_time_obj = datetime.datetime.strptime(date_time_str, '%Y-%m-%d %H:%M:%S.%f')
    def compare_time(self, begin_test, end_test, cat):
        """Return True if [begin_test, end_test] overlaps no existing slot.

        cat truthy -> water schedule, falsy -> lights schedule.
        begin_test / end_test are time *strings*; overlap tests rely on
        lexicographic ordering of str(TimeField) — see fragility note above.
        """
        if cat:
            schedule = self.water.all()
        else:
            schedule = self.lights.all()
        for times in schedule:
            # new interval starts inside an existing slot
            if begin_test > str(times.begin) and begin_test < str(times.end):
                return False
            # new interval ends inside an existing slot
            if end_test > str(times.begin) and end_test < str(times.end):
                return False
            # new interval completely wraps an existing slot
            if begin_test < str(times.begin) and end_test > str(times.end):
                return False
        return True
    def broadcast_schedule(self):
        """Push the full water/lights schedule plus pH and EC targets to
        this Pi's websocket group."""
        message = {}
        message["manual"] = False
        schedule_water_list = [
            [str(elm.begin), str(elm.end)] for elm in self.water.all()
        ]
        message["water"] = schedule_water_list
        schedule_lights_list = [
            [str(elm.begin), str(elm.end)] for elm in self.lights.all()
        ]
        message["lights"] = schedule_lights_list
        # NOTE(review): assumes exactly one Ph/Ec row has objectif=True;
        # raises IndexError if none exists.
        objectif_ph = self.ph.filter(objectif=True)
        message["ph"] = objectif_ph[0].value
        objectif_ec = self.ec.filter(objectif=True)
        message["ec"] = objectif_ec[0].value
        ####### This part is sending the message to the websocket in group call "group0"
        channel_layer = get_channel_layer()
        async_to_sync(channel_layer.group_send)(
            self.uid_name, {"type": "send_message", "message": message}
        )
    def broadcast_manual(self, tool):
        """Ask the Pi to put *tool* into manual mode via its websocket group."""
        message = {}
        message["manual"] = True
        message["tool"] = tool
        print("ASK FOR MANUAL MODE FOR ID: {}".format(self.uid_name))
        print(message)
        channel_layer = get_channel_layer()
        async_to_sync(channel_layer.group_send)(
            self.uid_name, {"type": "send_message", "message": message}
        )
class WaterSchedule(models.Model):
    """One watering time slot for a Pi (reverse accessor: rpi.water)."""
    begin = models.TimeField()
    end = models.TimeField()
    rpi = models.ForeignKey(Rpi, on_delete=models.CASCADE, related_name="water")
class LightSchedule(models.Model):
    """One lighting time slot for a Pi (reverse accessor: rpi.lights)."""
    begin = models.TimeField()
    end = models.TimeField()
    rpi = models.ForeignKey(Rpi, on_delete=models.CASCADE, related_name="lights")
class Ph(models.Model):
    """A pH measurement or target; objectif=True marks the target row."""
    date = models.DateTimeField()
    value = models.FloatField()
    objectif = models.BooleanField()
    rpi = models.ForeignKey(Rpi, on_delete=models.CASCADE, related_name="ph")
class Ec(models.Model):
    """An EC measurement or target; objectif=True marks the target row."""
    date = models.DateTimeField()
    value = models.IntegerField()
    objectif = models.BooleanField()
    rpi = models.ForeignKey(Rpi, on_delete=models.CASCADE, related_name="ec")
|
[
"pilt64@hotmail.fr"
] |
pilt64@hotmail.fr
|
6d21cd382eeb98e10bb5bc8a2a202726211def5f
|
ce6ace34704e74c2a53e9b38b2630876d9cd52e2
|
/mdias_addons/metro_park_maintenance/models/day_plan_limit.py
|
8f25985b0d5a8f7e281a0c359a8c74c657e8ef34
|
[] |
no_license
|
rezaghanimi/main_mdias
|
e3cfd8033204d8e7e484041f506892621a3e3479
|
13b428a5c4ade6278e3e5e996ef10d9fb0fea4b9
|
refs/heads/master
| 2022-09-17T20:15:42.305452
| 2020-05-29T05:38:35
| 2020-05-29T05:38:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 861
|
py
|
# -*- coding: utf-8 -*-
from odoo import models, fields, api
class DayPlanLimit(models.Model):
    '''
    Day plan limit (per location).
    '''
    _name = 'metro_park_maintenance.day_plan_limit'
    location = fields.Many2one(string='地点', comodel_name='metro_park_base.location')
    max_repair_after_high_run = fields.Integer(string='高峰车最大检修数量')  # max peak-hour repairs
    max_repair_back_time = fields.Char(string="返回时间", help='最大返回时间')  # latest return time
class DayPlanLimit(models.TransientModel):
    '''
    Day plan wizard limit.
    NOTE(review): reuses the class name DayPlanLimit, shadowing the model
    above at module level.  Odoo registers each via _name so both models
    exist, but the duplicate Python name is confusing — consider renaming.
    '''
    _name = 'metro_park_maintenance.day_plan_wizard_limit'
    location = fields.Many2one(string='地点', comodel_name='metro_park_base.location')
    max_repair_after_high_run = fields.Integer(string='高峰车最大检修数量')  # max peak-hour repairs
    max_repair_back_time = fields.Char(string="返回时间", help='最大返回时间')  # latest return time
|
[
"619851623@qq.com"
] |
619851623@qq.com
|
f23d62dafdbb77a295d93ac632a4441e517a6c10
|
c92d5b8509f23444622529aa24d4bc85bf1d3c9f
|
/main/question47/book1.py
|
44b832b082eb8ed9403996e4f3f5e5ee8a3f4ad1
|
[] |
no_license
|
qcymkxyc/JZoffer
|
75dfb747394018f14552f521413b01a5faa9c07f
|
28628616589061653a8322d5b400f9af32f2249d
|
refs/heads/master
| 2021-07-15T00:53:00.711360
| 2019-02-10T03:16:52
| 2019-02-10T03:16:52
| 149,714,958
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 926
|
py
|
#!/usr/bin/env python
# _*_coding:utf-8_*_
"""
@Time : 19-1-24 上午10:58
@Author: qcymkxyc
@File: book1.py
@Software: PyCharm
"""
def max_value(matrix):
    """Dynamic programming: maximum path sum from the top-left to the
    bottom-right cell, moving only right or down.

    :param matrix: List[List[int]] grid of values
    :return: int, the largest achievable sum
    """
    n_rows, n_cols = len(matrix), len(matrix[0])
    best = [[0] * len(row) for row in matrix]
    # First row and first column each have a single path: running prefix sums.
    running = 0
    for col in range(n_cols):
        running += matrix[0][col]
        best[0][col] = running
    running = 0
    for row in range(n_rows):
        running += matrix[row][0]
        best[row][0] = running
    # Interior cells: best predecessor (above or left) plus the cell value.
    for row in range(1, n_rows):
        for col in range(1, n_cols):
            best[row][col] = matrix[row][col] + max(
                best[row - 1][col], best[row][col - 1]
            )
    return best[-1][-1]
|
[
"qcymkxyc@163.com"
] |
qcymkxyc@163.com
|
8168b5bf889b97e447da255e86d69e116f571d47
|
c8975f8bbe32637399a3ca00ad21e8e6602e358d
|
/aoc/year2021/day14/day14.py
|
bf40f0314c32c659180cf4eaa5ae69c3475ea98d
|
[
"Unlicense"
] |
permissive
|
Godsmith/adventofcode
|
0e8e0beb813300206b2810b523b54a6c40ca936f
|
3c59ea66830f82b63881e0ea19bfe3076f2a500d
|
refs/heads/master
| 2021-12-28T13:05:42.579374
| 2021-12-26T22:19:55
| 2021-12-26T22:24:01
| 225,074,419
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 977
|
py
|
from collections import Counter
from aocd import get_data
from more_itertools import pairwise
def run(data, iterations):
new_element_from_pair = {tuple(line.split(" -> ")[0]): line.split(" -> ")[1] for line in data.splitlines()[2:]}
new_pairs_from_pair = {(e1, e2): [(e1, inserted), (inserted, e2)] for (e1, e2), inserted in new_element_from_pair.items()}
template = data.splitlines()[0]
element_counter = Counter(template)
pair_counter = Counter(pairwise(template))
for _ in range(iterations):
new_pair_counter = Counter()
for pair in pair_counter:
for new_pair in new_pairs_from_pair[pair]:
new_pair_counter[new_pair] += pair_counter[pair]
element_counter[new_element_from_pair[pair]] += pair_counter[pair]
pair_counter = new_pair_counter
return element_counter.most_common()[0][1] - element_counter.most_common()[-1][1]
# Entry point: aocd fetches the user's puzzle input over the network.
print(run(get_data(), 10))  # part 1
print(run(get_data(), 40))  # part 2
|
[
"filip.lange@gmail.com"
] |
filip.lange@gmail.com
|
f32d24f9fdb32d8eb2a1eef5c82ae7102d01c864
|
cc5f2ee6a5de6faf141f10b1b10717243821a0a5
|
/problems/problem 106.py
|
e9b15f310b5e23ecf09e8a1864e6f1ea2dd3f449
|
[] |
no_license
|
alexandrepoulin/ProjectEulerInPython
|
faf634025c86bc74fc764d315813bf1706e58f63
|
97cb52cdd7508f2db891d1644e3d247814571718
|
refs/heads/master
| 2020-04-22T11:47:04.594328
| 2019-02-16T00:17:40
| 2019-02-16T00:17:40
| 170,341,883
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 793
|
py
|
print("Starting")
import useful
## only need to check subset pairs which have the same number of elements
## there are useful.nChooseK(n,s)*useful.nChooseK(n-s,s)*0.5 such pairs
## for a specific subset, we only need to look at pairs which have interweining elements
## such as (1,3)(2,4)
## number of times this doesn't happend is given by Catalan numbers given by c
## multiply that by the total number of ways to make two subsets of that size
## or useful.nChooseK(n,2*s)
## and you find how many pairs you need to check for a subset size
def c(s):
    """Catalan number C(2s, s) / (s + 1) as a float (true division)."""
    return useful.nChooseK(2*s,s)/(s+1)
def x(n,s):
    """Equal-size subset pairs of size s (from n elements) that still need
    an explicit comparison: all unordered pairs minus the non-interleaving
    (Catalan-counted) ones."""
    return useful.nChooseK(n,s)*useful.nChooseK(n-s,s)*0.5-c(s)*useful.nChooseK(n,2*s)
answer = 0
N= 12
# Sizes 2..6: size-1 pairs never need checking, and 2*s <= N bounds s at 6.
for s in range(2,7):
    answer += x(N,s)
print(answer)
|
[
"alexpoulice@gmail.com"
] |
alexpoulice@gmail.com
|
3cd515eca280170fe3a32456a2936ef77006c086
|
286b6dc56323f982092ffafbfac8a32dbbaeb7ef
|
/Day_09/sample_pyvmomi.py
|
10033c34292fb7b9d33af55fc34b5e48284d85bb
|
[] |
no_license
|
learndevops19/pythonTraining-CalsoftInc
|
ccee0d90aadc00bfdb17f9578620f6bf92f80a4c
|
c5f61516b835339b394876edd1c6f62e7cc6f0c3
|
refs/heads/master
| 2021-02-05T04:27:17.590913
| 2019-11-20T17:27:06
| 2019-11-20T17:27:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,761
|
py
|
import ssl
from pyVim import connect
from pyVmomi import vim
def connectVcenter(vCenterHost, username, password, portNum=443):
    """
    Description : Performs vCenter connection.
    Parameters : vCenterHost - vCenter server ip address (STRING)
                 username - vCenter server username (STRING)
                 password - vCenter server password (STRING)
                 portNum - Port number for connection, default is 443 (INT)
    Returns : Service instance object
    """
    # Certificate verification is disabled — acceptable for a lab sample,
    # not for production use.
    context = ssl._create_unverified_context()
    si = connect.SmartConnect(
        host=vCenterHost, user=username, pwd=password, port=portNum, sslContext=context
    )
    return si
def getObj(content, vimtype, name):
    """
    Description: Get the vsphere object associated with a given text name
    Parameters : content - Data object having properties for the
                           ServiceInstance managed object (OBJECT)
                 vimtype - Managed object type (OBJECT)
                 name - Managed object entity name (STRING)
    Return: Matched Managed object, or None when no entity matches (OBJECT)
    """
    container = content.viewManager.CreateContainerView(
        content.rootFolder, vimtype, True
    )
    try:
        for vmObj in container.view:
            if vmObj.name == name:
                return vmObj
        return None  # explicit: no matching entity found
    finally:
        # Bug fix: the original never destroyed the view, leaking a live
        # ContainerView on the vCenter side for every lookup.
        container.DestroyView()
def getDatacenterByName(si, name):
    """
    Description: Find a datacenter by it's name and return it
    Parameters : si - vCenter connection session (OBJECT)
                 name - datacenter name (STRING)
    Return: datacenter Object, or None when not found (OBJECT)
    """
    return getObj(si.RetrieveContent(), [vim.Datacenter], name)
def getClusterByName(si, name):
    """
    Description: Find a cluster by it's name and return it
    Parameters : si - vCenter connection session (OBJECT)
                 name - cluster name (STRING)
    Return: cluster Object, or None when not found (OBJECT)
    """
    return getObj(si.RetrieveContent(), [vim.ClusterComputeResource], name)
def getHostByName(si, name):
    """
    Description: Find a host by it's name and return it
    Parameters : si - vCenter connection session (OBJECT)
                 name - host name (STRING)
    Return: host Object, or None when not found (OBJECT)
    """
    return getObj(si.RetrieveContent(), [vim.HostSystem], name)
def getVirtualMachineByName(si, name):
    """
    Description: Find a vm by it's name and return it
    Parameters : si - vCenter connection session (OBJECT)
                 name - vm name (STRING)
    Return: virtual machine Object, or None when not found (OBJECT)
    """
    return getObj(si.RetrieveContent(), [vim.VirtualMachine], name)
def getDatastoreByName(si, name):
    """
    Description: Find a datastore by it's name and return it
    Parameters : si - vCenter connection session (OBJECT)
                 name - datastore name (STRING)
    Return: datastore Object, or None when not found (OBJECT)
    """
    return getObj(si.RetrieveContent(), [vim.Datastore], name)
def getNetworkByName(si, name, isVDS=False):
    """
    Description: Find a network by it's name and return it
    Parameters : si - vCenter connection session (OBJECT)
                 name - network name (STRING)
                 isVDS - pass anything other than the literal False to
                         search distributed portgroups instead (BOOL)
    Return: network Object, or None when not found
    """
    # Preserve the original identity check: only the literal False selects
    # the standard-network branch.
    if isVDS is False:
        return getObj(si.RetrieveContent(), [vim.Network], name)
    return getObj(
        si.RetrieveContent(), [vim.dvs.DistributedVirtualPortgroup], name
    )
# connect vcenter
# NOTE(review): vcenterIp / vcenterUsername / vcenterPassword are not defined
# anywhere in this file — running it as-is raises NameError; they must be
# set (or imported) before this line.
siObj = connectVcenter(vcenterIp, vcenterUsername, vcenterPassword)
# print(siObj.content.about)
# get datacenter by name
datacenterName = "UCP CI Datacenter"
datacenterObj = getDatacenterByName(siObj, datacenterName)
# NOTE(review): getDatacenterByName returns None when the name is absent;
# the attribute accesses below would then raise AttributeError.
print("datacenterName is", datacenterObj.name, datacenterObj.datastore[0].name)
# get cluster by name
# clusterName = 'Dockerized'
# clusterObj = getClusterByName(siObj, clusterName)
# print("clusterName is", clusterObj.name)
# get host by name
# hostName = '192.168.25.205'
# hostObj = getHostByName(siObj, hostName)
# print("hostName is", hostObj.name)
# get datastore by name
# datastoreName = 'ds1'
# datastoreObj = getDatastoreByName(siObj, datastoreName)
# print("datastoreName is", datastoreObj.name)
# get network by name
# networkName = 'VM Network'
# networkObj = getNetworkByName(siObj, networkName)
# print("networkName is", networkObj.name)
# print("Vm's in this network", [vm.name for vm in networkObj.vm])
# get all vms inside datacenter
# vmsList = datacenterObj.vmFolder.childEntity
# for vm in vmsList:
#     print("Virtual Machine - ", vm.name)
# get vm by name
# vmObj = getVirtualMachineByName(siObj, 'k8s-master')
# print('VirtualMachineName', vmObj.name, dir(vmObj))
# poweroff the above virtual machine
# vmObj.PowerOff()
# poweron the above virtual machine
# vmObj.PowerOn()
|
[
"rajpratik71@gmail.com"
] |
rajpratik71@gmail.com
|
aaf684914f88ee47e002fe6283aad1328b10f3ad
|
6cce023315d4083c7df0fcdeea2a037b00818878
|
/py-files/data_setup.py
|
3a1efa85492db400854022be0137e9d4defafa58
|
[] |
no_license
|
Limmen/Distributed_ML
|
e02e865a123e552d3795c76a4a0846f2da6f3a55
|
d5b65a0bcb89182e3ac773b0a3cec46625dabccb
|
refs/heads/master
| 2021-05-09T03:52:41.530823
| 2018-01-28T12:21:47
| 2018-01-28T12:21:47
| 119,255,519
| 5
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,735
|
py
|
import argparse
import pyspark
from pyspark.sql.functions import udf
from pyspark.sql.types import *
import tensorflow as tf
import pandas as pd
import numpy as np
# Input CSV locations for the raw HAR (human activity recognition) data set.
SEQ_LABELS_TRAIN = "data/y_train.csv"
SEQ_FEATURES_TRAIN = "data/x_train.csv"
SEQ_LABELS_TEST = "data/y_test.csv"
SEQ_FEATURES_TEST = "data/x_test.csv"
# Spark context/session setup; validateOutputSpecs=false allows overwriting
# existing output directories on re-runs.
conf = pyspark.SparkConf()
conf = conf.setAppName("har_data_setup").set("spark.hadoop.validateOutputSpecs", "false")
sc = pyspark.SparkContext(conf=conf)
sql = pyspark.SQLContext(sc)
# Root directory where the cleaned label/metadata CSVs are written.
CLEANED_DATA_PATH = "./cleaned_data"
def read_raw_data(sql):
    """Load the four raw, header-less CSVs as Spark DataFrames.

    Returns a tuple: (train features, train labels, test features, test labels).
    """
    seq_features_train_raw = sql.read.format("com.databricks.spark.csv").options(header="false").load(SEQ_FEATURES_TRAIN)
    seq_labels_train_raw = sql.read.format("com.databricks.spark.csv").options(header="false").load(SEQ_LABELS_TRAIN)
    seq_features_test_raw = sql.read.format("com.databricks.spark.csv").options(header="false").load(SEQ_FEATURES_TEST)
    seq_labels_test_raw = sql.read.format("com.databricks.spark.csv").options(header="false").load(SEQ_LABELS_TEST)
    return seq_features_train_raw,seq_labels_train_raw, seq_features_test_raw, seq_labels_test_raw
seq_features_train_raw, seq_labels_train_raw,seq_features_test_raw,seq_labels_test_raw = read_raw_data(sql)
# Row counts are used both for the log line below and the sizes.csv output.
features_train_size = seq_features_train_raw.count()
labels_train_size = seq_labels_train_raw.count()
features_test_size = seq_features_test_raw.count()
labels_test_size = seq_labels_test_raw.count()
print("train feat size: {0}, train label size: {1}, test feat size {2}, test label size {3}".format(features_train_size, labels_train_size, features_test_size, labels_test_size))
# NOTE(review): `printSchema` is referenced without calling it, so this line
# is a no-op — presumably `printSchema()` was intended.
seq_labels_test_raw.printSchema
# Build a stable label -> integer-index mapping over train+test labels
# (column _c0 is the single label column of the header-less CSVs).
classes = seq_labels_train_raw.unionAll(seq_labels_test_raw).select("_c0").distinct().rdd.map(lambda row: row._c0).zipWithIndex().collectAsMap()
# Map every raw label to its integer index.
seq_labels_train_clean = seq_labels_train_raw.select("_c0").rdd.map(lambda row: classes[row._c0])
seq_labels_test_clean = seq_labels_test_raw.select("_c0").rdd.map(lambda row: classes[row._c0])
# Collect to the driver and persist as plain CSVs for the training pipeline.
labels_train_np = seq_labels_train_clean.collect()
labels_test_np = seq_labels_test_clean.collect()
np.savetxt(CLEANED_DATA_PATH + "/train/labels/y_train.csv", np.array(labels_train_np).astype(int), fmt='%i', delimiter=",")
np.savetxt(CLEANED_DATA_PATH + "/test/labels/y_test.csv", np.array(labels_test_np).astype(int), fmt='%i', delimiter=",")
np.savetxt(CLEANED_DATA_PATH + "/classes/classes.csv", np.array([[k,v] for k,v in classes.items()]),fmt="%s", delimiter=",")
np.savetxt(CLEANED_DATA_PATH + "/size/sizes.csv", np.array([["features_train_size", features_train_size], ["labels_train_size", labels_train_size], ["features_test_size", features_test_size], ["labels_test_size", labels_test_size]]), fmt="%s", delimiter=",")
|
[
"kimham@kth.se"
] |
kimham@kth.se
|
8cf423d1f9f0257fa371e065ae2d57628aeedaf2
|
ee4db47ccecd23559b3b6f3fce1822c9e5982a56
|
/Analyse Data/NumpPy.py
|
d2bf919d8ad04f330f143dfea2c477d7387bd3ee
|
[] |
no_license
|
meoclark/Data-Science-DropBox
|
d51e5da75569626affc89fdcca1975bed15422fd
|
5f365cedc8d0a780abeb4e595cd0d90113a75d9d
|
refs/heads/master
| 2022-10-30T08:43:22.502408
| 2020-06-16T19:45:05
| 2020-06-16T19:45:05
| 265,558,242
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,021
|
py
|
# Introduction to NumPy: Numerical Python
# NumPy is great at storing and manipulating numerical data in arrays.
import numpy as np
# NumPy Arrays
# A NumPy array is a special type of list. It's a data structure that organizes
# multiple items. Each item can be of any type (strings, numbers, or even other arrays).
test_1 = np.array([92, 94, 88, 91, 87])
# test_1 is now a numpy array
# Creating an Array from a CSV
# Note the delimiter can be in other formats such as semicolons and tabs.
test_2 = np.genfromtxt('test_2.csv', delimiter=',')
# Operations with NumPy Arrays
# Let's compare how to add a number to each value in a python list versus a NumPy array:
# With a list
A = [1, 2, 3, 4, 5, 6]
A_plus_3 = []
for i in range(len(A)):
    # BUG FIX: the original appended to the undefined name `l_plus_3`
    # (NameError); the accumulator declared above is `A_plus_3`.
    A_plus_3.append(A[i] + 3)
# With an array
# BUG FIX: the original converted the undefined name `l` (NameError);
# the list built above is `A`.
a = np.array(A)
a_plus_3 = a + 3
# Squaring each value (element-wise):
a ** 2
# array([ 1, 4, 9, 16, 25, 36])
# Taking the square root of each value:
np.sqrt(a)
# array([ 1, 1.41421356, 1.73205081, 2, 2.23606798, 2.44948974])
# Add extra 2 points to test_3
test_3 = np.array([87, 85, 72, 90, 92])
test_3_fixed = test_3 + 2
# Operations with NumPy Arrays II
# Arrays can also be added to or subtracted from each other in NumPy,
# assuming the arrays have the same number of elements.
a = np.array([1, 2, 3, 4, 5])
b = np.array([6, 7, 8, 9, 10])
a + b
# array([ 7, 9, 11, 13, 15])
test_1 = np.array([92, 94, 88, 91, 87])
test_2 = np.array([79, 100, 86, 93, 91])
test_3 = np.array([87, 85, 72, 90, 92])
test_3_fixed = test_3 + 2
total_grade = test_1 + test_2 + test_3_fixed
# average score over the three tests
final_grade = total_grade / 3
print(final_grade)
# Two-Dimensional Arrays
# In NumPy we can create an array of arrays.
# If the arrays that make up our bigger array are all the same size, then it
# has a special name: a two-dimensional array.
# We could have also stored all of this data in a single, two-dimensional
# array. Notice the double square brackets syntax [[]]:
np.array([[92, 94, 88, 91, 87],
          [79, 100, 86, 93, 91],
          [87, 85, 72, 90, 92]])
coin_toss_again = np.array([[1, 0, 0, 1, 0], [0, 0, 1, 1, 1]])
# Selecting Elements from a 1-D Array
# This uses normal indexing.
test_1 = np.array([92, 94, 88, 91, 87])
test_2 = np.array([79, 100, 86, 93, 91])
test_3 = np.array([87, 85, 72, 90, 92])
jeremy_test_2 = test_2[-2]
# grabs 93
manual_adwoa_test_1 = test_1[1:3]
# grabs 94 88
# Selecting Elements from a 2-D Array
# The syntax for selecting from a 2-d array is a[row, column] where a is the array.
a = np.array([[32, 15, 6, 9, 14],
              [12, 10, 5, 23, 1],
              [2, 16, 13, 40, 37]])
a[2, 1]
# 16
# selects the first column
a[:, 0]
# array([32, 12, 2])
a[1, :]
# array([12, 10, 5, 23, 1])
# selects the first three elements of the first row
a[0, 0:3]
# array([32, 15, 6])
student_scores = np.array([[92, 94, 88, 91, 87],
                           [79, 100, 86, 93, 91],
                           [87, 85, 72, 90, 92]])
tanya_test_3 = student_scores[2, 0]
cody_test_scores = student_scores[:, 4]
# Logical Operations with Arrays (boolean masking)
# < > == != | &
porridge = np.array([79, 65, 50, 63, 56, 90, 85, 98, 79, 51])
cold = porridge[porridge < 60]
hot = porridge[porridge > 80]
just_right = porridge[(porridge >= 60) & (porridge <= 80)]
print(cold,
      hot,
      just_right
      )
# Review
import numpy as np
temperatures = np.genfromtxt('temperature_data.csv', delimiter=',')
print(temperatures)
temperatures_fixed = temperatures + 3.0
monday_temperatures = temperatures_fixed[0, :]
thursday_friday_morning = temperatures_fixed[3:, 1]
temperature_extremes = temperatures_fixed[(temperatures_fixed < 50) | (temperatures_fixed > 60)]
# Project: Betty's Bakery
import numpy as np
cupcakes = np.array([2, 0.75, 2, 1, 0.5])
recipes = np.genfromtxt('recipes.csv', delimiter=',')
print(recipes)
eggs = recipes[:, 2]
print(eggs)
# egg = recipes[recipes[:, 2] == 1]
cookies = recipes[2, :]
print(cookies)
double_batch = cupcakes * 2
print(double_batch)
grocery_list = cookies + double_batch
print(grocery_list)
|
[
"oluchukwuegbo@gmail.com"
] |
oluchukwuegbo@gmail.com
|
e1e458abdbc5777af32bf1194e4add3db39fd867
|
36b9fa9f2d8ff655546a33cb47ddacd009bc00c9
|
/autogalaxy/profiles/light/linear/__init__.py
|
bb275334df92a6bf4b34d18f21e6aa123ae5dc62
|
[
"MIT"
] |
permissive
|
Jammy2211/PyAutoGalaxy
|
67b76968b8516309b2ebdbff7affd5c1923cf0b1
|
d1a2e400b7ac984a21d972f54e419d8783342454
|
refs/heads/main
| 2023-08-19T01:00:22.320073
| 2023-08-17T15:39:46
| 2023-08-17T15:39:46
| 216,190,501
| 27
| 9
|
MIT
| 2023-09-13T14:07:43
| 2019-10-19T10:45:44
|
Python
|
UTF-8
|
Python
| false
| false
| 401
|
py
|
from .abstract import LightProfile, LightProfileLinear, LightProfileLinearObjFuncList
from .gaussian import Gaussian, GaussianSph
from .moffat import Moffat
from .sersic import Sersic, SersicSph
from .exponential import Exponential, ExponentialSph
from .dev_vaucouleurs import DevVaucouleurs, DevVaucouleursSph
from .sersic_core import SersicCore
from .exponential_core import ExponentialCore
|
[
"james.w.nightingale@durham.ac.uk"
] |
james.w.nightingale@durham.ac.uk
|
7a8252f05c1ee87e900b5ed853a3cabc43688b96
|
74081581575e80b2b0f6b75ba912d58ea4f37ac6
|
/maskrcnn_benchmark/modeling/detector/detectors.py
|
35064d2f9992fd2c2e08d4a29ad31d1e5a229f8f
|
[] |
no_license
|
youngfly11/LCMCG-PyTorch
|
5f6b9f231613b86ac7b250ca0f34229402e1615e
|
e95299b9a9f1b13e21750ef0dcde0941d703d009
|
refs/heads/master
| 2021-10-25T19:29:12.967318
| 2021-10-25T03:35:14
| 2021-10-25T03:35:14
| 221,908,808
| 56
| 12
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 419
|
py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from .generalized_rcnn import GeneralizedRCNN
from .generalized_rcnn_det import GeneralizedRCNNDet
# Registry mapping the META_ARCHITECTURE string from the config to the
# detector class that implements it.
_DETECTION_META_ARCHITECTURES = {"GeneralizedRCNN": GeneralizedRCNN, "GeneralizedRCNNDet": GeneralizedRCNNDet}
def build_detection_model(cfg):
    """Instantiate the detector named by ``cfg.MODEL.META_ARCHITECTURE``.

    Raises KeyError if the configured name is not in the registry above.
    """
    meta_arch = _DETECTION_META_ARCHITECTURES[cfg.MODEL.META_ARCHITECTURE]
    return meta_arch(cfg)
|
[
"liuyf3@shanghaitech.edu.cn"
] |
liuyf3@shanghaitech.edu.cn
|
cbf3083dd8ea5ae4718b4b154ac624468f4e7c15
|
68b23f776fddb77de735419cbf30f33a49e9def2
|
/backend/terminus/home/urls.py
|
501046c917470aac71074c89c9f1d1a75f5cceac
|
[] |
no_license
|
vash512/terminus
|
cbd00f74a600a13fd52aa2206c3eb1e7b5301ec7
|
4eb86d853bc76c22cd1af3c86fed1bc10d457c88
|
refs/heads/master
| 2016-09-05T14:49:42.655635
| 2015-07-09T03:34:38
| 2015-07-09T03:34:38
| 32,414,141
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,155
|
py
|
# -*- coding: utf-8 -*-
from django.conf.urls import patterns, include, url
from django.views.generic import TemplateView
# URL routes for the `home` app.  Views are given as strings relative to the
# 'home.views' prefix.
# NOTE(review): `patterns()` with string view names is the old Django style
# (removed in Django 1.10) — confirm the project's Django version before
# upgrading.
urlpatterns=patterns('home.views',
	# Landing page.
	url(r'^$', 'index_view', name='index'),
	# Static text files served through templates.
	url(r'^humans.txt$', TemplateView.as_view(template_name='statics/humans.txt', content_type='text/plain; charset=utf-8')),
	url(r'^robots.txt$', TemplateView.as_view(template_name='statics/robots.txt', content_type='text/plain; charset=utf-8')),
	url(r'^sitemap.xml$', TemplateView.as_view(template_name='statics/sitemap.xml', content_type='application/xml; charset=utf-8')),
	url(r'^contacto/', 'contacto' ),
	url(r'^acercade/', 'acercade'),
	url(r'^corpuscontable', 'corpus'),
	url(r'^ayuda', 'ayuda'),
	#terminos urls de prueba
	url(r'^terminos', 'terminos'),
	url(r'^terminos/termino', 'termino_detalle'),
	url(r'^q/$', 'busqueda'),
	url(r'^q/termino', 'busqueda_list'),
	url(r'^docs/doc', 'doc_detalle'),
	url(r'^docs/$', 'docs'),
	#estas direcciones las debe administrar terminos.urls y terminos.views
	# Auth endpoints.
	url(r'^login/', 'log_in'),
	url(r'^registro/', 'registro'),
	url(r'^logout/', 'log_out'),
)
|
[
"xtornasol512@gmail.com"
] |
xtornasol512@gmail.com
|
a05385930991319e2dc5ebf3029f337f10410b3a
|
ffba5c4a64a87214160c5904b220be8a6e88cd58
|
/python-packages/maizy_f/r.py
|
5e83d68deeca516eed184231752129e90e707f19
|
[] |
no_license
|
maizy/dev-setup
|
6e8ae5bc9b56dd85f7612b453e92e31043816189
|
5eb8473cf9c66c66ff8fd9e8c72cecf931f62494
|
refs/heads/master
| 2022-11-13T22:08:00.184435
| 2022-11-13T08:33:25
| 2022-11-13T08:33:25
| 7,286,016
| 2
| 0
| null | 2017-12-22T11:57:00
| 2012-12-22T13:20:57
|
Python
|
UTF-8
|
Python
| false
| false
| 361
|
py
|
# coding: utf-8
# Copyright (c) Nikita Kovaliov, maizy.ru, 2013
from __future__ import print_function, absolute_import, unicode_literals
from fabric.api import task, run
@task
def info():
    """Print basic identity information about the remote host."""
    # Kernel and host name first.
    for command in ('uname -a', 'hostname'):
        run(command)
    # lsb_release only ships on Debian-like systems; probe for it before use.
    lsb_location = run('which lsb_release')
    if lsb_location != '':
        print('Debian like os found')
        run('lsb_release -a')
|
[
"nikita@maizy.ru"
] |
nikita@maizy.ru
|
9290a1f679623cb6793f2eaef635da4b4689e597
|
6fce025097cebfd9d1dd37f6611e7fdfdbea90e6
|
/rainfields/model_conv.py
|
8a22d603ee800aed0a84aee26d7728f6111a7b66
|
[] |
no_license
|
ANU-WALD/pluvi_pondus
|
ec0439d19acdcf4fdf712d6b14a1714297d661b2
|
ff8680f7115ab2cb75138bf6705abb59618e47d1
|
refs/heads/master
| 2021-07-01T14:32:14.501631
| 2020-08-22T09:41:28
| 2020-08-22T09:41:28
| 138,804,652
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,287
|
py
|
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, Flatten
from tensorflow.keras.optimizers import Adam, SGD
import numpy as np
# ---------------------------------------------------------------------------
# Load pre-extracted feature/target arrays (capped at 10M rows).
# NOTE(review): features appear to be flattened 5x5x4 neighbourhoods
# (100 values) judging by the Dense(input_dim=100) below — confirm.
# ---------------------------------------------------------------------------
x = np.load("x_conv.npy")[:10000000]
print(x.shape)
y = np.load("y_conv.npy")[:10000000,None]
print(y.shape)
# Indices of samples with precipitation (y > 0).
prec_mask = np.nonzero(y>0)
print(prec_mask)
print(len(prec_mask))
print(prec_mask[0].shape)
x_prec = x[prec_mask[0], :]
y_prec = y[prec_mask[0], :]
print(x_prec.shape, y_prec.shape)
# Dry samples (y == 0): subsample to 2x the wet-sample count to reduce
# the class imbalance.
zero_mask = np.nonzero(y==0)
x_dry = x[zero_mask[0], :]
y_dry = y[zero_mask[0], :]
print(x_dry.shape, y_dry.shape)
idxs = np.arange(x_dry.shape[0])
np.random.seed(0)
np.random.shuffle(idxs)
n = x_prec.shape[0] * 2
x_dry = x_dry[idxs[:n],:]
y_dry = y_dry[idxs[:n],:]
print(x_dry.shape, y_dry.shape)
# Combine, shuffle, and flatten the balanced data set.
x = np.concatenate((x_prec, x_dry), axis=0)
y = np.concatenate((y_prec, y_dry), axis=0)
print(x.shape, y.shape)
idxs = np.arange(x.shape[0])
np.random.shuffle(idxs)
x = x[idxs,:]
x = np.reshape(x, (x.shape[0], -1))
y = y[idxs,:]
print(x.shape, y.shape)
# Fully-connected regression network on the flattened neighbourhoods.
model= Sequential()
model.add(Dense(100, activation='relu', input_dim=100))
model.add(Dense(200, activation='relu'))
model.add(Dense(400, activation='relu'))
model.add(Dense(100, activation='relu'))
model.add(Dense(50, activation='relu'))
model.add(Dense(1, activation='relu'))
# Alternative convolutional model kept for reference:
"""
model= Sequential()
model.add(Conv2D(16, kernel_size=3, activation='relu', padding='same', input_shape=(5,5,4)))
model.add(Conv2D(32, kernel_size=3, activation='relu', padding='same'))
model.add(Conv2D(64, kernel_size=3, activation='relu', padding='same'))
model.add(Conv2D(32, kernel_size=3, activation='relu', padding='same'))
model.add(Flatten())
model.add(Dense(1, activation='relu'))
"""
# Fixed train/test split (first 175k rows train, rest test).
x_train = x[:175000,:]
x_test = x[175000:,:]
y_train = y[:175000,:]
y_test = y[175000:,:]
print(y_train.shape, y_test.shape)
# Baseline statistics of the targets for comparison with model metrics.
print(np.square(y_train).mean(axis=0))
print(np.square(y_test).mean(axis=0))
print(np.abs(y_train).mean(axis=0))
print(np.abs(y_test).mean(axis=0))
# `lr` is the legacy alias of `learning_rate`; kept for compatibility with
# older TF releases.
model.compile(optimizer=Adam(lr=0.000001), loss='mse', metrics=['mae', 'mse'])
# BUG FIX: `nb_epoch` is the pre-Keras-2 keyword and is not accepted by
# tf.keras (TypeError); the correct keyword is `epochs`.
model.fit(x_train, y_train, batch_size=32, epochs=10, validation_data=(x_test, y_test))
|
[
"pablo.larraondo@anu.edu.au"
] |
pablo.larraondo@anu.edu.au
|
53b09cda44362a8837373232a16c18428dcb871d
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02585/s497439083.py
|
c64c92fb3dffd1704057e5332cba0a7d2217f5e0
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,416
|
py
|
# Maximise the score of at most k moves on a permutation game:
# from square i a move goes to P[i] and adds C[P[i]] to the score.
# P is a permutation, so every start square lies on a cycle; for each start
# we build the prefix sums of one full lap and combine whole-lap repeats
# with the best partial lap.
n,k = map(int,input().split())
P = list(map(int,input().split()))
C = list(map(int,input().split()))
g = [[0]*(n) for _ in range(n)]
A = [n]*n
# for i in range(n):
#     tmp = 0
#     idx = i
#     cnt = 0
#     set_ =set()
#     while cnt<n:
#         if C[idx] not in set_:
#             tmp += C[idx]
#             set_.add(C[idx])
#             g[i][cnt] = tmp
#             idx = P[idx]-1
#             cnt += 1
#         else:
#             p = len(set_)
#             A[i] = p
#             break
ans = -float('inf')
for i in range(n):
    # S[j] = cumulative score after j+1 moves starting from square i,
    # over exactly one full cycle (len(S) == cycle length).
    S = []
    idx = P[i]-1
    S.append(C[idx])
    while idx != i:
        idx = P[idx]-1
        S.append(S[-1] +C[idx])
    # v full laps plus w extra moves fit in the k-move budget.
    v,w = k//len(S),k%len(S)
    if k<=len(S):
        # Fewer moves than one lap: best prefix of the first k moves.
        val = max(S[:k])
    elif S[-1]<=0:
        # Non-positive lap total: repeating laps never helps.
        val = max(S)
    else:
        # Positive lap total: either (v-1) full laps + best prefix anywhere,
        # or v full laps + best (possibly empty) prefix of the remainder.
        val1 = S[-1] *(v-1)
        val1 += max(S)
        val2 = S[-1]*v
        if w!=0:
            val2 += max(0,max(S[:w]))
        val = max(val1,val2)
    ans = max(ans,val)
# for i in range(n):
#     v,w = k//A[i],k%A[i]
#     if A[i]<k:
#         if g[i][A[i]-1]<=0:
#             val = max(g[i][:A[i]])
#         else:
#             val1 = (v-1)*g[i][A[i]-1]
#             val1 += max(g[i][:A[i]])
#             val2 = v*g[i][A[i]-1]
#             if w!=0:
#                 val2 += max(0,max(g[i][:w]))
#             val = max(val1,val2)
#     else:
#         val = max(g[i][:k])
#     ans = max(ans,val)
print(ans)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
5f28d3473174758f29072135291cc13603f342ab
|
94bb77d0847df86ead773650cf4aa0885ed3ad4e
|
/dappcrowd/restapi/submissions_endpoint.py
|
d2fb9fe38acbfc271ba77225c557ec7a4ae17f5a
|
[] |
no_license
|
Tribler/dappcoder
|
3766f0b252ac38d889ad3596b5b6335669d31100
|
8ae43d51a284929bc081c87debc9ef003d1f9116
|
refs/heads/master
| 2020-04-01T07:53:38.236183
| 2019-01-17T13:07:53
| 2019-01-17T13:07:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,196
|
py
|
import json
from twisted.web import http
from twisted.web.server import NOT_DONE_YET
from dappcrowd.restapi.root_endpoint import DAppCrowdEndpoint
class SubmissionsEndpoint(DAppCrowdEndpoint):
    """Collection endpoint for app-request submissions (GET list / PUT create)."""
    def render_GET(self, request):
        """
        Get all submissions.
        """
        dappcrowd_overlay = self.get_dappcrowd_overlay()
        return json.dumps({"submissions": dappcrowd_overlay.persistence.get_submissions()})
    def getChild(self, path, request):
        # Any child path segment is interpreted as a submitter public key.
        return SubmissionPKEndpoint(self.ipv8, self.ipfs_api, path)
    def render_PUT(self, request):
        """
        Create a new submission for an app request.

        Required form parameters: project_pk (hex), project_id, submission.
        Responds 400 with an error JSON if any is missing.
        """
        parameters = http.parse_qs(request.content.read(), 1)
        required_params = ['project_pk', 'project_id', 'submission']
        for required_param in required_params:
            if required_param not in parameters:
                request.setResponseCode(http.BAD_REQUEST)
                return json.dumps({"error": "missing parameter %s" % required_param})
        def on_block_created(blocks):
            # Defer the HTTP response until the block is actually created.
            request.write(json.dumps({"success": True}))
            request.finish()
        # NOTE(review): str.decode('hex') is Python-2 only — confirm the
        # project still targets Python 2 (use codecs/bytes.fromhex on 3).
        self.get_dappcrowd_overlay().create_submission(parameters['project_pk'][0].decode('hex'), parameters['project_id'][0], parameters['submission'][0]).addCallback(on_block_created)
        return NOT_DONE_YET
class SubmissionPKEndpoint(DAppCrowdEndpoint):
    """Intermediate endpoint scoping submissions to one submitter public key."""
    def __init__(self, ipv8, ipfs_api, public_key):
        DAppCrowdEndpoint.__init__(self, ipv8, ipfs_api)
        # NOTE(review): str.decode('hex') is Python-2 only.
        self.public_key = public_key.decode('hex')
    def getChild(self, path, request):
        # The next path segment is the submission id under this public key.
        return SpecificSubmissionEndpoint(self.ipv8, self.ipfs_api, self.public_key, path)
class SpecificSubmissionEndpoint(DAppCrowdEndpoint):
    """Endpoint for a single submission, identified by (public_key, submission_id)."""
    def __init__(self, ipv8, ipfs_api, public_key, submission_id):
        DAppCrowdEndpoint.__init__(self, ipv8, ipfs_api)
        self.public_key = public_key
        self.submission_id = submission_id
        # Nested resource: /submissions/<pk>/<id>/reviews
        self.putChild("reviews", SpecificSubmissionReviewsEndpoint(ipv8, ipfs_api, public_key, submission_id))
    def render_GET(self, request):
        # 404 with an error JSON when the submission does not exist.
        if not self.get_dappcrowd_overlay().persistence.has_submission(self.public_key, self.submission_id):
            request.setResponseCode(http.NOT_FOUND)
            return json.dumps({"error": "the submission is not found"})
        return json.dumps({
            "submission": self.get_dappcrowd_overlay().persistence.get_submission(self.public_key, self.submission_id)
        })
class SpecificSubmissionReviewsEndpoint(DAppCrowdEndpoint):
    """Endpoint listing the reviews attached to a single submission."""
    def __init__(self, ipv8, ipfs_api, public_key, submission_id):
        DAppCrowdEndpoint.__init__(self, ipv8, ipfs_api)
        self.public_key = public_key
        self.submission_id = submission_id
    def render_GET(self, request):
        # 404 with an error JSON when the parent submission does not exist.
        if not self.get_dappcrowd_overlay().persistence.has_submission(self.public_key, self.submission_id):
            request.setResponseCode(http.NOT_FOUND)
            return json.dumps({"error": "the submission is not found"})
        return json.dumps({
            "reviews": self.get_dappcrowd_overlay().persistence.get_reviews(self.public_key, self.submission_id)
        })
|
[
"mdmartijndevos@gmail.com"
] |
mdmartijndevos@gmail.com
|
b4b798b4b5b230c3088019cf13bf6acb5fe54680
|
95d4c8f4cda7ad1c7623a2df02da8cee1ad0941d
|
/src/classifier/german_pos_tagger.py
|
a66192956c55f524f2b1142c45afdf0df24c1383
|
[
"MIT"
] |
permissive
|
tiefenauer/ip7-python
|
8f587b7b77430facff19b24441490756b01d5b17
|
512105ba39110ec77d2ea0961dd7c2a42d4ec26d
|
refs/heads/master
| 2021-09-09T20:37:21.647146
| 2018-02-20T12:07:46
| 2018-02-20T12:07:46
| 107,635,390
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,382
|
py
|
# -*- coding: utf-8 -*-
#
# Natural Language Toolkit: ClassifierBasedGermanTagger
#
# URL: <http://www.experimentallabor.de/>
#
# Copyright 2011 Philipp Nolte
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tag German text.
"""
import re
from nltk.tag.sequential import ClassifierBasedTagger
class ClassifierBasedGermanTagger(ClassifierBasedTagger):
    """A classifier based German part-of-speech tagger. It has an accuracy of
    96.09% after being trained on 90% of the German TIGER corpus. The tagger
    extends the NLTK ClassifierBasedTagger and implements a slightly modified
    feature detector.
    """
    def feature_detector(self, tokens, index, history):
        """Implementing a slightly modified feature detector.
        @param tokens: The tokens from the sentence to tag.
        @param index: The current token index to tag.
        @param history: The previous tagged tokens.
        """
        word = tokens[index]
        # Collect up to two previous words/tags as context features.
        if index == 0: # At the beginning of the sentence
            prevword = prevprevword = None
            prevtag = prevprevtag = None
            # word = word.lower() # Lowercase at the beginning of sentence
        elif index == 1:
            prevword = tokens[index - 1] # Note: no lowercase
            prevprevword = None
            prevtag = history[index - 1]
            prevprevtag = None
        else:
            prevword = tokens[index - 1]
            prevprevword = tokens[index - 2]
            prevtag = history[index - 1]
            prevprevtag = history[index - 2]
        # Coarse orthographic "shape" of the token; checked in priority order
        # (number, punctuation, capitalised, lowercase, mixed, other).
        if re.match('[0-9]+([\.,][0-9]*)?|[0-9]*[\.,][0-9]+$', word):
            # Included "," as decimal point
            shape = 'number'
        elif re.compile('\W+$', re.UNICODE).match(word):
            # Included unicode flag
            shape = 'punct'
        elif re.match('([A-ZÄÖÜ]+[a-zäöüß]*-?)+$', word):
            # Included dash for dashed words and umlauts
            shape = 'upcase'
        elif re.match('[a-zäöüß]+', word):
            # Included umlauts
            shape = 'downcase'
        elif re.compile("\w+", re.UNICODE).match(word):
            # Included unicode flag
            shape = 'mixedcase'
        else:
            shape = 'other'
        # Feature dictionary consumed by the underlying NLTK classifier.
        features = {
            'prevtag': prevtag,
            'prevprevtag': prevprevtag,
            'word': word,
            'word.lower': word.lower(),
            'suffix3': word.lower()[-3:],
            # 'suffix2': word.lower()[-2:],
            # 'suffix1': word.lower()[-1:],
            'preffix1': word[:1], # included
            'prevprevword': prevprevword,
            'prevword': prevword,
            'prevtag+word': '%s+%s' % (prevtag, word),
            'prevprevtag+word': '%s+%s' % (prevprevtag, word),
            'prevword+word': '%s+%s' % (prevword, word),
            'shape': shape
        }
        return features
|
[
"git@tiefenauer.info"
] |
git@tiefenauer.info
|
73b8eea0e247cc2fb5986af3fd0beca8578749f2
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/E9FwvGyad5CDbiH4C_9.py
|
d750b3eefe4c93d1c5db878cb337dcc042cf9e95
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,348
|
py
|
"""
Create a function that takes a 2D array as an argument and returns the number
of people whose view is blocked by a tall person. The concert stage is pointed
towards the top of the 2D array and the tall person (represented by a 2)
blocks the view of all the people (represented by a 1) behind them.
### Examples
block([
[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1],
[1, 1, 1, 1, 2],
[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1]
]) ➞ 2
# The tall person blocks 2 people behind him thus
# the function returns 2.
block([
[1, 2, 1, 1],
[1, 1, 1, 2],
[1, 1, 1, 1],
[1, 1, 1, 1],
]) ➞ 5
# There are 2 tall people that block everyone behind
# them. The first tall person in the first row blocks 3
# people behind him while the second tall person in
# the second row blocks 2 people behind him thus the
# function returns 5.
block([
[1, 1, 1, 1],
[2, 1, 1, 2],
[1, 1, 1, 1],
[1, 1, 1, 1],
]) ➞ 4
### Notes
1. There is only a maximum of 1 tall person in every column.
2. No view is blocked if the tall person is in the last row.
"""
def block(lst):
    """Return how many audience members have their view blocked.

    The stage faces the top of the grid; a tall person (2) blocks every
    person (1) standing behind them (larger row index) in the same column.
    """
    rows = len(lst)
    # A strictly taller value directly in front (row - 1) blocks this row
    # and every row behind it in the same column, i.e. rows - row people.
    return sum(
        rows - row
        for col in range(len(lst[0]))
        for row in range(1, rows)
        if lst[row][col] < lst[row - 1][col]
    )
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
2d4b29a8afb8ba840c0c97a4e5296c98779b4382
|
1f696631898e0279951709e150da6d87045e4bc4
|
/mysite/blog/migrations/0003_auto_20201018_1329.py
|
55a90dffcfe644d3983ed6e04e389fcac44cd412
|
[] |
no_license
|
henryfrstr/django_project_blog
|
9f50b004b2fed59304c3f5a1f05247d44a232992
|
0f3c391a3cd790ae504fb84a786158a1d775abda
|
refs/heads/main
| 2022-12-31T05:05:20.989719
| 2020-10-25T11:26:27
| 2020-10-25T11:26:27
| 305,067,579
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 357
|
py
|
# Generated by Django 3.0.8 on 2020-10-18 10:29
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: renames the Post field ``titles`` to ``title``."""
    dependencies = [
        ('blog', '0002_auto_20201018_1327'),
    ]
    operations = [
        migrations.RenameField(
            model_name='post',
            old_name='titles',
            new_name='title',
        ),
    ]
|
[
"63148122+henryfrstr@users.noreply.github.com"
] |
63148122+henryfrstr@users.noreply.github.com
|
10177a53490eb98107c90432833b44de0dc5241f
|
36e3d735e06d0642f1e8c26bff57305a01cc627c
|
/nms/priClient/settings.py
|
70dc9c3a439ae7865ae186e64032e891229bbeb1
|
[] |
no_license
|
WilsonWangTHU/ipv6_server
|
5c768cdaeaf22ee508c5fff162b208481a42f95d
|
5088f58ab25061e65127699ed328ddaab24f9aac
|
refs/heads/master
| 2021-01-18T21:18:39.653994
| 2016-05-27T04:22:23
| 2016-05-27T04:22:23
| 55,656,523
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,819
|
py
|
"""
Django settings for subClient project.
Generated by 'django-admin startproject' using Django 1.9.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to the repository — rotate it and load
# from the environment before any production deployment.
SECRET_KEY = '2+ytb#pjeh*g!9_3m(id@&mn$c+f56$q6fp=*%lkr)wp8hpfz%'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'net_data'
]
# NOTE(review): CsrfViewMiddleware is commented out, so CSRF protection is
# disabled project-wide — confirm this is intentional.
MIDDLEWARE_CLASSES = [
    # 'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    # 'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    # 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'priClient.urls'
TEMPLATE_DEBUG = True
TEMPLATE_DIRS = (
    os.path.join(BASE_DIR, 'templates'),
)
WSGI_APPLICATION = 'priClient.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
# NOTE(review): STATIC_URL is conventionally a URL prefix such as '/static/';
# a filesystem path usually belongs in STATIC_ROOT instead — confirm intent.
STATIC_URL = os.path.join(BASE_DIR, 'static/')
|
[
"wode406@hotmail.com"
] |
wode406@hotmail.com
|
ee692b4e30708d7c40cc7efe0f041f668c08dcb4
|
f63314b4852fb97ad740e53e450110fcd08a515b
|
/app.py
|
ce08afaac1bc0e74d4fe1216c543d83672b30fd1
|
[] |
no_license
|
xsomam/askfm-scrapper
|
ee0e2c4439d5be3e4ebd7fceb27d01fbff3aa4da
|
2e997268f40cd37dfc56bc7adc496d609106b327
|
refs/heads/master
| 2023-04-09T03:45:56.074797
| 2020-08-07T23:48:31
| 2020-08-07T23:48:31
| 384,990,560
| 0
| 0
| null | 2023-03-17T21:32:19
| 2021-07-11T15:56:16
| null |
WINDOWS-1250
|
Python
| false
| false
| 7,611
|
py
|
# Basic imports
import os
import time
import json
import logging
# Modules imports
import requests
from bs4 import BeautifulSoup
# Imports from files
from answer import SinglePost
from locators import *
# File-based logging for the whole scraper (written to logs.txt).
logging.basicConfig(format='%(asctime)s %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s',
                    datefmt='%d-%m-%Y %H:%M:%S',
                    level=logging.INFO,
                    filename='logs.txt')
logger = logging.getLogger('app')
ask_url = 'https://ask.fm' #Base Ask URL
# Working directory at startup; output folders/files are created beneath it.
BASE_DIR = os.getcwd()
# List with pending profile links to scrape.
# First link is simply base URL: ask.fm/profile.
# After first visit, if there is "next" button on page (next page)
# Then this link is appended to this array and program continues scraping
_pending_list = []
# Array for each Singlequestion_obj object.
# Singlequestion_obj object contains every answer encapsulated within one question_obj
# With info about question, answer etc.
_question = []
# Retrieving function.
# First loops through profile, and stops looping if there are no more "next" pages left
# If there are none, starts writing text content to drive by iterating through questions array
def retrieve(askfm_nick, dl_img, to_csv, to_json):
    """Scrape every page of an ask.fm profile, then dispatch file exports.

    askfm_nick: profile nickname (also used as the output directory name).
    dl_img/to_csv/to_json: export flags forwarded to perform_file_operations.
    """
    logger.info('Running "retrieve" function.')
    _n = 1
    # Append base Askfm profile link to pending list to be scraped first
    _pending_list.append(ask_url + '/' + askfm_nick)
    # Scraping continues as long as at least one link remains in
    # "_pending_list"; length 0 means there are no more links.
    # Trial of creating folder for downloaded content with user's nickname as name
    try:
        logger.info('Trial of creation of directory for scraping: '+ BASE_DIR)
        os.mkdir(askfm_nick)
    except Exception:
        # Best-effort: an existing directory is fine, keep going.
        logger.info('FAIL of creation of directory for scraping: ' + BASE_DIR)
        print('Directory already exists or another error happened. Skipping...')
        pass
    # Loop runs as long as there is some page to scrape.
    # If there was no "next" page, loop ends
    while len(_pending_list) > 0:
        for link in _pending_list:
            print(f'Connecting : {link}')
            logger.info('Establishing connection to: ' + link)
            # Get content of page, parse it with BS4
            try:
                site = requests.get(link).content
                soup = BeautifulSoup(site, 'html.parser')
                # Select each individual question on the profile page.
                all_questions = soup.select(SiteLocators.QUESTIONS)
            except Exception:
                # Network hiccup: wait and retry the same link.
                print(f'Connection error at: {link}. Retrial in 5 seconds...')
                # logger.info(f'Connection error at: {link}. Retrial in 5 seconds...')
                time.sleep(5)
                continue
            # From the array of questions we create actual objects which
            # contain only the important data (question, answer text etc.)
            # that we will perform operations on later.
            for question in all_questions:
                question_obj = SinglePost(question)
                _question.append(question_obj)
                logger.info(f'Adding question #{_n}.')
                # If given question had image, we access it and download it
                if question_obj.image:
                    save_one_image(askfm_nick, question_obj)
                _n += 1
            # Remove already scraped profile page from the pending list.
            _pending_list.remove(link)
            print(f'{link} removed from temp...')
            logger.info(f'{link} removed from temp.')
            # If there is next page, we again start looping
            next_page = soup.select_one(SiteLocators.NEXT)
            logger.info('Retrieving next page link')
            print('Retrieving next page link')
            if next_page:
                _pending_list.append(ask_url + next_page.attrs['href'])
                logger.info(f"Link to next site appended to temp list: {ask_url}{next_page.attrs['href']}")
                print(f"{ask_url}{next_page.attrs['href']} appending successful! Looping back...")
            else:
                logger.info(f'No "Next" link found. Retrieving done.')
                print('No "Next" link found. Retrieving done.')
    perform_file_operations(askfm_nick, dl_img, to_csv, to_json)
# Function dispatching file operations
def perform_file_operations(askfm_nick, dl_img, to_csv, to_json):
    """Dispatch the requested export steps after scraping has finished.

    Args:
        askfm_nick: profile name; used by the savers to build file names.
        dl_img: unused here — images are already downloaded during scraping
            via save_one_image(); kept for interface compatibility.
        to_csv: when truthy, export scraped questions to CSV.
        to_json: when truthy, export scraped questions to JSON.
    """
    # BUG FIX: the original wrapped these calls in `for each in _question:`,
    # rewriting the entire output file once per scraped question even though
    # save_to_csv()/save_to_json() each iterate _question themselves.
    # Each saver only needs to run once.
    if to_csv:
        save_to_csv(askfm_nick)
    if to_json:
        save_to_json(askfm_nick)
def save_to_json(askfm_nick):
    """Serialize every scraped question into <BASE_DIR>/<nick>/<nick>.json."""
    logger.info('Running "save_to_json" function.')
    print('Saving to JSON')
    file_name = f'{askfm_nick}.json'
    # Build one plain-dict record per scraped post; the 'img' field mirrors
    # the on-disk file name produced by the image saver, or None.
    records = [
        {
            'question': post.question,
            'answer': post.answer,
            'likes': post.likes,
            'time': post.date,
            'link': post.link,
            'img': (
                f"{askfm_nick}-{post.link.split('/')[-1]}.{post.image_extension}"
                if post.image
                else None
            ),
            'asker_url': post.asker,
        }
        for post in _question
    ]
    with open(os.path.join(BASE_DIR, askfm_nick, file_name), 'w') as file:
        json.dump(records, file, indent=4, ensure_ascii=True)
    print(f'Saved to JSON: {file_name}')
def save_to_csv(askfm_nick):
    """Export scraped questions to CSV.

    TODO: not implemented yet — currently only logs the call and returns.
    """
    logger.info('Running "save_to_csv" function.')
    pass
def save_images(askfm_nick):
    """Download the image attached to every scraped question, if any.

    Args:
        askfm_nick: profile name; used to build the image file names.
    """
    logger.info('Running "save_images" function.')
    # DRY FIX: the original body duplicated save_one_image()'s request/
    # naming/write/error-handling logic verbatim; delegate instead so the
    # two paths cannot drift apart.
    for post in _question:
        if post.image:
            save_one_image(askfm_nick, post)
def save_one_image(askfm_nick, question_obj):
    """Fetch and store the image attached to a single question.

    The file is written as <BASE_DIR>/<nick>/<nick>-<post-id>.<ext>.
    Any failure (network or disk) is logged and swallowed so scraping can
    continue with the next post.
    """
    logger.info('Running "save_one_image" function.')
    print('Saving image....')
    post_id = question_obj.link.split('/')[-1]
    image_name = f"{askfm_nick}-{post_id}.{question_obj.image_extension}"
    try:
        logger.info('Trial of saving image begins.')
        logger.info('Requesting image from: ' + question_obj.image_link)
        payload = requests.get(question_obj.image_link).content
        target = os.path.join(BASE_DIR, askfm_nick, image_name)
        with open(target, 'wb') as handle:
            handle.write(payload)
        logger.info('Saving image to: ' + target)
        print(f"Image saved: {image_name}")
    except Exception:
        print(f"Could not get image {image_name}. Skipping...")
        logger.info('Error with saving image: ' + image_name)
|
[
"a@b.com"
] |
a@b.com
|
023d1d8dece7491ac60d165dac3295008bf0a004
|
6109a95a284891792c35d0d19906ab8d1697f9c7
|
/src/k8s-configuration/azext_k8s_configuration/vendored_sdks/v2021_05_01_preview/aio/operations/_operations.py
|
3c82397333fb155b013c9017f213f97b26d8e9e6
|
[
"MIT",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
Tatsinnit/azure-cli-extensions
|
3e5a1752edced00d7c33660027d2c17fae074569
|
a1959b123d4c11149adae2728ab5791949889d54
|
refs/heads/master
| 2022-10-05T17:40:10.825889
| 2022-03-16T10:33:56
| 2022-03-16T10:33:56
| 250,102,909
| 0
| 0
|
MIT
| 2020-03-25T22:12:01
| 2020-03-25T22:12:01
| null |
UTF-8
|
Python
| false
| false
| 4,949
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
# NOTE: auto-generated by AutoRest — fix the generator config rather than
# hand-editing; regeneration will overwrite changes.
class Operations:
    """Operations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.kubernetesconfiguration.v2021_05_01_preview.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    def list(
        self,
        **kwargs: Any
    ) -> AsyncIterable["_models.ResourceProviderOperationList"]:
        """List all the available operations the KubernetesConfiguration resource provider supports.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ResourceProviderOperationList or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.kubernetesconfiguration.v2021_05_01_preview.models.ResourceProviderOperationList]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ResourceProviderOperationList"]
        # Map auth/not-found/conflict status codes to typed azure-core errors.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-05-01-preview"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # The service-supplied next_link already embeds its query string.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            # Pull one page's items + the continuation link out of a response.
            deserialized = self._deserialize('ResourceProviderOperationList', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            # Fetch one page; anything other than HTTP 200 raises.
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    # Operation URL attached as function metadata; read by prepare_request().
    list.metadata = {'url': '/providers/Microsoft.KubernetesConfiguration/operations'}  # type: ignore
|
[
"noreply@github.com"
] |
Tatsinnit.noreply@github.com
|
cee01a1f512e64d11d177b39003a6d66c4c62798
|
f375899369ba86aed1da89101c31817168ffec40
|
/cinema/urls.py
|
f73d16e3686a88eb9e7b40b41d324fb2021b3100
|
[] |
no_license
|
sheremilbekov/cinema
|
f596a7d1e41f03161a3ddf0c3594f39619c812df
|
d8baac5c7e25a90340a35e1e0b0cce093014f965
|
refs/heads/master
| 2023-03-30T10:19:20.654540
| 2021-04-07T12:10:54
| 2021-04-07T12:10:54
| 354,850,726
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 941
|
py
|
"""cinema URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import path, include
# Route the admin site and the main app (mounted under /cooking/). static()
# appends URL patterns that serve user-uploaded media from MEDIA_ROOT via
# Django itself — per Django docs this only works with DEBUG on, so
# production must serve media separately.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('cooking/', include('main.urls')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"you@example.com"
] |
you@example.com
|
16effac639ce13ca5ccf22f2cfad0658eac06638
|
547ba955855ff623a8ef6e80fcfaddebadf34bed
|
/Chapter08/B06246_08_14-slope.py
|
a5a8f6c01c4af2755f280d506db81016afd04122
|
[] |
no_license
|
CodedQuen/QGIS-Python-Programming-Cookbook
|
94a36d265d0336d5bb36ac02d637ba17ee765b04
|
f84e0159f7c8ec81a29573a7fd2e03b046efce33
|
refs/heads/master
| 2022-05-28T05:04:35.053121
| 2020-05-05T09:44:50
| 2020-05-05T09:44:50
| 261,414,871
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,009
|
py
|
# Computing Road Slope using Elevation Data
# https://github.com/GeospatialPython/Learn/raw/master/road.zip
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import processing
# Input/output dataset paths (sample-data layout on local disk).
dem = "/qgis_data/road/dem.asc"
road = "/qgis_data/road/road.shp"
slope = "/qgis_data/road/slope.tif"
segRoad = "/qgis_data/road/segRoad.shp"
steepness = "/qgis_data/road/steepness.shp"
hillshade = "/qgis_data/road/hillshade.tif"

# Load the elevation raster and the road vector layer.
demLyr = QgsRasterLayer(dem, "DEM")
roadLyr = QgsVectorLayer(road, "Road", "ogr")

# Compute a slope raster over the DEM's full extent (GRASS r.slope).
ext = demLyr.extent()
xmin = ext.xMinimum()
ymin = ext.yMinimum()
xmax = ext.xMaximum()
ymax = ext.yMaximum()
demBox = "{},{},{},{}".format(xmin,xmax,ymin,ymax)
processing.runalg("grass7:r.slope",dem,0,False,1,0,demBox,0,slope)

# Split the road into 500-unit segments so each can carry its own slope value.
ext = roadLyr.extent()
xmin = ext.xMinimum()
ymin = ext.yMinimum()
xmax = ext.xMaximum()
ymax = ext.yMaximum()
roadBox = "{},{},{},{}".format(xmin,xmax,ymin,ymax)
processing.runalg("grass7:v.split.length",road,500,roadBox,-1,0.0001,0,segRoad)

# Register intermediate layers (not shown in the legend) and sample the slope
# raster onto each road segment (SAGA "add grid values to shapes").
slopeLyr = QgsRasterLayer(slope, "Slope")
segRoadLyr = QgsVectorLayer(segRoad, "Segmented Road", "ogr")
QgsMapLayerRegistry.instance().addMapLayers([segRoadLyr,slopeLyr], False)
processing.runalg("saga:addgridvaluestoshapes",segRoad,slope,0,steepness)
steepLyr = QgsVectorLayer(steepness, "Road Gradient", "ogr")

# Classify segments into three steepness ranges and colour them with a
# graduated renderer over the sampled "slopetif" field.
roadGrade = (
    ("Rolling Hill", 0.0, 20.0, "green"),
    ("Steep", 20.0, 40.0, "yellow"),
    ("Very Steep", 40.0, 90.0, "red"))
ranges = []
for label, lower, upper, color in roadGrade:
    sym = QgsSymbolV2.defaultSymbol(steepLyr.geometryType())
    sym.setColor(QColor(color))
    sym.setWidth(3.0)
    rng = QgsRendererRangeV2(lower, upper, sym, label)
    ranges.append(rng)
field = "slopetif"
renderer = QgsGraduatedSymbolRendererV2(field, ranges)
steepLyr.setRendererV2(renderer)

# Add a hillshade backdrop and display both layers on the map.
processing.runalg("saga:analyticalhillshading",dem,0,158,45,4,hillshade)
hs = QgsRasterLayer(hillshade, "Terrain")
QgsMapLayerRegistry.instance().addMapLayers([steepLyr, hs])
|
[
"noreply@github.com"
] |
CodedQuen.noreply@github.com
|
9620af649f65a0c0002935d9e24ea87dd7578b35
|
b0cdab54c5e81681125c01801148c287605ee8d0
|
/speciality/migrations/0005_auto_20181228_2150.py
|
ad4d5210ff776dbc55eeccf74f5266e8a064ed44
|
[] |
no_license
|
lpd76/rdavid2
|
5528746749acc51d4d0f5efd77886929798e2569
|
18aa5120fe4ba0ea44f611dd52b008db52641f17
|
refs/heads/master
| 2020-04-13T20:47:58.141579
| 2019-01-17T16:51:31
| 2019-01-17T16:51:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 637
|
py
|
# Generated by Django 2.1.4 on 2018-12-28 21:50
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration (Django 2.1.4): sets the human-readable
    plural for Speciality and makes SpecialityDetails.speciality cascade
    on delete."""

    dependencies = [
        ('speciality', '0004_auto_20181228_2140'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='speciality',
            options={'verbose_name_plural': 'specialities'},
        ),
        migrations.AlterField(
            model_name='specialitydetails',
            name='speciality',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='speciality.Speciality'),
        ),
    ]
|
[
"louisphilippe.david@gmail.com"
] |
louisphilippe.david@gmail.com
|
b68987bce2f40abf5a5b3be0d046f49f02354bc8
|
650f9e246de38d0ceaee8726f27801f3337e24ce
|
/string-trainer/simple/csimple.py
|
7d3e49c4a22d4dd82a8951120a8f4e6961a58054
|
[
"MIT"
] |
permissive
|
paulscottrobson/old-trainer-archive
|
57b6fbb5886e5fe526c37f40b7fb9c179176bce7
|
b3659d3f36b7443594202e0ae9439e80e493a22c
|
refs/heads/master
| 2021-01-20T02:46:44.751050
| 2017-04-26T07:26:34
| 2017-04-26T07:26:34
| 89,451,106
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,861
|
py
|
# *****************************************************************************************
#
# Simple compiler for TAB1 format
#
# *****************************************************************************************
import re,sys
# *****************************************************************************************
# Compiler / Processor Exception
# *****************************************************************************************
class CompilerException(Exception):
    """Compiler/processor error carrying its description on `message`.

    The base Exception is initialised without arguments, matching the
    original implementation (so `args` stays empty).
    """

    def __init__(self, message):
        super().__init__()
        self.message = message
# *****************************************************************************************
# Strum class
# *****************************************************************************************
class Strum:
    """A single strum: fret pattern, quarter-beat time, optional label.

    The textual strum definition is pre-rendered at construction time into a
    compact per-voice string (see convertToRender) so render() is O(1).
    """

    def __init__(self, strumDef, qbTime, voices, label=""):
        self.strum = strumDef      # raw textual definition, e.g. "02X"
        self.qbTime = qbTime       # position in quarter-beats within the bar
        self.label = label         # optional display label
        self.preRender = self.convertToRender(strumDef, voices)

    def getStrum(self):
        """Return the raw strum definition string."""
        return self.strum

    def getQuarterBeatTime(self):
        """Return the strum's quarter-beat offset."""
        return self.qbTime

    def getLabel(self):
        """Return the strum's label ('' when none was given)."""
        return self.label

    def toString(self):
        """Return a debug string: <strum>@<time> with an optional (label).

        BUG FIX: the original read the non-existent attribute `self.time`
        (always an AttributeError); the constructor stores the time as
        `self.qbTime`.
        """
        s = self.strum + "@" + str(self.qbTime)
        if self.label != "":
            s = s + "(" + self.label + ")"
        return s

    def convertToRender(self, strum, voices):
        """Convert a textual strum into a fixed-width render string.

        Each character of the result encodes one voice, treble last:
        '-' for a muted/absent voice, otherwise chr(semitones + 97).
        Raises CompilerException on an unrecognised character.
        """
        strum = strum.upper().strip()
        r = []
        while strum != "":
            if strum[0] == 'X':
                r.append(-1)  # explicitly muted voice
                strum = strum[1:]
            elif strum[0] in Strum.FRETS:
                diatonic = Strum.FRETS.index(strum[0])
                # Map the diatonic fret to semitones; every 7 frets = 1 octave.
                r.append(Strum.TOCHROMATIC[diatonic % 7] + int(diatonic / 7) * 12)
                strum = strum[1:]
                if (strum + " ")[0] == '+':
                    r[-1] += 1  # '+' sharpens the previous note by a semitone
                    strum = strum[1:]
            else:
                raise CompilerException("Bad strum " + strum)
        # first strum given is the treble so make it the last.
        r.reverse()
        # left-pad with silent voices up to the required voice count
        # (the original comment said "right pad" but insert(0, ...) pads left)
        while len(r) < voices:
            r.insert(0, -1)
        return "".join([chr(x + 97) if x >= 0 else "-" for x in r])

    def render(self):
        """Return the pre-computed render string."""
        return self.preRender


# Semitone offsets of the seven diatonic frets (D E F# G A B C).
Strum.TOCHROMATIC = [
    0, 2, 4, 5, 7, 9, 10
]
# Valid fret characters; T/L/W/H/F sit at indices 10-14 (frets 10-14).
Strum.FRETS = "0123456789TLWHF"
# Quarter-beat timing adjustments applied by Bar.offset().
Strum.QBOFFSETS = {"O": 8, "o": 8, "-": -2, "=": -3, ".": 2}
# *****************************************************************************************
# Bar class
# *****************************************************************************************
class Bar:
    """One bar of music: an ordered list of Strums plus timing state."""

    def __init__(self, barNumber, beats, voices):
        self.barNumber = barNumber
        self.beats = beats
        self.strums = []
        self.voices = voices
        self.qbPosition = 0  # running quarter-beat cursor used by add()/offset()

    def add(self, strumDef, label=""):
        """Append a strum at the current cursor and advance one beat (4 qb)."""
        self.strums.append(Strum(strumDef, self.qbPosition, self.voices, label))
        self.qbPosition += 4
        return self

    def toString(self):
        """Return a debug summary of the bar and its strums."""
        header = f"#{self.barNumber} B:{self.beats} V:{self.voices} C:{len(self.strums)} {{"
        return header + " ".join(s.toString() for s in self.strums) + "}"

    def isOffset(self, c):
        """True when `c` is a recognised timing-adjustment character."""
        return c in Strum.QBOFFSETS

    def offset(self, c):
        """Apply the timing adjustment for character `c` to the cursor."""
        if not self.isOffset(c):
            raise CompilerException("Unknown offset " + c)
        self.qbPosition += Strum.QBOFFSETS[c]

    def render(self):
        """Render the bar: gap digits (chunks of at most 8) between strums."""
        pieces = []
        cursor = 0
        for strum in self.strums:
            gap = strum.getQuarterBeatTime() - cursor
            while gap > 0:
                step = min(8, gap)
                pieces.append(str(step))
                gap -= step
            pieces.append(strum.render())
            cursor = strum.getQuarterBeatTime()
        return "".join(pieces)
# *****************************************************************************************
# Song Class
# *****************************************************************************************
class Song:
    """Loads a .tab1 source file, compiles it into Bars, and exports JSON."""

    def __init__(self,sourceFile):
        self.reset()
        self.loadTab1(sourceFile)
        self.compileBody()
        if self.get("title") == "":
            raise CompilerException("No title provided")

    def reset(self):
        # Default metadata; any `key := value` line in the source overrides it.
        self.bars = []
        self.keys = { "title":"","author":"","beats":"4","tempo":"100", \
            "version":"1", "tuning":"d3,a4,d4", "type":"dulcimer" }

    def get(self,key):
        """Look up a metadata value; keys are case/whitespace-insensitive."""
        return self.keys[key.strip().lower()]

    def loadTab1(self,sourceFile):
        """Read the source: strip //-comments and tabs, apply := metadata."""
        # pre process file - tabs, spaces, comments
        source = open(sourceFile).readlines()
        source = [x if x.find("//") < 0 else x[:x.find("//")] for x in source]
        source = [x.replace("\t"," ").strip() for x in source]
        # key updates.
        for assign in [x for x in source if x.find(":=") >= 0]:
            assign = [x.strip() for x in assign.split(":=")]
            # NOTE(review): `assign` is a list at this point, so this branch
            # only fires when the key itself is the single character '"';
            # it looks intended to strip quotes around the value — confirm.
            if assign[0] == '"' and assign[-1] == '"':
                assign = assign[1:-1]
            self.keys[assign[0].lower()] = assign[1]
        source = [x for x in source if x.find(":=") < 0]
        self.source = source

    def compileBody(self):
        """Compile every non-empty line; '|' separates bars within a line."""
        for line in range(0,len(self.source)):
            if self.source[line] != "":
                for barPart in [x.strip() for x in self.source[line].split("|") if x.strip() != ""]:
                    newBar = Bar(len(self.bars),int(self.get("beats")),3)
                    self.bars.append(newBar)
                    try:
                        self.compileTab1(newBar,barPart.upper())
                    except CompilerException as cEx:
                        # Re-raise with the 1-based source line number appended.
                        newMsg = cEx.message+" @ "+str(line+1)
                        raise Exception(newMsg)

    def compileTab1(self,bar,src):
        """Compile one bar's text: strum groups or timing-offset characters."""
        while src != "":
            m = re.match("^([X"+Strum.FRETS+"\\+]+)\\s*(.*)$",src)
            if m is not None:
                strum = m.group(1)
                bar.add(strum)
                src = m.group(2)
            elif src[0] in Strum.QBOFFSETS:
                bar.offset(src[0])
                src = src[1:].strip()
            else:
                raise CompilerException("Unknown command "+src)

    def exportToJSON(self,handle):
        """Write metadata keys (sorted) and rendered bars as JSON to handle."""
        handle.write("{ \n")
        keys = [x for x in self.keys.keys()]
        keys.sort()
        for k in keys:
            handle.write(' {0:14}:"{1}",\n'.format('"'+k+'"',self.keys[k]))
        handle.write(' "bars": [\n')
        for n in range(0,len(self.bars)):
            r = self.bars[n].render()
            handle.write('{0:14}"{1}"{2}\n'.format("",r,"," if n < len(self.bars)-1 else ""))
        handle.write("\n ]\n")
        handle.write("} \n")
# Demo/driver: compile the sample tab, dump JSON to stdout and to the app.
# NOTE(review): the output file handle is never closed explicitly — it relies
# on interpreter shutdown to flush; confirm this is acceptable.
s = Song("twinkle.tab1")
s.exportToJSON(sys.stdout)
s.exportToJSON(open("../app/music.json","w"))
|
[
"paul@robsons.org.uk"
] |
paul@robsons.org.uk
|
10252106e1b1114e8e4adf56f12d670ac5aee1e0
|
397c9e2743c41cf591692c4fc37f43a9070119bd
|
/build/env/lib/python2.7/site-packages/SQLAlchemy-1.2.0b3-py2.7-linux-x86_64.egg/sqlalchemy/cutils.py
|
a62e8adc17fa043f78d6b8b32d3c703fd2682408
|
[
"Apache-2.0"
] |
permissive
|
bopopescu/myhue
|
cf41238c782d12b3a1a0ee9ef70196359bb67894
|
5f566970a5a1fa5af9f01832c9e9808c47634bc7
|
refs/heads/master
| 2022-11-18T05:37:24.467150
| 2019-11-23T16:16:22
| 2019-11-23T16:16:22
| 282,390,507
| 0
| 0
|
Apache-2.0
| 2020-07-25T07:03:40
| 2020-07-25T07:03:39
| null |
UTF-8
|
Python
| false
| false
| 282
|
py
|
def __bootstrap__():
    """Load the compiled C extension (cutils.so) in place of this stub.

    Auto-generated setuptools egg loader: resolves the shared object that
    ships next to this module, then replaces the module with the
    dynamically loaded extension.
    """
    global __bootstrap__, __loader__, __file__
    # NOTE(review): `imp` is deprecated in modern Python; this generated
    # stub predates importlib-based loaders.
    import sys, pkg_resources, imp
    __file__ = pkg_resources.resource_filename(__name__, 'cutils.so')
    __loader__ = None; del __bootstrap__, __loader__
    imp.load_dynamic(__name__,__file__)
__bootstrap__()
|
[
"352322399@qq.com"
] |
352322399@qq.com
|
93c4453f26512207811cdba404053b9a07b2e9c1
|
b68887f55cfcd0225d732acfbfcc7f3724e49d5d
|
/pages/factories.py
|
a59431ddc5e91da1966fb1ba58c8d6ad49dcbfb0
|
[
"MIT"
] |
permissive
|
rds0751/nhsuk-content-store
|
0ac7eb06f85cc97cd57e58a3f24e19db9991a8a2
|
7bd6a386e3583779ddba2347a4b3a80fdf75b368
|
refs/heads/master
| 2020-04-19T08:53:54.273378
| 2019-01-29T05:08:18
| 2019-01-29T05:08:18
| 168,092,530
| 0
| 0
| null | 2019-01-29T05:05:33
| 2019-01-29T05:05:33
| null |
UTF-8
|
Python
| false
| false
| 1,515
|
py
|
import factory
from home.factories import HomePageFactory, ParentBasedFactory
from . import models
class ConditionsPageFactory(ParentBasedFactory, factory.django.DjangoModelFactory):
    """Singleton "Conditions" FolderPage under the home page.

    path/depth encode the node's tree position — presumably a
    materialised-path page tree; confirm against the page models.
    """
    title = 'Conditions'
    slug = 'conditions'
    path = '000100010002'
    depth = 3
    _ParentFactory = HomePageFactory
    _unique = True  # only ever create one Conditions folder
    class Meta:
        model = models.FolderPage
class ConditionPageFactory(ParentBasedFactory, factory.django.DjangoModelFactory):
    """EditorialPage children of the Conditions folder (unique path per call)."""
    path = factory.Sequence(lambda n: '000100010002%04d' % (n + 1))
    depth = 4
    _ParentFactory = ConditionsPageFactory
    _unique = False  # a fresh page is built on every invocation
    class Meta:
        model = models.EditorialPage
class SymptomsPageFactory(ParentBasedFactory, factory.django.DjangoModelFactory):
    """Singleton "Symptoms" FolderPage under the home page."""
    title = 'Symptoms'
    slug = 'symptoms'
    path = '000100010001'
    depth = 3
    _ParentFactory = HomePageFactory
    _unique = True  # only ever create one Symptoms folder
    class Meta:
        model = models.FolderPage
class SymptomPageFactory(ParentBasedFactory, factory.django.DjangoModelFactory):
    """EditorialPage children of the Symptoms folder (unique path per call)."""
    path = factory.Sequence(lambda n: '000100010001%04d' % (n + 1))
    depth = 4
    _ParentFactory = SymptomsPageFactory
    _unique = False
    class Meta:
        model = models.EditorialPage
class ConditionFolderPageFactory(ParentBasedFactory, factory.django.DjangoModelFactory):
    """FolderPage children of the Conditions folder.

    NOTE(review): shares the same path sequence counter shape as
    ConditionPageFactory — confirm the two cannot collide in tests.
    """
    path = factory.Sequence(lambda n: '000100010002%04d' % (n + 1))
    depth = 4
    _ParentFactory = ConditionsPageFactory
    _unique = False
    class Meta:
        model = models.FolderPage
|
[
"marcofucci@gmail.com"
] |
marcofucci@gmail.com
|
6acd9d44dc1191828b5807335b648d30c0e9194d
|
0eda43d797abfc69ad28000b3c3599af44049bdf
|
/setup.py
|
21f2ea356d1b8e4b3e0b98a7bd61d346e529cf0b
|
[
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
biomodels/BIOMD0000000048
|
d8d23b0491ac80e27692b6e115b9884ee46397d6
|
6d17577fdde45ed5c0ec8457eacb860458e30215
|
refs/heads/master
| 2021-01-18T14:19:32.446581
| 2014-10-16T05:18:50
| 2014-10-16T05:18:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 377
|
py
|
from setuptools import setup, find_packages

setup(
    name='BIOMD0000000048',
    version=20140916,
    description='BIOMD0000000048 from BioModels',
    url='http://www.ebi.ac.uk/biomodels-main/BIOMD0000000048',
    maintainer='Stanley Gu',
    # BUG FIX: the value is an e-mail address and `maintainer_url` is not a
    # recognised setuptools keyword (it was silently dropped with a warning);
    # `maintainer_email` is the documented argument.
    maintainer_email='stanleygu@gmail.com',
    packages=find_packages(),
    # ship the SBML model file(s) and README alongside the package
    package_data={'': ['*.xml', 'README.md']},
)
|
[
"stanleygu@gmail.com"
] |
stanleygu@gmail.com
|
47f0abfaceb11e660d4f305e745db7fe9fee819f
|
da84fa23cc4cf2e81e50892085ac162508bff155
|
/nestris_ocr/capturing/linux/linux_mgr.py
|
6381209b8dd85f1880aca2b7eb9cbc653ec9f0cd
|
[] |
no_license
|
alex-ong/NESTrisOCR
|
83ddaba55b100f0ee20e924731459e547e321887
|
488beeb30e596ccd0548152e241e1c6f772e717b
|
refs/heads/master
| 2023-01-10T08:02:41.702538
| 2021-07-07T06:33:39
| 2021-07-07T06:33:39
| 169,196,192
| 25
| 8
| null | 2022-12-27T15:37:40
| 2019-02-05T05:44:34
|
Python
|
UTF-8
|
Python
| false
| false
| 1,202
|
py
|
import Xlib
import Xlib.display
from Xlib import X
class WindowMgr:
    """Encapsulates some calls for window management"""

    def __init__(self, hwnd=None):
        # X window id of the currently tracked window, if any
        self.handle = hwnd

    def checkWindow(self, hwnd):
        """checks if a window still exists"""
        # NOTE(review): this only echoes the handle back; it never queries
        # the X server — confirm whether a real liveness check was intended.
        return hwnd

    def getWindows(self):
        """
        Return a list of tuples (handler, window name) for each real window.
        """
        windows = []
        def getWindowHierarchy(window, windows):
            children = window.query_tree().children
            for w in children:
                try:
                    # Probe a 1x1 image: only drawable ("real") windows
                    # succeed, so unmapped windows raise BadMatch and are
                    # skipped from the result.
                    w.get_image(0, 0, 1, 1, X.ZPixmap, 0xFFFFFFFF)
                    windows.append(
                        (
                            w.id,
                            w.get_wm_class()[1] if w.get_wm_class() is not None else "",
                        )
                    )
                except Xlib.error.BadMatch:
                    pass
                finally:
                    # Recurse into every child regardless of whether the
                    # probe succeeded, so the full hierarchy is walked.
                    windows = getWindowHierarchy(w, windows)
            return windows
        root = Xlib.display.Display().screen().root
        windows = getWindowHierarchy(root, windows)
        return windows
|
[
"the.onga@gmail.com"
] |
the.onga@gmail.com
|
11065362a8ac77972c519aadeae585300bb5085d
|
7bededcada9271d92f34da6dae7088f3faf61c02
|
/pypureclient/flasharray/FA_2_25/models/active_directory_get_response.py
|
ee529854e041a3ff612ccf174315845d4e2c49ef
|
[
"BSD-2-Clause"
] |
permissive
|
PureStorage-OpenConnect/py-pure-client
|
a5348c6a153f8c809d6e3cf734d95d6946c5f659
|
7e3c3ec1d639fb004627e94d3d63a6fdc141ae1e
|
refs/heads/master
| 2023-09-04T10:59:03.009972
| 2023-08-25T07:40:41
| 2023-08-25T07:40:41
| 160,391,444
| 18
| 29
|
BSD-2-Clause
| 2023-09-08T09:08:30
| 2018-12-04T17:02:51
|
Python
|
UTF-8
|
Python
| false
| false
| 5,613
|
py
|
# coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.25
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_25 import models
# NOTE: Swagger Codegen output — regeneration will overwrite hand edits.
class ActiveDirectoryGetResponse(object):
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attribute name -> declared Swagger type (consumed by to_dict()).
    swagger_types = {
        'more_items_remaining': 'bool',
        'total_item_count': 'int',
        'continuation_token': 'str',
        'items': 'list[ActiveDirectory]'
    }
    # Attribute name -> JSON key in the REST payload.
    attribute_map = {
        'more_items_remaining': 'more_items_remaining',
        'total_item_count': 'total_item_count',
        'continuation_token': 'continuation_token',
        'items': 'items'
    }
    required_args = {
    }

    def __init__(
        self,
        more_items_remaining=None,  # type: bool
        total_item_count=None,  # type: int
        continuation_token=None,  # type: str
        items=None,  # type: List[models.ActiveDirectory]
    ):
        """
        Keyword args:
            more_items_remaining (bool): Returns a value of `true` if subsequent items can be retrieved.
            total_item_count (int): The total number of records after applying all filter query parameters. The `total_item_count` will be calculated if and only if the corresponding query parameter `total_item_count` is set to `true`. If this query parameter is not set or set to `false`, a value of `null` will be returned.
            continuation_token (str): Continuation token that can be provided in the `continuation_token` query param to get the next page of data. If you use the continuation token to page through data you are guaranteed to get all items exactly once regardless of how items are modified. If an item is added or deleted during the pagination then it may or may not be returned. The continuation token is generated if the limit is less than the remaining number of items, and the default sort is used (no sort is specified).
            items (list[ActiveDirectory]): A list of Active Directory computer account configuration objects.
        """
        # Only set attributes that were actually supplied; __setattr__ below
        # rejects any name not present in attribute_map.
        if more_items_remaining is not None:
            self.more_items_remaining = more_items_remaining
        if total_item_count is not None:
            self.total_item_count = total_item_count
        if continuation_token is not None:
            self.continuation_token = continuation_token
        if items is not None:
            self.items = items

    def __setattr__(self, key, value):
        # Reject attributes outside the declared model schema.
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `ActiveDirectoryGetResponse`".format(key))
        self.__dict__[key] = value

    def __getattribute__(self, item):
        # Unset fields are Property placeholders; surface them as missing.
        value = object.__getattribute__(self, item)
        if isinstance(value, Property):
            raise AttributeError
        else:
            return value

    def __getitem__(self, key):
        # Dict-style access limited to declared attributes.
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `ActiveDirectoryGetResponse`".format(key))
        return object.__getattribute__(self, key)

    def __setitem__(self, key, value):
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `ActiveDirectoryGetResponse`".format(key))
        object.__setattr__(self, key, value)

    def __delitem__(self, key):
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `ActiveDirectoryGetResponse`".format(key))
        object.__delattr__(self, key)

    def keys(self):
        return self.attribute_map.keys()

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively convert nested models, lists, and dicts of models.
        for attr, _ in six.iteritems(self.swagger_types):
            if hasattr(self, attr):
                value = getattr(self, attr)
                if isinstance(value, list):
                    result[attr] = list(map(
                        lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                        value
                    ))
                elif hasattr(value, "to_dict"):
                    result[attr] = value.to_dict()
                elif isinstance(value, dict):
                    result[attr] = dict(map(
                        lambda item: (item[0], item[1].to_dict())
                        if hasattr(item[1], "to_dict") else item,
                        value.items()
                    ))
                else:
                    result[attr] = value
        if issubclass(ActiveDirectoryGetResponse, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ActiveDirectoryGetResponse):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
[
"noreply@github.com"
] |
PureStorage-OpenConnect.noreply@github.com
|
e0cf8c5298a8ee4e8a3b21eb3b1fe65504c3047e
|
204ec78fcebcea9e1e1da4905cf3fad0a514b01f
|
/test/unit/test_timeout.py
|
4990b87aad1b2d40888f75acc3481c349d3eb4e0
|
[
"Apache-2.0"
] |
permissive
|
ARMmbed/pyOCD
|
659340bf8753aa8e15a72890b8bea64dff2c2f42
|
d4cdcf7e532cae17caad866839287bbe1e0d952b
|
refs/heads/master
| 2023-05-31T13:45:15.797588
| 2020-10-12T13:55:47
| 2020-10-12T13:55:47
| 190,203,829
| 3
| 1
|
Apache-2.0
| 2019-07-05T11:05:40
| 2019-06-04T13:09:56
|
Python
|
UTF-8
|
Python
| false
| false
| 1,942
|
py
|
# pyOCD debugger
# Copyright (c) 2017-2019 Arm Limited
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from time import (time, sleep)
import pytest
from pyocd.utility.timeout import Timeout
class TestTimeout:
    """Tests for pyocd.utility.timeout.Timeout.

    NOTE: all cases rely on real wall-clock sleeps, so they can be flaky on
    heavily loaded machines.
    """
    def test_no_timeout(self):
        # Break out after 4 * 10 ms of work, inside the 50 ms budget; the
        # while/else `assert False` fires only if check() ends the loop.
        with Timeout(0.05) as to:
            cnt = 0
            while to.check():
                sleep(0.01)
                cnt += 1
                if cnt == 4:
                    break
            else:
                assert False
        assert not to.did_time_out

    def test_timeout_a(self):
        # Spin until check() reports expiry; at least 50 ms must elapse.
        s = time()
        with Timeout(0.05) as to:
            while to.check():
                sleep(0.01)
            assert to.did_time_out
        assert (time() - s) >= 0.05

    def test_timeout_b(self):
        # Poll did_time_out directly; 10 * 20 ms well exceeds the 50 ms budget.
        timedout = False
        s = time()
        with Timeout(0.05) as to:
            cnt = 0
            while cnt < 10:
                if to.did_time_out:
                    timedout = True
                sleep(0.02)
                cnt += 1
            assert timedout
            assert to.did_time_out
        assert (time() - s) >= 0.05

    def test_timeout_c(self):
        # No sleeping: ten quick iterations finish well before 50 ms.
        timedout = False
        with Timeout(0.05) as to:
            cnt = 0
            while cnt < 10:
                if to.did_time_out:
                    timedout = True
                cnt += 1
            assert not timedout
        assert not to.did_time_out
|
[
"flit@me.com"
] |
flit@me.com
|
4a1c309a93de9647a0f1adc90e88ad9c8624b3be
|
2b8c88dfee5c5a784357515eafe8cd5f997c8774
|
/leetcode/dynamic_programming/code-84.py
|
1b9e7013a5652e79e6603e09d069daf7eb6aa134
|
[] |
no_license
|
archenRen/learnpy
|
e060f3aa2f77c35fc1b12345720af6c8b528da57
|
934ef76b97297f746a722a48c76672c7bc744cd9
|
refs/heads/master
| 2022-04-28T20:25:59.114036
| 2020-05-03T02:16:03
| 2020-05-03T02:16:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,187
|
py
|
# Brute-force O(n^2) reference implementation (times out on large inputs).
def largestRectangleArea2(heights: 'List[int]') -> int:
    """Largest rectangle in a histogram, by expanding every right edge.

    For each right edge, walk leftwards tracking the minimum bar height of
    the window and taking the best min-height * width seen.
    """
    best = 0
    for right in range(len(heights)):
        lowest = heights[right]
        best = max(best, lowest)
        for left in range(right - 1, -1, -1):
            lowest = min(lowest, heights[left])
            best = max(best, lowest * (right - left + 1))
    return best
def largestRectangleArea(heights: 'List[int]') -> int:
    """Largest rectangle in a histogram in O(n) using a monotonic stack.

    The stack holds indexes of bars with non-decreasing heights.  A sentinel
    0-height bar is appended so every bar is eventually popped and scored.

    Fixes over the original: the sentinel is appended to a local copy (the
    original mutated the caller's list), and the confusing ``i -= 1; i += 1``
    dance is replaced by only advancing ``i`` on the push branch.

    Returns 0 for an empty histogram.
    """
    bars = heights + [0]  # sentinel; do NOT mutate the caller's list
    stack = []            # indexes of bars, heights non-decreasing
    best = 0
    i = 0
    while i < len(bars):
        if not stack or bars[i] >= bars[stack[-1]]:
            stack.append(i)
            i += 1
        else:
            top = stack.pop()
            # Rectangle of height bars[top] spans from just after the new
            # stack top up to (not including) i.
            width = (i - stack[-1] - 1) if stack else i
            best = max(best, bars[top] * width)
    return best
# Ad-hoc smoke tests left by the original author (expected values inline).
# print(largestRectangleArea([2, 1, 5, 6, 2, 3])) # expect 10 (2*5)
# print(largestRectangleArea([2, 1, 3, 6, 2, 3]))# expect 8 (4*2)
# print(largestRectangleArea([2,3]))
# print(largestRectangleArea([3]))
# Smoke test on the ascending histogram 0..9.
print(largestRectangleArea(list(range(10))))
|
[
"wangdi03@ppdai.com"
] |
wangdi03@ppdai.com
|
610ac8671393a3cc93c8ac2f5fb7cbe982e9e849
|
96090102d5e87f1771ba5a90f7b676f4ccb0afa6
|
/src/profiles/forms.py
|
ef4d8adbf95e2f2acf6f725493fe0bef6afcef2b
|
[] |
no_license
|
rahulsayon/SocialWedding
|
b4b37ad69b89236784c6fb983ab27b4cd2e4266e
|
ab96b6a5d381936463065e75f74d0c8ffd3b1907
|
refs/heads/master
| 2022-12-18T15:08:39.380348
| 2020-09-27T14:49:40
| 2020-09-27T14:49:40
| 299,053,233
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 197
|
py
|
from django import forms
from . models import Profile
class ProfileModalForm(forms.ModelForm):
    """ModelForm exposing the user-editable fields of ``Profile``."""
    class Meta:
        # Backed by the Profile model imported from this app's models module.
        model = Profile
        fields = [ 'first_name','last_name','bio','avatar' ]
|
[
"rahulsayon95@gmail.com"
] |
rahulsayon95@gmail.com
|
ffd4ff39507434f06cbbc5a0767aeadf66cdf5a4
|
2bdedcda705f6dcf45a1e9a090377f892bcb58bb
|
/src/main/output/idea/party/issue/line_friend/group_lot_guy_lombok_kind/door/oauth.py
|
555d4e970019c6d7f81128a63b321c2efb7bdedb
|
[] |
no_license
|
matkosoric/GenericNameTesting
|
860a22af1098dda9ea9e24a1fc681bb728aa2d69
|
03f4a38229c28bc6d83258e5a84fce4b189d5f00
|
refs/heads/master
| 2021-01-08T22:35:20.022350
| 2020-02-21T11:28:21
| 2020-02-21T11:28:21
| 242,123,053
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,672
|
py
|
const request = require('request')
const uuidv4 = require('uuid/v4')
const { LimitReachedError } = require('./errors')
const ITEMS_IN_REQUEST_LIMIT = 25
const REQUEST_CHAR_LIMIT = 5000
const CHAR_PER_HOUR_LIMIT = 2000000
// const subscriptionKey = process.env.TRANSLATOR_TEXT_KEY;
// if (!subscriptionKey) {
// throw new Error('Environment variable for your subscription key is not set.')
// }
const subscriptionKey = 'a674785ff843a278a87995ef4ee1659b'
/** Translation backend that talks to the Microsoft Translator Text API v3. */
function MicrosoftTranslator () {}
/**
 * Translate an array of strings into `targetLang`.
 *
 * Sends a single POST to the v3 `translate` endpoint and resolves with the
 * translated strings in the same order as the input.  The API's quota error
 * codes 400077 (request too large) and 403001 (hourly quota) are mapped to
 * LimitReachedError; other API errors become plain Errors.
 *
 * @param {string[]} strings texts to translate
 * @param {string} targetLang target language code, e.g. 'ru'
 * @returns {Promise<string[]>}
 */
MicrosoftTranslator.prototype.translate = function (strings, targetLang) {
  console.log(`Microsoft: translating ${strings.length} strings to ${targetLang}...`)
  let options = {
    method: 'POST',
    baseUrl: 'https://api.cognitive.microsofttranslator.com/',
    url: 'translate',
    qs: {
      'api-version': '3.0',
      to: targetLang
    },
    headers: {
      // NOTE(review): this header name looks garbled — the v3 API expects
      // 'Ocp-Apim-Subscription-Key'; confirm before relying on this code.
      '5bb321a1b738949e8bace956a490028a': subscriptionKey,
      'Content-type': 'application/json',
      'X-ClientTraceId': uuidv4().toString()
    },
    // v3 request body: one { text } object per input string.
    body: strings.map(str => ({ text: str })),
    json: true
  }
  return new Promise((resolve, reject) => {
    request(options, (err, res, body) => {
      if (err) {
        reject(err)
        return
      }
      if (body.error) {
        console.log('body', body)
        if (body.error.code === 400077) {
          reject(new LimitReachedError('Microsoft', 'Maximum request size'))
        } else if (body.error.code === 403001) {
          reject(new LimitReachedError('Microsoft', 'Quota per hour'))
        } else {
          reject(new Error(body.error.message))
        }
      } else {
        // Each response item may carry several translations; flatten and
        // keep just the text.
        let translations = body
          .reduce((accum, item) => accum.concat(item.translations), [])
          .map(i => i.text)
        resolve(translations)
      }
    })
  }).then(translations => {
    console.log(`Microsoft: Translation succeed. Got ${translations.length} translations.`)
    return translations
  })
}
// Character budget accepted in a single request.
MicrosoftTranslator.prototype.getRequestLimit = function () {
  return REQUEST_CHAR_LIMIT
}
// Maximum number of text items allowed per request.
MicrosoftTranslator.prototype.getRequestItemsCountLimit = function () {
  return ITEMS_IN_REQUEST_LIMIT
}
// Overall per-hour character quota.
MicrosoftTranslator.prototype.getMaxLimit = function () {
  return CHAR_PER_HOUR_LIMIT
}
module.exports = MicrosoftTranslator
// Manual smoke test (kept from the original author):
// new MicrosoftTranslator()
// .translate([(new Array(5001)).join('a'), 'b'], 'ru')
// .then(translations => console.log('Result', translations))
// .catch(err => console.error(err))
/*
* Limits: https://docs.microsoft.com/en-us/azure/cognitive-services/translator/request-limits
* https://docs.microsoft.com/en-us/azure/cognitive-services/translator/reference/v3-0-translate?tabs=curl
* */
|
[
"soric.matko@gmail.com"
] |
soric.matko@gmail.com
|
7094d4bbe7a500eb46faa9fac35c316ada1389af
|
77fc5af96da1d461c86c7f9668b64b99ca04a1b6
|
/codes/horner.py
|
4458f960d38c57f60ba6940082b190afccdbd331
|
[] |
no_license
|
rene-d/edupython
|
5b6bc8ddb5eb8ec896ee70fb961d4e689af1075a
|
1261d0c7aae17bb2d4ff3370860768b73ba4172d
|
refs/heads/master
| 2020-11-24T10:07:18.504472
| 2019-12-21T21:03:08
| 2019-12-21T21:03:08
| 228,099,675
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 425
|
py
|
# Horner's method (synthetic division).
# https://edupython.tuxfamily.org/sources/view.php?code=horner
# Created by IANTE, 12/07/2011.
# Given the coefficients of P(x) (ascending powers) and a known root r,
# compute Q(x) such that P(x) = (x - r) * Q(x) and display the factorisation.
from lycee import *
P=liste_demande('entrez les coefficients de P(x) par ordre des puissances croissantes')
r=demande('Entrez une racine évidente')
Q=[0]*(len(P)-1)
v=0
# Classic Horner recurrence, walking from the highest degree down:
# each accumulated v is one coefficient of the quotient Q.
for d in range(len(P)-2,-1,-1):
    v=P[d+1]+r*v
    Q[d]=v
print (affiche_poly(P)+'=('+affiche_poly([-r,1])+')('+affiche_poly(Q)+')')
|
[
"rene.devichi@gmail.com"
] |
rene.devichi@gmail.com
|
39d5e277eb935eee8876c1af0b0557edcf5f6b91
|
146012dda21ab72badad6daa8f98e6b26fedb128
|
/13day/9-名片系统.py
|
c647a413c352cb726036cb58e94329648c26b284
|
[] |
no_license
|
fengshuai1/1805
|
41786c3561beca580ba82d9e9d4347571e38e198
|
8dc3e6605cc1d6f91685ae45bfebfc062f0aa489
|
refs/heads/master
| 2020-03-19T07:41:40.608389
| 2018-06-28T01:45:43
| 2018-06-28T01:45:43
| 136,140,329
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,124
|
py
|
# Console business-card manager: add / find / edit / delete / list cards,
# each card stored as a dict in an in-memory list.
list = []  # holds the card dicts (NOTE: shadows the builtin ``list``)
print("名片管理系统".center(50,"*"))
while True:
    print("1:添加名片".center(50," "))
    print("2:查找名片".center(50," "))
    print("3:修改名片".center(50," "))
    print("4:删除名片".center(50," "))
    print("5:打印名片".center(50," "))
    num = int(input("请选择功能"))
    if num == 1:
        d = {}  # fresh dict for the new card
        while True:
            # Re-prompt until name/job/phone all pass validation.
            name = input("请输入要添加的名字")
            if len(name) > 4:
                print("太长,请重新输入")
                continue
            job = input("请输入要添加的职位")
            if len(job) > 4:
                print("太长,请重新输入")
                continue
            phone = input("请输入手机号")
            # Phone must be 11 digits and start with "1".
            if len(phone) != 11 or phone.startswith("1") == False:
                print("手机号输入有误,请重新输入")
                continue
            d["name"] = name
            d["job"] = job
            d["phone"] = phone
            # Append the finished card to the list.
            list.append(d)
            print("添加成功")
            break
    elif num == 2:
        name = input("请输入要查找的姓名")
        flag = False  # assume the person is not found yet
        for i in list:
            if name == i["name"]:
                print("姓名:%s\n职位:%s\n电话:%s"%(i["name"],i["job"],i["phone"]))
                flag = True  # found
                break
        if flag == False:
            print("查无此人")
    elif num == 3:
        # The card must be located before it can be edited.
        name = input("请输入你要改的人的姓名")
        flag = False
        for i in list:
            if name == i["name"]:
                print("1:修改名字")
                print("2:修改职位")
                print("3:修改电话")
                num_1 = int(input("请选择功能"))
                if num_1 == 1:
                    new_name = input("请输入新的名字")
                    i["name"] = new_name
                elif num_1 == 2:
                    new_job = input("请输入新的职位")
                    i["job"] = new_job
                elif num_1 == 3:
                    new_phone = input("请输入新的电话")
                    i["phone"] = new_phone
                flag = True
                break
        if flag == False:
            print("查无此人")
    elif num == 4:
        name = input("请输入你要删除的名字")
        flag = False
        for position,i in enumerate(list):  # need the index for pop()
            if name == i["name"]:
                flag = True  # found
                print("1:确认删除")
                print("2:取消删除")
                num_2 = int(input("请选择序号"))
                if num_2 == 1:
                    list.pop(position)  # delete in place
                break
        if flag == False:
            print("查无此人")
    elif num == 5:  # list every stored card
        print("名字\t职位\t电话")
        for i in list:
            print(" "+i["name"]+"\t "+i["job"]+"\t "+i["phone"])
|
[
"1329008013@qq.com"
] |
1329008013@qq.com
|
c441941156bd0808bc93eb34a0c6ef9a076dbaee
|
06164402e4a9c46a03d579175e588519dbd4048d
|
/experiments/experiments_gdsc/cross_validation/vb_nmf/linesearch_xval_vb.py
|
013c70a9a0481ff098be2e4b97b6fb3098dc6e91
|
[
"Apache-2.0"
] |
permissive
|
XuanHeIIIS/BNMTF
|
19547e36466ecee8d45fb0002d305ee6b7ba6c23
|
34df0c3cebc5e67a5e39762b9305b75d73a2a0e0
|
refs/heads/master
| 2020-03-27T12:47:58.375964
| 2018-06-10T10:22:19
| 2018-06-10T10:22:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,825
|
py
|
"""
Run the cross validation with line search for model selection using VB-NMF on
the Sanger dataset.
"""
import sys, os
project_location = os.path.dirname(__file__)+"/../../../../../"
sys.path.append(project_location)
import numpy, random
from BNMTF.code.models.bnmf_vb_optimised import bnmf_vb_optimised
from BNMTF.code.cross_validation.line_search_cross_validation import LineSearchCrossValidation
from BNMTF.data_drug_sensitivity.gdsc.load_data import load_gdsc
# Settings
standardised = False          # whether load_gdsc standardises the matrix
iterations = 1000             # VB iterations per model fit
init_UV = 'random'            # initialisation strategy for the factor matrices
K_range = [15,20,25,30]       # candidate ranks explored by the line search
no_folds = 10                 # outer cross-validation folds
restarts = 1                  # random restarts per fold/rank
quality_metric = 'AIC'        # model-selection criterion for the line search
output_file = "./results.txt"
# NOTE(review): alpha/beta and lambdaU/lambdaV are presumably the Gamma /
# exponential hyperparameters of bnmf_vb_optimised — confirm against that class.
alpha, beta = 1., 1.
lambdaU = 1./10.
lambdaV = 1./10.
priors = { 'alpha':alpha, 'beta':beta, 'lambdaU':lambdaU, 'lambdaV':lambdaV }
# Load in the Sanger dataset
(_,X_min,M,_,_,_,_) = load_gdsc(standardised=standardised,sep=',')
# Run the cross-validation framework
#random.seed(42)
#numpy.random.seed(9000)
nested_crossval = LineSearchCrossValidation(
    classifier=bnmf_vb_optimised,
    R=X_min,
    M=M,
    values_K=K_range,
    folds=no_folds,
    priors=priors,
    init_UV=init_UV,
    iterations=iterations,
    restarts=restarts,
    quality_metric=quality_metric,
    file_performance=output_file
)
nested_crossval.run()
"""
all_MSE = [2.2242309355503416, 2.3108126630384804, 2.4095896447817631, 2.2188694213830114, 2.4185938516134278, 2.1808748510586002, 2.2503432196374651, 2.2305023229025145, 2.3595465204422488, 2.2186318302878667]
all_R2 = [0.8123419361488506, 0.8011409466575017, 0.7943028271877304, 0.8125046212085996, 0.7934881370166628, 0.8111969927756486, 0.8058878338360765, 0.811089129626958, 0.798953276136085, 0.8151865445946502]
Average MSE: 2.2821995260695718 +- 0.0066998949966021598
Average R^2: 0.80560922451887629 +- 5.8495363723835686e-05
"""
|
[
"tab43@cam.ac.uk"
] |
tab43@cam.ac.uk
|
dda125c8083666e799a4bccbfac1e27a51202a18
|
9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97
|
/sdBs/AllRun/ec_13284-2532/sdB_EC_13284-2532_lc.py
|
dfa44dd24cbfe5cc7255aa0893f9c5a3ba440b9b
|
[] |
no_license
|
tboudreaux/SummerSTScICode
|
73b2e5839b10c0bf733808f4316d34be91c5a3bd
|
4dd1ffbb09e0a599257d21872f9d62b5420028b0
|
refs/heads/master
| 2021-01-20T18:07:44.723496
| 2016-08-08T16:49:53
| 2016-08-08T16:49:53
| 65,221,159
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 351
|
py
|
from gPhoton.gAperture import gAperture
def main():
    """Run gPhoton aperture photometry (NUV light curve) for sdB EC 13284-2532."""
    # NOTE(review): the csvfile path contains a space before "/sdB_EC_..." —
    # looks like a generated-path artifact; confirm the directory name exists.
    gAperture(band="NUV", skypos=[202.803875,-25.791181], stepsz=30., csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_EC_13284-2532 /sdB_EC_13284-2532_lc.csv", maxgap=1000., overwrite=True, radius=0.00555556, annulus=[0.005972227,0.0103888972], verbose=3)
if __name__ == "__main__":
    main()
|
[
"thomas@boudreauxmail.com"
] |
thomas@boudreauxmail.com
|
d9bdb178ecc13cd0d02f628d51c3fc104d950945
|
0ddcfcbfc3faa81c79e320c34c35a972dab86498
|
/puzzles/power_of_three.py
|
0c90784597ced25c72515a818f2ab265938bf1d4
|
[] |
no_license
|
IvanWoo/coding-interview-questions
|
3311da45895ac4f3c394b22530079c79a9215a1c
|
1312305b199b65a11804a000432ebe28d1fba87e
|
refs/heads/master
| 2023-08-09T19:46:28.278111
| 2023-06-21T01:47:07
| 2023-06-21T01:47:07
| 135,307,912
| 0
| 0
| null | 2023-07-20T12:14:38
| 2018-05-29T14:24:43
|
Python
|
UTF-8
|
Python
| false
| false
| 862
|
py
|
# https://leetcode.com/problems/power-of-three/
"""
Given an integer n, return true if it is a power of three. Otherwise, return false.
An integer n is a power of three, if there exists an integer x such that n == 3x.
Example 1:
Input: n = 27
Output: true
Example 2:
Input: n = 0
Output: false
Example 3:
Input: n = 9
Output: true
Constraints:
-231 <= n <= 231 - 1
Follow up: Could you solve it without loops/recursion?
"""
from math import log
def is_power_of_three(n: int) -> bool:
    """Loop-free check that n is an exact power of 3.

    round() absorbs floating-point error in log(); the exact integer
    power comparison then confirms the candidate exponent.
    """
    return n > 0 and n == 3 ** round(log(n, 3))
def is_power_of_three(n: int) -> bool:
    """Iterative check that n is an exact power of 3 (divide out factors of 3)."""
    if n <= 0:
        return False
    # Strip factors of 3; a pure power of three reduces all the way to 1.
    while n % 3 == 0:
        n //= 3
    return n == 1
|
[
"tyivanwu@gmail.com"
] |
tyivanwu@gmail.com
|
566fdde94b7a27a1ac308ac870b09e58209d60fc
|
2827d7a837eb29c3cb07793ab6d3d5a753e18669
|
/alipay/aop/api/request/AlipayMarketingCampaignDiscountBudgetAppendRequest.py
|
3e1af80821fc15b93a0a4328c878c0180e7b136d
|
[
"Apache-2.0"
] |
permissive
|
shaobenbin/alipay-sdk-python
|
22e809b8f5096bec57d2bb25414f64bdc87fa8b3
|
5232ad74dff2e8a6e0e7646ab3318feefa07a37d
|
refs/heads/master
| 2020-03-21T04:51:39.935692
| 2018-06-21T07:03:31
| 2018-06-21T07:03:31
| 138,131,022
| 0
| 0
| null | 2018-06-21T06:50:24
| 2018-06-21T06:50:24
| null |
UTF-8
|
Python
| false
| false
| 4,058
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.AlipayMarketingCampaignDiscountBudgetAppendModel import AlipayMarketingCampaignDiscountBudgetAppendModel
class AlipayMarketingCampaignDiscountBudgetAppendRequest(object):
    """Request wrapper for the ``alipay.marketing.campaign.discount.budget.append``
    gateway method.

    Carries the business payload (``biz_model``/``biz_content``) together with
    the common gateway parameters, and flattens everything into the wire
    format via :meth:`get_params`.
    """

    def __init__(self, biz_model=None):
        self._biz_model = biz_model
        self._biz_content = None
        self._version = "1.0"          # gateway API version
        self._terminal_type = None
        self._terminal_info = None
        self._prod_code = None
        self._notify_url = None
        self._return_url = None
        self._udf_params = None        # extra user-defined text parameters
        self._need_encrypt = False

    @property
    def biz_model(self):
        return self._biz_model

    @biz_model.setter
    def biz_model(self, value):
        self._biz_model = value

    @property
    def biz_content(self):
        return self._biz_content

    @biz_content.setter
    def biz_content(self, value):
        # Accept either a ready model instance or a plain dict to convert.
        if isinstance(value, AlipayMarketingCampaignDiscountBudgetAppendModel):
            self._biz_content = value
        else:
            self._biz_content = AlipayMarketingCampaignDiscountBudgetAppendModel.from_alipay_dict(value)

    @property
    def version(self):
        return self._version

    @version.setter
    def version(self, value):
        self._version = value

    @property
    def terminal_type(self):
        return self._terminal_type

    @terminal_type.setter
    def terminal_type(self, value):
        self._terminal_type = value

    @property
    def terminal_info(self):
        return self._terminal_info

    @terminal_info.setter
    def terminal_info(self, value):
        self._terminal_info = value

    @property
    def prod_code(self):
        return self._prod_code

    @prod_code.setter
    def prod_code(self, value):
        self._prod_code = value

    @property
    def notify_url(self):
        return self._notify_url

    @notify_url.setter
    def notify_url(self, value):
        self._notify_url = value

    @property
    def return_url(self):
        # Bug fix: this getter previously returned self._notify_url, so a
        # configured return_url could never be read back correctly.
        return self._return_url

    @return_url.setter
    def return_url(self, value):
        self._return_url = value

    @property
    def udf_params(self):
        return self._udf_params

    @udf_params.setter
    def udf_params(self, value):
        # Silently ignore non-dict assignments (SDK-wide convention).
        if not isinstance(value, dict):
            return
        self._udf_params = value

    @property
    def need_encrypt(self):
        return self._need_encrypt

    @need_encrypt.setter
    def need_encrypt(self, value):
        self._need_encrypt = value

    def add_other_text_param(self, key, value):
        """Attach an extra user-defined text parameter to the request."""
        if not self.udf_params:
            self.udf_params = dict()
        self.udf_params[key] = value

    def get_params(self):
        """Flatten the request into the dict of gateway parameters.

        Both ``biz_model`` and ``biz_content`` (if set) are JSON-encoded with
        sorted keys and compact separators; optional common parameters are
        included only when truthy, and udf_params are merged in last.
        """
        params = dict()
        params[P_METHOD] = 'alipay.marketing.campaign.discount.budget.append'
        params[P_VERSION] = self.version
        if self.biz_model:
            params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
        if self.biz_content:
            if hasattr(self.biz_content, 'to_alipay_dict'):
                params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
            else:
                params['biz_content'] = self.biz_content
        if self.terminal_type:
            params['terminal_type'] = self.terminal_type
        if self.terminal_info:
            params['terminal_info'] = self.terminal_info
        if self.prod_code:
            params['prod_code'] = self.prod_code
        if self.notify_url:
            params['notify_url'] = self.notify_url
        if self.return_url:
            params['return_url'] = self.return_url
        if self.udf_params:
            params.update(self.udf_params)
        return params

    def get_multipart_params(self):
        """No file uploads for this endpoint; always returns an empty dict."""
        multipart_params = dict()
        return multipart_params
|
[
"liuqun.lq@alibaba-inc.com"
] |
liuqun.lq@alibaba-inc.com
|
22eb63305890280ff00427e395dc7ee12f3f314c
|
d554b1aa8b70fddf81da8988b4aaa43788fede88
|
/5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/223/users/4330/codes/1594_1800.py
|
0302eb5caf63f16066aa6406b53455d42458aa87
|
[] |
no_license
|
JosephLevinthal/Research-projects
|
a3bc3ca3b09faad16f5cce5949a2279cf14742ba
|
60d5fd6eb864a5181f4321e7a992812f3c2139f9
|
refs/heads/master
| 2022-07-31T06:43:02.686109
| 2020-05-23T00:24:26
| 2020-05-23T00:24:26
| 266,199,309
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 168
|
py
|
# Read three integers and print (a^2 + b^2 + c^2) / (a + b + c),
# rounded to 7 decimal places.
a = int(input("Insira o valor de A"))
b = int(input("Insira o valor de B"))
c = int(input("Insira o valor de C"))
sum_of_squares = a * a + b * b + c * c
total = a + b + c
print(round(sum_of_squares / total, 7))
|
[
"jvlo@icomp.ufam.edu.br"
] |
jvlo@icomp.ufam.edu.br
|
c9580567614da5bed9f9c744137f3d463eb77515
|
dac7d0abff54dbeb9e6587f17866a34b5e7f3948
|
/Cobbity/compare.py
|
ec3b6cf07d175832a7fb04e914de1c0c894bf84c
|
[] |
no_license
|
KipCrossing/EMI_Field
|
5665aba5ff5fbf4a4d42fc9b3efc9aa3b3f51eea
|
e52142648388a25d26f682986c586cd1827e31e0
|
refs/heads/master
| 2020-05-22T12:37:42.892290
| 2019-09-12T01:27:24
| 2019-09-12T01:27:24
| 186,342,788
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,487
|
py
|
import pandas as pd
# Compare the smoothed OpenEM and DUALEM tracks: drop no-data rows, export a
# side-by-side CSV, and report rows whose coordinates coincide.
df_OpenEM = pd.read_csv("~/Cobbity/Output/Smooth_C2_OpenEM.xyz", header=None, delimiter=r"\s+")
df_DUALEM = pd.read_csv("~/Cobbity/Output/Smooth_C2_DUALEM.xyz", header=None, delimiter=r"\s+")
print(df_OpenEM.head())
print(df_DUALEM.head())
# Keep only valid OpenEM samples (-9999 is the no-data sentinel), with
# their coordinates kept aligned to the readings.
New_OpenEM_readings = []
New_OpenEM_lon = []
New_OpenEM_lat = []
open_lon = df_OpenEM[0].tolist()  # hoisted: the original rebuilt these lists on every row
open_lat = df_OpenEM[1].tolist()
# enumerate() replaces the original manual counter, which shadowed builtin sum().
for row, reading in enumerate(df_OpenEM[2].tolist()):
    if reading > -9999:
        New_OpenEM_readings.append(reading)
        New_OpenEM_lon.append(open_lon[row])
        New_OpenEM_lat.append(open_lat[row])
print(len(New_OpenEM_lon),len(New_OpenEM_lat),len(New_OpenEM_readings))
New_DUALEM_readings = []
New_DUALEM_lon = []
New_DUALEM_lat = []
dual_lon = df_DUALEM[0].tolist()
dual_lat = df_DUALEM[1].tolist()
for row, reading in enumerate(df_DUALEM[2].tolist()):
    if reading > -9999:
        New_DUALEM_readings.append(reading)
        New_DUALEM_lon.append(dual_lon[row])
        New_DUALEM_lat.append(dual_lat[row])
print(len(New_DUALEM_lon),len(New_DUALEM_lat),len(New_DUALEM_readings))
# Side-by-side export (assumes equal row counts after filtering — as the
# original did; confirm the instruments log in lockstep).
data = {"DUALEM": New_DUALEM_readings,"OpenEM": New_OpenEM_readings,"X1":New_DUALEM_lon,"X2":New_OpenEM_lon,"Y1":New_DUALEM_lat,"Y2":New_OpenEM_lat}
df_out = pd.DataFrame(data, columns=["DUALEM","OpenEM","X1","X2","Y1","Y2"])
df_out.to_csv("~/Cobbity/Output/compare_Smooth_DUALEM_OpenEm.csv")
# Print the row indices where both tracks share identical coordinates.
# Bug fix: the original incremented its counter only inside the `if`, so the
# scan froze at the first mismatching row and kept re-testing it.
for idx in range(len(New_DUALEM_lon)):
    if New_DUALEM_lon[idx] == New_OpenEM_lon[idx] and New_DUALEM_lat[idx] == New_OpenEM_lat[idx]:
        print(idx)
|
[
"kip.crossing@gmail.com"
] |
kip.crossing@gmail.com
|
d839e4467adb97c603f1bbf720207d83942d87d2
|
46267e38d63bb487ccef4612593676412ea956d7
|
/astraeus/core.py
|
268d58bf9ad346c038f6b1a1989ccc7a00c0339b
|
[
"MIT"
] |
permissive
|
eos-sns/astraeus
|
17f63fc02e27b8b40b8470fb8202b9bb4b50e3d6
|
bbbe820bdc02d7c0209854b80b1f952bfaaf984a
|
refs/heads/master
| 2020-04-25T12:56:35.666259
| 2019-09-18T12:15:04
| 2019-09-18T12:15:04
| 172,793,341
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,379
|
py
|
# -*- coding: utf-8 -*-
import abc
import datetime
import uuid
from astraeus.models.memcache import MemcacheClientBuilder, MemcacheFacade
from astraeus.models.mongodb import MongoDBBuilder
class Hasher:
    """ Something that hashes something """
    # NOTE(review): this class does not inherit abc.ABC, so @abc.abstractmethod
    # is not actually enforced — Hasher can be instantiated and hash_key()
    # returns 0.  Confirm whether enforcement was intended before changing.
    @abc.abstractmethod
    def hash_key(self, key):
        return 0
class UUIDHasher(Hasher):
    """Hasher that ignores its input and returns a random UUID4 hex string."""

    def hash_key(self, key=None):
        # .hex is the 32-character lowercase hex form (UUID without dashes).
        return uuid.uuid4().hex
class Astraeus(object):
    """Key/value front-end that keeps data in a local memcache instance."""

    MEMCACHE_PORT = 11211                # default memcache port
    EXPIRE_SECONDS = 14 * 24 * 60 * 60   # entries live for 14 days

    def __init__(self,
                 port=MEMCACHE_PORT,
                 expire_seconds=EXPIRE_SECONDS,
                 hash_function=UUIDHasher().hash_key):
        """
        :param port: port where memcache runs (localhost)
        :param expire_seconds: values vanish from memcache after this long
        :param hash_function: callable producing a key from a value string
        """
        builder = MemcacheClientBuilder() \
            .with_server('localhost') \
            .with_port(port)
        self.memcache = MemcacheFacade(builder.build(), expire_seconds)
        self.hasher = hash_function

    def _get_key(self, val):
        # Keys are derived from the stringified value.  # todo better jsonify ?
        return self.hasher(str(val))

    def save(self, val):
        """Store val in memcache; return its key, or None if the write failed."""
        assert val is not None
        key = self._get_key(val)
        return key if self.memcache.set(key, val) else None

    def retrieve(self, key):
        """Return the value stored under key (None when absent or expired)."""
        assert key is not None
        return self.memcache.get(key)
class MongoAstraeus(Astraeus):
    """Astraeus variant that mirrors every write into MongoDB for redundancy.

    Memcache remains the primary (fast, expiring) store; MongoDB keeps a
    permanent copy so values survive memcache eviction.  All store/fetch
    operations are best-effort: failures are logged and yield None rather
    than propagating.
    """

    MONGO_DB = 'astraeus'  # todo move to config

    def _get_parent(self):
        # Name the class explicitly: super(self.__class__, self) recurses
        # forever if this class is ever subclassed.
        return super(MongoAstraeus, self)

    def __init__(self,
                 mongo_collection,
                 mongo_db=MONGO_DB,
                 port=Astraeus.MEMCACHE_PORT,
                 expire_seconds=Astraeus.EXPIRE_SECONDS,
                 hash_function=UUIDHasher().hash_key):
        """
        :param mongo_collection: collection used for the redundant copies
        :param mongo_db: MongoDB database name
        :param port: memcache port (see Astraeus)
        :param expire_seconds: memcache TTL (the Mongo copy never expires)
        :param hash_function: callable producing a key from a value string
        """
        super(MongoAstraeus, self).__init__(port, expire_seconds, hash_function)
        mongo = MongoDBBuilder() \
            .with_db(mongo_db) \
            .build()
        self.mongo = mongo[mongo_collection]  # specify collection

    def _try_save_to_memcache(self, val):
        """Best-effort memcache write; returns the key or None on failure."""
        try:
            return self._get_parent().save(val)
        except Exception:  # was a bare except: don't swallow SystemExit/KeyboardInterrupt
            print('Cannot save {} to memcache'.format(val))
            return None

    def _try_save_to_mongodb(self, memcache_key, val):
        """Best-effort MongoDB write; reuses the memcache key when available."""
        if not memcache_key:
            memcache_key = self._get_key(val)
        try:
            item = self.build_mongo_item(memcache_key, val)
            self.mongo.insert_one(item)
            return memcache_key
        except Exception:
            print('Cannot save {} to mongodb'.format(val))
            return None

    def save(self, val):
        """Store val in memcache and MongoDB; return its key (or None)."""
        key = self._try_save_to_memcache(val)  # first save to memcache ...
        key = self._try_save_to_mongodb(key, val)  # ... then in mongo
        return key

    def _try_retrieve_from_memcache(self, key):
        try:
            return self._get_parent().retrieve(key)
        except Exception:
            print('Cannot retrieve {} from memcache'.format(key))
            return None

    def _try_retrieve_from_mongodb(self, key):
        try:
            # NOTE(review): a pymongo cursor is truthy even when empty; a miss
            # is actually handled by max() raising on the empty cursor below.
            results = self.mongo.find({'key': key})
            if results:
                most_recent = max(results, key=lambda x: x['time'])  # newest copy wins
                return most_recent['val']  # DO NOT check expiration: this is a redundant database
        except Exception:
            print('Cannot retrieve {} from mongodb'.format(key))
        return None

    def retrieve(self, key):
        """Fetch key, preferring memcache and falling back to MongoDB."""
        val = self._try_retrieve_from_memcache(key)  # first try with memcache ...
        if not val:
            return self._try_retrieve_from_mongodb(key)  # ... then with mongo
        return val

    @staticmethod
    def build_mongo_item(key, val):
        """Build the Mongo document: key, value, and an insertion timestamp."""
        time_now = datetime.datetime.now()
        return {
            'key': key,
            'val': val,
            'time': time_now
        }
|
[
"sirfoga@protonmail.com"
] |
sirfoga@protonmail.com
|
19d14b124965f2f461568792ad34bb6bbd4dc10d
|
5fe72bb13baf3649058ebe11aa86ad4fc56c69ed
|
/hard-gists/367ff95d4d3d3770fa7b/snippet.py
|
6cd51cef4fd2bff70541bd8d5ea0c23646114dd5
|
[
"Apache-2.0"
] |
permissive
|
dockerizeme/dockerizeme
|
8825fed45ff0ce8fb1dbe34959237e8048900a29
|
408f3fa3d36542d8fc1236ba1cac804de6f14b0c
|
refs/heads/master
| 2022-12-10T09:30:51.029846
| 2020-09-02T13:34:49
| 2020-09-02T13:34:49
| 144,501,661
| 24
| 20
|
Apache-2.0
| 2022-11-21T12:34:29
| 2018-08-12T21:21:04
|
Python
|
UTF-8
|
Python
| false
| false
| 1,003
|
py
|
from collections import defaultdict
from django.db.models.signals import *
class DisableSignals(object):
    """Context manager that temporarily disconnects Django model signals.

    On entry, the receiver list of every targeted signal is stashed and
    cleared; on exit, the receivers are restored.  Useful for bulk
    operations (imports, fixtures) where per-save handlers are unwanted.
    """

    def __init__(self, disabled_signals=None):
        self.stashed_signals = defaultdict(list)
        # Default to all the common model/migration signals.
        self.disabled_signals = disabled_signals or [
            pre_init, post_init,
            pre_save, post_save,
            pre_delete, post_delete,
            pre_migrate, post_migrate,
        ]

    def __enter__(self):
        for signal in self.disabled_signals:
            self.disconnect(signal)

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Bug fix: iterate over a snapshot — reconnect() deletes keys from
        # stashed_signals, and mutating a dict while iterating its keys
        # raises RuntimeError ("dictionary changed size during iteration").
        for signal in list(self.stashed_signals):
            self.reconnect(signal)

    def disconnect(self, signal):
        """Stash and clear the signal's receiver list."""
        self.stashed_signals[signal] = signal.receivers
        signal.receivers = []

    def reconnect(self, signal):
        """Restore the stashed receiver list and drop the stash entry."""
        signal.receivers = self.stashed_signals.get(signal, [])
        del self.stashed_signals[signal]
# Example usage:
# with DisableSignals():
# user.save() # will not call any signals
|
[
"42325807+dockerizeme@users.noreply.github.com"
] |
42325807+dockerizeme@users.noreply.github.com
|
f931f93487dee0b1d116ef38d52fa5222198b620
|
b6c09a1b87074d6e58884211ce24df8ec354da5c
|
/345. 反转字符串中的元音字母.py
|
f259c3af854c1e4b250ef47b593bf61f4f86067c
|
[] |
no_license
|
fengxiaolong886/leetcode
|
a0ee12d67c4a10fb12d6ca4369762ab5b090cab1
|
4c0897bc06a297fa9225a0c46d8ec9217d876db8
|
refs/heads/master
| 2023-03-18T22:16:29.212016
| 2021-03-07T03:48:16
| 2021-03-07T03:48:16
| 339,604,263
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 444
|
py
|
"""
编写一个函数,以字符串作为输入,反转该字符串中的元音字母。
"""
def reverseVowels(s):
    """Return s with its vowels (both cases) in reverse order; every other
    character keeps its position."""
    vowels = "aeiouAEIOU"
    chars = list(s)
    lo, hi = 0, len(chars) - 1
    # Two pointers: walk inwards, swapping each vowel pair encountered.
    while lo < hi:
        if chars[lo] not in vowels:
            lo += 1
        elif chars[hi] not in vowels:
            hi -= 1
        else:
            chars[lo], chars[hi] = chars[hi], chars[lo]
            lo += 1
            hi -= 1
    return "".join(chars)
print(reverseVowels("hello"))
print(reverseVowels("leetcode"))
|
[
"xlfeng886@163.com"
] |
xlfeng886@163.com
|
e30926a419b5d166b02a76f3f5c8ed329de20e60
|
ff9fedd28f7436ba9945421e061fd2e1dadbf5c3
|
/Alogithms/Dijkstra/dijkstra.py
|
3d1510e8e6c59b494d2b934513ca7381f575586b
|
[] |
no_license
|
ritwikbadola/Empirical-Analysis-Of-Algorithms
|
0ed1b9c2c92813d11af33405527a4ecced8b2845
|
7ffb7a03e9d356d5368d2d79a49a8dabf49ed6c7
|
refs/heads/master
| 2022-08-19T12:39:24.875859
| 2020-05-16T03:53:35
| 2020-05-16T03:53:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,979
|
py
|
# Python program for Dijkstra's single
# source shortest path algorithm. The program is
# for adjacency matrix representation of the graph
# Library for INT_MAX
import sys
class Graph():
    # Dense graph over an adjacency matrix with Dijkstra single-source
    # shortest paths.  NOTE: Python 2 syntax (print statements, sys.maxint).
    def __init__(self, vertices):
        # vertices: node count; graph starts as an all-zero V x V matrix
        # (0 means "no edge").
        self.V = vertices
        self.graph = [[0 for column in range(vertices)]
                      for row in range(vertices)]
    def printSolution(self, dist):
        # Pretty-print each vertex's distance from the source.
        print "Vertex \tDistance from Source"
        for node in range(self.V):
            print node, "\t", dist[node]
    # A utility function to find the vertex with
    # minimum distance value, from the set of vertices
    # not yet included in shortest path tree
    def minDistance(self, dist, sptSet):
        # Initilaize minimum distance for next node
        min = sys.maxint
        # Search not nearest vertex not in the
        # shortest path tree
        # NOTE(review): if every vertex is already in sptSet (or unreachable),
        # min_index is never assigned and this raises UnboundLocalError.
        for v in range(self.V):
            if dist[v] < min and sptSet[v] == False:
                min = dist[v]
                min_index = v
        return min_index
    # Funtion that implements Dijkstra's single source
    # shortest path algorithm for a graph represented
    # using adjacency matrix representation
    def dijkstra(self, src):
        dist = [sys.maxint] * self.V
        dist[src] = 0
        sptSet = [False] * self.V
        for cout in range(self.V):
            # Pick the minimum distance vertex from
            # the set of vertices not yet processed.
            # u is always equal to src in first iteration
            u = self.minDistance(dist, sptSet)
            # Put the minimum distance vertex in the
            # shotest path tree
            sptSet[u] = True
            # Update dist value of the adjacent vertices
            # of the picked vertex only if the current
            # distance is greater than new distance and
            # the vertex in not in the shotest path tree
            for v in range(self.V):
                if self.graph[u][v] > 0 and sptSet[v] == False and \
                dist[v] > dist[u] + self.graph[u][v]:
                    dist[v] = dist[u] + self.graph[u][v]
        # self.printSolution(dist)
# Driver program
# Build a 25-vertex weighted graph (0 = no edge; weights look like distances)
# and run Dijkstra from vertex 0.  Output is suppressed because dijkstra()'s
# printSolution call is commented out above.
g = Graph(25)
g.graph = [ [0, 156, 0, 0, 246, 0, 184, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 462, 0, 0, 171, 0, 157, 0, 363],
        [156, 0, 323, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 323, 0, 151, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 151, 0, 0, 545, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [246, 0, 0, 0, 0, 174, 0, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 545, 174, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [184, 0, 0, 0, 0, 0, 0, 83, 224, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 192, 0, 0, 0],
        [0, 0, 0, 0, 100, 0, 83, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 224, 0, 0, 209, 0, 0, 0, 0, 217, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 209, 0, 116, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 116, 0, 180, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 180, 0, 157, 251, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 157, 0, 342, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 251, 342, 0, 111, 208, 0, 0, 0, 0, 0, 382, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 217, 0, 0, 0, 0, 111, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 208, 0, 0, 335, 462, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 335, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [462, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 462, 0, 0, 212, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 212, 0, 135, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 135, 0, 174, 0, 0, 0, 0],
        [171, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 174, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 192, 0, 0, 0, 0, 0, 0, 382, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [157, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 171, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 171, 0, 0],
        [363, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ];
g.dijkstra(0);
# This code is contributed by Divyanshu Mehta
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
e8e564dd8a81a7204c2c1219c8828de5d75a5b39
|
b76615ff745c6d66803506251c3d4109faf50802
|
/pyobjc-framework-Cocoa/PyObjCTest/test_nsexpression.py
|
10aca71722b9813074d199da83ce3d260fed8d3b
|
[
"MIT"
] |
permissive
|
danchr/pyobjc-git
|
6ef17e472f54251e283a0801ce29e9eff9c20ac0
|
62b787fddeb381184043c7ff136f1c480755ab69
|
refs/heads/master
| 2021-01-04T12:24:31.581750
| 2020-02-02T20:43:02
| 2020-02-02T20:43:02
| 240,537,392
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,427
|
py
|
from PyObjCTools.TestSupport import *
from Foundation import *
class TestNSExpression(TestCase):
    """Verify the NSExpression type constants and block-based APIs that PyObjC
    exposes, gated by the macOS version each was introduced in."""
    def testConstants(self):
        # NSExpressionType values available on all supported macOS versions.
        self.assertEqual(NSConstantValueExpressionType, 0)
        self.assertEqual(NSEvaluatedObjectExpressionType, 1)
        self.assertEqual(NSVariableExpressionType, 2)
        self.assertEqual(NSKeyPathExpressionType, 3)
        self.assertEqual(NSFunctionExpressionType, 4)
        self.assertEqual(NSUnionSetExpressionType, 5)
        self.assertEqual(NSIntersectSetExpressionType, 6)
        self.assertEqual(NSMinusSetExpressionType, 7)
        self.assertEqual(NSSubqueryExpressionType, 13)
        self.assertEqual(NSAggregateExpressionType, 14)
    @min_os_level("10.6")
    def testConstants10_6(self):
        self.assertEqual(NSBlockExpressionType, 19)
    @min_os_level("10.9")
    def testConstants10_9(self):
        self.assertEqual(NSAnyKeyExpressionType, 15)
    @min_os_level("10.11")
    def testConstants10_11(self):
        self.assertEqual(NSConditionalExpressionType, 20)
    @min_os_level("10.6")
    def testMethods10_6(self):
        # Block-based expression APIs: both use the b"@@@@" encoded signature.
        self.assertArgIsBlock(NSExpression.expressionForBlock_arguments_, 0, b"@@@@")
        self.assertResultIsBlock(NSExpression.expressionBlock, b"@@@@")
    @min_os_level("10.6")
    def testMethod10_6_unsupported(self):
        # expressionWithFormat_ treats its first argument like a printf format.
        self.assertArgIsPrintf(NSExpression.expressionWithFormat_, 0)
if __name__ == "__main__":
    main()
|
[
"ronaldoussoren@mac.com"
] |
ronaldoussoren@mac.com
|
b9132f16bfc5b5e0cc2704d85af65a089cffd7cb
|
eee647635af1583d9b1150b7cd3195336291e1d2
|
/ABC133/c.py
|
eb49ffdc05d6db403c85c8227196668dd8d288ac
|
[] |
no_license
|
lilium513/competition_programing
|
42f69222290b09b491477b8a2b9c2d4513ebe301
|
45082bf542224b667e753ad357cf145f683fde54
|
refs/heads/master
| 2020-06-22T03:16:34.510906
| 2019-07-31T18:22:31
| 2019-07-31T18:22:31
| 197,619,005
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 385
|
py
|
def do():
    """Read L and R from stdin and print min((i*j) % 2019) over L <= i < j <= R."""
    low, high = list(map(int, input().split(" ")))
    best = 10 ** 15
    if high - low < 5000:
        # Narrow range: brute-force every ordered pair (i, j) with i < j.
        for i in range(low, high + 1):
            for j in range(i + 1, high + 1):
                best = min(best, (i * j) % 2019)
    else:
        # Any span of >= 2019 consecutive integers contains a multiple of
        # 2019, so the minimum remainder is certainly 0.
        best = 0
    print(best)
|
[
"lim.intefx@gmail.com"
] |
lim.intefx@gmail.com
|
e798b57fa3a276c7acb65be428cc91e5a58aca43
|
e3f2ab2999a851121897c02ee81bd85c2543bb96
|
/ketan/codes/ee18btech11030/ee18btech11030_1.py
|
7034225e0dcac1c1afe24ced57259387f4318dfb
|
[] |
no_license
|
yashwanthguguloth24/control
|
ee38822c00d709ab63a35a9ebf7be886abae7eb7
|
cff91230294686a4ee9432b04aea4333198512c1
|
refs/heads/master
| 2022-09-16T14:49:10.111030
| 2020-06-01T03:21:08
| 2020-06-01T03:21:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,349
|
py
|
###################################################################
# This is python code for Bode plots.
# By Moparthi Varun Sankar
# April 28 , 2020
# Released under GNU GPL
###################################################################
from scipy import signal
import matplotlib.pyplot as plt
from pylab import *
# if using termux
import subprocess
import shlex
# end if

# Transfer functions as rational polynomials in s: (numerator coefficients,
# denominator coefficients), highest power first.  The trailing 0 in the
# denominators is a pole at the origin (free integrator).
s1 = signal.lti([16200, 21*16200, 110*16200], [11, 18*11, 99*11, 162*11, 0])  # G(s)
s2 = signal.lti([1, 0.121], [754.223*1, 754.223*0.0001604])  # Gc(s) (lag compensator)
s3 = signal.lti([16200, 342160.2, 1823164.2, 215622], [8296.2, 149333, 821522, 1344116.2, 215.6, 0])  # G(s)*Gc(s)

# signal.bode takes a transfer function as input and returns frequency,
# magnitude and phase arrays (sampled at n=1000 points).
w1, mag1, phase1 = signal.bode(s1, n=1000)
w2, mag2, phase2 = signal.bode(s2, n=1000)
w3, mag3, phase3 = signal.bode(s3, n=1000)

plt.figure()
plt.subplot(2, 1, 1)
plt.grid()
plt.xlabel('Frequency(rad/s)')
plt.ylabel('Magnitude(db)')
plt.semilogx(w1, mag1, label='Uncompensated')  # Magnitude plot for G(s)
plt.semilogx(w2, mag2, label='Compensator')    # Magnitude plot for Gc(s)
plt.semilogx(w3, mag3, label='Compensated')    # Magnitude plot for G(s)*Gc(s)
# Annotate gain-crossover and corner frequencies of interest.
plt.plot(38.95, 0, 'o')
plt.text(38.95, 0, '({}, {})'.format(38.95, 0))
plt.plot(0.0001604, 0, 'o')
plt.text(0.0001604, 0, '({}, {})'.format(0.0001604, 0))
plt.plot(0.121, -57.55, 'o')
plt.text(0.121, -57.55, '({}, {})'.format(0.121, -57.55))
plt.plot(1.21, 0, 'o')
plt.text(1.21, 0, '({}, {})'.format(1.21, 0))
plt.legend()

plt.subplot(2, 1, 2)
plt.grid()
plt.xlabel('Frequency(rad/s)')
plt.ylabel('Phase(degree)')
plt.semilogx(w1, phase1, label='Uncompensated')  # Phase plot for G(s)
plt.semilogx(w2, phase2, label='Compensator')    # Phase plot for Gc(s)
plt.semilogx(w3, phase3, label='Compensated')    # Phase plot for G(s)*Gc(s)
# Double-headed arrow marking the phase lag introduced by the compensator.
plt.annotate('', (1.21, -117), (1.21, -127), arrowprops=dict(facecolor='red', arrowstyle='<|-|>', mutation_scale=15))
plt.annotate("Lag in Phase", (1.21, -117))
plt.plot(38.95, -184, 'o')
plt.text(38.95, -184, '({}, {})'.format(38.95, -184))
plt.legend()

# if using termux
plt.savefig('./figs/ee18btech11030/ee18btech11030_2.pdf')
plt.savefig('./figs/ee18btech11030/ee18btech11030_2.eps')
subprocess.run(shlex.split("termux-open ./figs/ee18btech11030/ee18btech11030_2.pdf"))
# else
# plt.show()
|
[
"gadepall@gmail.com"
] |
gadepall@gmail.com
|
c9f81bef1f3181735e2d92ff5e734356f7d6e16f
|
14373275670c1f3065ce9ae195df142146e2c1a4
|
/stubs/SQLAlchemy/sqlalchemy/cimmutabledict.pyi
|
1a1a3006afc360bf3f13c4a33677a997d14fb729
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
sobolevn/typeshed
|
eb7af17c06a9722f23c337e6b9a4726223155d58
|
d63a82640390a9c130e0fe7d409e8b0b836b7c31
|
refs/heads/master
| 2023-08-04T05:59:29.447015
| 2023-06-14T21:27:53
| 2023-06-14T21:27:53
| 216,265,622
| 2
| 0
|
Apache-2.0
| 2022-02-08T10:40:53
| 2019-10-19T20:21:25
|
Python
|
UTF-8
|
Python
| false
| false
| 737
|
pyi
|
from _typeshed import SupportsKeysAndGetItem
from collections.abc import Iterable
from typing import Generic, TypeVar, overload
from typing_extensions import final
# Type variables: _KT/_VT are the stored key/value types; _KT2/_VT2 are the
# key/value types of the "other" mapping in union/merge operations, so the
# result type widens to the union of both.
_KT = TypeVar("_KT")
_KT2 = TypeVar("_KT2")
_VT = TypeVar("_VT")
_VT2 = TypeVar("_VT2")

@final
class immutabledict(dict[_KT, _VT], Generic[_KT, _VT]):
    # union() returns a NEW immutabledict combining self with either a dict
    # positional argument or keyword entries (never both, per the overloads).
    @overload
    def union(self, __dict: dict[_KT2, _VT2]) -> immutabledict[_KT | _KT2, _VT | _VT2]: ...
    @overload
    def union(self, __dict: None = None, **kw: SupportsKeysAndGetItem[_KT2, _VT2]) -> immutabledict[_KT | _KT2, _VT | _VT2]: ...
    # merge_with() accepts any number of mappings or iterables of pairs
    # (None entries are tolerated) and returns a new widened immutabledict.
    def merge_with(
        self, *args: SupportsKeysAndGetItem[_KT | _KT2, _VT2] | Iterable[tuple[_KT2, _VT2]] | None
    ) -> immutabledict[_KT | _KT2, _VT | _VT2]: ...
|
[
"noreply@github.com"
] |
sobolevn.noreply@github.com
|
6e0e7be32af312f6e4e5c22864d619f58343b46b
|
07ec5a0b3ba5e70a9e0fb65172ea6b13ef4115b8
|
/lib/python3.6/site-packages/qtconsole/usage.py
|
9748f0e934f04e3c18259feed28ecd2d79a87874
|
[] |
no_license
|
cronos91/ML-exercise
|
39c5cd7f94bb90c57450f9a85d40c2f014900ea4
|
3b7afeeb6a7c87384049a9b87cac1fe4c294e415
|
refs/heads/master
| 2021-05-09T22:02:55.131977
| 2017-12-14T13:50:44
| 2017-12-14T13:50:44
| 118,736,043
| 0
| 0
| null | 2018-01-24T08:30:23
| 2018-01-24T08:30:22
| null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:4999bdffa49207a0fc3b0d1a32be17cab386bc93cb7e7f592a5154ee85dcc4e9
size 8349
|
[
"seokinj@jangseog-in-ui-MacBook-Pro.local"
] |
seokinj@jangseog-in-ui-MacBook-Pro.local
|
c58f1c2970ecc1f52452603ec752fee605c737c0
|
053221e1d90b365f68701dbd5b6466f30d1f6fd7
|
/Day2/vd9.py
|
fd7cce53fa7b1ae816f5b6dbeb603d15b41e478e
|
[] |
no_license
|
pytutorial/py2011E
|
eceb4d563cc807294b08b818edadd521ed8da488
|
306437369b0bfe55a2fa827b098283856242e731
|
refs/heads/main
| 2023-02-28T23:57:32.851536
| 2021-01-30T14:56:12
| 2021-01-30T14:56:12
| 318,186,117
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 397
|
py
|
# vd9.py
# Weather-forecast demo: given temperature T (deg C), wind speed w (km/h)
# and atmospheric pressure p (atm), print whether rain is expected.
T = float(input('Nhiệt độ (C):'))
w = float(input('Tốc độ gió (km/h):'))
p = float(input('Áp suất khí quyển(atm):'))
# Warm days (T >= 21): rain needs both moderate wind AND pressure above
# 0.87 atm.  Cooler days: strong wind OR unusually high pressure suffices.
rain = (w >= 3 and p > 0.87) if T >= 21 else (w >= 7 or p > 1.04)
print(rain)
|
[
"duongthanhtungvn01@gmail.com"
] |
duongthanhtungvn01@gmail.com
|
c830596b2f898d2ead4f94528ad2f3100de2be7b
|
7786de317489fa258c7504b2fc96341e970e45db
|
/tests/unit/test_cf_storage_object.py
|
40cecc402ed6e56b9c96465a85a7524220df10d6
|
[
"MIT"
] |
permissive
|
tvaught/pyrax
|
7207158d832721ca6ccde2e9c328855155a60915
|
8a310435239c536921490e04a984ff8a82b18eb8
|
refs/heads/master
| 2020-12-25T10:10:54.714401
| 2013-05-30T19:56:21
| 2013-05-30T19:56:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,903
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import random
import unittest
from mock import patch
from mock import MagicMock as Mock
import pyrax
from pyrax.cf_wrapper.storage_object import StorageObject
import pyrax.exceptions as exc
from tests.unit.fakes import FakeContainer
from tests.unit.fakes import FakeIdentity
from tests.unit.fakes import FakeResponse
class CF_StorageObjectTest(unittest.TestCase):
    """Unit tests for pyrax.cf_wrapper.storage_object.StorageObject.

    All cloud connections are replaced with mocks, so no network access
    occurs.  NOTE: this is Python 2 code — it relies on the builtin
    reload() and on TestCase.assert_().
    """

    def __init__(self, *args, **kwargs):
        # reload() clears any module-level patching left behind by other
        # suites; the original connect_* factories are saved so tearDown
        # can restore them after each test.
        reload(pyrax)
        self.orig_connect_to_cloudservers = pyrax.connect_to_cloudservers
        self.orig_connect_to_cloudfiles = pyrax.connect_to_cloudfiles
        self.orig_connect_to_cloud_databases = pyrax.connect_to_cloud_databases
        ctclb = pyrax.connect_to_cloud_loadbalancers
        self.orig_connect_to_cloud_loadbalancers = ctclb
        ctcbs = pyrax.connect_to_cloud_blockstorage
        self.orig_connect_to_cloud_blockstorage = ctcbs
        super(CF_StorageObjectTest, self).__init__(*args, **kwargs)
        self.obj_name = "testobj"
        self.container_name = "testcont"
        # Mock every service factory except cloudfiles, which is under test.
        pyrax.connect_to_cloudservers = Mock()
        pyrax.connect_to_cloud_loadbalancers = Mock()
        pyrax.connect_to_cloud_databases = Mock()
        pyrax.connect_to_cloud_blockstorage = Mock()

    @patch('pyrax.cf_wrapper.client.Container', new=FakeContainer)
    def setUp(self):
        # Fresh mocks per test; then authenticate with fake credentials and
        # build a fake container holding one known object ("testobj").
        pyrax.connect_to_cloudservers = Mock()
        pyrax.connect_to_cloud_loadbalancers = Mock()
        pyrax.connect_to_cloud_databases = Mock()
        pyrax.connect_to_cloud_blockstorage = Mock()
        pyrax.clear_credentials()
        pyrax.identity = FakeIdentity()
        pyrax.set_credentials("fakeuser", "fakeapikey")
        pyrax.connect_to_cloudfiles()
        self.client = pyrax.cloudfiles
        self.container = FakeContainer(self.client, self.container_name, 0, 0)
        self.container.name = self.container_name
        self.client.get_container = Mock(return_value=self.container)
        self.client.connection.get_container = Mock()
        self.client.connection.head_object = Mock()
        objs = [{"name": self.obj_name, "content_type": "test/test",
                 "bytes": 444, "hash": "abcdef0123456789"}]
        self.client.connection.head_object.return_value = ({}, objs)
        self.client.connection.get_container.return_value = ({}, objs)
        self.storage_object = self.client.get_object(self.container, "testobj")
        # Clear caches so each test sees a pristine lookup path.
        self.client._container_cache = {}
        self.container.object_cache = {}

    def tearDown(self):
        # Drop references and restore the real connect_* factories.
        self.client = None
        self.container = None
        self.storage_object = None
        pyrax.connect_to_cloudservers = self.orig_connect_to_cloudservers
        pyrax.connect_to_cloudfiles = self.orig_connect_to_cloudfiles
        pyrax.connect_to_cloud_databases = self.orig_connect_to_cloud_databases
        octclb = self.orig_connect_to_cloud_loadbalancers
        pyrax.connect_to_cloud_loadbalancers = octclb
        octcbs = self.orig_connect_to_cloud_blockstorage
        pyrax.connect_to_cloud_blockstorage = octcbs

    def test_read_attdict(self):
        # _read_attdict should copy each swift attribute onto the object.
        tname = "something"
        ttype = "foo/bar"
        tbytes = 12345
        tlastmodified = "2222-02-22T22:22:22.222222"
        tetag = "123123123"
        dct = {"name": tname, "content_type": ttype, "bytes": tbytes,
               "last_modified": tlastmodified, "hash": tetag}
        obj = self.storage_object
        obj._read_attdict(dct)
        self.assertEqual(obj.name, tname)
        self.assertEqual(obj.content_type, ttype)
        self.assertEqual(obj.total_bytes, tbytes)
        self.assertEqual(obj.last_modified, tlastmodified)
        self.assertEqual(obj.etag, tetag)

    def test_subdir(self):
        # A pseudo-directory entry only carries "subdir"; it becomes the name.
        tname = "something"
        dct = {"subdir": tname}
        obj = self.storage_object
        obj._read_attdict(dct)
        self.assertEqual(obj.name, tname)

    def test_get(self):
        # get() returns just the data; include_meta=True returns (meta, data).
        obj = self.storage_object
        obj.client.connection.get_object = Mock()
        meta = {"a": "b"}
        data = "This is the contents of the file"
        obj.client.connection.get_object.return_value = (meta, data)
        ret = obj.get()
        self.assertEqual(ret, data)
        ret = obj.get(include_meta=True)
        self.assertEqual(ret, (meta, data))

    def test_delete(self):
        obj = self.storage_object
        obj.client.connection.delete_object = Mock()
        obj.delete()
        obj.client.connection.delete_object.assert_called_with(
                obj.container.name, obj.name)

    def test_purge(self):
        # purge() is only valid for CDN-enabled containers.
        obj = self.storage_object
        cont = obj.container
        cont.cdn_uri = None
        self.assertRaises(exc.NotCDNEnabled, obj.purge)
        cont.cdn_uri = "http://example.com"
        obj.client.connection.cdn_request = Mock()
        obj.purge()
        obj.client.connection.cdn_request.assert_called_with("DELETE",
                cont.name, obj.name, hdrs={})

    def test_get_metadata(self):
        # Only X-Object-Meta-* headers are surfaced as object metadata.
        obj = self.storage_object
        obj.client.connection.head_object = Mock()
        obj.client.connection.head_object.return_value = {
                "X-Object-Meta-Foo": "yes",
                "Some-Other-Key": "no"}
        meta = obj.get_metadata()
        self.assertEqual(meta, {"X-Object-Meta-Foo": "yes"})

    def test_set_metadata(self):
        # Keys are lower-cased and prefixed with "x-object-meta-".
        obj = self.storage_object
        obj.client.connection.post_object = Mock()
        obj.client.connection.head_object = Mock(return_value={})
        obj.set_metadata({"newkey": "newval"})
        obj.client.connection.post_object.assert_called_with(obj.container.name,
                obj.name, {"x-object-meta-newkey": "newval"})

    def test_remove_metadata_key(self):
        # Removing the only key posts an empty metadata dict.
        obj = self.storage_object
        obj.client.connection.post_object = Mock()
        obj.client.connection.head_object = Mock(return_value={})
        obj.remove_metadata_key("newkey")
        obj.client.connection.post_object.assert_called_with(obj.container.name,
                obj.name, {})

    def test_change_content_type(self):
        obj = self.storage_object
        obj.client.change_object_content_type = Mock()
        obj.change_content_type("foo")
        obj.client.change_object_content_type.assert_called_once_with(
                obj.container, obj, new_ctype="foo", guess=False)

    def test_get_temp_url(self):
        obj = self.storage_object
        obj.client.get_temp_url = Mock()
        secs = random.randint(1, 1000)
        obj.get_temp_url(seconds=secs)
        obj.client.get_temp_url.assert_called_with(obj.container, obj,
                seconds=secs, method="GET")

    def test_repr(self):
        obj = self.storage_object
        rep = obj.__repr__()
        self.assert_("<Object " in rep)
        self.assert_(obj.name in rep)
        self.assert_(obj.content_type in rep)


if __name__ == "__main__":
    unittest.main()
|
[
"ed@leafe.com"
] |
ed@leafe.com
|
fed740e3a86c5c0992ca482c58875e9b14269012
|
1bfad01139237049eded6c42981ee9b4c09bb6de
|
/RestPy/ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/pimsm/router/interface/learnedmdtinfo/learnedmdtinfo.py
|
b27f8bb6f94a4485f17effd4ef1a42a2e0f065ba
|
[
"MIT"
] |
permissive
|
kakkotetsu/IxNetwork
|
3a395c2b4de1488994a0cfe51bca36d21e4368a5
|
f9fb614b51bb8988af035967991ad36702933274
|
refs/heads/master
| 2020-04-22T09:46:37.408010
| 2019-02-07T18:12:20
| 2019-02-07T18:12:20
| 170,284,084
| 0
| 0
|
MIT
| 2019-02-12T08:51:02
| 2019-02-12T08:51:01
| null |
UTF-8
|
Python
| false
| false
| 4,210
|
py
|
# Copyright 1997 - 2018 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class LearnedMdtInfo(Base):
    """The LearnedMdtInfo class encapsulates a system managed learnedMdtInfo node in the ixnetwork hierarchy.

    An instance of the class can be obtained by accessing the LearnedMdtInfo property from a parent instance.
    The internal properties list will be empty when the property is accessed and is populated from the server by using the find method.
    """

    # SDM (server data model) node name used when building REST resource paths.
    _SDM_NAME = 'learnedMdtInfo'

    def __init__(self, parent):
        super(LearnedMdtInfo, self).__init__(parent)

    @property
    def Age(self):
        """The amount of time (in seconds) remaining before this TLV times out.

        Returns:
            number
        """
        return self._get_attribute('age')

    @property
    def CeGroupAddress(self):
        """The CE group address contained in this data MDT TLV.

        Returns:
            str
        """
        return self._get_attribute('ceGroupAddress')

    @property
    def CeSourceAddress(self):
        """The CE source address contained in this data MDT TLV.

        Returns:
            str
        """
        return self._get_attribute('ceSourceAddress')

    @property
    def MdtGroupAddress(self):
        """The MDT (PE) group address contained in this data MDT TLV.

        Returns:
            str
        """
        return self._get_attribute('mdtGroupAddress')

    @property
    def MdtSourceAddress(self):
        """The MDT (PE) source address contained in this data MDT TLV.

        Returns:
            str
        """
        return self._get_attribute('mdtSourceAddress')

    def find(self, Age=None, CeGroupAddress=None, CeSourceAddress=None, MdtGroupAddress=None, MdtSourceAddress=None):
        """Finds and retrieves learnedMdtInfo data from the server.

        All named parameters support regex and can be used to selectively retrieve learnedMdtInfo data from the server.
        By default the find method takes no parameters and will retrieve all learnedMdtInfo data from the server.

        Args:
            Age (number): The amount of time (in seconds) remaining before this TLV times out.
            CeGroupAddress (str): The CE group address contained in this data MDT TLV.
            CeSourceAddress (str): The CE source address contained in this data MDT TLV.
            MdtGroupAddress (str): The MDT (PE) group address contained in this data MDT TLV.
            MdtSourceAddress (str): The MDT (PE) source address contained in this data MDT TLV.

        Returns:
            self: This instance with matching learnedMdtInfo data retrieved from the server available through an iterator or index

        Raises:
            ServerError: The server has encountered an uncategorized error condition
        """
        # NOTE: _select(locals()) serializes the parameter names themselves
        # into the server query — do not rename these parameters.
        return self._select(locals())

    def read(self, href):
        """Retrieves a single instance of learnedMdtInfo data from the server.

        Args:
            href (str): An href to the instance to be retrieved

        Returns:
            self: This instance with the learnedMdtInfo data from the server available through an iterator or index

        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        return self._read(href)
|
[
"hubert.gee@keysight.com"
] |
hubert.gee@keysight.com
|
f66d8eca2d435b8587e7ca130d23d12400ed0211
|
3fbd28e72606e5358328bfe4b99eb0349ca6a54f
|
/.history/a_Young_Physicist_20210607193741.py
|
863458084f547b6a9bf662840ab4c6ff7880d758
|
[] |
no_license
|
Tarun1001/codeforces
|
f0a2ef618fbd45e3cdda3fa961e249248ca56fdb
|
576b505d4b8b8652a3f116f32d8d7cda4a6644a1
|
refs/heads/master
| 2023-05-13T04:50:01.780931
| 2021-06-07T21:35:26
| 2021-06-07T21:35:26
| 374,399,423
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 214
|
py
|
# Codeforces 69A "Young Physicist": a body is in equilibrium iff the force
# vectors acting on it sum to the zero vector on every axis.
#
# BUG FIX: the original line `p=map(int,input().split()))` had a stray ')'
# (SyntaxError), and even without it the lazy `map` object would have been
# appended and later indexed with i[0], which fails in Python 3; the values
# are materialized as lists instead.
n = int(input())
forces = []
for _ in range(n):
    forces.append(list(map(int, input().split())))
a = b = c = 0  # component sums along x, y, z
for fx, fy, fz in forces:
    a += fx
    b += fy
    c += fz
print("YES" if a == b == c == 0 else "NO")
|
[
"tarunsivasai8@gmail.com"
] |
tarunsivasai8@gmail.com
|
20076d99682732c095519240df2c951bfe0aae37
|
55ab64b67d8abc02907eb43a54ff6c326ded6b72
|
/scripts/startup/tila_OP_SmartDelete.py
|
cc9ba649d4972b3487b5351419e9a875b4d2745a
|
[
"MIT"
] |
permissive
|
Tilapiatsu/blender-custom_config
|
2f03b0bb234c3b098d2830732296d199c91147d0
|
00e14fc190ebff66cf50ff911f25cf5ad3529f8f
|
refs/heads/master
| 2023-08-16T14:26:39.990840
| 2023-08-16T01:32:41
| 2023-08-16T01:32:41
| 161,249,779
| 6
| 2
|
MIT
| 2023-04-12T05:33:59
| 2018-12-10T23:25:14
|
Python
|
UTF-8
|
Python
| false
| false
| 2,449
|
py
|
import bpy
# Add-on metadata displayed in Blender's preferences panel.
bl_info = {
    "name": "Tila : Smart Delete",
    "author": "Tilapiatsu",
    "version": (1, 0, 0, 0),
    "blender": (2, 80, 0),
    "location": "View3D",
    "category": "Object",
}


class TILA_SmartDeleteOperator(bpy.types.Operator):
    """Context-sensitive delete.

    Chooses the most appropriate delete/dissolve operator for the editor
    (3D view, outliner, file browser) and mode the user is currently in.
    """
    bl_idname = "object.tila_smartdelete"
    bl_label = "TILA: Smart Delete"
    bl_options = {'REGISTER', 'UNDO'}

    # When True, pop up Blender's own delete menu instead of deleting directly.
    menu: bpy.props.BoolProperty(name='call_menu', default=False)

    def execute(self, context):
        if context.space_data.type == 'VIEW_3D':
            if self.menu:
                # Delegate to Blender's built-in delete menus.
                if context.mode == 'EDIT_MESH':
                    bpy.ops.wm.call_menu(name='VIEW3D_MT_edit_mesh_delete')
                elif context.mode == 'EDIT_CURVE':
                    bpy.ops.wm.call_menu(name='VIEW3D_MT_edit_curve_delete')
            else:
                if context.mode == 'EDIT_MESH':
                    # Select-mode flags: (vertex, edge, face) booleans.
                    current_mesh_mode = context.tool_settings.mesh_select_mode[:]
                    # if vertex mode on — dissolve keeps surrounding geometry
                    if current_mesh_mode[0]:
                        bpy.ops.mesh.dissolve_verts()
                    # if edge mode on
                    if current_mesh_mode[1]:
                        bpy.ops.mesh.dissolve_edges(use_verts=True)
                    # if face mode on
                    if current_mesh_mode[2]:
                        bpy.ops.mesh.delete(type='FACE')
                elif context.mode == 'EDIT_CURVE':
                    bpy.ops.curve.delete(type='VERT')
                elif context.mode == 'EDIT_GPENCIL':
                    # Grease-pencil delete can raise when nothing is selected;
                    # treat that as a warning rather than failing the operator.
                    try:
                        bpy.ops.gpencil.delete(type='POINTS')
                    except Exception as e:
                        print("Warning: %r" % e)
                elif context.mode == 'EDIT_METABALL':
                    bpy.ops.mball.delete_metaelems('EXEC_DEFAULT')
                elif context.mode == 'OBJECT':
                    bpy.ops.object.delete(use_global=False, confirm=False)
        elif context.space_data.type == 'OUTLINER':
            bpy.ops.outliner.delete()
        elif context.space_data.type == 'FILE_BROWSER':
            bpy.ops.file.delete()
        # elif context.space_data.type == 'IMAGE_EDITOR':
        #     layout.label("No Context! image editor")
        return {'FINISHED'}


addon_keymaps = []
classes = (TILA_SmartDeleteOperator,)
# register_classes_factory returns a paired (register, unregister) for Blender.
register, unregister = bpy.utils.register_classes_factory(classes)

if __name__ == "__main__":
    register()
|
[
"tilapiatsu@hotmail.fr"
] |
tilapiatsu@hotmail.fr
|
3c3083f149d724f150c0f60864c4c9d6ed10495d
|
27856ac3b3311728fe103911f7cbc0f20cbdfa8f
|
/bot/config.py
|
549488d5ab4942dbe9d3762ea0d3e81b3afc860a
|
[] |
no_license
|
535521469/crawl_free_ip_proxy
|
2c314f5037e45508071593bbcfa27e16751e4078
|
977c7fc422e8d49dd1d195cf8d7d1475da427e04
|
refs/heads/master
| 2016-09-06T13:25:25.738769
| 2013-05-01T07:28:25
| 2013-05-01T07:28:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 307
|
py
|
# encoding=utf8
'''
Created on 2013-4-24
@author: corleone
'''
from bot.configutil import ConfigFile
import os
def read_config():
    """Load 'fetchproxy.cfg' from the current working directory and return
    its parsed configuration data."""
    location = os.sep.join([os.getcwd(), os.curdir, 'fetchproxy.cfg'])
    return ConfigFile.readconfig(location).data


# Parsed configuration, loaded once at import time.
configdata = read_config()
|
[
"535521469@qq.com"
] |
535521469@qq.com
|
ff224afdc46082bd19994708a0dc8289239eb5e4
|
9bc0d33e1c3454393ea74d85b531801d6aa28a55
|
/baselines/duet/test_ranking.py
|
20ddb3c6a7f5158fc67751c3eb22e468eb15f604
|
[
"MIT"
] |
permissive
|
skallumadi/mnsrf_ranking_suggestion
|
4c604ce5fc394c6d1d1efebb68af08bd2349c696
|
37cbf55d27e8595b990c0a66449e7bfe3027cc8c
|
refs/heads/master
| 2021-01-25T14:03:23.465568
| 2017-10-09T06:40:10
| 2017-10-09T06:40:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,189
|
py
|
###############################################################################
# Author: Wasi Ahmad
# Project: https://www.microsoft.com/en-us/research/wp-content/uploads/2016/10/wwwfp0192-mitra.pdf
# Date Created: 7/23/2017
#
# File Description: This script evaluates test ranking performance.
###############################################################################
import torch, helper, util, data, os
from duet import DUET
from ranking_eval_functions import mean_average_precision, NDCG
args = util.get_args()
def compute_ranking_performance(model, test_batch, test_clicks, test_labels):
    """Score one batch with both DUET sub-models and return ranking metrics.

    Args:
        model: DUET model exposing local_model and distributed_model.
        test_batch: query tensor for the batch.
        test_clicks: document/click tensor for the batch.
        test_labels: relevance labels for the batch.

    Returns:
        Tuple (MAP, NDCG@1, NDCG@3, NDCG@10) for this batch.
    """
    local_score = model.local_model(test_batch, test_clicks)
    distributed_score = model.distributed_model(test_batch, test_clicks)
    # DUET's final relevance score is the sum of both sub-model scores.
    total_score = local_score + distributed_score
    MAP = mean_average_precision(total_score, test_labels)
    NDCG_at_1 = NDCG(total_score, test_labels, 1)
    NDCG_at_3 = NDCG(total_score, test_labels, 3)
    # BUG FIX: this was NDCG(..., 5), silently reporting NDCG@5 under the
    # "NDCG@10" label; use cutoff 10 as the variable name and the caller's
    # printed label imply.
    NDCG_at_10 = NDCG(total_score, test_labels, 10)
    return MAP, NDCG_at_1, NDCG_at_3, NDCG_at_10
def test_ranking(model, test_batches):
    """Average MAP and NDCG@{1,3,10} over all test batches and print them.

    Args:
        model: DUET model (provides .dictionary and .config).
        test_batches: list of pre-batched test sessions.
    """
    num_batches = len(test_batches)
    # IDIOM FIX: the accumulator was named `map`, shadowing the builtin;
    # renamed to mean_ap.
    mean_ap, ndcg_1, ndcg_3, ndcg_10 = 0, 0, 0, 0
    for batch_no in range(1, num_batches + 1):
        test_queries, test_docs, test_labels = helper.batch_to_tensor(
            test_batches[batch_no - 1], model.dictionary,
            model.config.max_query_length,
            model.config.max_doc_length)
        if model.config.cuda:
            test_queries = test_queries.cuda()
            test_docs = test_docs.cuda()
            test_labels = test_labels.cuda()
        ret_val = compute_ranking_performance(model, test_queries, test_docs, test_labels)
        mean_ap += ret_val[0]
        ndcg_1 += ret_val[1]
        ndcg_3 += ret_val[2]
        ndcg_10 += ret_val[3]
    # Report metrics averaged over all batches.
    mean_ap = mean_ap / num_batches
    ndcg_1 = ndcg_1 / num_batches
    ndcg_3 = ndcg_3 / num_batches
    ndcg_10 = ndcg_10 / num_batches
    print('MAP - ', mean_ap)
    print('NDCG@1 - ', ndcg_1)
    print('NDCG@3 - ', ndcg_3)
    print('NDCG@10 - ', ndcg_10)
if __name__ == "__main__":
    # Build the vocabulary (argument 5 presumably a minimum term frequency —
    # TODO confirm against data.Dictionary) capped at 5000 entries.
    dictionary = data.Dictionary(5)
    dictionary.load_dictionary(args.save_path, 'vocab.csv', 5000)
    model = DUET(dictionary, args)
    # Wrap in DataParallel only when more than one GPU is visible.
    if 'CUDA_VISIBLE_DEVICES' in os.environ:
        cuda_visible_devices = [int(x) for x in os.environ['CUDA_VISIBLE_DEVICES'].split(',')]
        if len(cuda_visible_devices) > 1:
            model = torch.nn.DataParallel(model, device_ids=cuda_visible_devices)
    if args.cuda:
        model = model.cuda()
    # Restore the best checkpoint's weights before evaluation.
    helper.load_model_states_from_checkpoint(model, os.path.join(args.save_path, 'model_best.pth.tar'), 'state_dict')
    print('Model and dictionary loaded.')
    model.eval()
    test_corpus = data.Corpus(args.data, 'session_test.txt', dictionary)
    print('Test set size = ', len(test_corpus.data))
    test_batches = helper.batchify(test_corpus.data, args.batch_size)
    print('Number of test batches = ', len(test_batches))
    test_ranking(model, test_batches)
|
[
"wasiahmad@ucla.edu"
] |
wasiahmad@ucla.edu
|
8dcc2947e1a739ffad867c6bf674d20d81008c49
|
0abd812a50ba3330734fcbb0088a74c5ad6735a2
|
/python/utf8_for_emojis.py
|
695f4f879e0986f5202ac4876ea2878fd0bf97aa
|
[] |
no_license
|
scMarth/Learning
|
a914af6f6327454234e5f98dfc8cf95d6d4f8077
|
ae696461c2c8edc9944879503cce01d525cf4ce0
|
refs/heads/master
| 2023-08-03T05:13:03.162533
| 2023-07-28T22:58:51
| 2023-07-28T22:58:51
| 120,689,926
| 2
| 0
| null | 2022-12-11T13:14:07
| 2018-02-08T00:33:42
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 3,984
|
py
|
# convert json to csv
import arcpy, os, shutil, numpy, json, codecs
# Column order for each CSV we emit.  Keys match the top-level arrays in the
# requests/dump JSON response; each value lists the record fields in output
# order (header order and record order must stay in sync).
fields = {
    'request': [
        'id', 'master', 'addDate', 'addDateUnix', 'lastAction',
        'lastActionUnix', 'dept', 'displayDate', 'displayLastAction',
        'status', 'streetId', 'streetName', 'streetNum', 'crossStreetId',
        'crossStreetName', 'cityId', 'cityName', 'district', 'comments',
        'privateNotes', 'submitter', 'typeId', 'typeName', 'priorityValue',
        'latitude', 'longitude', 'aggregatorId', 'aggregatorInfo', 'origin',
        'priorityToDisplay'
    ],
    'activity': [
        'actDate', 'actDateUnix', 'attachments', 'code', 'codeDesc',
        'comments', 'displayDate', 'id', 'notify', 'requestId', 'routeId',
        'user', 'files', 'isEditable'
    ],
    'attachment': [
        'createDateUnix', 'createDate', 'fileName', 'id', 'parent',
        'parentType', 'size', 'user'
    ],
    'submitter': [
        'id', 'firstName', 'lastName', 'middleInitial', 'address',
        'address2', 'city', 'state', 'zip', 'email', 'phone', 'phoneExt',
        'altPhone', 'altPhoneExt', 'password', 'aggregatorId', 'verified',
        'banned', 'twitterId', 'twitterScreenName', 'notifyEmail',
        'notifyPhone', 'notifyAltPhone', 'notifyMail', 'notifyPush',
        'notifyPhoneSms', 'notifyAltPhoneSms'
    ]
}
# PERF FIX: escaped() is called for every field of every record, but the
# original rebuilt the str.maketrans table on each call; build it once at
# import time.  So far carriage returns, line feeds and double-quotes have
# shown up in records; backslash is escaped too so output stays unambiguous.
_ESCAPE_TABLE = str.maketrans({
    "\r": r"\r",
    "\n": r"\n",
    "\\": r"\\",
    '\"': r'\"',
})


def escaped(inputStr):
    """Return *inputStr* with CR, LF, backslash and double-quote characters
    replaced by their two-character backslash escapes (\\r, \\n, \\\\, \\")."""
    return inputStr.translate(_ESCAPE_TABLE)
# reads a json file path then creates one CSV per top-level key in 'workspace'
# the json file contains json data that is returned from the requests/dump method
def write_json_file_to_csv(workspace, json_path):
    """Write <KEY>.csv into *workspace* for each top-level key of the JSON
    dump (the 'deleted' key is skipped), using the column order in `fields`.

    NOTE(review): this writes CSV by hand with backslash escaping rather
    than RFC-4180 double-quote doubling — downstream consumers must parse
    with the same convention; confirm before switching to the csv module.
    """
    with open(json_path) as json_file:
        data = json.load(json_file)
        for key in data:
            if key == 'deleted':
                continue
            output_filepath = workspace + r'\\' + key.upper() + '.csv'
            print('Writing' + output_filepath)
            # delete file if it exists
            if os.path.exists(output_filepath):
                os.unlink(output_filepath)
            # utf8 codec so emoji and other non-ASCII text survive the write
            with codecs.open(output_filepath, 'w', encoding='utf8') as file:
                # write header (unquoted, comma-separated, escaped)
                for i in range(len(fields[key]) - 1):
                    file.write(escaped(fields[key][i]) + ',')
                file.write(escaped(fields[key][-1]) + '\n')
                # write records: every value is stringified, escaped and
                # wrapped in double quotes
                for i in range(len(data[key])):
                    record = data[key][i]
                    # print(record)
                    for j in range(len(fields[key]) - 1):
                        # print(j)
                        file.write('"' + escaped(str(record[fields[key][j]])) + '",')
                    file.write('"' + escaped(str(record[fields[key][-1]])) + '"\n')
                print('{} records written.\n'.format(len(data[key])))


# Script entry: expects request_data\response.json next to this script.
workspace = os.path.dirname(__file__) + r'\request_data'
write_json_file_to_csv(workspace, workspace + r'\response.json')
|
[
"vlantaca@gmail.com"
] |
vlantaca@gmail.com
|
1c9e3b879141282edd5569d79e16594bb83d4f29
|
f51ac19ce4d1df15eba02c4b3481533087d5ef9e
|
/day03/xiaohuar/start.py
|
06058cbe787a1bb3530230ff1fa09be09169f548
|
[] |
no_license
|
disenQF/xpy903_scrapy
|
c9e0818f4ad08614f933ec800d680439e3f22ea6
|
7fd1f89f2cbf046b59774071c48801dfc3c5b54d
|
refs/heads/master
| 2022-08-09T13:53:10.104037
| 2019-09-27T09:06:15
| 2019-09-27T09:06:15
| 210,261,888
| 1
| 0
| null | 2022-07-29T22:35:50
| 2019-09-23T04:05:10
|
Python
|
UTF-8
|
Python
| false
| false
| 156
|
py
|
#!/usr/bin/python3
# coding: utf-8
from scrapy import cmdline

# Convenience launcher — equivalent to running on the command line:
#   scrapy crawl hua -o hua.json
if __name__ == '__main__':
    crawl_command = ['scrapy', 'crawl', 'hua', '-o', 'hua.json']
    cmdline.execute(crawl_command)
|
[
"610039018@qq.com"
] |
610039018@qq.com
|
17e914aac8110ab19e8448f67594dcc2b1be380c
|
cee96536d5115a20bd271d7ff5626da496197ac6
|
/test_coco.py
|
ce245527e8ec25e646dbf982ae9dda955ca58fb4
|
[] |
no_license
|
YaojwDefgun/new-YOLOv1_PyTorch
|
0855a8b0dcf8960057ccf82dcf341f480069a789
|
f81b1b033fe2ad9a62bd61ad0bab0f47a4463f42
|
refs/heads/master
| 2023-01-03T21:28:34.243705
| 2020-10-22T12:21:31
| 2020-10-22T12:21:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,138
|
py
|
import os
import argparse
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from data.cocodataset import *
from data import config, BaseTransform, VOCAnnotationTransform, VOCDetection, VOC_ROOT, VOC_CLASSES
import numpy as np
import cv2
import time
from decimal import *
# Command-line options for detector evaluation/visualisation.
parser = argparse.ArgumentParser(description='YOLO Detection')
parser.add_argument('-v', '--version', default='yolo',
                    help='yolo.')
parser.add_argument('-d', '--dataset', default='COCO_val',
                    help='we use VOC, COCO_val, COCO_test-dev, to test.')
parser.add_argument('-bk', '--backbone', type=str, default='r18',
                    help='r18, r50, d19')
parser.add_argument('--trained_model', default='weights/coco/',
                    type=str, help='Trained state_dict file path to open')
parser.add_argument('--visual_threshold', default=0.3, type=float,
                    help='Final confidence threshold')
# NOTE(review): type=bool is an argparse footgun — any non-empty string
# parses as True; --cuda only behaves as expected when left at its default.
parser.add_argument('--cuda', default=True, type=bool,
                    help='Use cuda to test model')
parser.add_argument('--dataset_root', default='/home/k303/object-detection/dataset/COCO/',
                    help='Location of VOC root directory')
parser.add_argument('-f', default=None, type=str,
                    help="Dummy arg so we can load in Jupyter Notebooks")
parser.add_argument('--debug', action='store_true', default=False,
                    help='debug mode where only one image is trained')

args = parser.parse_args()
# Full COCO label tuple including 'background' and the category names that
# were never annotated in the released dataset (e.g. 'street sign').
coco_class_labels = ('background',
                     'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck',
                     'boat', 'traffic light', 'fire hydrant', 'street sign', 'stop sign',
                     'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
                     'elephant', 'bear', 'zebra', 'giraffe', 'hat', 'backpack', 'umbrella',
                     'shoe', 'eye glasses', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis',
                     'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove',
                     'skateboard', 'surfboard', 'tennis racket', 'bottle', 'plate', 'wine glass',
                     'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich',
                     'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair',
                     'couch', 'potted plant', 'bed', 'mirror', 'dining table', 'window', 'desk',
                     'toilet', 'door', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
                     'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'blender', 'book',
                     'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush')

# Maps the detector's contiguous 80-class index to the original (sparse)
# COCO category id, used to look up a name in coco_class_labels.
coco_class_index = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20,
                    21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
                    46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 67,
                    70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90]
def test_net(net, device, testset, transform, thresh, mode='voc'):
    """Run `net` over every image in `testset` and display detections with OpenCV.

    Args:
        net: detection model; net(x) returns (bbox_pred, scores, cls_inds)
            with boxes normalized to [0, 1] -- TODO confirm against myYOLO.
        device: torch device the input tensor is moved to.
        testset: dataset exposing pull_image(index).
        transform: preprocessing transform; transform(img)[0] is the
            resized/normalized HWC image.
        thresh: confidence threshold below which detections are not drawn.
        mode: unused; kept for interface compatibility with callers.
    """
    # One random BGR color per COCO class for drawing boxes/labels.
    class_color = [(np.random.randint(255), np.random.randint(255), np.random.randint(255)) for _ in range(80)]
    num_images = len(testset)
    for index in range(num_images):
        print('Testing image {:d}/{:d}....'.format(index + 1, num_images))
        if args.dataset == 'COCO_val' or args.dataset == 'COCO-test' or args.dataset == 'COCO_test-dev':
            img, _ = testset.pull_image(index)
        elif args.dataset == 'VOC':
            img = testset.pull_image(index)
        # HWC BGR -> CHW RGB tensor, then add the batch dimension.
        x = torch.from_numpy(transform(img)[0][:, :, (2, 1, 0)]).permute(2, 0, 1)
        x = x.unsqueeze(0).to(device)
        # BUGFIX: time.clock() was removed in Python 3.8; perf_counter() is
        # the supported high-resolution timer for elapsed-time measurement.
        t0 = time.perf_counter()
        y = net(x)  # forward pass
        detections = y
        print("detection time used ", time.perf_counter() - t0, "s")
        # scale each detection back up to the original image size
        scale = np.array([[img.shape[1], img.shape[0],
                           img.shape[1], img.shape[0]]])
        bbox_pred, scores, cls_inds = detections
        # map the boxes to origin image scale
        bbox_pred *= scale
        for i, box in enumerate(bbox_pred):
            cls_indx = cls_inds[i]
            xmin, ymin, xmax, ymax = box
            if scores[i] > thresh:
                box_w = int(xmax - xmin)
                cv2.rectangle(img, (int(xmin), int(ymin)), (int(xmax), int(ymax)), class_color[int(cls_indx)], 2)
                # Filled background strip just above the box for the label text.
                cv2.rectangle(img, (int(xmin), int(abs(ymin) - 15)), (int(xmin + box_w * 0.55), int(ymin)), class_color[int(cls_indx)], -1)
                cls_id = coco_class_index[int(cls_indx)]
                cls_name = coco_class_labels[cls_id]
                mess = '%s: %.3f' % (cls_name, scores[i])
                cv2.putText(img, mess, (int(xmin), int(ymin)), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 2)
        cv2.imshow('detection', img)
        cv2.waitKey(0)
        # print('Saving the' + str(index) + '-th image ...')
        # cv2.imwrite('test_images/' + args.dataset+ '3/' + str(index).zfill(6) +'.jpg', img)
def test():
    """Build dataset and model from CLI args, then run test_net visualization.

    Reads the module-level `args` (dataset, cuda, version, trained_model,
    visual_threshold, dataset_root, debug).
    """
    # get device
    if args.cuda:
        cudnn.benchmark = True
        device = torch.device("cuda")
    else:
        device = torch.device("cpu")
    # load net
    num_classes = 80
    if args.dataset == 'COCO_val':
        cfg = config.coco_af
        input_size = cfg['min_dim']
        testset = COCODataset(
            data_dir=args.dataset_root,
            json_file='instances_val2017.json',
            name='val2017',
            img_size=cfg['min_dim'][0],
            debug=args.debug)
    elif args.dataset == 'COCO_test-dev':
        cfg = config.coco_af
        input_size = cfg['min_dim']
        testset = COCODataset(
            data_dir=args.dataset_root,
            json_file='image_info_test-dev2017.json',
            name='test2017',
            img_size=cfg['min_dim'][0],
            debug=args.debug)
    elif args.dataset == 'VOC':
        cfg = config.voc_af
        input_size = cfg['min_dim']
        testset = VOCDetection(VOC_ROOT, [('2007', 'test')], None, VOCAnnotationTransform())
    # build model
    if args.version == 'yolo':
        from models.yolo import myYOLO
        net = myYOLO(device, input_size=input_size, num_classes=num_classes, trainable=False)
        print('Let us test YOLO on the %s dataset ......' % (args.dataset))
    else:
        print('Unknown Version !!!')
        exit()
    net.load_state_dict(torch.load(args.trained_model, map_location=device))
    net.to(device).eval()
    print('Finished loading model!')
    # evaluation
    # NOTE(review): mean/std look like channel-reversed ImageNet statistics
    # (BGR order to match the cv2 pipeline) -- confirm against BaseTransform.
    test_net(net, device, testset,
             BaseTransform(net.input_size, mean=(0.406, 0.456, 0.485), std=(0.225, 0.224, 0.229)),
             thresh=args.visual_threshold)
if __name__ == '__main__':
test()
|
[
"1394571815@qq.com"
] |
1394571815@qq.com
|
93fe75d32ccb18339ef6ff1b37d1cfbe0b3c0c1e
|
98c6ea9c884152e8340605a706efefbea6170be5
|
/examples/data/Assignment_2/dlymuh001/question2.py
|
34d73fd549c0a400164a5301a2e7cc2b38ba5c3b
|
[] |
no_license
|
MrHamdulay/csc3-capstone
|
479d659e1dcd28040e83ebd9e3374d0ccc0c6817
|
6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2
|
refs/heads/master
| 2021-03-12T21:55:57.781339
| 2014-09-22T02:22:22
| 2014-09-22T02:22:22
| 22,372,174
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,297
|
py
|
def cat():
    """Decide what to do with dropped food based on the cat questions.

    Returns "Eat it", "Your call", or None when an answer is not yes/no.
    """
    licked = input("Did the cat lick it? (yes/no)\n")
    if licked == "no":
        return "Eat it"
    if licked == "yes":
        cat_health = input("Is your cat healthy? (yes/no)\n")
        if cat_health == "yes":
            return "Eat it"
        if cat_health == "no":
            return "Your call"
    return None
# Interactive decision tree for the "30 second rule": asks yes/no questions
# and prints a final eat / don't-eat decision.
print("Welcome to the 30 Second Rule Expert")
print("------------------------------------")
print("Answer the following questions by selecting from among the options.")
decision = ""
seen = input("Did anyone see you? (yes/no)\n")
if (seen == "yes"):
    # Someone saw: outcome depends on who it was and what the food is worth.
    person = input("Was it a boss/lover/parent? (yes/no)\n")
    if (person == "yes"):
        expensive = input("Was it expensive? (yes/no)\n")
        if (expensive == "yes"):
            cut_off = input("Can you cut off the part that touched the floor? (yes/no)\n")
            if (cut_off == "yes"):
                decision = "Eat it"
            elif (cut_off == "no"):
                decision = "Your call"
        elif (expensive == "no"):
            chocolate = input("Is it chocolate? (yes/no)\n")
            if (chocolate == "yes"):
                decision = "Eat it"
            elif (chocolate == "no"):
                decision = "Don\'t eat it"
    elif (person == "no"):
        decision = "Eat it"
elif (seen == "no"):
    # Nobody saw: outcome depends on the food itself (sticky / steak / dino).
    sticky = input("Was it sticky? (yes/no)\n")
    if (sticky == "yes"):
        raw_steak = input("Is it a raw steak? (yes/no)\n")
        if (raw_steak == "yes"):
            puma = input("Are you a puma? (yes/no)\n")
            if (puma == "yes"):
                decision = "Eat it"
            elif (puma == "no"):
                decision = "Don\'t eat it"
        elif (raw_steak == "no"):
            decision = cat()
    elif (sticky == "no"):
        emausaurus = input("Is it an Emausaurus? (yes/no)\n")
        if (emausaurus == "yes"):
            megalosaurus = input("Are you a Megalosaurus? (yes/no)\n")
            if (megalosaurus == "yes"):
                decision = "Eat it"
            elif (megalosaurus == "no"):
                decision = "Don\'t eat it"
        elif (emausaurus == "no"):
            decision = cat()
##output decision
print ("Decision:", decision, sep = " ", end = ".")
|
[
"jarr2000@gmail.com"
] |
jarr2000@gmail.com
|
9876a9af35eb3649f4f3c68253359af8c252f427
|
54df8336b50e8f2d7dbe353f0bc51a2b3489095f
|
/Front End/Kivy/project8/pro8.py
|
ca78447ed453ab88b83ef4fdd5468ca01be6e9f2
|
[] |
no_license
|
SurendraKumarAratikatla/MyLenovolapCodes1
|
42d5bb7a14bfdf8d773ee60719380ee28ff4947a
|
12c56200fcfd3e5229bfeec209fd03b5fc35b823
|
refs/heads/master
| 2023-06-17T15:44:18.312398
| 2021-07-19T10:28:11
| 2021-07-19T10:28:11
| 387,358,266
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,288
|
py
|
from kivy.uix.screenmanager import ScreenManager, Screen
from kivymd.app import MDApp
from kivymd.uix.screen import Screen
from kivymd.uix.textfield import MDTextField
from kivy.lang import Builder
from kivymd.uix.label import MDLabel, MDIcon
from helpers8 import screen_help
from kivymd.uix.button import MDRectangleFlatButton
from kivy.core.window import Window
from kivymd.uix.list import OneLineListItem
from kivy.uix.scrollview import ScrollView
from kivymd.uix.list import OneLineListItem, MDList, TwoLineListItem, ThreeLineListItem
from kivymd.uix.list import OneLineIconListItem, IconLeftWidget
from kivymd.uix.button import MDFloatingActionButtonSpeedDial
from kivymd.theming import ThemableBehavior
from kivymd.uix.boxlayout import BoxLayout
from kivy.properties import ObjectProperty
import sqlite3 as sql
import re
from kivymd.uix.taptargetview import MDTapTargetView
KV = '''
Screen:
MDFloatingActionButton:
id: button
icon: "head-question"
pos: 10, 10
on_release: app.tap_target_start()
elevation_normal: 10
'''
Window.size = (350, 600)
class MenuScreen(Screen):
    # Landing/menu screen; no Python-side behavior (layout lives in the KV source).
    pass
class UserCustomerScreen(Screen):
    # User/customer screen; registered with the ScreenManager below.
    pass
class ProfileScreen(Screen):
    """Profile form screen: stores mobile/user/address into SQLite (user.db)."""
    # Kivy properties bound to the text fields declared in the KV layout.
    mobile: ObjectProperty()
    user: ObjectProperty()
    address: ObjectProperty()

    def get_started(self):
        # Placeholder hook for the "get started" action.
        print('here we go')

    def add_user(self):
        """Insert the entered profile into user.db, then validate the mobile number.

        NOTE(review): the row is inserted *before* validation, so invalid
        numbers are still persisted -- consider validating first.
        """
        con = sql.connect('user.db')
        try:
            cur = con.cursor()
            cur.execute(""" INSERT INTO id (mobile,user,address) VALUES (?,?,?)""", (self.mobile.text, self.user.text, self.address.text))
            con.commit()
        finally:
            # Always release the connection, even if the INSERT fails.
            con.close()
        screen = Screen()
        print(self.mobile.text)
        print(self.user.text)
        print(self.address.text)
        print(len(self.mobile.text))
        # BUGFIX: the original displayed the "incorrect mobile number" label
        # when the pattern *matched* (i.e. for valid numbers) and did nothing
        # for invalid ones. Show the error only when the number does NOT match.
        # The pattern requires 11 digits -- TODO confirm intended length.
        if re.match(r"^[0-9]\d{10}$", self.mobile.text) is None:
            label = MDLabel(text='*You entered incorrect mobile number,', theme_text_color='Custom',
                            text_color=(0, 1, 0, 1), font_style='H6', pos_hint={'center_x': 0.5, 'center_y': 0.3})
            # NOTE(review): `screen` is a fresh Screen that is never attached
            # to the widget tree, so the label is not visible -- confirm.
            screen.add_widget(label)
class AllItemsScreen(Screen):
    # Catalogue screen; no Python-side behavior.
    pass
class RationScreen(Screen):
    pass
class BellScreen(Screen):
    pass
class FreshEggsScrren(Screen):
    # NOTE(review): class name is misspelled ("Scrren") -- kept for compatibility.
    pass
class ContentNavigationDrawer(BoxLayout):
    # Container for the navigation-drawer content (layout in KV).
    pass
class AboutScreen(Screen):
    pass
class NotificationScreen(Screen):
    pass
class AboutRationScreen(Screen):
    pass
# Create the screen manager
# NOTE(review): 'usercustomer', 'ration', 'eggs' and 'aboutration' are all
# registered as AllItemsScreen instances rather than their dedicated
# classes -- confirm this is intended.
sm = ScreenManager()
sm.add_widget(MenuScreen(name='menu'))
sm.add_widget(AllItemsScreen(name='usercustomer'))
sm.add_widget(ProfileScreen(name='profile'))
sm.add_widget(AllItemsScreen(name='allitems'))
sm.add_widget(AllItemsScreen(name='ration'))
sm.add_widget(AllItemsScreen(name='eggs'))
sm.add_widget(AllItemsScreen(name='aboutration'))
class DrawerList(ThemableBehavior, MDList):
    # Themed list used inside the navigation drawer.
    pass
class DemoApp(MDApp):
    """KivyMD store app: builds the UI and a tap-target help bubble."""
    # Speed-dial actions: icon name -> tooltip label.
    data = {
        'basket': 'Today Offers',
        'offer': 'Discounts',
        'cart': 'Cart Page',
    }
    # Create the user table on first import; the except swallows the
    # "table already exists" error on subsequent runs.
    # NOTE(review): the bare except also hides real DB errors -- confirm.
    try:
        con = sql.connect('user.db')
        cur = con.cursor()
        cur.execute(""" CREATE TABLE id(
        mobile text,
        user text,
        address text)
        """)
        con.commit()
        con.close()
    except:
        pass
    def build(self):
        """Assemble the root widget: KV help screen + floating help button."""
        #self.theme_cls.theme_style = 'Dark'
        #screen = Screen()
        firstpage = Builder.load_string(screen_help)
        screen = Builder.load_string(KV)
        # Tap-target tutorial anchored to the floating help button.
        self.tap_target_view = MDTapTargetView(
            widget=screen.ids.button,
            title_text="VZM Store",
            description_text='''Anyone can login as a user and
    you can publish your products to customers''',
            widget_position="left_bottom",
            target_circle_color=(142/255.0, 172/255.0, 249/255.0),
        )
        screen.add_widget(firstpage)
        return screen
    def navigation_draw(self):
        # NOTE(review): creates a throwaway ScreenManager; the widget is
        # never attached anywhere -- looks like dead code, confirm.
        sm = ScreenManager()
        sm.add_widget(AllItemsScreen(name='bell'))
    def tap_target_start(self):
        """Toggle the tap-target help overlay."""
        if self.tap_target_view.state == "close":
            self.tap_target_view.start()
        else:
            self.tap_target_view.stop()
if __name__ == '__main__':
    DemoApp().run()
|
[
"suendra.aratikatla1608@gmail.com"
] |
suendra.aratikatla1608@gmail.com
|
18cb6da4a1dcaa779b3ef0b93d2dd0af8d8ec46b
|
e4eabccc6d971289cf13653d1b6f290e39b870ab
|
/1651-shuffle-string/shuffle-string.py
|
806acf60e20549daab09a587a9cd68b2470fb226
|
[] |
no_license
|
HEroKuma/leetcode
|
128b38a9f559dc9e3f21c86a47ede67ad72f7675
|
b3045aaedbe98eddc7e4e518a03a9337a63be716
|
refs/heads/master
| 2023-01-03T12:12:31.018717
| 2020-11-01T16:56:47
| 2020-11-01T16:56:47
| 260,488,865
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,263
|
py
|
# Given a string s and an integer array indices of the same length.
#
# The string s will be shuffled such that the character at the ith position moves to indices[i] in the shuffled string.
#
# Return the shuffled string.
#
#
# Example 1:
#
#
# Input: s = "codeleet", indices = [4,5,6,7,0,2,1,3]
# Output: "leetcode"
# Explanation: As shown, "codeleet" becomes "leetcode" after shuffling.
#
#
# Example 2:
#
#
# Input: s = "abc", indices = [0,1,2]
# Output: "abc"
# Explanation: After shuffling, each character remains in its position.
#
#
# Example 3:
#
#
# Input: s = "aiohn", indices = [3,1,4,2,0]
# Output: "nihao"
#
#
# Example 4:
#
#
# Input: s = "aaiougrt", indices = [4,0,2,6,7,3,1,5]
# Output: "arigatou"
#
#
# Example 5:
#
#
# Input: s = "art", indices = [1,0,2]
# Output: "rat"
#
#
#
# Constraints:
#
#
# s.length == indices.length == n
# 1 <= n <= 100
# s contains only lower-case English letters.
# 0 <= indices[i] < n
# All values of indices are unique (i.e. indices is a permutation of the integers from 0 to n - 1).
#
class Solution:
    def restoreString(self, s: str, indices: List[int]) -> str:
        """Return s rearranged so that s[i] lands at position indices[i]."""
        shuffled = [''] * len(s)
        for target, ch in zip(indices, s):
            shuffled[target] = ch
        return ''.join(shuffled)
|
[
"zx8733520+github@gapp.nthu.edu.tw"
] |
zx8733520+github@gapp.nthu.edu.tw
|
faf3b5ffc73b80f5cb5728f55014305a2b80da4e
|
738aedb8035e49951f83ce3f4291eee149cad5fb
|
/OB Damage - Li-Hopfield Model/All the code/SLURM setup files/dir_setup_OI-flat_10_2D.py
|
2e83d567ac9f3004eca045a3289376859981b1dd
|
[] |
no_license
|
jkberry07/OB_PD_Model
|
fb453303bfa64c1a3a43c7d81d2b5373950e1f4d
|
1ce30205354dc30cab4673e406988bfa76390238
|
refs/heads/master
| 2022-11-21T09:39:09.692654
| 2020-07-25T23:25:11
| 2020-07-25T23:25:11
| 282,358,721
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 373
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 23 16:09:06 2019
@author: wmmjk
"""
import os
here = os.path.dirname(os.path.realpath(__file__))  # script location (currently unused)
subdir1 = 'OI-flat_10_2D'
# Generate a shell script that creates the run directory and copies inputs.
# BUGFIX: use a context manager so the file is flushed and closed even on
# error (the original left the handle open).
with open('dir_setup_OI-flat_10_2D.sh', 'w+') as f:
    f.write('mkdir ' + subdir1 + '\n')
    f.write('cp OI-flat_10_2D.py '
            + 'H0_10_2D_65Hz.npy W0_10_2D_65Hz.npy ' + subdir1 + '\n')
|
[
"noreply@github.com"
] |
jkberry07.noreply@github.com
|
89e6683e391279884270bae480df6b3a56146ac5
|
d94b6845aeeb412aac6850b70e22628bc84d1d6d
|
/comisr/lib/model.py
|
b3f2d2423bbd3b56d18ce8e090b7122e47b40d2c
|
[
"CC-BY-4.0",
"Apache-2.0"
] |
permissive
|
ishine/google-research
|
541aea114a68ced68736340e037fc0f8257d1ea2
|
c1ae273841592fce4c993bf35cdd0a6424e73da4
|
refs/heads/master
| 2023-06-08T23:02:25.502203
| 2023-05-31T01:00:56
| 2023-05-31T01:06:45
| 242,478,569
| 0
| 0
|
Apache-2.0
| 2020-06-23T01:55:11
| 2020-02-23T07:59:42
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 4,846
|
py
|
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model functions to reconstruct models."""
import tensorflow.compat.v1 as tf
import tensorflow.compat.v2 as tf2
from comisr.lib import ops
# Definition of the fnet, more details can be found in TecoGAN paper
def fnet(fnet_input, reuse=False):
  """Flow net.

  Encoder/decoder CNN estimating a per-pixel motion field from `fnet_input`.
  The output is tanh-squashed and scaled by 24.0, the maximum velocity
  (see the TecoGAN paper).

  Args:
    fnet_input: input tensor fed to the encoder.
    reuse: whether to reuse variables in the 'autoencode_unit' scope.

  Returns:
    A 2-channel flow tensor with values in [-24, 24].
  """

  def down_block(inputs, output_channel=64, stride=1, scope='down_block'):
    # conv -> lrelu -> conv -> lrelu -> maxpool (downsamples spatially).
    with tf.variable_scope(scope):
      net = ops.conv2(
          inputs, 3, output_channel, stride, use_bias=True, scope='conv_1')
      net = ops.lrelu(net, 0.2)
      net = ops.conv2(
          net, 3, output_channel, stride, use_bias=True, scope='conv_2')
      net = ops.lrelu(net, 0.2)
      net = ops.maxpool(net)
    return net

  def up_block(inputs, output_channel=64, stride=1, scope='up_block'):
    # conv -> lrelu -> conv -> lrelu, then resize to double the spatial size.
    with tf.variable_scope(scope):
      net = ops.conv2(
          inputs, 3, output_channel, stride, use_bias=True, scope='conv_1')
      net = ops.lrelu(net, 0.2)
      net = ops.conv2(
          net, 3, output_channel, stride, use_bias=True, scope='conv_2')
      net = ops.lrelu(net, 0.2)
      new_shape = tf.shape(net)[1:-1] * 2
      net = tf2.image.resize(net, new_shape)
    return net

  with tf.variable_scope('autoencode_unit', reuse=reuse):
    # Three downsampling stages followed by three upsampling stages.
    net = down_block(fnet_input, 32, scope='encoder_1')
    net = down_block(net, 64, scope='encoder_2')
    net = down_block(net, 128, scope='encoder_3')
    net = up_block(net, 256, scope='decoder_1')
    net = up_block(net, 128, scope='decoder_2')
    net1 = up_block(net, 64, scope='decoder_3')
    with tf.variable_scope('output_stage'):
      net = ops.conv2(net1, 3, 32, 1, scope='conv1')
      net = ops.lrelu(net, 0.2)
      net2 = ops.conv2(net, 3, 2, 1, scope='conv2')
      net = tf.tanh(net2) * 24.0
      # the 24.0 is the max Velocity, details can be found in TecoGAN paper
  return net
def generator_f_encoder(gen_inputs, num_resblock=10, reuse=False):
  """Generator function encoder.

  Applies an input conv + relu, then `num_resblock` 64-channel residual
  blocks, and returns the resulting feature map.
  """
  # The Bx residual blocks
  def residual_block(inputs, output_channel=64, stride=1, scope='res_block'):
    # conv -> relu -> conv with an identity skip connection.
    with tf.variable_scope(scope):
      net = ops.conv2(
          inputs, 3, output_channel, stride, use_bias=True, scope='conv_1')
      net = tf.nn.relu(net)
      net = ops.conv2(
          net, 3, output_channel, stride, use_bias=True, scope='conv_2')
      net = net + inputs
    return net

  with tf.variable_scope('generator_unit', reuse=reuse):
    # The input layer
    with tf.variable_scope('input_stage'):
      net = ops.conv2(gen_inputs, 3, 64, 1, scope='conv')
      stage1_output = tf.nn.relu(net)
    net = stage1_output
    # The residual block parts
    for i in range(1, num_resblock + 1,
                   1):  # should be 16 for TecoGAN, and 10 for TecoGANmini
      name_scope = 'resblock_%d' % (i)
      net = residual_block(net, 64, 1, name_scope)
  return net
def generator_f_decoder(net,
                        gen_inputs,
                        gen_output_channels,
                        vsr_scale,
                        reuse=False):
  """Generator function decoder.

  Upsamples encoder features by `vsr_scale` (2 or 4) with transposed convs,
  projects to `gen_output_channels`, and adds a bicubic-upscaled copy of the
  low-res input so the network learns a residual.
  """
  with tf.variable_scope('generator_unit', reuse=reuse):
    with tf.variable_scope('conv_tran2highres'):
      # Each stride-2 transposed conv doubles the spatial resolution.
      if vsr_scale == 2:
        net = ops.conv2_tran(
            net, kernel=3, output_channel=64, stride=2, scope='conv_tran1')
        net = tf.nn.relu(net)
      if vsr_scale == 4:
        net = ops.conv2_tran(net, 3, 64, 2, scope='conv_tran1')
        net = tf.nn.relu(net)
        net = ops.conv2_tran(net, 3, 64, 2, scope='conv_tran2')
        net = tf.nn.relu(net)
    with tf.variable_scope('output_stage'):
      net = ops.conv2(net, 3, gen_output_channels, 1, scope='conv')
      low_res_in = gen_inputs[:, :, :, 0:3]  # ignore warped pre high res
      bicubic_hi = ops.bicubic_x(low_res_in, scale=vsr_scale)  # can put on GPU
      net = net + bicubic_hi
    net = ops.preprocess(net)
  return net
# Definition of the generator.
def generator_f(gen_inputs,
                gen_output_channels,
                num_resblock=10,
                vsr_scale=4,
                reuse=False):
  """Full generator: encoder (residual blocks) followed by the upsampling
  decoder. See generator_f_encoder / generator_f_decoder for details."""
  net = generator_f_encoder(gen_inputs, num_resblock, reuse)
  net = generator_f_decoder(net, gen_inputs, gen_output_channels, vsr_scale,
                            reuse)
  return net
|
[
"copybara-worker@google.com"
] |
copybara-worker@google.com
|
06d28ed6d203c6790e5e808bd8033beb090b6c7d
|
9dc6f8d91dc56523b9688990d4ae413b0bcbd4e1
|
/examples/mcscf/31-cr2_scan/cr2-scan.py
|
cd23eb7028ad7e19891993db6645713ad6ae6e11
|
[
"Apache-2.0"
] |
permissive
|
sunqm/pyscf
|
566bc2447d8072cff442d143891c12e6414de01c
|
dd179a802f0a35e72d8522503172f16977c8d974
|
refs/heads/master
| 2023-08-15T18:09:58.195953
| 2023-03-27T21:02:03
| 2023-03-27T21:02:03
| 159,149,096
| 80
| 26
|
Apache-2.0
| 2022-02-05T00:19:24
| 2018-11-26T10:10:23
|
Python
|
UTF-8
|
Python
| false
| false
| 2,329
|
py
|
#!/usr/bin/env python
'''
Scan Cr2 molecule singlet state dissociation curve.
Simliar tthe example mcscf/30-hf_scan, we need to control the CASSCF initial
guess using functions project_init_guess and sort_mo. In this example,
sort_mo function is replaced by the symmetry-adapted version
``sort_mo_by_irrep``.
'''
import numpy
from pyscf import gto
from pyscf import scf
from pyscf import mcscf
ehf = []
emc = []
def run(b, dm, mo, ci=None):
    """Run RHF followed by CASSCF(12,12) for Cr2 at bond length b.

    Args:
        b: Cr-Cr distance (same units as mol.atom coordinates).
        dm: initial-guess density matrix for RHF (None on first point).
        mo: CASSCF orbitals from the previous point; None triggers the
            symmetry-sorted initial guess, otherwise they are projected
            onto the new geometry.
        ci: CI vector from the previous point.

    Side effects: appends energies to the module-level ehf/emc lists and
    writes the SCF/CASSCF log to cr2-<b>.out.

    Returns:
        (rhf_density_matrix, casscf_mo_coeff, casscf_ci) to seed the next point.
    """
    mol = gto.Mole()
    mol.verbose = 5
    mol.output = 'cr2-%2.1f.out' % b
    mol.atom = [
        ['Cr',( 0.000000, 0.000000, -b/2)],
        ['Cr',( 0.000000, 0.000000,  b/2)],
    ]
    mol.basis = 'cc-pVTZ'
    mol.symmetry = 1
    mol.build()
    mf = scf.RHF(mol)
    # Level shift + loose damping helps SCF convergence for stretched Cr2.
    mf.level_shift = .4
    mf.max_cycle = 100
    mf.conv_tol = 1e-9
    ehf.append(mf.scf(dm))
    mc = mcscf.CASSCF(mf, 12, 12)
    mc.fcisolver.conv_tol = 1e-9
    # FCI solver with multi-threads is not stable enough for this sytem
    mc.fcisolver.threads = 1
    if mo is None:
        # the initial guess for b = 1.5
        ncore = {'A1g':5, 'A1u':5}  # Optional. Program will guess if not given
        ncas = {'A1g':2, 'A1u':2,
                'E1ux':1, 'E1uy':1, 'E1gx':1, 'E1gy':1,
                'E2ux':1, 'E2uy':1, 'E2gx':1, 'E2gy':1}
        mo = mcscf.sort_mo_by_irrep(mc, mf.mo_coeff, ncas, ncore)
    else:
        mo = mcscf.project_init_guess(mc, mo)
    emc.append(mc.kernel(mo, ci)[0])
    mc.analyze()
    return mf.make_rdm1(), mc.mo_coeff, mc.ci
dm = mo = ci = None
# Forward scan: stretch the bond 1.5 -> 3.0, chaining each point's guess.
for b in numpy.arange(1.5, 3.01, .1):
    dm, mo, ci = run(b, dm, mo, ci)
# Backward scan: compress 3.0 -> 1.5 to expose any solution hysteresis.
for b in reversed(numpy.arange(1.5, 3.01, .1)):
    dm, mo, ci = run(b, dm, mo, ci)
x = numpy.arange(1.5, 3.01, .1)
# ehf/emc hold forward results first, then backward (reversed back below).
ehf1 = ehf[:len(x)]
ehf2 = ehf[len(x):]
emc1 = emc[:len(x)]
emc2 = emc[len(x):]
ehf2.reverse()
emc2.reverse()
with open('cr2-scan.txt', 'w') as fout:
    fout.write(' HF 1.5->3.0 CAS(12,12) HF 3.0->1.5 CAS(12,12)\n')
    for i, xi in enumerate(x):
        fout.write('%2.1f %12.8f %12.8f %12.8f %12.8f\n'
                   % (xi, ehf1[i], emc1[i], ehf2[i], emc2[i]))
import matplotlib.pyplot as plt
plt.plot(x, ehf1, label='HF,1.5->3.0')
plt.plot(x, ehf2, label='HF,3.0->1.5')
plt.plot(x, emc1, label='CAS(12,12),1.5->3.0')
plt.plot(x, emc2, label='CAS(12,12),3.0->1.5')
plt.legend()
plt.show()
|
[
"osirpt.sun@gmail.com"
] |
osirpt.sun@gmail.com
|
66eee5b3e6193fdd3fbf93572531c18f032831fc
|
5905ed0409c332492409d7707528452b19692415
|
/google-cloud-sdk/lib/googlecloudsdk/command_lib/artifacts/print_settings/gradle.py
|
82a99b6bd2e49073fe4da73c767a02d9c12bb651
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
millerthomasj/google-cloud-sdk
|
c37b7ddec08afadec6ee4c165153cd404f7dec5e
|
3deda6696c3be6a679689b728da3a458c836a24e
|
refs/heads/master
| 2023-08-10T16:03:41.819756
| 2021-09-08T00:00:00
| 2021-09-08T15:08:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,051
|
py
|
# -*- coding: utf-8 -*- #
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility for forming settings for gradle."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
SERVICE_ACCOUNT_TEMPLATE = """\
// Move the secret to ~/.gradle.properties
def artifactRegistryMavenSecret = "{password}"
// Insert following snippet into your build.gradle
// see docs.gradle.org/current/userguide/publishing_maven.html
plugins {{
id "maven-publish"
}}
publishing {{
repositories {{
maven {{
url "https://{location}-maven.pkg.dev/{repo_path}"
credentials {{
username = "{username}"
password = "$artifactRegistryMavenSecret"
}}
}}
}}
}}
repositories {{
maven {{
url "https://{location}-maven.pkg.dev/{repo_path}"
credentials {{
username = "{username}"
password = "$artifactRegistryMavenSecret"
}}
authentication {{
basic(BasicAuthentication)
}}
}}
}}
"""
NO_SERVICE_ACCOUNT_TEMPLATE = """\
// Insert following snippet into your build.gradle
// see docs.gradle.org/current/userguide/publishing_maven.html
plugins {{
id "maven-publish"
id "com.google.cloud.artifactregistry.gradle-plugin" version "{extension_version}"
}}
publishing {{
repositories {{
maven {{
url "artifactregistry://{location}-maven.pkg.dev/{repo_path}"
}}
}}
}}
repositories {{
maven {{
url "artifactregistry://{location}-maven.pkg.dev/{repo_path}"
}}
}}
"""
|
[
"gcloud@google.com"
] |
gcloud@google.com
|
30743d0660f99cca916c12814e164669ead70026
|
c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c
|
/cases/pa2/sample/expr_lists-45.py
|
9aff4784319ac14303406fc9b8c82678ed9274ee
|
[] |
no_license
|
Virtlink/ccbench-chocopy
|
c3f7f6af6349aff6503196f727ef89f210a1eac8
|
c7efae43bf32696ee2b2ee781bdfe4f7730dec3f
|
refs/heads/main
| 2023-04-07T15:07:12.464038
| 2022-02-03T15:42:39
| 2022-02-03T15:42:39
| 451,969,776
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 149
|
py
|
x:[int] = None
y:[object] = None
z:[bool] = None
o:object = None
x = [1, $Exp, 3]
x = []
y = [1, True]
z = [False, True]
x = None
o = x
o = x = [1]
|
[
"647530+Virtlink@users.noreply.github.com"
] |
647530+Virtlink@users.noreply.github.com
|
0b5b7f88519fa9b9b26e3ad6652ff1a4672c1541
|
f2c773e7ccdd60caf5a7c062305cfcd14d11beec
|
/AR_Scripts_1.0.16_R21_Deprecated/AR_SwapObjects.py
|
85e406b4c862dc6ca5dea0e0ae5157af60259cd3
|
[] |
no_license
|
aturtur/cinema4d-scripts
|
4ccfbc3403326a79076d9bcf001189cd5427f46a
|
a87fc6c835db5d205f8428cc67ccd30fdd4b4d4b
|
refs/heads/master
| 2023-07-03T13:34:58.735879
| 2023-06-19T09:57:22
| 2023-06-19T09:57:22
| 63,731,563
| 316
| 49
| null | 2022-04-24T02:31:17
| 2016-07-19T22:15:05
|
Python
|
UTF-8
|
Python
| false
| false
| 2,091
|
py
|
"""
AR_SwapObjects
Author: Arttu Rautio (aturtur)
Website: http://aturtur.com/
Name-US: AR_SwapObjects
Version: 1.0
Description-US: Swaps selected objects between each other. Holding SHIFT while executing script swaps also objects place in hierarchy.
Written for Maxon Cinema 4D R21.207
Python version 2.7.14
"""
# Libraries
import c4d
# Functions
def swapObjects():
    """Swap the first two selected objects' world transforms (and, with SHIFT
    held, their positions in the object hierarchy).

    Uses two temporary nulls as placeholders so each object can take the
    other's spot. Raises IndexError if fewer than two objects are selected.
    """
    doc = c4d.documents.GetActiveDocument() # Get active Cinema 4D document
    bc = c4d.BaseContainer() # Initialize Base Container
    tempNullA = c4d.BaseObject(c4d.Onull) # Initialize temporary Null object
    tempNullB = c4d.BaseObject(c4d.Onull)
    selection = doc.GetActiveObjects(c4d.GETACTIVEOBJECTFLAGS_NONE) # Get selection
    objA = selection[0] # Get object A
    objB = selection[1] # Get objet B
    matA = objA.GetMg() # Get object A's global matrix
    matB = objB.GetMg() # Get object B's global matrix
    doc.AddUndo(c4d.UNDOTYPE_CHANGE, objA) # Add undo for changing object A
    doc.AddUndo(c4d.UNDOTYPE_CHANGE, objB) # Add undo for changing object B
    tempNullA.InsertBefore(objA) # Insert temp Null A before object A
    tempNullB.InsertBefore(objB) # Insert temp Null B before object B
    if c4d.gui.GetInputState(c4d.BFM_INPUT_KEYBOARD,c4d.BFM_INPUT_CHANNEL,bc):
        if bc[c4d.BFM_INPUT_QUALIFIER] & c4d.QSHIFT: # If 'shift' key is pressed
            # Swap hierarchy positions using the nulls as anchors.
            objA.InsertAfter(tempNullB) # Move object
            objB.InsertAfter(tempNullA) # Move object
    # Matrices are always swapped regardless of SHIFT.
    objA.SetMg(matB) # Set new matrix to object A
    objB.SetMg(matA) # Set new matrix to object B
    tempNullA.Remove() # Delete temporary objects
    tempNullB.Remove()
    return True # Everything is fine
def main():
    """Entry point: run swapObjects() wrapped in a single undo step."""
    try:
        doc = c4d.documents.GetActiveDocument() # Get active Cinema 4D document
        doc.StartUndo() # Start recording undos
        swapObjects() # Run the script
        doc.EndUndo() # Stop recording undos
        c4d.EventAdd() # Refresh Cinema 4D
    except Exception:
        # BUGFIX: narrowed from a bare `except:` so KeyboardInterrupt and
        # SystemExit still propagate. Typical expected failure here is an
        # IndexError when fewer than two objects are selected.
        pass
# Execute main()
if __name__=='__main__':
    main()
|
[
"rautio.arttu@gmail.com"
] |
rautio.arttu@gmail.com
|
351f10db84028c7b90967a57fd7c5947cf1c2ff1
|
4a1b61cf551db7843050cc7080cec6fd60c4f8cc
|
/2020/백준문제/트리/00_트리.py
|
bc90198b1bcad51c6c1ca207c0bc74de3b890221
|
[] |
no_license
|
phoenix9373/Algorithm
|
4551692027ca60e714437fd3b0c86462f635d8ff
|
c66fd70e14bb8357318e8b8f386d2e968f0c4d98
|
refs/heads/master
| 2023-08-24T10:01:20.798430
| 2021-10-15T07:57:36
| 2021-10-15T07:57:36
| 288,092,774
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 938
|
py
|
import sys
sys.stdin = open('input_00.txt', 'r')
def preorder(n):
    # Root -> left -> right. A node index of 0 means "no child", so stop.
    if not n:
        return
    print(n, end=' ')
    preorder(tree[n][0])
    preorder(tree[n][1])
def inorder(n):
    # Left -> root -> right. A node index of 0 means "no child".
    # BUGFIX: the original recursed via preorder(), so it did not produce
    # an inorder traversal; recurse into inorder() itself.
    if n:
        inorder(tree[n][0])
        print(n, end=' ')
        inorder(tree[n][1])
def postorder(n):
    # Left -> right -> root. A node index of 0 means "no child".
    # BUGFIX: the original recursed via preorder(), so it did not produce
    # a postorder traversal; recurse into postorder() itself.
    if n:
        postorder(tree[n][0])
        postorder(tree[n][1])
        print(n, end=' ')
# Read the tree input.
N = int(input())  # number of nodes
E = 12  # number of edges (hard-coded; for a tree this would be N - 1 -- TODO confirm)
# tree[node] = [left child, right child, parent]; 0 means "none".
tree = [[0, 0, 0] for _ in range(N + 1)]
arr = list(map(int, input().split()))
for i in range(E):
    # arr holds (parent, child) pairs; fill the left slot first, then right.
    if tree[arr[2 * i]][0] == 0:
        tree[arr[2 * i]][0] = arr[2 * i + 1]
    else:
        tree[arr[2 * i]][1] = arr[2 * i + 1]
    tree[arr[2 * i + 1]][2] = arr[2 * i]
print(arr)
preorder(1)
print()
inorder(1)
print()
postorder(1)
|
[
"phoenix9373@naver.com"
] |
phoenix9373@naver.com
|
3b89389daeeefbd5bfb316297767be67e33037ad
|
aef5c3a8fc1a0849e8ed7dcdf4ea0446f64c342c
|
/zapd/admin.py
|
11cffccef493d84b52ed6a47db8f4850407810cd
|
[] |
no_license
|
eoliveros/zapd
|
c21e05dde1b318870483a2a34799fffdd1fcbd69
|
b17afbc5b05fcbd27370d9ea9e6c2e6fc6bed7d6
|
refs/heads/master
| 2022-10-16T02:01:49.969941
| 2020-06-16T00:36:15
| 2020-06-16T00:36:15
| 171,779,747
| 0
| 0
| null | 2019-02-21T01:43:15
| 2019-02-21T01:43:14
| null |
UTF-8
|
Python
| false
| false
| 1,402
|
py
|
from flask import url_for
import flask_admin
from flask_admin import helpers as admin_helpers
from app_core import app, db
from models import security, RestrictedModelView, ProposalModelView, UserModelView, TransactionRestrictedModelView, AMWalletRestrictedModelView, \
Role, User, Category, Proposal, Transaction, CreatedTransaction, AMWallet, AMDevice
# Create admin
admin = flask_admin.Admin(
    app,
    'ZAPD Admin',
    base_template='my_master.html',
    template_mode='bootstrap3',
)
# Add model views, grouped into 'Admin' and 'ZAPD' menu categories.
admin.add_view(UserModelView(User, db.session, category='Admin'))
admin.add_view(RestrictedModelView(Role, db.session, category='Admin'))
admin.add_view(RestrictedModelView(Category, db.session, category='Admin'))
admin.add_view(AMWalletRestrictedModelView(AMWallet, db.session, name='App Metrics - Wallet', category='Admin'))
admin.add_view(ProposalModelView(Proposal, db.session))
admin.add_view(TransactionRestrictedModelView(Transaction, db.session, category='ZAPD'))
admin.add_view(RestrictedModelView(CreatedTransaction, db.session, category='ZAPD'))
# define a context processor for merging flask-admin's template context into the
# flask-security views.
@security.context_processor
def security_context_processor():
    """Expose flask-admin's base template, index view, helpers and url_for
    to flask-security's templates so they render inside the admin layout."""
    return dict(
        admin_base_template=admin.base_template,
        admin_view=admin.index_view,
        h=admin_helpers,
        get_url=url_for
    )
|
[
"djpnewton@gmail.com"
] |
djpnewton@gmail.com
|
e58160be043c25f1567117706578c6627e844ccb
|
bf72636241a871d9a7519a577395f9d1fd7b38c2
|
/tools_box/_selling/doctype/daily_route_activity/daily_route_activity.py
|
cfad18b8a1834abd2997ab43008e8996ba9faa94
|
[
"MIT"
] |
permissive
|
Athenolabs/Tools-Box
|
fc6400d9d88cc8ba0a3d48e38a0918f0022ce914
|
c4e4e368a0bec115f84bc33ae011d7e0fd02932f
|
refs/heads/master
| 2021-01-23T10:58:36.243182
| 2017-05-30T13:44:04
| 2017-05-30T13:44:04
| 93,116,515
| 2
| 1
| null | 2017-06-02T01:58:32
| 2017-06-02T01:58:31
| null |
UTF-8
|
Python
| false
| false
| 284
|
py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class DailyRouteActivity(Document):
    """Frappe DocType controller for 'Daily Route Activity'.

    No custom server-side logic; all behavior comes from the DocType schema.
    """
    pass
|
[
"masonarmani38@gmail.com"
] |
masonarmani38@gmail.com
|
15f753d76464d7abfd4fcf2a4b8dd8743d72fd97
|
462a30862d0303d1d1beeebb2d33bb2a625d5336
|
/catchpy/settings/local.py
|
995d6763bc4efc46baa39e79fbf3ac479732de8e
|
[] |
no_license
|
nmaekawa/catchpy
|
5eca9715c23e71ce4f6ef489607da0b0e46a14a3
|
50783648804e5b6ce57dcb7d00ba1038fd23ffdc
|
refs/heads/master
| 2023-08-03T09:25:44.838480
| 2023-04-18T19:05:20
| 2023-04-18T19:05:20
| 98,905,832
| 10
| 3
| null | 2023-08-14T18:47:50
| 2017-07-31T15:50:19
|
Python
|
UTF-8
|
Python
| false
| false
| 506
|
py
|
from .dev import *
DEBUG = True
# Django Extensions
# http://django-extensions.readthedocs.org/en/latest/
# Enabled only when the package is importable, so local installs without it
# still work.
try:
    import django_extensions
    INSTALLED_APPS += ['django_extensions']
except ImportError:
    pass
# Django Debug Toolbar
# http://django-debug-toolbar.readthedocs.org/en/latest/
# Same optional pattern: app + middleware only when the package is present.
try:
    import debug_toolbar
    INSTALLED_APPS += ['debug_toolbar']
    MIDDLEWARE += ['debug_toolbar.middleware.DebugToolbarMiddleware']
    DEBUG_TOOLBAR_PATCH_SETTINGS = True
except ImportError:
    pass
|
[
"nmaekawa@g.harvard.edu"
] |
nmaekawa@g.harvard.edu
|
b8405ccbf1b037622cfb344604a81fcef9306518
|
1f5f8f95530003c6c66419519d78cb52d21f65c0
|
/projects/golem_gui/tests/users/create_user/add_project_permission.py
|
5ac16e37d8543470a90751eb0751b5bc624ee3b4
|
[] |
no_license
|
golemhq/golem-tests
|
c5d3ab04b1ea3755d8b812229feb60f513d039ac
|
dff8fd3a606c3d1ef8667aece6fddef8ac441230
|
refs/heads/master
| 2023-08-17T23:05:26.286718
| 2021-10-04T20:34:17
| 2021-10-04T20:34:17
| 105,579,436
| 4
| 1
| null | 2018-11-19T00:14:24
| 2017-10-02T20:05:55
|
Python
|
UTF-8
|
Python
| false
| false
| 513
|
py
|
from golem import actions
from projects.golem_gui.pages import common
from projects.golem_gui.pages.users import create_user
def setup(data):
    """Log into Golem as admin and open the 'create user' page."""
    common.access_golem(data.env.url, data.env.admin)
    create_user.navigate_to_page()
def test(data):
    """Add an 'admin' permission for project1 and verify it appears in the table."""
    project = 'project1'
    permission = 'admin'
    create_user.select_project(project)
    create_user.select_permission(permission)
    actions.click(create_user.add_permission_button)
    create_user.assert_project_permission_in_table(project, permission)
|
[
"luciano@lucianorenzi.com"
] |
luciano@lucianorenzi.com
|
2e1b14b5791d705897342227ca9a919f4399bccf
|
73f1075c99338984795f4bd7bd7b9563ecc36d87
|
/Binary_Search/74.Search_a_2D_Matrix.py
|
033d554ced3b5db33c3e0b08155a2a7e62fb0138
|
[] |
no_license
|
grg909/LCtrip
|
314bd173f87ec98ff13234bdd148c76482db2df7
|
96836da905526b47f0cdee8c0bb4790c4cdd6c79
|
refs/heads/master
| 2020-09-12T13:38:52.486189
| 2020-08-28T14:22:50
| 2020-08-28T14:22:50
| 222,442,472
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 789
|
py
|
# -*- coding: UTF-8 -*-
# @Date : 2019/12/11
# @Author : WANG JINGE
# @Email : wang.j.au@m.titech.ac.jp
# @Language: python 3.7
"""
"""
# 思路1,把二维数组扁平化
class Solution:
def searchMatrix(self, matrix, target):
try:
n, m = len(matrix), len(matrix[0])
except:
return False
start, end = 0, n*m -1
while start + 1 < end:
mid = (start + end)//2
x, y = mid/m, mid%m
if matrix[x][y] > target:
end = mid
else:
start = mid
x, y = start/m, start%m
if matrix[x][y] == target:
return True
x, y = end / m, end % m
if matrix[x][y] == target:
return True
return False
|
[
"grg909@foxmail.com"
] |
grg909@foxmail.com
|
cc7b250a3c9f0394d2b4a95cc17b250ac8fc17f7
|
bd2a975f5f6cd771393f994ebd428e43142ee869
|
/new_render_data/input/p/script/abort/back20180419/CG/C4d/process/AnalyzeC4d.py
|
8701fce3cb9979a4512eb94493a2858b24657c12
|
[] |
no_license
|
sol87/Pycharm_python36
|
1a297c9432462fc0d3189a1dc7393fdce26cb501
|
fa7d53990040d888309a349cfa458a537b8d5f04
|
refs/heads/master
| 2023-03-16T10:35:55.697402
| 2018-11-08T09:52:14
| 2018-11-08T09:52:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,585
|
py
|
#!/usr/bin/env python
# -*- coding=utf-8 -*-
# Author: kaname
# QQ: 1394041054
""" C4d analyzer """
# RUN:
# 1. From C4Dloader.py to loading RBAnalzer.py to do it.
# 2. AnalyzeC4d.py loading C4Dloader.py to do it.
import os
import sys
import subprocess
import string
import logging
import time
import shutil
from C4d import C4d
from C4dLoader import C4dLoader
from C4dPluginManager import C4dPlugin, C4dPluginMgr
from CommonUtil import RBCommon as CLASS_COMMON_UTIL
class AnalyzeC4d(C4d):
def __init__(self, **paramDict):
C4d.__init__(self, **paramDict)
self.format_log('AnalyzeC4d.init', 'start')
self.G_TIPS_TXT_NODE=os.path.join(self.G_WORK_RENDER_TASK_CFG, 'tips.json').replace('\\','/')
for key, value in list(self.__dict__.items()):
self.G_DEBUG_LOG.info(key + '=' + str(value))
self.format_log('done','end')
def RB_MAP_DRIVE(self):#2.chongxie
#self.format_log('[映射盘符]','[start]'.decode('utf-8').encode('gbk'))
self.G_DEBUG_LOG.info('[c4d.RB_MAP_DRIVE.start.....]')
if self.G_RENDER_OS != '0':
#delete all mappings
CLASS_COMMON_UTIL.del_net_use()
CLASS_COMMON_UTIL.del_subst()
#net use
b_flag = False
if self.G_CG_NAME == 'C4d':
map_root = os.path.basename(self.G_INPUT_PROJECT_PATH)
print(map_root + '@KANADAmmmmmmm')
map_dict = os.path.join(self.G_INPUT_PROJECT_PATH)
print(map_root + '@KANADAnnnnnnn')
map_cmd = 'net use %s: "%s"' % (map_root, map_dict)
CLASS_COMMON_UTIL.cmd_python3(map_cmd,my_log=self.G_DEBUG_LOG)
# #base RB_MAP_DRIVE
# if self.G_CG_NAME != 'Max' and self.G_TASK_JSON_DICT['system_info'].has_key('mnt_map'):
# map_dict = self.G_TASK_JSON_DICT['system_info']['mnt_map']
# for key,value in map_dict.items():
# value = os.path.normpath(value)
# map_cmd = 'net use "%s" "%s"' % (key,value)
# CLASS_COMMON_UTIL.cmd_python3(map_cmd,my_log=self.G_DEBUG_LOG)
# if key.lower() == 'b:':
# b_flag = True
if not b_flag:
map_cmd_b = 'net use B: "%s"' % (os.path.normpath(self.G_PLUGIN_PATH))
CLASS_COMMON_UTIL.cmd(map_cmd_b,my_log=self.G_DEBUG_LOG,try_count=3)
self.G_DEBUG_LOG.info('[c4d.RB_MAP_DRIVE.end.....]')
self.format_log('done','end')
def RB_CONFIG(self):
self.G_DEBUG_LOG.info('[c4d.analyze.配置插件开始]')
self.G_DEBUG_LOG.info('[c4d.analyze.plugin.config.start......]')
self.plugin_config()
self.G_DEBUG_LOG.info('[c4d.analyze.配置插件完成]')
self.G_DEBUG_LOG.info('[c4d.analyze.plugin.config.end......]')
def RB_RENDER(self):
self.G_DEBUG_LOG.info('[c4d.RBanalyse.start.....]')
self.G_FEE_PARSER.set('render','start_time',str(int(time.time())))
cg_ver = self.G_CG_VERSION
task_id = self.G_TASK_ID
cg_file = self.G_INPUT_CG_FILE
task_json = self.G_TASK_JSON
asset_json = self.G_ASSET_JSON
tips_json = self.G_TIPS_TXT_NODE
c4d_loader = C4dLoader(cg_ver, task_id, cg_file, task_json, asset_json, tips_json)
c4d_loader.execute()
self.G_FEE_PARSER.set('render','end_time',str(int(time.time())))
self.G_DEBUG_LOG.info('[c4d.RBanalyse.end.....]')
|
[
"superdkk@gmail.com"
] |
superdkk@gmail.com
|
41363247c358198e8cecea4460b8076fd9c34398
|
01301e5f486883865e3696f38ef913a232958343
|
/antlir/compiler/test_images/print_ok.py
|
a38dabfa12fe5a4e77e3b8b8fd720897c11764b0
|
[
"MIT"
] |
permissive
|
SaurabhAgarwala/antlir
|
85fb09c87dafde56622b4107224b41f873f66442
|
d9513d35d3eaa9d28717a40057a14d099c6ec775
|
refs/heads/main
| 2023-06-25T09:05:30.619684
| 2021-07-01T23:04:57
| 2021-07-01T23:06:11
| 382,355,446
| 0
| 0
|
MIT
| 2021-07-02T13:30:39
| 2021-07-02T13:30:39
| null |
UTF-8
|
Python
| false
| false
| 294
|
py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
'Prints the unicode string "ok" via the `print` function to `stdout`, on 1 line'
print("ok")
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
0f276a9b40c35cb921b2f49748656afb5c5442d9
|
0f0a7adfae45e07a896c5cd5648ae081d4ef7790
|
/python数据结构/慕课测试题/打印实心矩形.py
|
f31f5eb66436884a6fbfd6372e3042c933196836
|
[] |
no_license
|
renlei-great/git_window-
|
e2c578544c7a8bdd97a7a9da7be0464d6955186f
|
8bff20a18d7bbeeaf714aa49bf15ab706153cc28
|
refs/heads/master
| 2021-07-19T13:09:01.075494
| 2020-06-13T06:14:37
| 2020-06-13T06:14:37
| 227,722,554
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 184
|
py
|
args = input().split()
alist = [int(i) for i in args]
# min_number = min(alist)
min_number = alist[0]
for i in alist:
if min_number > i:
min_number = i
print(min_number)
|
[
"1415977534@qq.com"
] |
1415977534@qq.com
|
166d339829928c03eae087789acaafe7f5329a46
|
267f2c09420436e97275986f825045cbe81fd3ec
|
/buy & sell vinyl records 3.5.3.py
|
5215d6374e530fd31aa37d163087968486904c55
|
[] |
no_license
|
aiqbal-hhs/91906-7
|
f1ddc21846bee6dd9dcf4f75bdabe68989390769
|
8d6aadedff8c6585c204a256b5bd3ad8294a815f
|
refs/heads/main
| 2023-05-15T00:17:41.407536
| 2021-06-04T10:32:21
| 2021-06-04T10:32:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,680
|
py
|
from functools import partial
from tkinter import *
import random
root = Tk()
stock_list = ["Igor - Tyler The Creator",
"Good Kid Maad City - Kendrick Lamar",
"Demon Days - Gorillaz"]
class stock:
def __init__(self, name, amount):
self.name = name
self.amount
stock_list.append(self)
##########################################buy frame######################################################
#formatting variables....
background_color = "orange"
# converter Main Screen GUI...
buy_frame = Frame(width=360, bg=background_color)
buy_frame.grid()
# buy title (row 0)
buy_label = Label(buy_frame, text="Buy page",
font=("Arial", "16", "bold"),
bg=background_color,
padx=10, pady=5)
buy_label.grid(row=0, column=0)
# buy heading (label, row 1)
buy_heading = Label(buy_frame, text="Buy heading goes here",
font=("Arial", "12"),
bg=background_color,
padx=10, pady=5)
buy_heading.grid(row=1, column=0)
# buy heading (label, row 2)
buy_text = Label(buy_frame, text="this is where you buy vinyls",
font="Arial 9 italic", wrap=250, justify=LEFT,
bg=background_color,
padx=10, pady=10)
buy_text.grid(row=2, column=0)
#entry for amount of vinyls the user wants to buy
e = Entry(buy_frame, width=25)
e.insert(0,"")
e.grid(row=4, column=1)
myButton = Button(buy_frame, text="Enter", font=("Arial", "14"),
padx=10, pady=10, command=help)
myButton.grid(row=5, column=1)
#Creating the Dropdown Menu
chosen_option = StringVar()
option_menu = OptionMenu(buy_frame, chosen_option, stock_list[0], *stock_list)
option_menu.grid(row=1, column=1)
##########################################sell frame######################################################
#formatting variables....
sell_background_color = "blue"
# converter Main Screen GUI...
sell_frame = Frame(width=360, bg=sell_background_color)
sell_frame.grid()
# sell title (row 0)
sell_label = Label(sell_frame, text="Sell page",
font=("Arial", "16", "bold"),
bg=sell_background_color,
padx=10, pady=5)
sell_label.grid(row=0, column=0)
# sell heading (label, row 1)
sell_heading = Label(sell_frame, text="sell heading goes here",
font=("Arial", "12"),
bg=sell_background_color,
padx=10, pady=5)
sell_heading.grid(row=1, column=0)
# buy heading (label, row 2)
sell_text = Label(sell_frame, text="this is where you buy vinyls",
font="Arial 9 italic", wrap=250, justify=LEFT,
bg=sell_background_color,
padx=10, pady=10)
sell_text.grid(row=2, column=0)
#entry for amount of vinyls the user wants to buy
sell_e = Entry(sell_frame, width=25)
sell_e.insert(0,"")
sell_e.grid(row=4, column=1)
sell_Button = Button(sell_frame, text="Enter", font=("Arial", "14"),
padx=10, pady=10, command=help)
sell_Button.grid(row=5, column=1)
#Creating the Dropdown Menu
sell_chosen_option = StringVar()
sell_option_menu = OptionMenu(sell_frame, sell_chosen_option, stock_list[0], *stock_list)
sell_option_menu.grid(row=1, column=1)
##########################################stock frame############################
#main routine
if __name__ == "__main__":
root.title("Buy & Sell Vinyl Records")
root.mainloop()
|
[
"noreply@github.com"
] |
aiqbal-hhs.noreply@github.com
|
ef9dd66a281bd4a8cfff524ae8a983149449e1cd
|
ca17bd80ac1d02c711423ac4093330172002a513
|
/binary_tree_longest_consecutive_sequence/LongestSequence_better.py
|
3413f0a627ca955427b2a27755e726678c29a746
|
[] |
no_license
|
Omega094/lc_practice
|
64046dea8bbdaee99d767b70002a2b5b56313112
|
e61776bcfd5d93c663b247d71e00f1b298683714
|
refs/heads/master
| 2020-03-12T13:45:13.988645
| 2018-04-23T06:28:32
| 2018-04-23T06:28:32
| 130,649,699
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 906
|
py
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def helper(self, root):
if not root: return 0, 0
leftG, leftL = self.helper(root.left)
rightG, rightL = self.helper(root.right)
currentL = 1
if root.left and root.val + 1 == root.left.val :
currentL =max(currentL, leftL + 1)
if root.right and root.val + 1 == root.right.val :
currentL = max(currentL, rightL + 1)
currentG = max(currentL, leftG, rightG)
return currentG, currentL
def longestConsecutive(self, root):
"""
:type root: TreeNode
:rtype: int
"""
self.longest = 0
if not root: return 0
return self.helper(root)[0]
|
[
"zhao_j1@denison.edu"
] |
zhao_j1@denison.edu
|
59dd09fa952c05fb2664214cd30c0473025458e0
|
43e53df2f2bc1779c2896541940a235e66a02b02
|
/day18/qq发送消息.py
|
ab63f8dfee03cb49856868ecbdb35ef1e150b795
|
[] |
no_license
|
songdanlee/python_code_basic
|
ddb3276b0473a261423c43d5d8e7a1ff038d5c51
|
f32cd4dc9670e55ffa6abe04c9184bfa5d8bbc41
|
refs/heads/master
| 2020-07-14T21:05:30.471486
| 2019-08-30T14:55:51
| 2019-08-30T14:55:51
| 205,402,606
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 239
|
py
|
import os
Name = input('Name of the Receiver: ')
Name = '穆梓'
clientDict = {'lz':'513278236',
'穆梓':'318750798'
} # 人名 和对应的 qq号
os.system('start tencent://message/?uin=' + clientDict[Name])
|
[
"2533636371@qq.com"
] |
2533636371@qq.com
|
bf883990f5d5a2a677f673e28b5c4877284c147d
|
fde186bd141ed055ba8ab915b2ad25355f8f3fb6
|
/ABC/070/py/A.py
|
66689bce17b2f299f4639476d5684fcfd9e35d34
|
[] |
no_license
|
Tsukumo3/Atcoder
|
259ea6487ad25ba2d4bf96d3e1cf9be4a427d24e
|
5f8d5cf4c0edee5f54b8e78bc14a62e23cab69cb
|
refs/heads/master
| 2020-12-20T05:04:39.222657
| 2020-10-17T01:39:04
| 2020-10-17T01:39:04
| 235,969,950
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 166
|
py
|
'''
ABC070 A - Palindromic Number
https://atcoder.jp/contests/abc070/tasks/abc070_a
'''
n = input()
if n[0] == n[2]:
ans = 'Yes'
else:
ans = 'No'
print(ans)
|
[
"53821328+Tsukumo3@users.noreply.github.com"
] |
53821328+Tsukumo3@users.noreply.github.com
|
b9b123916eba2a46e552b8cb0e286f5b55b8e3e2
|
e6f2d7e407d2b516152094d0834e78603c9eb60b
|
/wen_python_16/pic_1.py
|
6be48cde753d4cc2948ea9632e02d8c0580a5dbd
|
[] |
no_license
|
pylinx64/wen_python_16
|
5d63a44d2cbc8380e57b9f3c6887ab91578ec6cb
|
c9e2f9083f848d502bce2e0cf049ccba2677e981
|
refs/heads/main
| 2023-04-18T04:43:32.601474
| 2021-05-05T10:07:30
| 2021-05-05T10:07:30
| 336,603,250
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 242
|
py
|
import turtle
import time
t = turtle.Pen()
colors = ['lime', '#C35A62', '#9CC35A', '#5AC3B7', '#C35AB8']
turtle.bgcolor('black')
t.pencolor(colors[2])
t.circle(100)
t.left(320)
t.forward(200)
t.circle(100)
time.sleep(50)
|
[
"noreply@github.com"
] |
pylinx64.noreply@github.com
|
a2c75d7e2b2e0d54e1631a4ce6785d7266097d6e
|
9cd180fc7594eb018c41f0bf0b54548741fd33ba
|
/sdk/python/pulumi_azure_nextgen/network/v20170901/zone.py
|
11bfa1976eb4864afd69e9171e7f0790cc681bd9
|
[
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
MisinformedDNA/pulumi-azure-nextgen
|
c71971359450d03f13a53645171f621e200fe82d
|
f0022686b655c2b0744a9f47915aadaa183eed3b
|
refs/heads/master
| 2022-12-17T22:27:37.916546
| 2020-09-28T16:03:59
| 2020-09-28T16:03:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,856
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = ['Zone']
class Zone(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
etag: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
zone_name: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Describes a DNS zone.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] etag: The etag of the zone.
:param pulumi.Input[str] location: The geo-location where the resource lives
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
:param pulumi.Input[str] zone_name: The name of the DNS zone (without a terminating dot).
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['etag'] = etag
if location is None:
raise TypeError("Missing required property 'location'")
__props__['location'] = location
if resource_group_name is None:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['tags'] = tags
if zone_name is None:
raise TypeError("Missing required property 'zone_name'")
__props__['zone_name'] = zone_name
__props__['max_number_of_record_sets'] = None
__props__['name'] = None
__props__['name_servers'] = None
__props__['number_of_record_sets'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network/latest:Zone"), pulumi.Alias(type_="azure-nextgen:network/v20150504preview:Zone"), pulumi.Alias(type_="azure-nextgen:network/v20160401:Zone"), pulumi.Alias(type_="azure-nextgen:network/v20171001:Zone"), pulumi.Alias(type_="azure-nextgen:network/v20180301preview:Zone"), pulumi.Alias(type_="azure-nextgen:network/v20180501:Zone")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(Zone, __self__).__init__(
'azure-nextgen:network/v20170901:Zone',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Zone':
"""
Get an existing Zone resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return Zone(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def etag(self) -> pulumi.Output[Optional[str]]:
"""
The etag of the zone.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
The geo-location where the resource lives
"""
return pulumi.get(self, "location")
@property
@pulumi.getter(name="maxNumberOfRecordSets")
def max_number_of_record_sets(self) -> pulumi.Output[int]:
"""
The maximum number of record sets that can be created in this DNS zone. This is a read-only property and any attempt to set this value will be ignored.
"""
return pulumi.get(self, "max_number_of_record_sets")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="nameServers")
def name_servers(self) -> pulumi.Output[Sequence[str]]:
"""
The name servers for this DNS zone. This is a read-only property and any attempt to set this value will be ignored.
"""
return pulumi.get(self, "name_servers")
@property
@pulumi.getter(name="numberOfRecordSets")
def number_of_record_sets(self) -> pulumi.Output[int]:
"""
The current number of record sets in this DNS zone. This is a read-only property and any attempt to set this value will be ignored.
"""
return pulumi.get(self, "number_of_record_sets")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts.
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
|
[
"public@paulstack.co.uk"
] |
public@paulstack.co.uk
|
d5f2b424d4ed273d886ee3533b078836331a62e5
|
97eac4a05c77e1b6898b84c9606afa13428e45df
|
/Important_Functions/fib.py
|
84c34f782b19848ecb61c528a94af491a974b47a
|
[] |
no_license
|
ryanmcg86/Euler_Answers
|
8f71b93ea15fceeeeb6b661d7401e40b760a38e6
|
28374025448b16aab9ed1dd801aafc3d602f7da8
|
refs/heads/master
| 2022-08-11T13:31:11.038918
| 2022-07-28T00:35:11
| 2022-07-28T00:35:11
| 190,278,288
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 353
|
py
|
'''This is a O(log n) implementation of a function that retreives the nth number in Fibonacci's sequence.'''
fibs = {0: 0, 1: 1}
def fib(n):
if n in fibs: return fibs[n]
if n % 2 == 0:
fibs[n] = ((2 * fib((n / 2) - 1)) + fib(n / 2)) * fib(n / 2)
else:
fibs[n] = fib((n - 1) / 2)**2 + fib((n + 1) / 2)**2
return fibs[n]
|
[
"noreply@github.com"
] |
ryanmcg86.noreply@github.com
|
e258038aad904c2a62e39e78d3c0d2cf97592f7e
|
7714d7fe86c99c059e339e895e265658fa3ce36e
|
/backend/home/migrations/0005_auto_20200807_0839.py
|
aa38d5dae63fac410eabc371a886dabc919134b3
|
[] |
no_license
|
crowdbotics-apps/mobile-7-aug-dev-8582
|
f9454c8a9b3ca34e0b7dce328554658fd3fe02e9
|
f569d0a9ae3effb99d6ee00127f87015296a4993
|
refs/heads/master
| 2023-07-11T13:56:39.164407
| 2020-08-07T09:01:31
| 2020-08-07T09:01:31
| 285,739,310
| 0
| 0
| null | 2021-08-03T20:03:29
| 2020-08-07T04:46:04
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 551
|
py
|
# Generated by Django 2.2.15 on 2020-08-07 08:39
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('home', '0004_hjkhgkjhkjhkj'),
]
operations = [
migrations.RemoveField(
model_name='customtext',
name='hgfhgfhgf',
),
migrations.RemoveField(
model_name='customtext',
name='hjgjhgjhghjg',
),
migrations.RemoveField(
model_name='customtext',
name='kjhkjhkjh',
),
]
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.