| blob_id (string, len 40) | directory_id (string, len 40) | path (string, len 3–288) | content_id (string, len 40) | detected_licenses (list, len 0–112) | license_type (string, 2 classes) | repo_name (string, len 5–115) | snapshot_id (string, len 40) | revision_id (string, len 40) | branch_name (string, 684 classes) | visit_date (timestamp[us], 2015-08-06 10:31:46 to 2023-09-06 10:44:38) | revision_date (timestamp[us], 1970-01-01 02:38:32 to 2037-05-03 13:00:00) | committer_date (timestamp[us], 1970-01-01 02:38:32 to 2023-09-06 01:08:06) | github_id (int64, 4.92k–681M, nullable) | star_events_count (int64, 0–209k) | fork_events_count (int64, 0–110k) | gha_license_id (string, 22 classes) | gha_event_created_at (timestamp[us], 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable) | gha_created_at (timestamp[us], 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable) | gha_language (string, 147 classes) | src_encoding (string, 25 classes) | language (string, 1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 128–12.7k) | extension (string, 142 classes) | content (string, len 128–8.19k) | authors (list, len 1) | author_id (string, len 1–132) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3dbdb608cd2de3f1278d8f0339287fd5ce40c676
|
fcc88521f63a3c22c81a9242ae3b203f2ea888fd
|
/Python3/0844-Backspace-String-Compare/soln-1.py
|
cf9b6afca02d5b7deeaed1a8aa8d927a70cbd4e0
|
[
"MIT"
] |
permissive
|
wyaadarsh/LeetCode-Solutions
|
b5963e3427aa547d485d3a2cb24e6cedc72804fd
|
3719f5cb059eefd66b83eb8ae990652f4b7fd124
|
refs/heads/master
| 2022-12-06T15:50:37.930987
| 2020-08-30T15:49:27
| 2020-08-30T15:49:27
| 291,811,790
| 0
| 1
|
MIT
| 2020-08-31T19:57:35
| 2020-08-31T19:57:34
| null |
UTF-8
|
Python
| false
| false
| 600
|
py
|
class Solution:
def backspaceCompare(self, S, T):
"""
:type S: str
:type T: str
:rtype: bool
"""
i, j = len(S) - 1, len(T) - 1
bs, bt = 0, 0
while True:
while i >= 0 and (bs or S[i] == '#'):
bs = bs + 1 if S[i] == '#' else bs - 1
i -= 1
while j >= 0 and (bt or T[j] == '#'):
bt = bt + 1 if T[j] == '#' else bt - 1
j -= 1
if not(i >= 0 and j >= 0 and S[i] == T[j]):
return i == j == -1
i, j = i - 1, j - 1
|
[
"zhang623@wisc.edu"
] |
zhang623@wisc.edu
|
9dea79ebe2acef41d229a77657e6b1cf232caf43
|
5215715a4cbcf9ce065b1542db224a2b1997c760
|
/T3/t3.py
|
5523731288ed9eb2d6e7a4f542b5a35d71a18b89
|
[
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] |
permissive
|
Cgipson06/reddit
|
02deac29ead779890e42d48400d2233ce888e5a0
|
deb1da398840bbd311a79eec25ef2a8b5a8ed5b1
|
refs/heads/master
| 2021-01-08T23:19:55.245559
| 2014-12-28T20:52:26
| 2014-12-28T20:52:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,034
|
py
|
#/u/GoldenSights
import praw
import time
import sqlite3
import datetime
import random
USERAGENT = """
/u/GoldenSights T3 data collection: Gathering Submission data for
statistical analysis.
More info at https://github.com/voussoir/reddit/tree/master/T3
"""
r = praw.Reddit(USERAGENT)
print('Connected to reddit.')
sql = sqlite3.connect('D:/T3/t3.db')
cur = sql.cursor()
cur.execute('CREATE TABLE IF NOT EXISTS meta(label TEXT, data TEXT)')
cur.execute(('CREATE TABLE IF NOT EXISTS posts(idint INT, idstr TEXT, '
'created INT, self INT, nsfw INT, author TEXT, title TEXT, '
'url TEXT, selftext TEXT, score INT, subreddit TEXT, distinguish INT, '
'textlen INT)'))
DISTINGUISHMAP = {0:"user", 1:"moderator", 2:"admin"}
DISTINGUISHMAP_R = {"user":0, "moderator":1, "admin":2}
LOWERBOUND = 9999000
# 5yba0
UPPERBOUND = 164790958
# 2q41im
# 1,679,616 = 10000
# 9,999,000 = 5yba0
# 60,466,176 = 100000
# 120,932,352 = 200000
# 164,790,958 = 2q41im
# 0 - idint
# 1 - idstr
# 2 - created
# 3 - self
# 4 - nsfw
# 5 - author
# 6 - title
# 7 - url
# 8 - selftext
# 9 - score
# 10 - subreddit
# 11 - distinguished
# 12 - textlen
class Post:
''' Used to map the indices of DB entries to names '''
def __init__(self, data):
self.idint = data[0]
self.idstr = data[1]
self.created_utc = data[2]
self.is_self = True if data[3] == 1 else False
self.over_18 = True if data[4] == 1 else False
self.author = data[5]
self.title = data[6]
self.url = data[7]
self.selftext = data[8]
self.score = data[9]
self.subreddit = data[10]
self.distinguished = DISTINGUISHMAP[data[11]]
self.textlen = data[12]
def base36encode(number, alphabet='0123456789abcdefghijklmnopqrstuvwxyz'):
"""Converts an integer to a base36 string."""
if not isinstance(number, (int)):
raise TypeError('number must be an integer')
base36 = ''
sign = ''
if number < 0:
sign = '-'
number = -number
if 0 <= number < len(alphabet):
return sign + alphabet[number]
while number != 0:
number, i = divmod(number, len(alphabet))
base36 = alphabet[i] + base36
return sign + base36
def base36decode(number):
return int(number, 36)
def b36(i):
if type(i) == int:
return base36encode(i)
if type(i) == str:
return base36decode(i)
def human(timestamp):
day = datetime.datetime.utcfromtimestamp(timestamp)
human = datetime.datetime.strftime(day, "%b %d %Y %H:%M:%S UTC")
return human
def process(itemid, log=True, kill=True):
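    # Takes a t3 fullname (or list of fullnames), fetches the submissions from
    # reddit, normalizes each into a row, and writes the rows to the database
    # (or returns them when log=False).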
if isinstance(itemid, str):
itemid = [itemid]
if isinstance(itemid, list):
if isinstance(itemid[0], str):
itemid = verify_t3(itemid)
try:
itemid = remove_existing(itemid)
temp = itemid[:]
except Exception:
return
itemid = r.get_info(thing_id=itemid)
try:
len(itemid)
except:
print(temp, "DEAD")
if kill:
logdead(temp[0])
process(temp, kill=kill)
return
for index in range(len(itemid)):
item = itemid[index]
item.idint = b36(item.id)
item.idstr = item.id
if item.distinguished is None:
item.distinguished = 0
else:
item.distinguished = DISTINGUISHMAP_R[item.distinguished]
item.url = "self" if item.is_self else item.url
item.created_utc = int(item.created_utc)
item.is_self = 1 if item.is_self else 0
item.over_18 = 1 if item.over_18 else 0
item.sub = item.subreddit.display_name
item.textlen = len(item.selftext)
try:
item.auth = item.author.name
except AttributeError:
item.auth = "[deleted]"
item = [item.idint, item.idstr, item.created_utc,
item.is_self, item.over_18, item.auth, item.title,
item.url, item.selftext, item.score, item.sub,
item.distinguished, item.textlen]
itemid[index] = item
if log:
logdb(itemid)
else:
return itemid
if len(itemid) < len(temp):
process(temp)
# 0 - idint
# 1 - idstr
# 2 - created
# 3 - self
# 4 - nsfw
# 5 - author
# 6 - title
# 7 - url
# 8 - selftext
# 9 - score
# 10 - subreddit
# 11 - distinguished
# 12 - textlen
def logdb(items):
for item in items:
cur.execute('INSERT INTO posts VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)', item)
sql.commit()
def logdead(i):
#If an ID is dead, let's at least add it to the db.
i = i.replace('t3_', '')
data = [b36(i), i, 0, 0, 0, '?', '?', '?', '?', 0, '?', 0, 0]
logdb([data])
def verify_t3(items):
for index in range(len(items)):
i = items[index]
if 't3_' not in i:
items[index] = 't3_' + i
return items
def remove_existing(items):
done = False
items = verify_t3(items)
while not done:
done = True
for item in items:
cur.execute('SELECT * FROM posts WHERE idint=?', [b36(item[3:])])
f = cur.fetchone()
if f:
items.remove(item)
done = False
break
if len(items) == 0:
raise Exception("Nothing new")
return items
def processrange(lower, upper, kill=True):
if isinstance(lower, str):
lower = b36(lower)
if isinstance(upper, int):
upper = lower + upper
if isinstance(upper, str):
upper = b36(upper)
if upper <= lower:
print("Upper must be higher than lower")
return
ids = [b36(x) for x in range(lower, upper)]
while len(ids) > 0:
p = ids[:100]
print("%s >>> %s (%d)" % (p[0], p[-1], len(ids)))
ids = ids[100:]
process(p, kill=kill)
def lastitem():
cur.execute('SELECT * FROM posts ORDER BY idint DESC LIMIT 1')
return cur.fetchone()[1]
def show():
filea = open('show/missing.txt', 'w')
fileb = open('show/stats.txt', 'w')
cur.execute('SELECT Count(*) FROM posts')
count = cur.fetchone()
count = count[0]
counts = '{0:,}'.format(count)
mainstats = '%s posts collected; ' % counts
print('Current total:', counts)
print('Counting dead posts')
cur.execute('SELECT * FROM posts WHERE created=0')
dead = cur.fetchall()
dead = [x[1] for x in dead]
deadcount = len(dead)
deadcount = '{0:,}'.format(deadcount)
mainstats += '%s dead.\n' % deadcount
for deaditem in dead:
print(deaditem, file=filea)
filea.close()
print('Counting selfposts')
cur.execute('SELECT * FROM posts WHERE self=1')
self = cur.fetchall()
self = len(self)
link = count-self
selfs = '{0:,}'.format(self)
links = '{0:,}'.format(link)
selfstats = '%s linkposts; %s selfposts\n' % (links, selfs)
readmefile = open('README.md', 'r')
readmelines = readmefile.readlines()
readmefile.close()
readmelines[3] = mainstats
readmelines[4] = selfstats
readmefile = open('README.md', 'w')
readmefile.write(''.join(readmelines))
readmefile.close()
#STATS TIME
print('Writing subreddit stats')
cur.execute('SELECT * FROM posts')
subredditcounts = {}
while True:
fetch = cur.fetchone()
if fetch:
fetch = Post(fetch)
try:
subredditcounts[fetch.subreddit] += 1
except KeyError:
subredditcounts[fetch.subreddit] = 1
else:
break
subkeys = list(subredditcounts.keys())
subkeys.sort(key=subredditcounts.get, reverse=True)
for key in subkeys:
out = key
out += '.'*(25-len(key))
num = '{0:,}'.format(subredditcounts[key])
out += '.'*(14-len(num))
out += num
print(out, file=fileb)
fileb.close()
|
[
"edalool@yahoo.com"
] |
edalool@yahoo.com
|
27bb5b5457ce8249495d9fcc5263dd01e827aed6
|
e7d2c2c7fbcffc3b4e8976f01b354f794fc3b71d
|
/bmga/utils/formatting.py
|
6ab11a6dfcbe02e87b651ba6371989e3ca2c1403
|
[
"BSD-3-Clause"
] |
permissive
|
vituocgia/boxme-api
|
41da50fcec12089e59a29786b3bcff6c9b169d99
|
10c8054a223f124a85e70669d17313e3a2991226
|
refs/heads/master
| 2020-03-08T19:18:21.829490
| 2018-04-06T09:11:30
| 2018-04-06T09:11:30
| 128,347,542
| 0
| 0
| null | 2018-04-27T04:43:38
| 2018-04-06T05:24:05
|
Python
|
UTF-8
|
Python
| false
| false
| 822
|
py
|
from __future__ import unicode_literals
from django.utils import dateformat
from bmga.utils.timezone import make_naive, aware_datetime
from dateutil.parser import parse as mk_datetime # flake8: noqa
def format_datetime(dt):
"""
RFC 2822 datetime formatter
"""
return dateformat.format(make_naive(dt), 'r')
def format_date(d):
"""
RFC 2822 date formatter
"""
# workaround because Django's dateformat utility requires a datetime
# object (not just date)
dt = aware_datetime(d.year, d.month, d.day, 0, 0, 0)
return dateformat.format(dt, 'j M Y')
def format_time(t):
"""
RFC 2822 time formatter
"""
# again, workaround dateformat input requirement
dt = aware_datetime(2000, 1, 1, t.hour, t.minute, t.second)
return dateformat.format(dt, 'H:i:s O')
|
[
"dotiendiep@gmail.com"
] |
dotiendiep@gmail.com
|
d62cb9ed15fdf25fbcf76191f5229784b9ee13e5
|
cf3ef8f3eca858bd3c64ba6159a2ba7cdb1722ad
|
/studygroups/views/organizer.py
|
06e748fc35dd2c27b9b26843e6b0676e4ae6d0d6
|
[] |
no_license
|
alvarmaciel/learning-circles
|
2ff956dcbe0b5a42f64036c33613644115063a8d
|
3ac444fd6f5a81f655face733e7d41786e085cd4
|
refs/heads/master
| 2021-01-11T00:45:04.513019
| 2016-10-05T14:13:16
| 2016-10-05T14:13:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,094
|
py
|
import datetime
from django.shortcuts import render, render_to_response, get_object_or_404
from django.template import RequestContext
from django.template.loader import render_to_string
from django.core.urlresolvers import reverse, reverse_lazy
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.core.exceptions import PermissionDenied
from django.core.mail import EmailMultiAlternatives, send_mail
from django.contrib import messages
from django.conf import settings
from django import http
from django.views.decorators.http import require_http_methods
from django.views.decorators.csrf import csrf_exempt
from django.utils import timezone
from django.utils.translation import ugettext as _
from django.views.generic.base import View
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.views.generic import ListView
from studygroups.models import Course
from studygroups.models import StudyGroup
from studygroups.models import TeamMembership
from studygroups.models import Facilitator
from studygroups.models import StudyGroupMeeting
from studygroups.models import report_data
from studygroups.models import generate_all_meetings
from studygroups.models import get_team_users
from studygroups.models import get_user_team
from studygroups.forms import StudyGroupForm
from studygroups.forms import FacilitatorForm
from studygroups.decorators import user_is_organizer
@user_is_organizer
def organize(request):
today = datetime.datetime.now().date()
two_weeks_ago = today - datetime.timedelta(weeks=2, days=today.weekday())
two_weeks = today - datetime.timedelta(days=today.weekday()) + datetime.timedelta(weeks=3)
study_groups = StudyGroup.objects.active()
facilitators = Facilitator.objects.all()
courses = []# TODO Remove courses until we implement course selection for teams
team = None
if not request.user.is_staff:
team = get_user_team(request.user)
team_users = get_team_users(request.user)
study_groups = study_groups.filter(facilitator__in=team_users)
facilitators = facilitators.filter(user__in=team_users)
active_study_groups = study_groups.filter(
id__in=StudyGroupMeeting.objects.active().filter(meeting_date__gte=two_weeks_ago).values('study_group')
)
meetings = StudyGroupMeeting.objects.active()\
.filter(study_group__in=study_groups, meeting_date__gte=two_weeks_ago)\
.exclude(meeting_date__gte=two_weeks)
context = {
'team': team,
'courses': courses,
'meetings': meetings,
'study_groups': study_groups,
'active_study_groups': active_study_groups,
'facilitators': facilitators,
'today': timezone.now(),
}
return render_to_response('studygroups/organize.html', context, context_instance=RequestContext(request))
class StudyGroupList(ListView):
model = StudyGroup
def get_queryset(self):
study_groups = StudyGroup.objects.active()
if not self.request.user.is_staff:
team_users = get_team_users(self.request.user)
study_groups = study_groups.filter(facilitator__in=team_users)
return study_groups
class StudyGroupMeetingList(ListView):
model = StudyGroupMeeting
def get_queryset(self):
study_groups = StudyGroup.objects.active()
if not self.request.user.is_staff:
team_users = get_team_users(self.request.user)
study_groups = study_groups.filter(facilitator__in=team_users)
meetings = StudyGroupMeeting.objects.active().filter(study_group__in=study_groups)
return meetings
class TeamMembershipDelete(DeleteView):
model = TeamMembership
success_url = reverse_lazy('studygroups_organize')
template_name = 'studygroups/confirm_delete_membership.html'
def get_object(self, queryset=None):
if queryset == None:
queryset = TeamMembership.objects
return queryset.get(user_id=self.kwargs.get('user_id'), team_id=self.kwargs.get('team_id'))
class CourseUpdate(UpdateView):
model = Course
fields = [
'title',
'provider',
'link',
'start_date',
'duration',
'prerequisite',
'time_required',
'caption',
]
success_url = reverse_lazy('studygroups_organize')
class CourseDelete(DeleteView):
model = Course
success_url = reverse_lazy('studygroups_organize')
template_name = 'studygroups/confirm_delete.html'
class StudyGroupCreate(CreateView):
model = StudyGroup
form_class = StudyGroupForm
success_url = reverse_lazy('studygroups_organize')
def form_valid(self, form):
self.object = form.save()
generate_all_meetings(self.object)
return http.HttpResponseRedirect(self.get_success_url())
@user_is_organizer
def report(request):
# TODO - remove this view
study_groups = StudyGroup.objects.active()
for study_group in study_groups:
study_group.laptop_stats = {}
context = {
'study_groups': study_groups,
}
return render_to_response('studygroups/report.html', context, context_instance=RequestContext(request))
@user_is_organizer
def weekly_report(request, year=None, month=None, day=None ):
today = timezone.now().replace(hour=0, minute=0, second=0, microsecond=0)
if month and day and year:
today = today.replace(year=int(year), month=int(month), day=int(day))
start_time = today - datetime.timedelta(days=today.weekday())
end_time = start_time + datetime.timedelta(days=7)
context = {
'start_time': start_time,
'end_time': end_time,
}
# get team for current user
team = None
membership = TeamMembership.objects.filter(user=request.user, role=TeamMembership.ORGANIZER).first()
if membership:
team = membership.team
context.update(report_data(start_time, end_time, team))
return render_to_response('studygroups/weekly-update.html', context, context_instance=RequestContext(request))
|
[
"dirkcuys@gmail.com"
] |
dirkcuys@gmail.com
|
03fbeb1450ccc44bd26fc126ce64cfd378980fa0
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_117/1246.py
|
70e284873ba26f9f9d0eb663271fb9c4b2097cdc
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,615
|
py
|
__author__ = 'joranvar'
__problem__ = 'B'
class Field(object):
def __init__(self, data, height, width):
self.data = data
self.height = height
self.width = width
def is_cuttable(self):
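        # The lawn is cuttable iff no square is strictly lower than both its
        # row maximum and its column maximum.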
max_x = [max([self.data[y][x] for x in range(self.width)]) for y in range(self.height)]
max_y = [max([self.data[y][x] for y in range(self.height)]) for x in range(self.width)]
for x in range(self.width):
for y in range(self.height):
if self.data[y][x] < min(max_x[y], max_y[x]):
return False
return True
def read_field(f_in, width, height):
field_data = [[int(square) for square in f_in.readline().split()] for line in range(height)]
field = Field(field_data, height, width)
return field
def solve(case, f_in):
N, M = list(map(int, f_in.readline().split()))
field = read_field(f_in, M, N)
if field.is_cuttable(): return ['Case #{}: YES\n'.format(case + 1)]
return ['Case #{}: NO\n'.format(case + 1)]
def open_last_file():
for problem_type in ['-large', '-small-attempt1', '-sample']:
try:
return problem_type, open(__problem__ + problem_type + '.in', 'r')
except FileNotFoundError:
pass
raise FileNotFoundError("No input file found!")
if __name__ == '__main__':
problem_type, f_in = open_last_file()
print (problem_type)
f_out = open(__problem__ + problem_type + '.out', 'w')
T = int(f_in.readline())
for case in range(T):
f_out.writelines(solve(case, f_in))
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
7d0942492c486ab43f4c39a5adee4453c034f50e
|
c1fe97208afe479b7ae1ee67d69866a6911564ca
|
/AdvCBV/basicapp/admin.py
|
046528c9de692ae1d3f199606430ad8437e9c4a1
|
[] |
no_license
|
jaindhairyahere/Python_Django
|
a0a46c57b6ca60d0942ae181fe28ea56bb1ee948
|
f170a2e38b78df698a02821a454a3baea0c358a6
|
refs/heads/master
| 2020-06-18T09:17:56.364928
| 2019-11-02T18:34:12
| 2019-11-02T18:34:12
| 196,249,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 164
|
py
|
from django.contrib import admin
from basicapp.models import School, Student
# Register your models here.
admin.site.register(Student)
admin.site.register(School)
|
[
"jaindhairya2001@gmail.com"
] |
jaindhairya2001@gmail.com
|
151fc23e1533e76eb12ce1b8bb1392755667dbab
|
7f54637e347e5773dfbfded7b46b58b50544cfe5
|
/7-3/chainxy/spiders/tradesecretscanada.py
|
dc8b30b2b1997267ec5b41a42628814c788f3cc0
|
[] |
no_license
|
simba999/all-scrapy
|
5cc26fd92b1d03366b74d4fff58c4a0641c85609
|
d48aeb3c00fa2474153fbc8d131cf58402976e1d
|
refs/heads/master
| 2021-01-25T14:24:04.715550
| 2018-03-03T13:43:13
| 2018-03-03T13:43:13
| 123,695,640
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,369
|
py
|
import scrapy
import json
import os
from scrapy.spiders import Spider
from scrapy.http import FormRequest
from scrapy.http import Request
from chainxy.items import ChainItem
from lxml import etree
from selenium import webdriver
from lxml import html
import usaddress
import pdb
class tradesecretscanada(scrapy.Spider):
name = 'tradesecretscanada'
domain = ''
history = []
def start_requests(self):
init_url = 'http://talk.tradesecrets.ca/locations-reviews/'
yield scrapy.Request(url=init_url, callback=self.body)
def body(self, response):
print("========= Checking.......")
store_list = response.xpath('//a[@rel="noopener noreferrer"]/@href').extract()
for store in store_list:
yield scrapy.Request(url=store, callback=self.parse_page)
def parse_page(self, response):
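        # Pull the <h4> detail lines: phone (contains '(' and '-'), store hours
        # (contain ':'), and the first line split on commas for the address parts.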
try:
item = ChainItem()
detail = self.eliminate_space(response.xpath('//div[contains(@class, "fusion-one-half fusion-layout-column fusion-spacing-no")]//h4//text()').extract())
h_temp = ''
for de in detail:
if '(' in de and '-' in de:
try:
item['phone_number'] = self.validate('(' + de.split('(')[1])
except:
item['phone_number'] = self.validate(de)
if ':' in de:
h_temp += de + ', '
if '(' in detail[0]:
detail[0] = self.validate(detail[0].split('(')[0]).replace('|','')
addr = detail[0].replace('|','').split(',')
if len(addr) == 4:
item['address'] = self.validate(addr[1])
item['city'] = self.validate(addr[2])
item['state'] = self.validate(addr[3].strip())[:2].strip()
item['zip_code'] = self.validate(addr[3])[2:].strip()
elif len(addr) == 3:
item['address'] = self.validate(addr[0])
item['city'] = self.validate(addr[1])
item['state'] = self.validate(addr[2].strip())[:2].strip()
item['zip_code'] = self.validate(addr[2])[2:].strip()
else:
pdb.set_trace()
item['country'] = 'Canada'
item['store_hours'] = h_temp[:-2]
yield item
except:
pass
def validate(self, item):
try:
return item.encode('raw-unicode-escape').replace('\u2013', '').replace('\xa0', '').replace('|','').strip()
except:
return ''
def eliminate_space(self, items):
tmp = []
for item in items:
if self.validate(item) != '' and 'try' not in self.validate(item).lower() and 'http' not in self.validate(item).lower():
tmp.append(self.validate(item))
return tmp
|
[
"oliverking8985@yahoo.com"
] |
oliverking8985@yahoo.com
|
d4a5a2155aa71f6f81e1301fb6dea5d302b0742f
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_greens.py
|
ce3b23baa678edd94ee5bf830fa189133e5ffadb
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 231
|
py
|
from xai.brain.wordbase.nouns._green import _GREEN
#calss header
class _GREENS(_GREEN, ):
def __init__(self,):
_GREEN.__init__(self)
self.name = "GREENS"
self.specie = 'nouns'
self.basic = "green"
self.jsondata = {}
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
2370e7452bcc9e77a37e5853184a510e1184341d
|
d2c4934325f5ddd567963e7bd2bdc0673f92bc40
|
/tests/model_control/detailed/transf_Logit/model_control_one_enabled_Logit_MovingMedian_NoCycle_LSTM.py
|
52f0b5d941ab48694348dbd8ae8a86fd89845917
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
jmabry/pyaf
|
797acdd585842474ff4ae1d9db5606877252d9b8
|
afbc15a851a2445a7824bf255af612dc429265af
|
refs/heads/master
| 2020-03-20T02:14:12.597970
| 2018-12-17T22:08:11
| 2018-12-17T22:08:11
| 137,104,552
| 0
| 0
|
BSD-3-Clause
| 2018-12-17T22:08:12
| 2018-06-12T17:15:43
|
Python
|
UTF-8
|
Python
| false
| false
| 155
|
py
|
import pyaf.tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Logit'] , ['MovingMedian'] , ['NoCycle'] , ['LSTM'] );
|
[
"antoine.carme@laposte.net"
] |
antoine.carme@laposte.net
|
a33ea344425501fccf20a8502fc44380fce73c76
|
ebfcae1c5ba2997b2ac4471d5bedc3f5daffcb31
|
/dino-master/dino/rest/resources/send.py
|
a63978198fcff73e1c60cefb3ad6386d3ea9a807
|
[
"Apache-2.0"
] |
permissive
|
babiato/flaskapp1
|
84de2d0b26a54f5820d3bbe97926782ad41e005c
|
530beb9e3b8516e0e93960b99521c23a523ef546
|
refs/heads/master
| 2023-02-26T16:36:49.760632
| 2021-02-04T09:08:40
| 2021-02-04T09:08:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,723
|
py
|
import logging
import traceback
import eventlet
import sys
from dino import environ
from dino import utils
from dino.utils.decorators import timeit
from dino.db.manager import UserManager
from dino.rest.resources.base import BaseResource
from flask import request
logger = logging.getLogger(__name__)
def fail(error_message):
return {
'status': 'FAIL',
'message': error_message
}
class SendResource(BaseResource):
def __init__(self):
super(SendResource, self).__init__()
self.user_manager = UserManager(environ.env)
self.request = request
def async_post(self, json):
logger.debug('POST request: %s' % str(json))
if 'content' not in json:
raise RuntimeError('no key [content] in json message')
msg_content = json.get('content')
if msg_content is None or len(msg_content.strip()) == 0:
raise RuntimeError('content may not be blank')
if not utils.is_base64(msg_content):
raise RuntimeError('content in json message must be base64')
user_id = str(json.get('user_id', 0))
user_name = utils.b64d(json.get('user_name', utils.b64e('admin')))
object_type = json.get('object_type')
target_id = str(json.get('target_id'))
namespace = json.get('namespace', '/ws')
target_name = json.get('target_name')
data = utils.activity_for_message(user_id, user_name)
data['target'] = {
'objectType': object_type,
'id': target_id,
'displayName': target_name,
'url': namespace
}
data['object'] = {
'content': msg_content
}
if not environ.env.cache.user_is_in_multicast(target_id):
logger.info('user {} is offline, dropping message: {}'.format(target_id, str(json)))
return
try:
environ.env.out_of_scope_emit('message', data, room=target_id, json=True, namespace='/ws', broadcast=True)
except Exception as e:
logger.error('could not /send message to target {}: {}'.format(target_id, str(e)))
logger.exception(traceback.format_exc())
environ.env.capture_exception(sys.exc_info())
@timeit(logger, 'on_rest_send')
def do_post(self):
is_valid, msg, json = self.validate_json(self.request, silent=False)
if not is_valid:
logger.error('invalid json: %s' % msg)
raise RuntimeError('invalid json')
if json is None:
raise RuntimeError('no json in request')
if not isinstance(json, dict):
raise RuntimeError('need a dict')
eventlet.spawn_n(self.async_post, dict(json))
|
[
"jinxufang@tencent.com"
] |
jinxufang@tencent.com
|
c797e1ec5b3e5955a867418fed9a26431bd4212c
|
6fa701cdaa0d83caa0d3cbffe39b40e54bf3d386
|
/google/cloud/redis/v1/redis-v1-py/google/cloud/redis_v1/services/cloud_redis/pagers.py
|
ea1c2287e22e2c73eb752e030a4919c860621449
|
[
"Apache-2.0"
] |
permissive
|
oltoco/googleapis-gen
|
bf40cfad61b4217aca07068bd4922a86e3bbd2d5
|
00ca50bdde80906d6f62314ef4f7630b8cdb6e15
|
refs/heads/master
| 2023-07-17T22:11:47.848185
| 2021-08-29T20:39:47
| 2021-08-29T20:39:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,709
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional
from google.cloud.redis_v1.types import cloud_redis
class ListInstancesPager:
"""A pager for iterating through ``list_instances`` requests.
This class thinly wraps an initial
:class:`google.cloud.redis_v1.types.ListInstancesResponse` object, and
provides an ``__iter__`` method to iterate through its
``instances`` field.
If there are more pages, the ``__iter__`` method will make additional
``ListInstances`` requests and continue to iterate
through the ``instances`` field on the
corresponding responses.
All the usual :class:`google.cloud.redis_v1.types.ListInstancesResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(self,
method: Callable[..., cloud_redis.ListInstancesResponse],
request: cloud_redis.ListInstancesRequest,
response: cloud_redis.ListInstancesResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.redis_v1.types.ListInstancesRequest):
The initial request object.
response (google.cloud.redis_v1.types.ListInstancesResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = cloud_redis.ListInstancesRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterable[cloud_redis.ListInstancesResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterable[cloud_redis.Instance]:
for page in self.pages:
yield from page.instances
def __repr__(self) -> str:
return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
class ListInstancesAsyncPager:
"""A pager for iterating through ``list_instances`` requests.
This class thinly wraps an initial
:class:`google.cloud.redis_v1.types.ListInstancesResponse` object, and
provides an ``__aiter__`` method to iterate through its
``instances`` field.
If there are more pages, the ``__aiter__`` method will make additional
``ListInstances`` requests and continue to iterate
through the ``instances`` field on the
corresponding responses.
All the usual :class:`google.cloud.redis_v1.types.ListInstancesResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(self,
method: Callable[..., Awaitable[cloud_redis.ListInstancesResponse]],
request: cloud_redis.ListInstancesRequest,
response: cloud_redis.ListInstancesResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()):
"""Instantiates the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.redis_v1.types.ListInstancesRequest):
The initial request object.
response (google.cloud.redis_v1.types.ListInstancesResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = cloud_redis.ListInstancesRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
async def pages(self) -> AsyncIterable[cloud_redis.ListInstancesResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = await self._method(self._request, metadata=self._metadata)
yield self._response
def __aiter__(self) -> AsyncIterable[cloud_redis.Instance]:
async def async_generator():
async for page in self.pages:
for response in page.instances:
yield response
return async_generator()
def __repr__(self) -> str:
return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
|
[
"bazel-bot-development[bot]@users.noreply.github.com"
] |
bazel-bot-development[bot]@users.noreply.github.com
|
4a8ba21250def0e771eb0d8cfee9b9e5f35ef4b8
|
e87415a8507341d66991411c91e77ad38cda3df9
|
/templated_email/block_render.py
|
9ddf822ec66358a729ae9b8e2ad0a806ddf76d91
|
[
"MIT"
] |
permissive
|
somair/django-templated-email
|
6185abf24031a9813fc8b9d53faa8433f7bda0a6
|
b217a3e38d7af8b514d8f83568c1fd55efd1ac11
|
refs/heads/master
| 2021-01-19T14:13:10.500289
| 2017-01-13T13:06:06
| 2017-01-13T13:06:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,748
|
py
|
from django.template.loader_tags import BlockNode, ExtendsNode
from django.template import loader, Context, RequestContext
from django.http import HttpResponse
def _get_template(template):
if isinstance(template, (tuple, list)):
return loader.select_template(template)
return loader.get_template(template)
class BlockNotFound(Exception):
"""The requested block did not exist."""
pass
def render_template_block(template, block, context):
"""
Renders a single block from a template.
This template should have previously been rendered.
"""
template._render(context)
return _render_template_block_nodelist(template.nodelist, block, context)
def _render_template_block_nodelist(nodelist, block, context):
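    # Depth-first search for the named block in this nodelist (including
    # conditional branches); if it is not found, fall back to the parent
    # templates via ExtendsNode.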
for node in nodelist:
if isinstance(node, BlockNode) and node.name == block:
return node.render(context)
for key in ('nodelist', 'nodelist_true', 'nodelist_false'):
if hasattr(node, key):
try:
rendered = _render_template_block_nodelist(
getattr(node, key), block, context)
except:
pass
else:
return rendered
for node in nodelist:
if isinstance(node, ExtendsNode):
try:
rendered = render_template_block(
node.get_parent(context), block, context)
except BlockNotFound:
pass
else:
return rendered
raise BlockNotFound
def render_block_to_string(template_name, block, dictionary=None,
context_instance=None):
"""Return a string
Loads the given template_name and renders the given block with the
given dictionary as context.
"""
dictionary = dictionary or {}
t = _get_template(template_name)
if context_instance:
context_instance.update(dictionary)
else:
context_instance = Context(dictionary)
return render_template_block(t, block, context_instance)
def direct_block_to_template(request, template, block, extra_context=None,
mimetype=None, **kwargs):
"""
Render a given block in a given template with any extra URL
parameters in the context as ``{{ params }}``.
"""
if extra_context is None:
extra_context = {}
dictionary = {'params': kwargs}
for key, value in extra_context.items():
if callable(value):
dictionary[key] = value()
else:
dictionary[key] = value
c = RequestContext(request, dictionary)
t = _get_template(template)
t.render(c)
return HttpResponse(render_template_block(t, block, c), mimetype=mimetype)
|
[
"alex.hayes@roi.com.au"
] |
alex.hayes@roi.com.au
|
d4ef7df593f1fbf7027fa866174ceb80592f6f0c
|
bb33e6be8316f35decbb2b81badf2b6dcf7df515
|
/source/res/scripts/client/gui/battle_control/controllers/quest_progress/__init__.py
|
f9b0128616646671d06aafd2df3f29f0785e39a0
|
[] |
no_license
|
StranikS-Scan/WorldOfTanks-Decompiled
|
999c9567de38c32c760ab72c21c00ea7bc20990c
|
d2fe9c195825ececc728e87a02983908b7ea9199
|
refs/heads/1.18
| 2023-08-25T17:39:27.718097
| 2022-09-22T06:49:44
| 2022-09-22T06:49:44
| 148,696,315
| 103
| 39
| null | 2022-09-14T17:50:03
| 2018-09-13T20:49:11
|
Python
|
UTF-8
|
Python
| false
| false
| 151
|
py
|
# Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/gui/battle_control/controllers/quest_progress/__init__.py
pass
|
[
"StranikS_Scan@mail.ru"
] |
StranikS_Scan@mail.ru
|
aa6a81ca2a68d3bbe0fcd037c5db7068f2adb766
|
dd44e145ac547209f5f209bc9b1f09189bb8b5c7
|
/Python-Advanced-2021/03.Multidimensional-lists-L/02.Sum-of-matrix-columns.py
|
719862b6c9646cc99f509dcebd28edadbfe2e5d6
|
[] |
no_license
|
todorovventsi/Software-Engineering
|
e3c1be8f0f72c85619518bb914d2a4dbaac270f8
|
64ffa6c80b190e7c6f340aaf219986f769f175ab
|
refs/heads/master
| 2023-07-09T05:35:14.522958
| 2021-08-15T14:35:55
| 2021-08-15T14:35:55
| 336,056,643
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 254
|
py
|
rows, columns = map(int, input().split(", "))
matrix = [[int(i) for i in input().split()] for _ in range(rows)]
for column in range(columns):
column_sum = 0
for row in range(rows):
column_sum += matrix[row][column]
print(column_sum)
|
[
"todorov.ventsi@gmail.com"
] |
todorov.ventsi@gmail.com
|
1cf47e979c62abe7878aec58e70e8bf82cace12f
|
3cfc6d23f37e45b8fd8b3810aa56eee21a493a01
|
/custom/plugins/RBKeyshot/KeyShot_RenderScript.py
|
1b2b4b7b7cbcf42f8fc4921ae87894b943238807
|
[] |
no_license
|
joinmm/Deadline_Development
|
eb72f13e1beffac2dd55b3d0eb69d56b98110a86
|
90b1031ffa27177c2b7b93ac4fa59fca0f79e227
|
refs/heads/master
| 2023-03-17T22:56:53.716116
| 2019-08-30T03:18:33
| 2019-08-30T03:18:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,952
|
py
|
import os
import time
import shutil
HOME_PATH = os.path.join(os.environ["HOMEPATH"], "Desktop", "Temp")
SCENE_FILE_PATH = "A:/RenderShot_Dir/Files/ctCmh6931TKgvV2/made_to_travel_black_rev4_92630339406526/made_to_travel_black_rev4.bip"
NEW_SCENE_FILE_NAME = os.path.basename(SCENE_FILE_PATH)
NEW_TEMP_SCENE_FILE_NAME = ""
def valid_temp_folder():
if os.path.exists(HOME_PATH):
print("Temp folder has already been created.")
return True
else:
try:
os.makedirs(HOME_PATH)
print("Temp folder created successfully.")
return True
except:
print("Temp folder could not be created.")
return False
def dir_update_check(NETWORK_FILE_DIR, DESTINATION_PATH):
NETWORK_FILE_DIR_LIST = os.listdir(NETWORK_FILE_DIR)
DESTINATION_PATH_LIST = os.listdir(DESTINATION_PATH)
    if len(NETWORK_FILE_DIR_LIST) == len(DESTINATION_PATH_LIST) or len(NETWORK_FILE_DIR_LIST) < len(DESTINATION_PATH_LIST):
print("No directory update required.")
return True
else:
print("Directory update required.")
return False
def file_transfer(SCENE_FILE_PATH):
NETWORK_FILE_DIR = os.path.dirname(SCENE_FILE_PATH)
NETWORK_DIR_NAME = os.path.basename(NETWORK_FILE_DIR)
DESTINATION_PATH = os.path.join(os.environ["HOMEPATH"], "Desktop", "Temp", NETWORK_DIR_NAME)
NEW_SCENE_PATH = os.path.join(DESTINATION_PATH, os.path.basename(SCENE_FILE_PATH))
    if os.path.exists(DESTINATION_PATH) and dir_update_check(NETWORK_FILE_DIR, DESTINATION_PATH):
print("Render folder has already been transferred , returning immediately .")
return NEW_SCENE_PATH
elif os.path.exists(DESTINATION_PATH) and not dir_update_check(NETWORK_FILE_DIR, DESTINATION_PATH):
shutil.rmtree(DESTINATION_PATH)
print("Render folder has been removed.")
if valid_temp_folder() :
try:
shutil.copytree(NETWORK_FILE_DIR, DESTINATION_PATH)
print("Render folder transferred successfully.")
except:
print("Render folder could not be transferred.")
else:
print("File transfer failed")
return NEW_SCENE_PATH
def main(scene_file_path):
lux.openFile(scene_file_path)
lux.setCamera("Camera 2")
lux.setAnimationFrame( 0 )
lux.pause
lux.setAnimationFrame( 0 )
lux.unpause
lux.setAnimationFrame( 0 )
lux.saveFile( "A:/RenderShot_Dir/Files/ctCmh6931TKgvV2/made_to_travel_black_rev4_92630339406526/made_to_travel_black_rev4_1561004076_Camera 2_0_.bip")
lux.openFile( "A:/RenderShot_Dir/Files/ctCmh6931TKgvV2/made_to_travel_black_rev4_92630339406526/made_to_travel_black_rev4_1561004076_Camera 2_0_.bip")
path = "A:/Test_Output/made_to_travel_black_rev4_1560962403_%d.tif"
width = 1920
height = 1080
opts = lux.getRenderOptions()
opts.setAddToQueue(False)
opts.setOutputRenderLayers(False)
opts.setOutputAlphaChannel(False)
try:
opts.setOutputDiffusePass(False)
except AttributeError:
print( "Failed to set render pass: output_diffuse_pass" )
try:
opts.setOutputReflectionPass(False)
except AttributeError:
print( "Failed to set render pass: output_reflection_pass" )
try:
opts.setOutputClownPass(False)
except AttributeError:
print( "Failed to set render pass: output_clown_pass" )
try:
opts.setOutputDirectLightingPass(False)
except AttributeError:
print( "Failed to set render pass: output_direct_lighting_pass" )
try:
opts.setOutputRefractionPass(False)
except AttributeError:
print( "Failed to set render pass: output_refraction_pass" )
try:
opts.setOutputDepthPass(False)
except AttributeError:
print( "Failed to set render pass: output_depth_pass" )
try:
opts.setOutputIndirectLightingPass(False)
except AttributeError:
print( "Failed to set render pass: output_indirect_lighting_pass" )
try:
opts.setOutputShadowPass(False)
except AttributeError:
print( "Failed to set render pass: output_indirect_lighting_pass" )
try:
opts.setOutputNormalsPass(False)
except AttributeError:
print( "Failed to set render pass: output_normals_pass" )
try:
opts.setOutputCausticsPass(False)
except AttributeError:
print( "Failed to set render pass: output_caustics_pass" )
try:
opts.setOutputShadowPass(False)
except AttributeError:
print( "Failed to set render pass: output_shadow_pass" )
try:
opts.setOutputAmbientOcclusionPass(False)
except AttributeError:
print( "Failed to set render pass: output_ambient_occlusion_pass" )
try:
opts.setAdvancedRendering( 38 )
except AttributeError:
print( "Failed to set render option: advanced_samples" )
try:
opts.setGlobalIllumination( 1.0 )
except AttributeError:
print( "Failed to set render option: engine_global_illumination" )
try:
opts.setRayBounces( 14 )
except AttributeError:
print( "Failed to set render option: engine_ray_bounces" )
try:
opts.setPixelBlur( 1.5 )
except AttributeError:
print( "Failed to set render option: engine_pixel_blur" )
try:
opts.setAntiAliasing( 3 )
except AttributeError:
print( "Failed to set render option: engine_anti_aliasing" )
try:
opts.setDofQuality( 3 )
except AttributeError:
print( "Failed to set render option: engine_dof_quality" )
try:
opts.setShadowQuality( 4.47200012207 )
except AttributeError:
print( "Failed to set render option: engine_shadow_quality" )
try:
opts.setCausticsQuality( 0.0 )
except AttributeError:
print( "Failed to set render option: engine_caustics_quality" )
try:
opts.setSharpShadows( True )
except AttributeError:
print( "Failed to set render option: engine_sharp_shadows" )
try:
opts.setSharperTextureFiltering( True )
except AttributeError:
print( "Failed to set render option: engine_sharper_texture_filtering" )
try:
opts.setGlobalIlluminationCache( True )
except AttributeError:
print( "Failed to set render option: engine_global_illumination_cache" )
for frame in range( 0, 1 ):
renderPath = path
renderPath = renderPath.replace( "%d", str(frame) )
lux.setAnimationFrame( frame )
lux.renderImage(path = renderPath, width = width, height = height, opts = opts)
print("Rendered Image: "+renderPath)
os.remove( "A:/RenderShot_Dir/Files/ctCmh6931TKgvV2/made_to_travel_black_rev4_92630339406526/made_to_travel_black_rev4_1561004076_Camera 2_0_.bip")
print ('Job Completed')
exit()
GET_NEW_FILE_PATH = file_transfer(SCENE_FILE_PATH)
if GET_NEW_FILE_PATH:
main(GET_NEW_FILE_PATH)
else:
main(SCENE_FILE_PATH)
|
[
"hamedhematyar91@gmail.com"
] |
hamedhematyar91@gmail.com
|
1bbcc01ac088646277008e1eb2cd085872555dbc
|
8da91c26d423bacbeee1163ac7e969904c7e4338
|
/pyvisdk/do/map.py
|
6cd8f87633a30e6210e2784a05d6e7d2c56ec9bd
|
[] |
no_license
|
pexip/os-python-infi-pyvisdk
|
5d8f3a3858cdd61fb76485574e74ae525cdc7e25
|
1aadea0afbc306d09f6ecb9af0e683dbbf961d20
|
refs/heads/master
| 2023-08-28T02:40:28.789786
| 2020-07-16T04:00:53
| 2020-07-16T04:00:53
| 10,032,240
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,019
|
py
|
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def Map(vim, *args, **kwargs):
'''Topological representation of entity relationships as a set of nodes and edges.'''
obj = vim.client.factory.create('{urn:sms}Map')
# do some validation checking...
if (len(args) + len(kwargs)) < 0:
raise IndexError('Expected at least 1 arguments got: %d' % len(args))
required = [ ]
optional = [ 'edge', 'lastUpdateTime', 'node', 'dynamicProperty', 'dynamicType' ]
for name, arg in zip(required+optional, args):
setattr(obj, name, arg)
for name, value in kwargs.items():
if name in required + optional:
setattr(obj, name, value)
else:
raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))
return obj
|
[
"jmb@pexip.com"
] |
jmb@pexip.com
|
b703d23d4eb23bc86961a3a4aeb666dabf0dda73
|
6f594cc963795c69d8da3c30ca580c0405ef2d6e
|
/bitwise/476NumberComplement/0.py
|
33f4c15e585b8d532a3126140c9cbb3e777b3817
|
[] |
no_license
|
lo-tp/leetcode
|
25933c5b25f64f881d43748d8b2763f69614a97f
|
4cc4d76c64e9d9aa3f53c5e9574e488c93e10a50
|
refs/heads/master
| 2022-09-07T20:32:58.487759
| 2022-09-05T03:39:50
| 2022-09-07T13:39:50
| 116,555,892
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 819
|
py
|
def helper(k):
    if k == 0:
return 1
else:
return 0
class Solution(object):
def findComplement(self, num):
"""
:type num: int
:rtype: int
"""
binaryForm = []
tem = num
while tem:
binaryForm.append(tem%2)
tem = tem >> 1
binaryForm.reverse()
complement=map(helper, binaryForm)
try:
index=complement.index(1)
complement=complement[index:]
complement.reverse()
ratio=1
sum=0
for i in complement:
sum+=i*ratio
ratio*=2
return sum
except ValueError:
return 0
soluction = Solution()
print soluction.findComplement(5)
print soluction.findComplement(1)
|
[
"regesteraccount@hotmail.com"
] |
regesteraccount@hotmail.com
|
41bc879377fb025f109b4ead056627f4d30424db
|
799d8f9024926bb69a0226110740a56bf30929e3
|
/SoftuniAdvanced/ADVANCED/stacks_and_queues/crossroads.py
|
bacd369a82728fa8c60e20e0b88a0d8917517af0
|
[] |
no_license
|
velinovasen/python-adv-oop
|
a849cdff92793b45c6cca3279f1db853125b6ec8
|
1e3d7c194c2e8e24e4d7b07969db86e9973890cb
|
refs/heads/main
| 2023-01-01T11:16:55.572778
| 2020-10-25T18:06:34
| 2020-10-25T18:06:34
| 307,159,270
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 423
|
py
|
from collections import deque
green_light_time = int(input())
free_window = int(input())
total_time = green_light_time + free_window
crossroad = deque([])
car_inside = deque([])
cars_passed = 0
while True:
command = input()
if command == 'END':
break
    elif command == 'green':
        # Let queued cars through while green-light seconds remain
        # (assumption: each car takes one second per character to pass).
        seconds_left = green_light_time
        while seconds_left > 0 and crossroad:
            car_inside = crossroad.popleft()
            seconds_left -= len(car_inside)
            cars_passed += 1
else:
crossroad.append(command)
|
[
"velinovasen@users.noreply.github.com"
] |
velinovasen@users.noreply.github.com
|
1f01924e59a9a35f46bb3ddaa5e7f3a0b028cb8f
|
9d67cd5f8d3e0ffdd4334a6b9b67c93f8deca100
|
/dqn_new/configs/target7.py
|
70d57a14af0c64a3a6b36deb10a442f6035c220c
|
[] |
no_license
|
SiyuanLee/caps
|
0c300a8e5a9a661eca4b2f59cd38125ddc35b6d3
|
476802e18ca1c7c88f1e29ed66a90c350aa50c1f
|
refs/heads/master
| 2021-06-20T22:48:16.230354
| 2021-02-22T13:21:57
| 2021-02-22T13:21:57
| 188,695,489
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,819
|
py
|
"""
This is the example config file
"""
import numpy as np
# More one-char representation will be added in order to support
# other objects.
# The following a=10 is an example although it does not work now
# as I have not included a '10' object yet.
a = 10
# This is the map array that represents the map
# You have to fill the array into a (m x n) matrix with all elements
# not None. A strange shape of the array may cause malfunction.
# Currently available object indices are # they can fill more than one element in the array.
# 0: nothing
# 1: wall
# 2: ladder
# 3: coin
# 4: spike
# 5: triangle -------source
# 6: square ------ source
# 7: coin -------- target
# 8: princess -------source
# 9: player # elements(possibly more than 1) filled will be selected randomly to place the player
# unsupported indices will work as 0: nothing
map_array = [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 5, 0, 0, 0, 1, 0, 0, 0, 0, 1],
[1, 9, 9, 9, 9, 1, 9, 9, 9, 8, 1],
[1, 1, 1, 2, 1, 1, 1, 2, 1, 1, 1],
[1, 0, 0, 2, 0, 0, 0, 2, 0, 7, 1],
[1, 9, 9, 2, 9, 9, 9, 2, 9, 9, 1],
[1, 1, 2, 1, 1, 1, 2, 1, 1, 1, 1],
[1, 0, 2, 0, 1, 0, 2, 0, 0, 0, 1],
[1, 0, 2, 0, 1, 0, 2, 0, 6, 0, 1],
[1, 9, 9, 9, 1, 9, 9, 9, 9, 9, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
# set to true -> win when touching the object
# 0, 1, 2, 3, 4, 9 are not possible
end_game = {
7: True,
}
rewards = {
"positive": 0, # when collecting a coin
"win": 1, # endgame (win)
"negative": -25, # endgame (die)
"tick": 0 # living
}
######### dqn only ##########
# ensure correct import
import os
import sys
__file_path = os.path.abspath(__file__)
__dqn_dir = '/'.join(str.split(__file_path, '/')[:-2]) + '/'
sys.path.append(__dqn_dir)
__cur_dir = '/'.join(str.split(__file_path, '/')[:-1]) + '/'
from dqn_utils import PiecewiseSchedule
# load the random sampled obs
# import pickle
# pkl_file = __cur_dir + 'eval_obs_array_random.pkl'
# with open(pkl_file, 'rb') as f:
# eval_obs_array = pickle.loads(f.read())
def seed_func():
return np.random.randint(0, 1000)
num_timesteps = 2.5e7
learning_freq = 4
# training iterations to go
num_iter = num_timesteps / learning_freq
# piecewise learning rate
lr_multiplier = 1.0
learning_rate = PiecewiseSchedule([
(0, 2e-4 * lr_multiplier),
(num_iter / 2, 1e-4 * lr_multiplier),
(num_iter * 3 / 4, 5e-5 * lr_multiplier),
], outside_value=5e-5 * lr_multiplier)
# piecewise learning rate
exploration = PiecewiseSchedule([
(0, 1.0),
(num_iter / 2, 0.7),
(num_iter * 3 / 4, 0.1),
(num_iter * 7 / 8, 0.05),
], outside_value=0.05)
dqn_config = {
'seed': seed_func, # will override game settings
'num_timesteps': num_timesteps,
'replay_buffer_size': 1000000,
'batch_size': 32,
'gamma': 0.99,
'learning_starts': 8e5,
'learning_freq': learning_freq,
'frame_history_len': 4,
'target_update_freq': 10000,
'grad_norm_clipping': 10,
'learning_rate': learning_rate,
'exploration': exploration,
# 'eval_obs_array': eval_obs_array,
'room_q_interval': 1e4, # q_vals will be evaluated every room_q_interval steps
'epoch_size': 5e4, # you decide any way
'config_name': str.split(__file_path, '/')[-1].replace('.py', '') # the config file name
}
map_config = {
'map_array': map_array,
'rewards': rewards,
'end_game': end_game,
'init_score': 0,
'init_lives': 1, # please don't change, not going to work
# configs for dqn
'dqn_config': dqn_config,
# work automatically only for aigym wrapped version
'fps': 1000,
'frame_skip': 1,
'force_fps': True, # set to true to make the game run as fast as possible
'display_screen': False,
'episode_length': 1200,
'episode_end_sleep': 0., # sec
}
|
[
"lisiyuan@bupt.edu.cn"
] |
lisiyuan@bupt.edu.cn
|
88e75c46abb9494b3a6c173c9d4edbb771ad30b3
|
83951f7fd0bbaba9675bdf9ba6980504213bc1c6
|
/skim/crab/skim_QCD_Pt-15to7000_Flat2017_cfg.py
|
f4567da99bb4f470b3019a97ec8411522789b737
|
[] |
no_license
|
DryRun/DijetSkimmer
|
6db71583b969ecc64841da26107f43c4c734ca43
|
ead65f8e2a5d11f99f3e1a60a1d2f9a163e68491
|
refs/heads/main
| 2021-07-22T19:41:09.096943
| 2021-07-14T13:01:00
| 2021-07-14T13:01:00
| 171,485,404
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,340
|
py
|
import os
from WMCore.Configuration import Configuration
from CRABClient.UserUtilities import config, getUsernameFromSiteDB
config = Configuration()
job_name = "DijetSkim_QCD_Pt-15to7000_Flat2017_1_0_1"
config.section_("General")
config.General.requestName = job_name
config.General.transferLogs = False
config.section_("JobType")
config.JobType.pluginName = 'Analysis'
# Setup the custom executable
config.JobType.psetName = os.path.expandvars('$CMSSW_BASE/src/PhysicsTools/DijetSkimmer/skim/PSet.py') # CRAB modifies this file to contain the input files and lumis
config.JobType.scriptExe = os.path.expandvars('$CMSSW_BASE/src/PhysicsTools/DijetSkimmer/skim/crab_shell.sh') # CRAB then calls scriptExe jobId <scriptArgs>
config.JobType.scriptArgs = ["--source=mc", "--year=2017"]
config.JobType.inputFiles = [
os.path.expandvars('$CMSSW_BASE/src/PhysicsTools/DijetSkimmer/skim/crab_meat.py'),
os.path.expandvars('$CMSSW_BASE/src/PhysicsTools/NanoAODTools/scripts/haddnano.py'), #hadd nano will not be needed once nano tools are in cmssw
os.path.expandvars('$CMSSW_BASE/src/PhysicsTools/DijetSkimmer/skim/skim_branches_data.txt'),
os.path.expandvars('$CMSSW_BASE/src/PhysicsTools/DijetSkimmer/skim/skim_branches_mc.txt'),
os.path.expandvars('$CMSSW_BASE/src/PhysicsTools/DijetSkimmer/skim/skim_branches.txt'),
#os.path.expandvars('$CMSSW_BASE/src/PhysicsTools/DijetSkimmer/skim/FrameworkJobReport.xml'),
]
config.JobType.outputFiles = ["nanoskim.root", "hists.root"]
config.JobType.sendPythonFolder = True
config.JobType.allowUndistributedCMSSW = True
config.section_("Data")
#config.Data.inputDataset = '/JetHT/Run2018C-Nano14Dec2018-v1/NANOAOD'
#config.Data.inputDBS = 'phys03'
config.Data.inputDBS = 'global'
config.Data.splitting = 'FileBased'
#config.Data.splitting = 'EventAwareLumiBased'
config.Data.unitsPerJob = 4
#config.Data.totalUnits = 10
config.JobType.allowUndistributedCMSSW = True
config.Data.outLFNDirBase = '/store/user/{}/{}'.format(getUsernameFromSiteDB(), job_name)
config.Data.publication = False
config.Data.outputDatasetTag = job_name
#config.Data.ignoreLocality = True
config.section_("Site")
config.Site.storageSite = "T3_US_Brown"
config.Data.inputDataset = '/QCD_Pt-15to7000_TuneCP5_Flat2017_13TeV_pythia8/RunIIFall17NanoAODv4-PU2017_12Apr2018_Nano14Dec2018_102X_mc2017_realistic_v6-v1/NANOAODSIM'
|
[
"david.renhwa.yu@gmail.com"
] |
david.renhwa.yu@gmail.com
|
3f008a682cd719d81b222f36983c87310b67f103
|
523f8f5febbbfeb6d42183f2bbeebc36f98eadb5
|
/402.py
|
631b928370b0e9eabec5dcf010eca20cf6babf83
|
[] |
no_license
|
saleed/LeetCode
|
655f82fdfcc3000400f49388e97fc0560f356af0
|
48b43999fb7e2ed82d922e1f64ac76f8fabe4baa
|
refs/heads/master
| 2022-06-15T21:54:56.223204
| 2022-05-09T14:05:50
| 2022-05-09T14:05:50
| 209,430,056
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 751
|
py
|
class Solution(object):
def removeKdigits(self, num, k):
"""
:type num: str
:type k: int
:rtype: str
"""
dp=["" for _ in range(k+1) ]
for i in range(len(num)):
dp[i][0]=num[:i+1]
for j in range(1,k+1):
dp[0][j]=""
for i in range(1,len(num)):
for j in range(1,k+1)[::-1]:
dp[i][j]=min(dp[i-1][j-1],dp[i-1][j]+num[i])
# print(dp)
res=dp[len(num) - 1][k].lstrip('0')
if res=="":
return '0'
else:
return res
a=Solution()
num = "1432219"
k = 3
print(a.removeKdigits(num,k))
num = "10200"
k=1
print(a.removeKdigits(num,k))
test='00002000'
print(test.lstrip('0'))
|
[
"1533441387@qq.com"
] |
1533441387@qq.com
|
11a3b54a12af9a6d287edfead2ec004be81b18c7
|
5be992e6ac6bae2ebf938005d1cae93777825087
|
/space/research/genelab.py
|
34513f8b9468f68b837529823a4942d5eab865ce
|
[] |
no_license
|
a1aiintel/SpaceIsCool
|
0c88acaa966c85e31d73da8319966c218447158f
|
939641dbe626a2cbb9fcec845c18bfb3371118ad
|
refs/heads/master
| 2020-07-30T04:54:14.577501
| 2019-01-10T17:57:52
| 2019-01-10T17:57:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,083
|
py
|
import requests
from space import NASA_KEY
def search_genelab(query, type):
"""
    GeneLab provides a RESTful Application Programming Interface (API) to its full-text search capability,
which provides the same functionality available through the GeneLab public data repository website.
The API provides a choice of standardized web output formats, such as JavaScript Object Notation (JSON)
    or Hyper Text Markup Language (HTML), of the search results. The GeneLab Search API can also
federate with other heterogeneous external bioinformatics databases, such as the
National Institutes of Health (NIH) / National Center for Biotechnology Information's (NCBI)
Gene Expression Omnibus (GEO); the European Bioinformatics Institute's (EBI)
Proteomics Identification (PRIDE); the Argonne National Laboratory's (ANL)
Metagenomics Rapid Annotations using Subsystems Technology (MG-RAST).
:param query:
:return:
"""
url = "https://genelab-data.ndc.nasa.gov/genelab/data/search_exoplanet?term=mouse%20liver&type=cgene"
|
[
"jarbasai@mailfence.com"
] |
jarbasai@mailfence.com
|
25328fb0492fe750697b3767b53d440d4e3da0b8
|
e0df2bc703d0d02423ea68cf0b8c8f8d22d5c163
|
/ScientificComputing/ch14/filter_firdesign_sinc1.py
|
cfb39fc541dac9e8bb9246523bf73a615acecbeb
|
[] |
no_license
|
socrates77-sh/learn
|
a5d459cb9847ba3b1bc4f9284ce35d4207d8aa8b
|
ae50978023f6b098b168b8cca82fba263af444aa
|
refs/heads/master
| 2022-12-16T16:53:50.231577
| 2019-07-13T13:52:42
| 2019-07-13T13:52:42
| 168,442,963
| 0
| 0
| null | 2022-12-08T05:18:37
| 2019-01-31T01:30:06
|
HTML
|
UTF-8
|
Python
| false
| false
| 363
|
py
|
# -*- coding: utf-8 -*-
import scipy.signal as signal
import numpy as np
import pylab as pl
def h_ideal(n, fc):
return 2*fc*np.sinc(2*fc*np.arange(0, n, 1.0))
b = h_ideal(30, 0.25)
w, h = signal.freqz(b, 1)
pl.figure(figsize=(8, 4))
pl.plot(w/2/np.pi, 20*np.log10(np.abs(h)))
pl.xlabel(u"正规化频率 周期/取样")
pl.ylabel(u"幅值(dB)")
pl.show()
|
[
"zhwenrong@sina.com"
] |
zhwenrong@sina.com
|
172d528877e46d3a15c44ea0bd68dd96091dec79
|
77676610410e479a3214669b082b5f410b499e24
|
/apps/main/migrations/0010_auto_20170424_0645.py
|
cfeb0350a6e5aedc05e7e5c8f745933e2474e75b
|
[
"Apache-2.0"
] |
permissive
|
StepicOrg/stepik-extensions
|
e76b2ee033275b33bf9d8c8deeac495d3a6bde46
|
5825bc9b2444ad4690681964d1bed172706f8796
|
refs/heads/develop
| 2023-04-05T12:43:28.114500
| 2021-04-19T12:57:30
| 2021-04-19T12:57:30
| 82,687,804
| 5
| 2
|
Apache-2.0
| 2021-04-19T12:58:47
| 2017-02-21T14:17:00
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 653
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-04-24 06:45
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('main', '0009_auto_20170422_2002'),
]
operations = [
migrations.RemoveField(
model_name='extension',
name='categories',
),
migrations.RemoveField(
model_name='extension',
name='user_groups',
),
migrations.DeleteModel(
name='Category',
),
migrations.DeleteModel(
name='Extension',
),
]
|
[
"meanmail@mail.ru"
] |
meanmail@mail.ru
|
b01cb42df40d9efc85d03a815e799ee14b6e8fd8
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03339/s273941488.py
|
c82cd4ca992be5faaa424d10d255497c4a9fd014
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 222
|
py
|
n = int(input())
s = [(i == "W")*1 for i in list(input())]
c = [0]*(n+1)
for i in range(n):
c[i+1] = c[i] + s[i]
ans = float("inf")
for i in range(n):
t = c[i] + (n-i-1-c[-1]+c[i+1])
ans = min(ans,t)
print(ans)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
297b49422f62295813f98787154517148273d665
|
a59deecc5d91214601c38bd170605d9d080e06d2
|
/14-dictionaries/08-copy()/app.py
|
2a626c1bb68207e6df9b951c1b8fd7d46c37c8b5
|
[] |
no_license
|
reyeskevin9767/modern-python-bootcamp-2018
|
a6a3abdb911716d19f6ab516835ed1a04919a13d
|
d0234f10c4b8aaa6a20555348aec7e3571e3d4e7
|
refs/heads/master
| 2022-12-03T18:48:50.035054
| 2020-08-09T03:00:55
| 2020-08-09T03:00:55
| 286,109,881
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 214
|
py
|
# * Copy Method
d = dict(a=1, b=2, c=3)
c = d.copy()
print(c) # {'a': 1, 'b': 2, 'c': 3}
print(c is d) # False
e = dict(a=6, b=7, c=8)
f = e.copy()
print(e)  # {'a': 6, 'b': 7, 'c': 8}
print(e is f) # False
|
[
"reyeskevin9767@gmail.com"
] |
reyeskevin9767@gmail.com
|
0826bb49bda6584cc57d9ea1205a457341b5e9ac
|
4e3c976773526fd610d64ffb83589bccfaee5e68
|
/sponge-integration-tests/examples/core/filters_event_pattern.py
|
32eae8faeab3bf1d4d3fa3664b9a44fc5a0f1edc
|
[
"Apache-2.0"
] |
permissive
|
softelnet/sponge
|
2313d2328953fcff49a002e727bb803757870627
|
7190f23ae888bbef49d0fbb85157444d6ea48bcd
|
refs/heads/master
| 2022-10-28T16:19:55.619882
| 2021-09-16T19:50:08
| 2021-09-16T19:50:08
| 95,256,030
| 10
| 2
|
Apache-2.0
| 2022-10-04T23:55:09
| 2017-06-23T20:58:49
|
Java
|
UTF-8
|
Python
| false
| false
| 1,408
|
py
|
"""
Sponge Knowledge Base
Filters - Event pattern
"""
from java.util.concurrent.atomic import AtomicInteger
def onInit():
# Variables for assertions only
sponge.setVariable("nameCount", AtomicInteger(0))
sponge.setVariable("patternCount", AtomicInteger(0))
sponge.setVariable("acceptedCount", AtomicInteger(0))
sponge.setVariable("notAcceptedCount", AtomicInteger(0))
class NameFilter(Filter):
def onConfigure(self):
self.withEvent("a1")
def onAccept(self, event):
sponge.getVariable("nameCount").incrementAndGet()
return True
class PatternFilter(Filter):
def onConfigure(self):
self.withEvent("a.+")
def onAccept(self, event):
sponge.getVariable("patternCount").incrementAndGet()
return False
class AcceptedTrigger(Trigger):
def onConfigure(self):
self.withEvent(".+")
def onRun(self, event):
self.logger.info("accepted {}", event.name)
if event.name != EventName.STARTUP:
sponge.getVariable("acceptedCount").incrementAndGet()
class NotAcceptedTrigger(Trigger):
def onConfigure(self):
self.withEvent("a.+")
def onRun(self, event):
sponge.getVariable("notAcceptedCount").incrementAndGet()
def onStartup():
for name in ["a1", "b1", "a2", "b2", "a", "b", "a1", "b2"]:
sponge.event(name).send()
|
[
"marcin.pas@softelnet.com"
] |
marcin.pas@softelnet.com
|
bd7d1491e809be7611d09d0d0e8578f497fb3520
|
e811da3715d43e23a4548490aa27be40ac21d6e4
|
/handlers/base/__init__.py
|
8f1904288c671963f969ea59e55106edced6d3da
|
[] |
no_license
|
atiger808/tornado
|
2a2ff73957d6fb97cd91222038f499ee8ed325f5
|
77e981ee70a7c7b3903bec82d91109f163bb2a43
|
refs/heads/master
| 2020-04-04T09:22:07.007710
| 2018-11-02T05:04:00
| 2018-11-02T05:04:00
| 155,815,465
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 149
|
py
|
# _*_ coding: utf-8 _*_
# @Time : 2018/6/26 22:52
# @Author : Ole211
# @Site :
# @File : __init__.py.py
# @Software : PyCharm
|
[
"atiger0614@163.com"
] |
atiger0614@163.com
|
a9b098aaf599f218d0e3b35cae1d246bcbeb2c50
|
a66b69c3f9da9779ae80f347b61f47e3bc5ba145
|
/day1002/A04_loop.py
|
311112630c8c83899668600713293b1a7f31e1f9
|
[] |
no_license
|
kyungtae92/python-basic
|
c841d9c9c6196b01da3de007c1298fe2c4b8f693
|
80a2051e37b6e87c9dbfd332c4b2946089ff0d5c
|
refs/heads/master
| 2020-11-25T08:01:22.156661
| 2019-12-17T08:25:38
| 2019-12-17T08:25:38
| 228,567,120
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 352
|
py
|
import os  # Python pulls in some of the operating system's functionality (commands)
while (True):
dan = input('input gugudan >> ')
if dan.isalpha() == True or dan == '':
os.system('cls')
else:
break
dan = int(dan)
i = 0
for i in range(1, 10): # for i in range(1, 10, 1):
print("%d * %d = %2d" % (dan, i, dan * i))
|
[
"noreply@github.com"
] |
kyungtae92.noreply@github.com
|
d5d4dc11f80514143b96cfebbcab39e53506dd9b
|
7f9811857538858ea5c6baaefdccf424c2dea3c2
|
/INTRODUCTION_TO_DS/chapter5_search/linear_search.py
|
b3c44483d7fd39c6fc66b263858905c46d9c2969
|
[] |
no_license
|
owari-taro/python_algorithm
|
ec4d0c737eefdb4f5ddc140c4dfe81fcfb2ee5af
|
5af19f7dabe6224f0d06b7c89f38c528a08cf903
|
refs/heads/master
| 2021-11-23T07:23:08.958737
| 2021-08-31T00:56:07
| 2021-08-31T00:56:07
| 231,067,479
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 254
|
py
|
from typing import List
def binary_search(a: List, x, lo=0, hi=None):
    """Return the insertion point for x in the sorted list a (bisect_right style)."""
    if lo < 0:
        raise ValueError()
    if hi is None:
        hi = len(a)
    while lo < hi:
        mid = (hi + lo) // 2
        if x < a[mid]:
            hi = mid
        else:
            lo = mid + 1
    return lo
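# Illustrative check (not part of the original file; assumes the bisect_right-style
# completion above): binary_search([1, 3, 5, 7], 5) returns 3, the insertion point
# just to the right of the matching element.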
|
[
"taro.biwajima@gmail.com"
] |
taro.biwajima@gmail.com
|
31f505bcd3e2862f943b2fb2fb39a976fcf80f18
|
7ba05e73515c14fb8d2f3d056b51102131171a11
|
/First_steps_March_Excercise/Akvarium.py
|
c65b850ffd42b8483b25d7fd5129ca00ac7b1aab
|
[] |
no_license
|
gyurel/SoftUni-Basics-and-Fundamentals
|
bd6d5fa8c9d0cc51f241393afd418633a66c65dc
|
184fc5dfab2fdd410aa8593f4c562fd56211c727
|
refs/heads/main
| 2023-07-05T11:16:58.966841
| 2021-08-31T19:25:40
| 2021-08-31T19:25:40
| 401,485,125
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 727
|
py
|
# Four lines are read from the console:
# 1. Length in cm – an integer
# 2. Width in cm – an integer
# 3. Height in cm – an integer
# 4. Percentage of the volume already occupied – a real number
length = int(input())
width = int(input())
height = int(input())
occupied_percentage = float(input()) / 100
volume_in_litters = length * width * height / 1000
# The program calculates the litres of water needed to fill the aquarium.
needed_water = volume_in_litters - (volume_in_litters * occupied_percentage)
print(needed_water)
|
[
"gyurel@yahoo.com"
] |
gyurel@yahoo.com
|
2f11b0f81351e4f628d1266ab215c514e432d2f2
|
7b0413547fb0e4766febcc6a7f0010fafe025fb6
|
/medium/course_schedule.py
|
52ca3f20847247a445eb480dcaa842522eed1cac
|
[] |
no_license
|
theeric80/LeetCode
|
b00d4bace7c48c409bc6b2f57321aea7b7106f35
|
e05321d8c2143d35279136d3999e1be1e7005690
|
refs/heads/master
| 2021-01-19T00:51:20.608326
| 2016-06-30T05:32:44
| 2016-06-30T05:32:44
| 42,165,229
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,037
|
py
|
class UndirectedGraphNode(object):
def __init__(self, x):
self.label = x
self.neighbors = []
class Solution(object):
def canFinish(self, numCourses, prerequisites):
"""
:type numCourses: int
:type prerequisites: List[List[int]]
:rtype: bool
"""
self.cycle = False
G = self.buildGraph(numCourses, prerequisites)
result, marked, on_stack = [], [False]*len(G), [False]*len(G)
for v in G:
if not marked[v.label]:
self.topological_sort(G, v, marked, on_stack, result)
result.reverse()
return not self.cycle
def buildGraph(self, numCourses, prerequisites):
G = [UndirectedGraphNode(i) for i in xrange(numCourses)]
for u, v in prerequisites:
G[u].neighbors.append(G[v])
return G
def topological_sort(self, G, v, marked, on_stack, result):
label = v.label
marked[label] = True
on_stack[label] = True
for w in v.neighbors:
if self.cycle:
return
if not marked[w.label]:
self.topological_sort(G, w, marked, on_stack, result)
elif on_stack[w.label]:
self.cycle = True
on_stack[label] = False
result.append(label)
def dfs(self, G, v):
result, marked = [], [False]*len(G)
s = [v]
while s:
node = s.pop()
label = node.label
if not marked[label]:
marked[label] = True
result.append(label)
for neighbor in node.neighbors:
s.append(neighbor)
print '->'.join(str(i) for i in result)
def main():
import sys
from os.path import join, abspath
sys.path.append(join('..', 'common'))
inputs = [(2, [[1,0]])]
for numCourses, prerequisites in inputs:
result = Solution().canFinish(numCourses, prerequisites)
print result
if __name__ == '__main__':
main()
|
[
"chunchieh@gmail.com"
] |
chunchieh@gmail.com
|
07ae3fd425deb6e5c593ee9d9ae487d5398b8f25
|
e3765def4a180f1d51eaef3884448b0bb9be2cd3
|
/example/12.3.1_create_pygame_window/alien_invasion.py
|
136e506214bafb12d29f556453abfc4bb31417aa
|
[] |
no_license
|
spearfish/python-crash-course
|
cbeb254efdf0c1ab37d8a7d2fa0409194f19fa2b
|
66bc42d41395cc365e066a597380a96d3282d30b
|
refs/heads/master
| 2023-07-14T11:04:49.276764
| 2021-08-20T10:02:27
| 2021-08-20T10:02:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 424
|
py
|
#!/usr/bin/env python3
# modules
import sys
import pygame
def run_game() :
pygame.init()
    # pygame.display is an object that handles the display.
screen = pygame.display.set_mode((1200,800))
pygame.display.set_caption('Alien Invasion')
while True :
for event in pygame.event.get() :
if event.type == pygame.QUIT :
sys.exit()
pygame.display.flip()
run_game()
|
[
"jingchen@tutanota.com"
] |
jingchen@tutanota.com
|
b8b49ba5bc255e5615ec2889ec70661333b1a2c2
|
4252102a1946b2ba06d3fa914891ec7f73570287
|
/pylearn2/packaged_dependencies/theano_linear/unshared_conv/test_localdot.py
|
6b47b5b33566ea24783e9ae4019290a4fabb845d
|
[] |
no_license
|
lpigou/chalearn2014
|
21d487f314c4836dd1631943e20f7ab908226771
|
73b99cdbdb609fecff3cf85e500c1f1bfd589930
|
refs/heads/master
| 2020-05-17T00:08:11.764642
| 2014-09-24T14:42:00
| 2014-09-24T14:42:00
| 24,418,815
| 2
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,927
|
py
|
import nose
import unittest
import numpy as np
import theano
from localdot import LocalDot
from ..test_matrixmul import SymbolicSelfTestMixin
class TestLocalDot32x32(unittest.TestCase, SymbolicSelfTestMixin):
channels = 3
bsize = 10 # batch size
imshp = (32, 32)
ksize = 5
nkern_per_group = 16
subsample_stride = 1
ngroups = 1
def rand(self, shp):
return np.random.rand(*shp).astype('float32')
def setUp(self):
np.random.seed(234)
assert self.imshp[0] == self.imshp[1]
fModulesR = (self.imshp[0] - self.ksize + 1) // self.subsample_stride
#fModulesR += 1 # XXX GpuImgActs crashes w/o this??
fModulesC = fModulesR
self.fshape = (fModulesR, fModulesC, self.channels // self.ngroups,
self.ksize, self.ksize, self.ngroups, self.nkern_per_group)
self.ishape = (self.ngroups, self.channels // self.ngroups,
self.imshp[0], self.imshp[1], self.bsize)
self.hshape = (self.ngroups, self.nkern_per_group, fModulesR, fModulesC,
self.bsize)
filters = theano.shared(self.rand(self.fshape))
self.A = LocalDot(filters, self.imshp[0], self.imshp[1],
subsample=(self.subsample_stride, self.subsample_stride))
self.xlval = self.rand((self.hshape[-1],) + self.hshape[:-1])
self.xrval = self.rand(self.ishape)
self.xl = theano.shared(self.xlval)
self.xr = theano.shared(self.xrval)
# N.B. the tests themselves come from SymbolicSelfTestMixin
class TestLocalDotLargeGray(TestLocalDot32x32):
channels = 1
bsize = 128
imshp = (256, 256)
ksize = 9
nkern_per_group = 16
subsample_stride = 2
ngroups = 1
n_patches = 3000
def rand(self, shp):
return np.random.rand(*shp).astype('float32')
# not really a test, but important code to support
# Currently exposes error, by e.g.:
# CUDA_LAUNCH_BLOCKING=1
# THEANO_FLAGS=device=gpu,mode=DEBUG_MODE
# nosetests -sd test_localdot.py:TestLocalDotLargeGray.run_autoencoder
def run_autoencoder(
self,
n_train_iter=10000, # -- make this small to be a good unit test
rf_shape=(9, 9),
n_filters=1024,
dtype='float32',
module_stride=2,
lr=0.01,
show_filters=True,
):
if show_filters:
# import here to fail right away
import matplotlib.pyplot as plt
try:
import skdata.vanhateren.dataset
except ImportError:
raise nose.SkipTest()
# 1. Get a set of image patches from the van Hateren data set
print 'Loading van Hateren images'
n_images = 50
vh = skdata.vanhateren.dataset.Calibrated(n_images)
patches = vh.raw_patches((self.n_patches,) + self.imshp,
items=vh.meta[:n_images],
rng=np.random.RandomState(123),
)
patches = patches.astype('float32')
patches /= patches.reshape(self.n_patches, self.imshp[0] * self.imshp[1])\
.max(axis=1)[:, None, None]
# TODO: better local contrast normalization
if 0 and show_filters:
plt.subplot(2, 2, 1); plt.imshow(patches[0], cmap='gray')
plt.subplot(2, 2, 2); plt.imshow(patches[1], cmap='gray')
plt.subplot(2, 2, 3); plt.imshow(patches[2], cmap='gray')
plt.subplot(2, 2, 4); plt.imshow(patches[3], cmap='gray')
plt.show()
# -- Convert patches to localdot format:
# groups x colors x rows x cols x images
patches5 = patches[:, :, :, None, None].transpose(3, 4, 1, 2, 0)
print 'Patches shape', patches.shape, self.n_patches, patches5.shape
# 2. Set up an autoencoder
print 'Setting up autoencoder'
hid = theano.tensor.tanh(self.A.rmul(self.xl))
out = self.A.rmul_T(hid)
cost = ((out - self.xl) ** 2).sum()
params = self.A.params()
gparams = theano.tensor.grad(cost, params)
train_updates = [(p, p - lr / self.bsize * gp)
for (p, gp) in zip(params, gparams)]
if 1:
train_fn = theano.function([], [cost], updates=train_updates)
else:
train_fn = theano.function([], [], updates=train_updates)
theano.printing.debugprint(train_fn)
# 3. Train it
params[0].set_value(0.001 * params[0].get_value())
for ii in xrange(0, self.n_patches, self.bsize):
self.xl.set_value(patches5[:, :, :, :, ii:ii + self.bsize], borrow=True)
cost_ii, = train_fn()
print 'Cost', ii, cost_ii
if 0 and show_filters:
self.A.imshow_gray()
plt.show()
assert cost_ii < 0 # TODO: determine a threshold for detecting regression bugs
|
[
"lionelpigou@gmail.com"
] |
lionelpigou@gmail.com
|
d063d7cbffb4226f8efbf9db037d712b216b8bb7
|
a8547f73463eef517b98d1085430732f442c856e
|
/pysam-0.13-py3.6-macosx-10.13-x86_64.egg/pysam/libcbgzf.py
|
366d86d29872fb9a2271270af8be79da14542344
|
[] |
no_license
|
EnjoyLifeFund/macHighSierra-py36-pkgs
|
63aece1b692225ee2fbb865200279d7ef88a1eca
|
5668b5785296b314ea1321057420bcd077dba9ea
|
refs/heads/master
| 2021-01-23T19:13:04.707152
| 2017-12-25T17:41:30
| 2017-12-25T17:41:30
| 102,808,884
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 303
|
py
|
def __bootstrap__():
global __bootstrap__, __loader__, __file__
import sys, pkg_resources, imp
__file__ = pkg_resources.resource_filename(__name__, 'libcbgzf.cpython-36m-darwin.so')
__loader__ = None; del __bootstrap__, __loader__
imp.load_dynamic(__name__,__file__)
__bootstrap__()
|
[
"Raliclo@gmail.com"
] |
Raliclo@gmail.com
|
6224998f24dbbf286ac343c71d3f2cf7401f4b20
|
abf9238ac124738796a61e4ae3e667cae950d55a
|
/Custom Troop Trees/Source Files/cstm_party_templates.py
|
e85eb75bb7d7beadb6787f95fd1ff63989067576
|
[] |
no_license
|
ChroniclesStudio/custom-troop-trees
|
d92d4c3723ca117fd087332451ea1a0414998162
|
d39333cf8c4ea9fddb3d58c49850a4dffedbb917
|
refs/heads/master
| 2023-02-18T07:27:56.439995
| 2021-01-19T14:46:50
| 2021-01-19T14:46:50
| 331,012,346
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,713
|
py
|
from header_common import *
from header_parties import *
from header_troops import *
from ID_troops import *
from ID_factions import *
from ID_map_icons import *
from module_constants import *
from module_troops import troops
import math
pmf_is_prisoner = 0x0001
####################################################################################################################
# Each party template record contains the following fields:
# 1) Party-template id: used for referencing party-templates in other files.
# The prefix pt_ is automatically added before each party-template id.
# 2) Party-template name.
# 3) Party flags. See header_parties.py for a list of available flags
# 4) Menu. ID of the menu to use when this party is met. The value 0 uses the default party encounter system.
# 5) Faction
# 6) Personality. See header_parties.py for an explanation of personality flags.
# 7) List of stacks. Each stack record is a tuple that contains the following fields:
# 7.1) Troop-id.
# 7.2) Minimum number of troops in the stack.
# 7.3) Maximum number of troops in the stack.
# 7.4) Member flags(optional). Use pmf_is_prisoner to note that this member is a prisoner.
# Note: There can be at most 6 stacks.
####################################################################################################################
party_templates = [
#("kingdom_1_reinforcements_a", "{!}kingdom_1_reinforcements_a", 0, 0, fac_commoners, 0, [(trp_swadian_recruit,5,10),(trp_swadian_militia,2,4)]),
#("kingdom_1_reinforcements_b", "{!}kingdom_1_reinforcements_b", 0, 0, fac_commoners, 0, [(trp_swadian_footman,3,6),(trp_swadian_skirmisher,2,4)]),
#("kingdom_1_reinforcements_c", "{!}kingdom_1_reinforcements_c", 0, 0, fac_commoners, 0, [(trp_swadian_man_at_arms,2,4),(trp_swadian_crossbowman,1,2)]), #Swadians are a bit less-powered thats why they have a bit more troops in their modernised party template (3-6, others 3-5)
#("kingdom_2_reinforcements_a", "{!}kingdom_2_reinforcements_a", 0, 0, fac_commoners, 0, [(trp_vaegir_recruit,5,10),(trp_vaegir_footman,2,4)]),
#("kingdom_2_reinforcements_b", "{!}kingdom_2_reinforcements_b", 0, 0, fac_commoners, 0, [(trp_vaegir_veteran,2,4),(trp_vaegir_skirmisher,2,4),(trp_vaegir_footman,1,2)]),
#("kingdom_2_reinforcements_c", "{!}kingdom_2_reinforcements_c", 0, 0, fac_commoners, 0, [(trp_vaegir_horseman,2,3),(trp_vaegir_infantry,1,2)]),
#("kingdom_3_reinforcements_a", "{!}kingdom_3_reinforcements_a", 0, 0, fac_commoners, 0, [(trp_khergit_tribesman,3,5),(trp_khergit_skirmisher,4,9)]), #Khergits are a bit less-powered thats why they have a bit more 2nd upgraded(trp_khergit_skirmisher) than non-upgraded one(trp_khergit_tribesman).
#("kingdom_3_reinforcements_b", "{!}kingdom_3_reinforcements_b", 0, 0, fac_commoners, 0, [(trp_khergit_horseman,2,4),(trp_khergit_horse_archer,2,4),(trp_khergit_skirmisher,1,2)]),
#("kingdom_3_reinforcements_c", "{!}kingdom_3_reinforcements_c", 0, 0, fac_commoners, 0, [(trp_khergit_horseman,2,4),(trp_khergit_veteran_horse_archer,2,3)]), #Khergits are a bit less-powered thats why they have a bit more troops in their modernised party template (4-7, others 3-5)
#("kingdom_4_reinforcements_a", "{!}kingdom_4_reinforcements_a", 0, 0, fac_commoners, 0, [(trp_nord_footman,5,10),(trp_nord_recruit,2,4)]),
#("kingdom_4_reinforcements_b", "{!}kingdom_4_reinforcements_b", 0, 0, fac_commoners, 0, [(trp_nord_huntsman,2,5),(trp_nord_archer,2,3),(trp_nord_footman,1,2)]),
#("kingdom_4_reinforcements_c", "{!}kingdom_4_reinforcements_c", 0, 0, fac_commoners, 0, [(trp_nord_warrior,3,5)]),
#("kingdom_5_reinforcements_a", "{!}kingdom_5_reinforcements_a", 0, 0, fac_commoners, 0, [(trp_rhodok_tribesman,5,10),(trp_rhodok_spearman,2,4)]),
#("kingdom_5_reinforcements_b", "{!}kingdom_5_reinforcements_b", 0, 0, fac_commoners, 0, [(trp_rhodok_crossbowman,3,6),(trp_rhodok_trained_crossbowman,2,4)]),
#("kingdom_5_reinforcements_c", "{!}kingdom_5_reinforcements_c", 0, 0, fac_commoners, 0, [(trp_rhodok_veteran_spearman,2,3),(trp_rhodok_veteran_crossbowman,1,2)]),
#("kingdom_6_reinforcements_a", "{!}kingdom_6_reinforcements_a", 0, 0, fac_commoners, 0, [(trp_sarranid_recruit,5,10),(trp_sarranid_footman,2,4)]),
#("kingdom_6_reinforcements_b", "{!}kingdom_6_reinforcements_b", 0, 0, fac_commoners, 0, [(trp_sarranid_skirmisher,2,4),(trp_sarranid_veteran_footman,2,3),(trp_sarranid_footman,1,3)]),
#("kingdom_6_reinforcements_c", "{!}kingdom_6_reinforcements_c", 0, 0, fac_commoners, 0, [(trp_sarranid_horseman,3,5)]),
]
def troop_indexes_of_tier(skin, tier):
return [find_troop(troops, troop[0]) for troop in tree.get_custom_troops_of_tier(skin, tier)]
def tier_stacks(skin, tier, min, max):
troops = troop_indexes_of_tier(skin, tier)
return [(troop, int(math.ceil(min * 1.0 / len(troops))), int(math.ceil(max * 1.0 / len(troops)))) for troop in troops]
for tree in CUSTOM_TROOP_TREES:
for skin in CSTM_SKINS:
id = "cstm_kingdom_player_%s_%d_reinforcements" % (tree.id, skin.id)
party_templates.extend([
(id + "_a", "{!}" + id + "_a", 0, 0, fac_commoners, 0, tier_stacks(skin, tier = 1, min = 5, max = 10) + tier_stacks(skin, tier = 2, min = 2, max = 4)),
(id + "_b", "{!}" + id + "_b", 0, 0, fac_commoners, 0, tier_stacks(skin, tier = 3, min = 5, max = 10)),
(id + "_c", "{!}" + id + "_c", 0, 0, fac_commoners, 0, tier_stacks(skin, tier = 4, min = 3, max = 5)),
])
#for party_template in party_templates:
# print ", ".join([party_template[0], party_template[1], ", ".join(["%d-%d %s" % (stack[1], stack[2], troops[stack[0]][2]) for stack in party_template[6]])])
|
[
"knowscount@gmail.com"
] |
knowscount@gmail.com
|
c1fda1a470ad681c3a1a16d4e839b87151b19b33
|
6f6d215a4f0a1c30eeb5a08c8a36016fc351998a
|
/zcls/model/recognizers/resnet/torchvision_resnet.py
|
040bc44da6892b30585f415d6130a4b2fe65cecc
|
[
"Apache-2.0"
] |
permissive
|
Quebradawill/ZCls
|
ef9db2b54fbee17802f3342752e3d4fe4ef9d2c5
|
ade3dc7fd23584b7ba597f24ec19c02ae847673e
|
refs/heads/master
| 2023-04-15T23:25:18.195089
| 2021-04-29T07:05:46
| 2021-04-29T07:05:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,125
|
py
|
# -*- coding: utf-8 -*-
"""
@date: 2021/2/20 10:28 AM
@file: torchvision_resnet.py
@author: zj
@description:
"""
from abc import ABC
import torch.nn as nn
from torch.nn.modules.module import T
from torchvision.models.resnet import resnet18, resnet50, resnext50_32x4d
from zcls.config.key_word import KEY_OUTPUT
from zcls.model import registry
from zcls.model.norm_helper import freezing_bn
class TorchvisionResNet(nn.Module, ABC):
def __init__(self,
arch="resnet18",
num_classes=1000,
torchvision_pretrained=False,
pretrained_num_classes=1000,
fix_bn=False,
partial_bn=False,
zero_init_residual=False):
super(TorchvisionResNet, self).__init__()
self.num_classes = num_classes
self.fix_bn = fix_bn
self.partial_bn = partial_bn
if arch == 'resnet18':
self.model = resnet18(pretrained=torchvision_pretrained, num_classes=pretrained_num_classes,
zero_init_residual=zero_init_residual)
elif arch == 'resnet50':
self.model = resnet50(pretrained=torchvision_pretrained, num_classes=pretrained_num_classes,
zero_init_residual=zero_init_residual)
elif arch == 'resnext50_32x4d':
self.model = resnext50_32x4d(pretrained=torchvision_pretrained, num_classes=pretrained_num_classes,
zero_init_residual=zero_init_residual)
else:
raise ValueError('no such value')
self.init_weights(num_classes, pretrained_num_classes)
def init_weights(self, num_classes, pretrained_num_classes):
if num_classes != pretrained_num_classes:
fc = self.model.fc
fc_features = fc.in_features
self.model.fc = nn.Linear(fc_features, num_classes)
nn.init.normal_(self.model.fc.weight, 0, 0.01)
nn.init.zeros_(self.model.fc.bias)
def train(self, mode: bool = True) -> T:
super(TorchvisionResNet, self).train(mode=mode)
if mode and (self.partial_bn or self.fix_bn):
freezing_bn(self, partial_bn=self.partial_bn)
return self
def forward(self, x):
x = self.model(x)
return {KEY_OUTPUT: x}
@registry.RECOGNIZER.register('TorchvisionResNet')
def build_torchvision_resnet(cfg):
torchvision_pretrained = cfg.MODEL.RECOGNIZER.TORCHVISION_PRETRAINED
pretrained_num_classes = cfg.MODEL.RECOGNIZER.PRETRAINED_NUM_CLASSES
fix_bn = cfg.MODEL.NORM.FIX_BN
partial_bn = cfg.MODEL.NORM.PARTIAL_BN
# for backbone
arch = cfg.MODEL.BACKBONE.ARCH
zero_init_residual = cfg.MODEL.RECOGNIZER.ZERO_INIT_RESIDUAL
num_classes = cfg.MODEL.HEAD.NUM_CLASSES
return TorchvisionResNet(
arch=arch,
num_classes=num_classes,
torchvision_pretrained=torchvision_pretrained,
pretrained_num_classes=pretrained_num_classes,
fix_bn=fix_bn,
partial_bn=partial_bn,
zero_init_residual=zero_init_residual
)
|
[
"wy163zhuj@163.com"
] |
wy163zhuj@163.com
|
8afe9cc9f4f53d06be5e718686be5cb4cf5c0cdb
|
c67268ac491ecfe606308a43185f1bf8073d56a1
|
/unittesting/test_employee2.py
|
84682a7e52ffd035b6a9a992a079c59112128dc6
|
[] |
no_license
|
jisshub/python-django-training
|
3c0fad4c80c78bcfb4b61b025da60d220b502e4b
|
d8c61f53e3bb500b1a58a706f20108babd6a1a54
|
refs/heads/master
| 2020-06-21T15:07:25.704209
| 2019-09-01T19:24:02
| 2019-09-01T19:24:02
| 197,487,745
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,929
|
py
|
import unittest
# here we import the Employee class from the employee module (employee.py)
from employee import Employee
class EmployeeTest(unittest.TestCase):
def setUp(self):
print('setup\n')
        # here we create two Employee objects instead of creating them in each test,
        # ensuring the DRY principle
self.emp1 = Employee('jiss', 'jose', 3000)
self.emp2 = Employee('isco', 'alarcon', 5000)
def tearDown(self):
print('teardown\n')
def test_email(self):
print('test_email\n')
var1 = self.emp1.email
var2 = self.emp2.email
self.assertEqual(var1, 'jissjose@gmail.com')
self.assertEqual(var2, 'iscoalarcon@gmail.com')
self.emp1.first = 'john'
self.emp2.last = 'james'
self.assertEqual(self.emp1.email, 'johnjose@gmail.com')
self.assertEqual(self.emp2.email, 'iscojames@gmail.com')
def test_fullname(self):
print('test_fullname\n')
self.assertEqual(self.emp1.full_name, 'jiss jose')
self.emp1.first = 'jom'
self.emp1.last = 'thomas'
self.assertEqual(self.emp1.full_name, 'jom thomas')
self.assertEqual(self.emp2.full_name, 'isco alarcon')
self.emp2.first = 'alvaro'
self.emp2.last = 'morata'
self.assertEqual(self.emp2.full_name, 'alvaro morata')
def test_pay(self):
print('test_pay\n')
self.assertEqual(self.emp1.apply_raise, 6000)
self.emp1.pay_raise = 1.5
self.assertEqual(self.emp1.apply_raise, 9000)
self.assertEqual(self.emp2.apply_raise, 10000)
self.emp2.pay_raise = .5
self.assertEqual(self.emp2.apply_raise, 5000)
if __name__ == '__main__':
unittest.main()
# here we test whether the values of apply_raise and pay are equal.
# here setUp runs before each test and tearDown method runs after each test.
# order will be like
# setUp
# testmethod
# teardown
|
[
"jissmon476@gmial.com"
] |
jissmon476@gmial.com
|
ccf100ecb17578bc9791263e5270183990fed468
|
0b793bce2da8c3d09b7956c0672ddbffd46feaed
|
/atcoder/corp/keyence2020_c.py
|
9e943f94b0f860184c871b6de78e2af5092d409b
|
[
"MIT"
] |
permissive
|
knuu/competitive-programming
|
c6c4e08fb231937d988bdc5a60a8ad6b31b97616
|
16bc68fdaedd6f96ae24310d697585ca8836ab6e
|
refs/heads/master
| 2021-01-17T09:39:02.647688
| 2020-11-07T03:17:22
| 2020-11-07T03:17:22
| 27,886,732
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 201
|
py
|
N, K, S = map(int, input().split())
if S == 1:
const = S + 1
else:
const = S - 1
ans = []
for i in range(N):
if i < K:
ans.append(S)
else:
ans.append(const)
print(*ans)
|
[
"premier3next@gmail.com"
] |
premier3next@gmail.com
|
03bc0d80849bc3264945b6fc903d9599b980d26a
|
a38725ed7fb93b503207502984ec197e921eb54b
|
/venv/lib/python3.6/site-packages/django_ajax/encoder.py
|
64ed9ca2af3a6a719fd651966cacb7ddaf862693
|
[] |
no_license
|
tanveerahmad1517/myblogproject
|
d00d550230e2df0843e67f793504f9c19d0b755c
|
2eaa051caa5b68a8fba260c7cd431f1e1719a171
|
refs/heads/master
| 2020-03-16T21:38:32.738671
| 2018-08-23T11:55:02
| 2018-08-23T11:55:02
| 133,008,051
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,859
|
py
|
"""
Utils
"""
from __future__ import unicode_literals
import json
from datetime import date
from django.http.response import HttpResponseRedirectBase, HttpResponse
from django.template.response import TemplateResponse
from django.utils.encoding import force_text
from django.db.models.base import ModelBase
from decimal import Decimal
class LazyJSONEncoderMixin(object):
"""
A JSONEncoder subclass that handle querysets and models objects.
Add how handle your type of object here to use when dump json
"""
def default(self, obj):
# handles HttpResponse and exception content
if issubclass(type(obj), HttpResponseRedirectBase):
return obj['Location']
elif issubclass(type(obj), TemplateResponse):
return obj.rendered_content
elif issubclass(type(obj), HttpResponse):
return obj.content
elif issubclass(type(obj), Exception) or isinstance(obj, bytes):
return force_text(obj)
# this handles querysets and other iterable types
try:
iterable = iter(obj)
except TypeError:
pass
else:
return list(iterable)
        # this handles Models
if isinstance(obj.__class__, ModelBase):
return force_text(obj)
if isinstance(obj, Decimal):
return float(obj)
if isinstance(obj, date):
return obj.isoformat()
return super(LazyJSONEncoderMixin, self).default(obj)
class LazyJSONEncoder(LazyJSONEncoderMixin, json.JSONEncoder):
pass
def serialize_to_json(data, *args, **kwargs):
"""
A wrapper for simplejson.dumps with defaults as:
cls=LazyJSONEncoder
All arguments can be added via kwargs
"""
kwargs['cls'] = kwargs.get('cls', LazyJSONEncoder)
return json.dumps(data, *args, **kwargs)
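# Illustrative usage (a sketch, not part of the original module): LazyJSONEncoder
# lets dates and Decimals pass through json.dumps without extra arguments, e.g.
#   serialize_to_json({'when': date(2018, 1, 1), 'price': Decimal('9.99')})
# returns '{"when": "2018-01-01", "price": 9.99}'.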
|
[
"tanveerobjects@gmail.com"
] |
tanveerobjects@gmail.com
|
cb2f886ed26850bfebfaf4e3a00a9e730652e300
|
cc086a96967761f520c24ce3b22bacecb673cbf2
|
/chec_operator/threads/observation.py
|
877c8afa6dcb8e097cf23a53a3504277d6791849
|
[] |
no_license
|
watsonjj/chec_operator
|
39524405b3c6a55fe7fa3e8353da5f456f76a27d
|
c537a1737a53fe996652c793c09f5a33cd03e208
|
refs/heads/master
| 2020-04-18T02:27:52.730614
| 2019-01-23T10:41:13
| 2019-01-23T10:41:13
| 167,163,531
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,722
|
py
|
import threading
from time import sleep, ctime, time
from datetime import datetime
from chec_operator.utils.enums import CameraState
class ObservingThread(threading.Thread):
def __init__(self, parent_handler, timedelta, triggerdelta):
print("Creating observation thread")
self.parent_handler = parent_handler
self.timedelta = timedelta
self.triggerdelta = triggerdelta
self.starttime = 0
self.starttrigger = 0
self.currenttimedelta = 0
self.currenttriggerdelta = 0
self.get_trigger = self.parent_handler.get_backplane_trigger_count
super(ObservingThread, self).__init__()
self._observation_interrupt = threading.Event()
self.observation_reached_end = False
self.running = False
self.lock = threading.Lock()
def _check_time(self):
if self.timedelta:
self.currenttimedelta = datetime.now() - self.starttime
return self.currenttimedelta >= self.timedelta
else:
return False
def _check_trigger(self):
if self.triggerdelta:
self.currenttriggerdelta = self.get_trigger() - self.starttrigger
return self.currenttriggerdelta >= self.triggerdelta
else:
return False
def observation_ended(self):
return self._observation_interrupt.isSet()
def interrupt_observation(self):
if self.lock.acquire(False):
print("[WARNING] Interrupting observation thread!")
self._observation_interrupt.set()
self.join()
def run(self):
self.running = True
self.starttime = datetime.now()
self.starttrigger = self.get_trigger()
print("[INFO] Starting observation thread, "
"start time = {}, timedelta = {} s, triggerdelta = {}"
.format(ctime(time()), self.timedelta, self.triggerdelta))
while not self.observation_ended():
if self._check_time() or self._check_trigger():
self._finish_run()
break
self.running = False
print("Observation Ended")
def _finish_run(self):
if self.lock.acquire(False):
print("[INFO] Observation thread complete, "
"end time = {}, duration = {}, triggers {} (end) {} (actual)"
.format(ctime(time()), self.currenttimedelta,
self.currenttriggerdelta,
self.get_trigger() - self.starttrigger))
self.observation_reached_end = True
self.parent_handler.go_to_state(CameraState.READY)
def wait_for_end(self):
self.join()
print("Observation Ended")
|
[
"jason.jw@live.co.uk"
] |
jason.jw@live.co.uk
|
f3d5dcd2e5f655280d986d7d5e685dfb3b524cc2
|
06604399c457d6ec05fa5d5ae458632e2606ec98
|
/torch/utils/_sympy/functions.py
|
3c78e1bebb50e8e34e979cab147e57e371f418bb
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"LicenseRef-scancode-secret-labs-2011",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0"
] |
permissive
|
yncxcw/pytorch
|
6f262f7613caef4c2ce18c85662db9adc6a2a81a
|
a3b72ee354031004edd9b951d0efcdd4508fd578
|
refs/heads/master
| 2023-07-20T21:38:00.718093
| 2023-07-13T03:54:17
| 2023-07-13T03:54:17
| 234,432,318
| 0
| 0
|
NOASSERTION
| 2020-01-16T23:34:42
| 2020-01-16T23:34:41
| null |
UTF-8
|
Python
| false
| false
| 5,787
|
py
|
import sympy
from sympy.core.logic import fuzzy_and, fuzzy_or
__all__ = ["FloorDiv", "ModularIndexing", "CleanDiv", "CeilDiv", "LShift", "RShift"]
class FloorDiv(sympy.Function):
"""
We maintain this so that:
1. We can use divisibility guards to simplify FloorDiv(a, b) to a / b.
2. Printing out the expression is nicer (compared to say, representing a//b as (a - a % b) / b)
"""
nargs = (2,)
precedence = 50 # precedence of mul # noqa: F811
# Default return type for SymPy assumptions.
# https://docs.sympy.org/latest/guides/assumptions.html#implementing-assumptions-handlers
is_real = True
@property
def base(self):
return self.args[0]
@property
def divisor(self):
return self.args[1]
def _sympystr(self, printer):
base = printer.parenthesize(self.base, self.precedence)
divisor = printer.parenthesize(self.divisor, self.precedence)
return f"({base}//{divisor})"
# SymPy assumptions based on argument types.
def _eval_is_real(self):
return fuzzy_or([self.base.is_real, self.divisor.is_real])
def _eval_is_integer(self):
return fuzzy_and([self.base.is_integer, self.divisor.is_integer])
# Automatic evaluation.
# https://docs.sympy.org/latest/guides/custom-functions.html#best-practices-for-eval
@classmethod
def eval(cls, base, divisor):
def check_supported_type(x):
if (x.is_integer is False and x.is_real is False and x.is_complex) or x.is_Boolean:
raise TypeError(
f"unsupported operand type(s) for //: "
f"'{type(base).__name__}' and '{type(divisor).__name__}'"
f", expected integer or real")
check_supported_type(base)
check_supported_type(divisor)
# We don't provide the same error message as in Python because SymPy
# makes it difficult to check the types.
if divisor.is_zero:
raise ZeroDivisionError("division by zero")
if base.is_zero:
return sympy.S.Zero
if base.is_integer and divisor == 1:
return base
if base.is_real and divisor == 1:
return sympy.floor(base)
if isinstance(base, sympy.Integer) and isinstance(divisor, sympy.Integer):
return base // divisor
if isinstance(base, (sympy.Integer, sympy.Float)) and isinstance(divisor, (sympy.Integer, sympy.Float)):
return sympy.floor(base / divisor)
if isinstance(base, FloorDiv):
return FloorDiv(base.args[0], base.args[1] * divisor)
if isinstance(base, sympy.Add):
for a in base.args:
gcd = sympy.gcd(a, divisor)
if gcd == divisor:
return FloorDiv(base - a, divisor) + a / gcd
gcd = sympy.gcd(base, divisor)
if gcd != 1:
return FloorDiv(
sympy.simplify(base / gcd), sympy.simplify(divisor / gcd)
)
class ModularIndexing(sympy.Function):
"""
ModularIndexing(a, b, c) => (a // b) % c
"""
nargs = (3,)
is_integer = True
@classmethod
def eval(cls, base, divisor, modulus):
if base == 0 or modulus == 1:
return sympy.Integer(0)
if (
isinstance(base, sympy.Integer)
and isinstance(divisor, sympy.Integer)
and isinstance(modulus, sympy.Integer)
):
return (base // divisor) % modulus
if divisor != 1:
gcd = sympy.gcd(base, divisor)
if gcd != 1:
return ModularIndexing(
sympy.simplify(base / gcd), sympy.simplify(divisor / gcd), modulus
)
if isinstance(base, sympy.Add):
new_terms = []
all_positive = True
for term in base.args:
if sympy.gcd(term, modulus * divisor) != modulus * divisor:
if (isinstance(term, sympy.Integer) and term < 0) or (
isinstance(term, sympy.Mul)
and isinstance(term.args[0], sympy.Integer)
and term.args[0] < 0
):
# workaround for https://github.com/openai/triton/issues/619,
# if there are negative terms, // produces wrong result
# TODO if https://github.com/openai/triton/issues/619 is fixed
# this optimization would become valid
all_positive = False
break
else:
new_terms.append(term)
if len(new_terms) != len(base.args) and all_positive:
return ModularIndexing(sum(new_terms), divisor, modulus)
if isinstance(base, FloorDiv):
return ModularIndexing(base.args[0], base.args[1] * divisor, modulus)
class CleanDiv(FloorDiv):
"""
Div where we can assume no rounding.
This is to enable future optimizations.
"""
pass
class CeilDiv(sympy.Function):
"""
Div used in indexing that rounds up.
"""
is_integer = True
def __new__(cls, base, divisor):
if sympy.gcd(base, divisor) == divisor:
return CleanDiv(base, divisor)
else:
return FloorDiv(base + (divisor - 1), divisor)
class LShift(sympy.Function):
@classmethod
def eval(cls, base, shift):
if shift < 0:
raise ValueError('negative shift count')
return base * 2 ** shift
class RShift(sympy.Function):
@classmethod
def eval(cls, base, shift):
if shift < 0:
raise ValueError('negative shift count')
return base // 2 ** shift
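# Illustrative behaviour (a sketch, not part of the original module; assumes
# `a = sympy.Symbol('a', integer=True)`):
#   FloorDiv(sympy.Integer(7), 3)            -> Integer(2)
#   ModularIndexing(sympy.Integer(7), 2, 3)  -> Integer(0), i.e. (7 // 2) % 3
#   str(FloorDiv(a, 2))                      -> '(a//2)'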
|
[
"pytorchmergebot@users.noreply.github.com"
] |
pytorchmergebot@users.noreply.github.com
|
af2729e0f3c3c35ad20460334df67ddb78436aec
|
6635686859b272d291d0ba3520ccd03cdc80a349
|
/DT/threadingtext.py
|
5b21c27fdb9a81a7ecb35c0e0d9c9ebe52c19d32
|
[] |
no_license
|
yangrencong/web_spiders
|
ac15c491f60e489000e5312c999f02e6c4fdafdf
|
69fdc6eeb5ad19283690c056064f8853e0256445
|
refs/heads/master
| 2020-03-28T18:45:50.800667
| 2018-10-26T02:50:16
| 2018-10-26T02:50:16
| 148,908,630
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 995
|
py
|
#!/usr/bin/env python
# -*- coding=utf8 -*-
"""
# Author: Mr.yang
# Created Time : 2018/10/10 Wednesday 12:26:07
# File Name: threadingtext.py
# Description:
# Editortool: vim8.0
"""
import threading
import time
class myThread(threading.Thread):
def __init__(self ,name ,delay):
threading.Thread.__init__(self)
self.name = name
self.delay = delay
def run(self):
print("Starting " + self.name)
print_time(self.name ,self.delay)
print("Exiting " + self.name)
def print_time(threadName ,delay):
counter = 0
while counter < 3:
time.sleep(delay)
print(threadName ,time.ctime())
counter += 1
threads = []
# create new threads
thread1 = myThread("Thread-1" ,1)
thread2 = myThread("Thread-2" ,2)
# start the new threads
thread1.start()
thread2.start()
# add the threads to the thread list
threads.append(thread1)
threads.append(thread2)
# wait for all threads to finish
for t in threads:
t.join()
print("Exiting main thread")
|
[
"1452581359@qq.com"
] |
1452581359@qq.com
|
7fc024f18bdc5289a4cad605dbc8a2f6fa792e74
|
c4c159a21d2f1ea0d7dfaa965aeff01c8ef70dce
|
/flask/flaskenv/Lib/site-packages/tensorflow/contrib/eager/python/tfe.py
|
c441ab87be7e0aebadefe92023f89bfd67ff471e
|
[] |
no_license
|
AhsonAslam/webapi
|
54cf7466aac4685da1105f9fb84c686e38f92121
|
1b2bfa4614e7afdc57c9210b0674506ea70b20b5
|
refs/heads/master
| 2020-07-27T06:05:36.057953
| 2019-09-17T06:35:33
| 2019-09-17T06:35:33
| 208,895,450
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:9b7bd976495c4645582fde2d7fcf488a311648b43813cff249462fccfa19224a
size 5928
|
[
"github@cuba12345"
] |
github@cuba12345
|
4353deb50a51a18cfc392b8d5fada6467c849fe1
|
91d1a6968b90d9d461e9a2ece12b465486e3ccc2
|
/worklink_write_3/domain_associate.py
|
622afc66bafaf4062a1575d617c77e954bc7ee2e
|
[] |
no_license
|
lxtxl/aws_cli
|
c31fc994c9a4296d6bac851e680d5adbf7e93481
|
aaf35df1b7509abf5601d3f09ff1fece482facda
|
refs/heads/master
| 2023-02-06T09:00:33.088379
| 2020-12-27T13:38:45
| 2020-12-27T13:38:45
| 318,686,394
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,279
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import write_three_parameter
# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/worklink/associate-domain.html
if __name__ == '__main__':
"""
describe-domain : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/worklink/describe-domain.html
disassociate-domain : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/worklink/disassociate-domain.html
list-domains : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/worklink/list-domains.html
"""
parameter_display_string = """
# fleet-arn : The Amazon Resource Name (ARN) of the fleet.
# domain-name : The fully qualified domain name (FQDN).
# acm-certificate-arn : The ARN of an issued ACM certificate that is valid for the domain being associated.
"""
add_option_dict = {}
add_option_dict["parameter_display_string"] = parameter_display_string
# ex: add_option_dict["no_value_parameter_list"] = "--single-parameter"
write_three_parameter("worklink", "associate-domain", "fleet-arn", "domain-name", "acm-certificate-arn", add_option_dict)
|
[
"hcseo77@gmail.com"
] |
hcseo77@gmail.com
|
9adc81d26ca9708c7ee07b95c8795d117a6c05e9
|
987a82368d3a15b618ff999f28dc16b89e50f675
|
/plaso/parsers/winreg_plugins/shutdown.py
|
f14cb1abcb7795e44c6e63f3728e75b94987c6ff
|
[
"Apache-2.0"
] |
permissive
|
arunthirukkonda/plaso
|
185b30ab4ec90fcc2d280b3c89c521c9eef7b7ab
|
846fc2fce715e1f78b11f375f6fe4e11b5c284ba
|
refs/heads/master
| 2021-08-30T15:21:12.267584
| 2017-12-18T12:33:08
| 2017-12-18T12:33:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,893
|
py
|
# -*- coding: utf-8 -*-
"""Windows Registry plugin for parsing the last shutdown time of a system."""
from __future__ import unicode_literals
import construct
from dfdatetime import filetime as dfdatetime_filetime
from dfdatetime import semantic_time as dfdatetime_semantic_time
from plaso.containers import events
from plaso.containers import time_events
from plaso.lib import definitions
from plaso.parsers import winreg
from plaso.parsers.winreg_plugins import interface
__author__ = 'Preston Miller, dpmforensics.com, github.com/prmiller91'
class ShutdownWindowsRegistryEventData(events.EventData):
"""Shutdown Windows Registry event data.
Attributes:
key_path (str): Windows Registry key path.
value_name (str): name of the Windows Registry value.
"""
DATA_TYPE = 'windows:registry:shutdown'
def __init__(self):
"""Initializes event data."""
super(ShutdownWindowsRegistryEventData, self).__init__(
data_type=self.DATA_TYPE)
self.key_path = None
self.value_name = None
class ShutdownPlugin(interface.WindowsRegistryPlugin):
"""Windows Registry plugin for parsing the last shutdown time of a system."""
NAME = 'windows_shutdown'
DESCRIPTION = 'Parser for ShutdownTime Registry value.'
FILTERS = frozenset([
interface.WindowsRegistryKeyPathFilter(
'HKEY_LOCAL_MACHINE\\System\\CurrentControlSet\\Control\\Windows')])
_UINT64_STRUCT = construct.ULInt64('value')
def ExtractEvents(self, parser_mediator, registry_key, **kwargs):
"""Extracts events from a ShutdownTime Windows Registry value.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
"""
shutdown_value = registry_key.GetValueByName('ShutdownTime')
if not shutdown_value:
return
# Directly parse the Windows Registry value data in case it is defined
# as binary data.
try:
timestamp = self._UINT64_STRUCT.parse(shutdown_value.data)
except construct.FieldError as exception:
timestamp = None
parser_mediator.ProduceExtractionError(
'unable to determine shutdown timestamp with error: {0!s}'.format(
exception))
if not timestamp:
date_time = dfdatetime_semantic_time.SemanticTime('Not set')
else:
date_time = dfdatetime_filetime.Filetime(timestamp=timestamp)
event_data = ShutdownWindowsRegistryEventData()
event_data.key_path = registry_key.path
event_data.offset = shutdown_value.offset
event_data.value_name = shutdown_value.name
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_LAST_SHUTDOWN)
parser_mediator.ProduceEventWithEventData(event, event_data)
winreg.WinRegistryParser.RegisterPlugin(ShutdownPlugin)
|
[
"joachim.metz@gmail.com"
] |
joachim.metz@gmail.com
|
d5aa6095ffe361c6c24f7e7ace9e878dcd34a356
|
8a452b71e3942d762fc2e86e49e72eac951b7eba
|
/leetcode/editor/en/[1065]Index Pairs of a String.py
|
9596550ca2c48f4cb14e3df379385e19b37fe19c
|
[] |
no_license
|
tainenko/Leetcode2019
|
7bea3a6545f97c678a176b93d6622f1f87e0f0df
|
8595b04cf5a024c2cd8a97f750d890a818568401
|
refs/heads/master
| 2023-08-02T18:10:59.542292
| 2023-08-02T17:25:49
| 2023-08-02T17:25:49
| 178,761,023
| 5
| 0
| null | 2019-08-27T10:59:12
| 2019-04-01T01:04:21
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,478
|
py
|
# Given a string text and an array of strings words, return an array of all
# index pairs [i, j] so that the substring text[i...j] is in words.
#
# Return the pairs [i, j] in sorted order (i.e., sort them by their first
# coordinate, and in case of ties sort them by their second coordinate).
#
#
# Example 1:
#
#
# Input: text = "thestoryofleetcodeandme", words = ["story","fleet","leetcode"]
# Output: [[3,7],[9,13],[10,17]]
#
#
# Example 2:
#
#
# Input: text = "ababa", words = ["aba","ab"]
# Output: [[0,1],[0,2],[2,3],[2,4]]
# Explanation: Notice that matches can overlap, see "aba" is found in [0,2] and
# [2,4].
#
#
#
# Constraints:
#
#
# 1 <= text.length <= 100
# 1 <= words.length <= 20
# 1 <= words[i].length <= 50
# text and words[i] consist of lowercase English letters.
# All the strings of words are unique.
#
# Related Topics Array String Trie Sorting 👍 203 👎 73
# leetcode submit region begin(Prohibit modification and deletion)
class Solution:
def indexPairs(self, text: str, words: List[str]) -> List[List[int]]:
res = []
words.sort(key=lambda x: len(x))
for i in range(len(text)):
for word in words:
if i + len(word) > len(text):
continue
if word == text[i:i + len(word)]:
res.append([i, i + len(word) - 1])
return res
# leetcode submit region end(Prohibit modification and deletion)
|
[
"31752048+tainenko@users.noreply.github.com"
] |
31752048+tainenko@users.noreply.github.com
|
d4d00147e745d9e951765b3fc1fd6c50c016f113
|
3c9011b549dd06b6344c6235ed22b9dd483365d1
|
/Agenda/contatos/migrations/0003_contato_foto.py
|
097090b85a3f8c6636e29e52edc33d1acc2ee0e7
|
[] |
no_license
|
joaoo-vittor/estudo-python
|
1411f4c3620bbc5f6b7c674a096cae8f90f0db8d
|
5562d823dd574d7df49fddca87a1fbd319356969
|
refs/heads/master
| 2023-05-31T17:59:16.752835
| 2021-06-25T04:54:56
| 2021-06-25T04:54:56
| 292,372,669
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 402
|
py
|
# Generated by Django 3.2 on 2021-05-16 01:41
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('contatos', '0002_contato_mostrar'),
]
operations = [
migrations.AddField(
model_name='contato',
name='foto',
field=models.ImageField(blank=True, upload_to='fotos/%Y/%m'),
),
]
|
[
"joaoo.vittor007@gmail.com"
] |
joaoo.vittor007@gmail.com
|
00afe15515e8406d7267839d7d8a4be3bccea3fa
|
1dbbb05b30d27c6419b9f34eea3b9a47f92582a0
|
/parlai/zoo/sea/bart_base.py
|
44e3581dd73c1b7ad168a64f76a5a09e3c7c18f6
|
[
"MIT"
] |
permissive
|
facebookresearch/ParlAI
|
815334323d0ebef51bf9837336fe3eef6fe1655d
|
e1d899edfb92471552bae153f59ad30aa7fca468
|
refs/heads/main
| 2023-08-31T22:20:45.918129
| 2023-08-14T19:39:56
| 2023-08-14T19:39:56
| 89,266,735
| 10,943
| 2,395
|
MIT
| 2023-09-13T23:07:40
| 2017-04-24T17:10:44
|
Python
|
UTF-8
|
Python
| false
| false
| 741
|
py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Vanilla BART-Large 400m parameter model with no retrieval.
"""
from parlai.core.build_data import built, download_models, get_model_dir
import os
import os.path
def download(datapath):
ddir = os.path.join(get_model_dir(datapath), 'sea')
model_type = 'bart_base'
version = 'v1.0'
if not built(os.path.join(ddir, model_type), version):
opt = {'datapath': datapath, 'model_type': model_type}
fnames = [f'model_{version}.tgz']
download_models(opt, fnames, 'sea', version=version, use_model_type=True)
|
[
"noreply@github.com"
] |
facebookresearch.noreply@github.com
|
2fe4cec6defc2e66ddc4db17511c536f84514dd1
|
ee6acbd5fcd0fcd16230e96a4a539de41a02c97e
|
/operators/special-resource-operator/python/pulumi_pulumi_kubernetes_crds_operators_special_resource_operator/sro/v1alpha1/__init__.py
|
7e6f12156a258138619de35e038acddf3d969e0c
|
[
"Apache-2.0"
] |
permissive
|
isabella232/pulumi-kubernetes-crds
|
777e78137aaf6525a44b61a02dccf91bf0d87a14
|
372c4c0182f6b899af82d6edaad521aa14f22150
|
refs/heads/master
| 2023-03-15T04:29:16.039753
| 2020-12-30T19:35:54
| 2020-12-30T19:35:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 275
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by crd2pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
# Export this package's modules as members:
from .SpecialResource import *
from ._inputs import *
from . import outputs
|
[
"albertzhong0@gmail.com"
] |
albertzhong0@gmail.com
|
5a1a215fc88b1c2d5c7a9729d348862c15461931
|
b64687833bbbd206d871e5b20c73e5bf363c4995
|
/crocs.py
|
2ea505fc464101c7b928b4bbcbb3e5e9cd5a0f07
|
[
"Apache-2.0"
] |
permissive
|
barkinet/crocs
|
462225eee0975c9240ec25ca1275e0f9dc991e00
|
7ab44d1eb45aac7b24ab64601255d9fb38049040
|
refs/heads/master
| 2020-12-02T09:12:18.988446
| 2017-07-09T20:09:52
| 2017-07-09T20:09:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,547
|
py
|
from random import choice, randint
from string import printable
import re
class RegexStr(object):
def __init__(self, value):
self.value = value
def invalid_data(self):
pass
def valid_data(self):
return self.value
def __str__(self):
return re.escape(self.value)
class RegexOperator(object):
# It may be interesting to have a base class Pattern
# that implements common methods with Group and Include, Exclude.
# Because these accept multiple arguments.
def __init__(self):
pass
def invalid_data(self):
pass
def valid_data(self):
pass
def encargs(self, args):
return [RegexStr(ind) if isinstance(ind, str) else ind
for ind in args]
def encstr(self, regex):
regex = RegexStr(regex) if isinstance(
regex, str) else regex
return regex
def test(self):
regex = str(self)
data = self.valid_data()
# It has to be search in order to work with ConsumeNext.
strc = re.search(regex, data)
        print 'Regex:', regex
print 'Input:', data
print 'Group dict:', strc.groupdict()
print 'Group 0:', strc.group(0)
print 'Groups:', strc.groups()
def join(self):
return ''.join(map(lambda ind: str(ind), self.args))
def __str__(self):
pass
class NamedGroup(RegexOperator):
"""
Named groups.
(?P<name>...)
"""
def __init__(self, name, *args):
self.args = self.encargs(args)
self.name = name
def invalid_data(self):
pass
def valid_data(self):
return ''.join(map(lambda ind: \
ind.valid_data(), self.args))
def __str__(self):
return '(?P<%s>%s)' % (self.name, self.join())
class Group(RegexOperator):
"""
A normal group.
(abc).
"""
def __init__(self, *args):
self.args = self.encargs(args)
def invalid_data(self):
pass
def valid_data(self):
return ''.join(map(lambda ind: \
ind.valid_data(), self.args))
def __str__(self):
return '(%s)' % self.join()
class Times(RegexOperator):
"""
Match n, m times.
a{1, 3}
Note: The * and + are emulated by
Times(regex, 0) or Times(regex, 1)
"""
TEST_MAX = 10
def __init__(self, regex, min=0, max=''):
self.regex = self.encstr(regex)
self.min = min
self.max = max
def invalid_data(self):
pass
def valid_data(self):
count = randint(self.min, self.max
if self.max else self.TEST_MAX)
data = ''.join((self.regex.valid_data()
for ind in xrange(count)))
return data
def __str__(self):
return '%s{%s,%s}' % (self.regex,
self.min, self.max)
class ConsumeNext(RegexOperator):
"""
Lookbehind assertion.
(?<=...)
"""
def __init__(self, regex0, regex1):
self.regex0 = self.encstr(regex0)
self.regex1 = self.encstr(regex1)
def invalid_data(self):
pass
def valid_data(self):
return '%s%s' % (self.regex0.valid_data(),
self.regex1.valid_data())
def __str__(self):
return '(?<=%s)%s' % (self.regex0, self.regex1)
class ConsumeBack(RegexOperator):
"""
Lookahead assertion.
(?=...)
"""
def __init__(self, regex0, regex1):
self.regex0 = self.encstr(regex0)
self.regex1 = self.encstr(regex1)
def invalid_data(self):
pass
def valid_data(self):
return '%s%s' % (self.regex0.valid_data(),
self.regex1.valid_data())
def __str__(self):
return '%s(?=%s)' % (self.regex0, self.regex1)
class Seq(RegexOperator):
def __init__(self, start, end):
self.start = start
self.end = end
self.seq = [chr(ind) for ind in xrange(
ord(self.start), ord(self.end))]
def valid_data(self):
return ''.join(self.seq)
def __str__(self):
return '%s-%s' % (self.start, self.end)
class Include(RegexOperator):
"""
Sets.
[abc]
"""
def __init__(self, *args):
self.args = self.encargs(args)
def invalid_data(self):
pass
def valid_data(self):
chars = ''.join(map(lambda ind: \
ind.valid_data(), self.args))
char = choice(chars)
return char
def __str__(self):
return '[%s]' % self.join()
class Exclude(RegexOperator):
"""
Excluding.
[^abc]
"""
def __init__(self, *args):
self.args = self.encargs(args)
def invalid_data(self):
pass
def valid_data(self):
chars = ''.join(map(lambda ind: \
ind.valid_data(), self.args))
data = filter(lambda ind: \
not ind in chars, printable)
return choice(data)
def __str__(self):
return '[^%s]' % self.join()
class X(RegexOperator):
"""
The dot.
.
"""
TOKEN = '.'
def __init__(self):
pass
def invalid_data(self):
return ''
def valid_data(self):
char = choice(printable)
return char
def __str__(self):
return self.TOKEN
class Pattern(RegexOperator):
"""
Setup a pattern.
"""
def __init__(self, *args):
self.args = self.encargs(args)
def invalid_data(self):
pass
def valid_data(self):
return ''.join(map(lambda ind: \
ind.valid_data(), self.args))
def __str__(self):
return self.join()
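# Illustrative usage (a sketch, not part of the original module; Python 2 like
# the rest of this file):
#   expr = Pattern(NamedGroup('word', Times(Include(Seq('a', 'z')), 1, 5)))
#   expr.test()  # prints the regex (?P<word>[a-z]{1,5}), a randomly generated
#                # valid input and the captured groups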
|
[
"ioliveira.id.uff.br"
] |
ioliveira.id.uff.br
|
e1918394a57db35a46a6856e38ebedd667af34e4
|
d21112887ed1ec675b7b519cc991fc47bfa11735
|
/SaleML_PreDjango/Predicting/urls.py
|
fc440ad929f7717a684452088ecfe3d8b3a0a1bb
|
[] |
no_license
|
SydNS/DjangoML-model
|
8c9ab65075b896ff129a872b087cdcd9dfc87e83
|
c15474b136d592e182e707f6a73269685c3e62ad
|
refs/heads/master
| 2023-03-02T13:27:33.809869
| 2021-02-06T09:57:34
| 2021-02-06T09:57:34
| 336,550,706
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 196
|
py
|
from django.conf.urls import url
from django.contrib import admin
from django.urls import path, include
# product_describe_view needs the app's views module; assuming the standard layout,
# this refers to Predicting/views.py
from . import views
urlpatterns = [
    path('products', views.product_describe_view, name='product_add'),
]
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
a97d0b7b5c266a837d5caf3fefb00339c7d845dc
|
8fcae139173f216eba1eaa01fd055e647d13fd4e
|
/.history/scraper_20191220144406.py
|
d08dde54a93939a73cdb07eb8e08d72519375f5e
|
[] |
no_license
|
EnriqueGalindo/backend-web-scraper
|
68fdea5430a0ffb69cc7fb0e0d9bcce525147e53
|
895d032f4528d88d68719838a45dae4078ebcc82
|
refs/heads/master
| 2020-11-27T14:02:59.989697
| 2019-12-21T19:47:34
| 2019-12-21T19:47:34
| 229,475,085
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,693
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module docstring: One line description of what your program does.
There should be a blank line in between description above, and this
more detailed description. In this section you should put any caveats,
environment variable expectations, gotchas, and other notes about running
the program. Author tag (below) helps instructors keep track of who
wrote what, when grading.
"""
__author__ = "Enrique Galindo"
# Imports go at the top of your file, after the module docstring.
# One module per import line. These are for example only.
import sys
import requests
import re
def main(args):
"""Main function is declared as standalone, for testability"""
url = args[0]
response = requests.get(url)
response.raise_for_status()
url_list = re.findall(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', response.text)
    regex_email = r'''(?:[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*|"(?:[\x01-\x08\x0b\x0c\x0e-\x1f\x21\x23-\x5b\x5d-\x7f]|\\[\x01-\x09\x0b\x0c\x0e-\x7f])*")@(?:(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?|\[(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?|[a-z0-9-]*[a-z0-9]:(?:[\x01-\x08\x0b\x0c\x0e-\x1f\x21-\x5a\x53-\x7f]|\\[\x01-\x09\x0b\x0c\x0e-\x7f])+)\])'''
regex_phone = r'''(1?\W*([2-9][0-8][0-9])\W*([2-9][0-9]{2})\W*([0-9]{4})(\se?x?t?(\d*))?)'''
email_list = set(re.findall(regex_email, response.text))
    phone_list = set(re.findall(regex_phone, response.text))
print(email_list)
if __name__ == '__main__':
"""Docstring goes here"""
main(sys.argv[1:])
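# Added usage note (illustration, not part of the original script): invoke the
# script with a single URL argument, e.g.
#   python <this file> https://example.com
# to print the set of e-mail addresses scraped from that page.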
|
[
"egalindo@protonmail.com"
] |
egalindo@protonmail.com
|
1c0a243fae087ba9520b940a1940a5458e5d1a61
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_95/2103.py
|
a414f3ccb3b1e3142cf256e7d173e37982b1e31b
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,026
|
py
|
#! /usr/bin/env python
import operator
from sys import stdin
in1 = "ejp mysljylc kd kxveddknmc re jsicpdrysirbcpc ypc rtcsra dkh wyfrepkym veddknkmkrkcdde kr kd eoya kw aej tysr re ujdr lkgc jvzq"
out1 = "our language is impossible to understandthere are twenty six factorial possibilitiesso it is okay if you want to just give upqz"
sample = "ejp mysljylc kd kxveddknmc re jsicpdrysi"
def getInput():
raw = stdin.readlines()
for x in range(0, len(raw)):
raw[x] = raw[x].replace('\n', '')
return raw
def makeMap(input_str, output_str):
mymap = {}
for x,y in zip(input_str, output_str):
if(x != " "):
mymap[x] = y
return mymap
def googler2english(input_str):
mymap = makeMap(in1, out1)
ret_str = ""
for x in input_str:
if x != ' ':
ret_str = ret_str + mymap[x]
else:
ret_str = ret_str + " "
return ret_str
def main():
myinput = getInput()
bound = int(myinput[0])
for x in range(1, bound + 1):
print "Case #%d: %s" % (x, googler2english(myinput[x]))
if __name__ == "__main__":
main()
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
5e1e1a8a01e9a4132bd94ac4745a7070a47d4718
|
be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1
|
/Gauss_v45r8/Gen/DecFiles/options/22114002.py
|
7953c9feeb8c8d3d7f7c9d855b04a94363e3a510
|
[] |
no_license
|
Sally27/backup_cmtuser_full
|
34782102ed23c6335c48650a6eaa901137355d00
|
8924bebb935b96d438ce85b384cfc132d9af90f6
|
refs/heads/master
| 2020-05-21T09:27:04.370765
| 2018-12-12T14:41:07
| 2018-12-12T14:41:07
| 185,989,173
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 752
|
py
|
# file /home/hep/ss4314/cmtuser/Gauss_v45r8/Gen/DecFiles/options/22114002.py generated: Fri, 27 Mar 2015 15:48:15
#
# Event Type: 22114002
#
# ASCII decay Descriptor: [D0 -> pi+ pi- mu+ mu-]cc
#
from Configurables import Generation
Generation().EventType = 22114002
Generation().SampleGenerationTool = "SignalPlain"
from Configurables import SignalPlain
Generation().addTool( SignalPlain )
Generation().SignalPlain.ProductionTool = "PythiaProduction"
from Configurables import ToolSvc
from Configurables import EvtGenDecay
ToolSvc().addTool( EvtGenDecay )
ToolSvc().EvtGenDecay.UserDecayFile = "$DECFILESROOT/dkfiles/D0_pipimumu=DecProdCut.dec"
Generation().SignalPlain.CutTool = "DaughtersInLHCb"
Generation().SignalPlain.SignalPIDList = [ 421,-421 ]
|
[
"slavomirastefkova@b2pcx39016.desy.de"
] |
slavomirastefkova@b2pcx39016.desy.de
|
1565de3abac24dee338faefa2cd4b094f35f2ecd
|
7963f09b4002249e73496c6cbf271fd6921b3d22
|
/emulator_6502/instructions/sbc.py
|
31e549a6575d30d766fd1bf37990f233dd92938b
|
[] |
no_license
|
thales-angelino/py6502emulator
|
6df908fc02f29b41fad550c8b773723a7b63c414
|
1cea28489d51d77d2dec731ab98a6fe8a515a2a8
|
refs/heads/master
| 2023-03-19T14:46:17.393466
| 2021-03-08T04:10:45
| 2021-03-08T04:10:45
| 345,754,473
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,553
|
py
|
SBC_IMMEDIATE_OPCODE = 0xe9
SBC_ZEROPAGE_OPCODE = 0xe5
SBC_ZEROPAGEX_OPCODE = 0xf5
SBC_ABSOLUTE_OPCODE = 0xed
SBC_ABSOLUTEX_OPCODE = 0xfd
SBC_ABSOLUTEY_OPCODE = 0xf9
SBC_INDIRECTX_OPCODE = 0xe1
SBC_INDIRECTY_OPCODE = 0xf1
class SBCImmediate(object):
def __init__(self):
super(SBCImmediate, self).__init__()
def run(self, cpu):
byte_r = cpu.immediate()
print("SBC memory byte read: %s" % hex(byte_r))
print("SBC register A read: %s" % hex(cpu.a))
print("SBC processor status Carry read: %s" % hex(cpu.processor_status['carry']))
cpu.sbc(byte_r)
class SBCZeroPage(object):
"""SBC Zero Page instruction"""
def __init__(self):
super(SBCZeroPage, self).__init__()
def run(self, cpu):
byte_r = cpu.zero_page()
print("SBC zero page byte read: %s" % hex(byte_r))
print("SBC register A read: %s" % hex(cpu.a))
print("SBC processor status Carry read: %s" % hex(cpu.processor_status['carry']))
cpu.sbc(byte_r)
class SBCZeroPageX(object):
"""SBC Zero Page X instruction"""
def __init__(self):
super(SBCZeroPageX, self).__init__()
def run(self, cpu):
byte_r = cpu.zero_page_x()
print("SBC zero page X byte read: %s" % hex(byte_r))
print("SBC register A read: %s" % hex(cpu.a))
print("SBC processor status carry read: %s" % hex(cpu.processor_status['carry']))
cpu.sbc(byte_r)
class SBCAbsolute(object):
"""SBC absolute instruction"""
def __init__(self):
super(SBCAbsolute, self).__init__()
def run(self, cpu):
byte_r = cpu.absolute()
print("SBC absolute byte read: %s" % hex(byte_r))
print("SBC register A read: %s" % hex(cpu.a))
print("SBC processor status carry read: %s" % hex(cpu.processor_status['carry']))
cpu.sbc(byte_r)
class SBCAbsoluteX(object):
"""SBC absolute X instruction"""
def __init__(self):
super(SBCAbsoluteX, self).__init__()
def run(self, cpu):
byte_r = cpu.absolute_x()
print("SBC absolute x byte read: %s" % hex(byte_r))
print("SBC register A read: %s" % hex(cpu.a))
print("SBC processor status carry read: %s" % hex(cpu.processor_status['carry']))
cpu.sbc(byte_r)
class SBCAbsoluteY(object):
"""SBC absolute Y instruction"""
def __init__(self):
super(SBCAbsoluteY, self).__init__()
def run(self, cpu):
byte_r = cpu.absolute_y()
print("SBC absolute Y byte read: %s" % hex(byte_r))
print("SBC register A read: %s" % hex(cpu.a))
print("SBC processor status carry read: %s" % hex(cpu.processor_status['carry']))
cpu.sbc(byte_r)
class SBCIndirectX(object):
"""SBC indirect X instruction"""
def __init__(self):
super(SBCIndirectX, self).__init__()
def run(self, cpu):
byte_r = cpu.indirect_x()
print("SBC indirect X byte read: %s" % hex(byte_r))
print("SBC register A read: %s" % hex(cpu.a))
print("SBC processor status carry read: %s" % hex(cpu.processor_status['carry']))
cpu.sbc(byte_r)
class SBCIndirectY(object):
"""SBC Indirect Y instruction"""
def __init__(self):
super(SBCIndirectY, self).__init__()
def run(self, cpu):
byte_r = cpu.indirect_y()
print("SBC indirect Y byte read: %s" % hex(byte_r))
print("SBC register A read: %s" % hex(cpu.a))
print("SBC processor status Carry read: %s" % hex(cpu.processor_status['carry']))
cpu.sbc(byte_r)
|
[
"thales.angelino@gmail.com"
] |
thales.angelino@gmail.com
|
a0adbf0801f319434a3785fe01f994198732a1a1
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2864/60618/317011.py
|
a6f56f689fbf65afed239fae41b9105f1e30fc28
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,548
|
py
|
class Solution:
def find(self, n, data):
if n == 1:
return data[0]
re = 0
d = dict()
for i in range(n):
d[data[i]] = data.count(data[i])
        # these two lines are worth studying carefully!!!
        sorted_key_list = sorted(d, reverse=True)  # keys from largest to smallest
sorted_dict = sorted(d.items(), key=lambda x: x[0], reverse=True)
k = sorted_key_list
dd = []
for item in sorted_dict:
dd.append(item[1])
i = 0
while i < len(k):
            if k[i] == k[i+1] + 1:  # the next (smaller) value cannot be taken as well
if dd[i] * k[i] > dd[i + 1] * k[i + 1]:
re += dd[i] * k[i]
i += 1
if i == len(k) - 1:
break
if i == len(k) - 2:
re += dd[i + 1] * k[i + 1]
break
else:
re += dd[i + 1] * k[i + 1]
i += 1
if i == len(k) - 1:
break
if k[i] == k[i+1]+1:
i += 1
i += 1
            else:  # the next value can still be taken
re += dd[i] * k[i]
i += 1
if i == len(k) - 1:
re += dd[i] * k[i]
break
return re
if __name__ == '__main__':
n = int(input())
data = [int(a) for a in input().split()]
s = Solution()
re = s.find(n, data)
print(re)
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
b248f7b6e4a7f92757f0a8c13236f489a28b112f
|
ace30d0a4b1452171123c46eb0f917e106a70225
|
/filesystems/vnx_rootfs_lxc_ubuntu64-16.04-v025-openstack-compute/rootfs/usr/lib/python2.7/dist-packages/oslo_privsep/tests/test_comm.py
|
72f7aefceab898b5a143045d7aa771a0fc759a9f
|
[
"Python-2.0"
] |
permissive
|
juancarlosdiaztorres/Ansible-OpenStack
|
e98aa8c1c59b0c0040c05df292964520dd796f71
|
c01951b33e278de9e769c2d0609c0be61d2cb26b
|
refs/heads/master
| 2022-11-21T18:08:21.948330
| 2018-10-15T11:39:20
| 2018-10-15T11:39:20
| 152,568,204
| 0
| 3
| null | 2022-11-19T17:38:49
| 2018-10-11T09:45:48
|
Python
|
UTF-8
|
Python
| false
| false
| 2,698
|
py
|
# Copyright 2015 Rackspace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from oslotest import base
from oslo_privsep import comm
class BufSock(object):
def __init__(self):
self.readpos = 0
self.buf = six.BytesIO()
def recv(self, bufsize):
if self.buf.closed:
return b''
self.buf.seek(self.readpos, 0)
data = self.buf.read(bufsize)
self.readpos += len(data)
return data
def sendall(self, data):
self.buf.seek(0, 2)
self.buf.write(data)
def shutdown(self, _flag):
self.buf.close()
class TestSerialization(base.BaseTestCase):
def setUp(self):
super(TestSerialization, self).setUp()
sock = BufSock()
self.input = comm.Serializer(sock)
self.output = iter(comm.Deserializer(sock))
def send(self, data):
self.input.send(data)
return next(self.output)
def assertSendable(self, value):
self.assertEqual(value, self.send(value))
def test_none(self):
self.assertSendable(None)
def test_bool(self):
self.assertSendable(True)
self.assertSendable(False)
def test_int(self):
self.assertSendable(42)
self.assertSendable(-84)
def test_bytes(self):
data = b'\x00\x01\x02\xfd\xfe\xff'
self.assertSendable(data)
def test_unicode(self):
data = u'\u4e09\u9df9'
self.assertSendable(data)
def test_tuple(self):
self.assertSendable((1, 'foo'))
def test_list(self):
# NB! currently lists get converted to tuples by serialization.
self.assertEqual((1, 'foo'), self.send([1, 'foo']))
def test_dict(self):
self.assertSendable(
{
'a': 'b',
1: 2,
None: None,
(1, 2): (3, 4),
}
)
def test_badobj(self):
class UnknownClass(object):
pass
obj = UnknownClass()
self.assertRaises(TypeError, self.send, obj)
def test_eof(self):
self.input.close()
self.assertRaises(StopIteration, next, self.output)
|
[
"jcdiaztorres96@gmail.com"
] |
jcdiaztorres96@gmail.com
|
a7f52a070ab9786932134e6185e25c4294abacda
|
bfc25f1ad7bfe061b57cfab82aba9d0af1453491
|
/data/external/repositories_2to3/113677/KaggleBillionWordImputation-master/scripts/test_to_train.py
|
d6a8b8242d2e01d61592d440427057247ee7db57
|
[
"MIT"
] |
permissive
|
Keesiu/meta-kaggle
|
77d134620ebce530d183467202cf45639d9c6ff2
|
87de739aba2399fd31072ee81b391f9b7a63f540
|
refs/heads/master
| 2020-03-28T00:23:10.584151
| 2018-12-20T19:09:50
| 2018-12-20T19:09:50
| 147,406,338
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 284
|
py
|
#!/usr/bin/env python
'''Convert test file format to train file format'''
import sys
if __name__ == '__main__':
header = sys.stdin.readline()
for line in sys.stdin:
i, sentence = line.rstrip().split(',', 1)
print(sentence[1:-1].replace('""', '"'))
|
[
"keesiu.wong@gmail.com"
] |
keesiu.wong@gmail.com
|
7914eab270311d6a94213bb0d0fa5edfa4c36fb0
|
863d32f9adc6890600a7a114574be66e80dc4ec7
|
/models/seg_model.py
|
0e3d6fddf9a0d4b5e475694ffe2eb863038fda1d
|
[] |
no_license
|
dsl2009/dsl_instance
|
9e60dc36a3106a9500a9486208533c2eb23578ae
|
ca299c16feaf58eadfd21f282bf681194b6c118f
|
refs/heads/master
| 2020-04-24T15:18:08.246023
| 2019-07-26T08:38:19
| 2019-07-26T08:38:19
| 172,060,432
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,584
|
py
|
from models import resnet
import torch
from torch import nn
from torch.nn import functional as F
from layer import renet
class SegModel(nn.Module):
def __init__(self):
super(SegModel, self).__init__()
self.cnn = resnet.resnet50(pretrained=False)
self.cov1 = nn.Sequential(
nn.Conv2d(2048, 512, kernel_size=1, stride=1,bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
)
self.cov2 = nn.Sequential(
nn.Conv2d(768, 256, kernel_size=3,padding=1, stride=1, bias=False),
nn.BatchNorm2d(256),
nn.ReLU()
)
self.cov3 = nn.Sequential(
nn.Conv2d(320, 64, kernel_size=3,padding=1, stride=1, bias=False),
nn.BatchNorm2d(64),
nn.ReLU()
)
self.seg = nn.Conv2d(64, 1, kernel_size=3,padding=1, stride=1, bias=False)
self.edge = nn.Conv2d(64, 1, kernel_size=3, padding=1, stride=1, bias=False)
def forward(self, img):
x1, x2, x3 = self.cnn(img)
x3 = self.cov1(x3)
x3_up = F.interpolate(x3,scale_factor=2, mode='bilinear')
x2 = torch.cat([x3_up, x2],dim =1)
x2 = self.cov2(x2)
x2_up = F.interpolate(x2,scale_factor=2, mode='bilinear')
x1 = torch.cat([x2_up, x1],dim =1)
x1 = self.cov3(x1)
x0 = F.interpolate(x1,scale_factor=2, mode='bilinear')
seg = self.seg(x0)
edge = self.edge(x0)
return seg,edge
if __name__ == '__main__':
x = torch.randn(2,3,256,256).cuda()
md = SegModel().cuda()
md(x)
|
[
"dsl"
] |
dsl
|
34c4d58dbc00a029cccf06bca3604352c7a3dc0b
|
833e9e3b34b271aa2522471bd0b281b892adff78
|
/backend/forms.py
|
9f1014a729fa0d32ce2cc205096f506180fa41c4
|
[] |
no_license
|
emilte/case
|
b3fcd869468e093ec754980824c6b155f283caa7
|
35eadb05bdd224f845353a952c9aa18b03d95591
|
refs/heads/master
| 2021-06-27T13:19:32.550253
| 2019-11-24T23:21:36
| 2019-11-24T23:21:36
| 223,599,299
| 0
| 0
| null | 2021-03-19T08:42:52
| 2019-11-23T14:10:19
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 2,377
|
py
|
from django import forms
from urllib import request
from captcha.fields import ReCaptchaField
from django.conf import settings
def between(x, a, b):
return x >= a and x <= b
class Info(forms.Form):
applicant = forms.CharField(initial="emil", required=True, widget=forms.HiddenInput)
name = forms.CharField(initial="Emil Telstad", required=True, min_length=2)
email = forms.EmailField(initial="emil.telstad@gmail.com", required=True)
phone = forms.IntegerField(initial="41325358", required=True)
areacode = forms.CharField(initial="7051", required=False, min_length=4, max_length=4)
comment = forms.CharField(required=False, widget=forms.Textarea)
captcha = ReCaptchaField(
public_key=settings.RECAPTCHA_PUBLIC_KEY,
private_key=settings.RECAPTCHA_PRIVATE_KEY,
)
required_css_class = 'required'
def __init__(self, *args, **kwargs):
super(type(self), self).__init__(*args, **kwargs)
for field in self.fields.values():
field.widget.attrs.update({'class': 'form-control'})
self.fields['name'].widget.attrs.update({'placeholder': 'Ola Nordmann'})
self.fields['email'].widget.attrs.update({'placeholder': 'navn@domene.no'})
self.fields['phone'].widget.attrs.update({'placeholder': '12345678'})
self.fields['areacode'].widget.attrs.update({'placeholder': '1234'})
def clean_phone(self):
data = self.cleaned_data['phone']
if between(data, 40000000, 49999999) or between(data, 90000000, 99999999):
return data
raise forms.ValidationError("Invalid Norwegian phone number")
def clean_areacode(self):
data = self.cleaned_data['areacode']
if not data: # Areacode is not required
return data
try: int(data)
except: raise forms.ValidationError("Areacodes contain only digits (0-9)")
if len(data) != 4:
raise forms.ValidationError("Norwegian areacodes contain exactly 4 digits")
resource = request.urlopen("https://www.bring.no/postnummerregister-ansi.txt")
encode = resource.headers.get_content_charset()
for line in resource:
line = line.decode(encode)
n = line.split('\t')[0]
if int(n) == int(data):
return data
raise forms.ValidationError("Areacode does not exist")
|
[
"emil.telstad@gmail.com"
] |
emil.telstad@gmail.com
|
87cb6e36d3ce8f25552e58055a81a96c81d016d0
|
9994911f0ff388c92c21ca8178eec2d3af57082d
|
/teamup/cli.py
|
8379e8bc873e2b905aca6bd2f170758de61ca15c
|
[
"MIT"
] |
permissive
|
BruceEckel/TeamUp
|
2809b36b8946b51bf96fcc113ef24ef02508f3c9
|
23e29301b462c329ad17253b4d4fb7f56fb7881b
|
refs/heads/master
| 2023-01-05T19:06:21.010258
| 2022-12-26T23:30:44
| 2022-12-26T23:30:44
| 127,565,232
| 7
| 1
|
MIT
| 2022-12-26T23:30:45
| 2018-03-31T19:42:07
|
Python
|
UTF-8
|
Python
| false
| false
| 1,527
|
py
|
# -*- coding: utf-8 -*-
"""
Combine people for group activities
"""
from pathlib import Path
import os, sys
import click
import webbrowser
from teamup.pairings import Pairings
from teamup.PersistentLoopCounter import PersistentLoopCounter
attendees = Path("Attendees.txt")
html = Path() / "html"
@click.group()
@click.version_option()
def main():
"""
Generates and displays all combinations of 2-person teams using a
round-robin algorithm. Requires an Attendees.txt file containing
one name per line. Remove the 'html' directory to restart.
"""
def display(index):
pairing = html / f"pairing{index}.html"
assert pairing.exists()
webbrowser.open_new_tab(pairing)
@main.command()
def current():
"""
Show current teams
"""
if not attendees.exists():
print("Attendees.txt not found")
sys.exit(1)
pairings = Pairings.from_file(Path("Attendees.txt"))
if not html.exists():
pairings.create_html_files()
PersistentLoopCounter.create(html, pairings.bound)
display(PersistentLoopCounter.get(html).index())
@main.command()
def next():
"""
Moves to next team grouping and shows
"""
if not html.exists():
print("No 'html' directory, first run 'teamup current'")
sys.exit(1)
display(PersistentLoopCounter.get(html).next())
# @main.command()
# def clean():
# """
# Erases the 'html' directory
# """
# if html.exists():
# html.unlink()
if __name__ == "__main__":
main()
|
[
"mindviewinc@gmail.com"
] |
mindviewinc@gmail.com
|
9770331cc4ed8b9caba652786a87ec8aced75466
|
e94c7bd97d8b8b3b2945d357521bd346e66d5d75
|
/test/lmp/script/gen_txt/test_signature.py
|
1a75301a671acbdfbd9ac9ea870cb204b57d9bc1
|
[
"Beerware"
] |
permissive
|
ProFatXuanAll/language-model-playground
|
4d34eacdc9536c57746d6325d71ebad0d329080e
|
ec4442a0cee988a4412fb90b757c87749b70282b
|
refs/heads/main
| 2023-02-19T16:21:06.926421
| 2022-09-25T13:35:01
| 2022-09-25T13:35:01
| 202,471,099
| 11
| 26
|
NOASSERTION
| 2023-02-16T06:39:40
| 2019-08-15T03:57:23
|
Python
|
UTF-8
|
Python
| false
| false
| 1,040
|
py
|
"""Test :py:mod:`lmp.script.gen_txt` signatures."""
import argparse
import inspect
from inspect import Parameter, Signature
from typing import List
import lmp.script.gen_txt
def test_module_method() -> None:
"""Ensure module methods' signatures."""
assert hasattr(lmp.script.gen_txt, 'parse_args')
assert inspect.isfunction(lmp.script.gen_txt.parse_args)
assert inspect.signature(lmp.script.gen_txt.parse_args) == Signature(
parameters=[
Parameter(
annotation=List[str],
default=Parameter.empty,
kind=Parameter.POSITIONAL_OR_KEYWORD,
name='argv',
),
],
return_annotation=argparse.Namespace,
)
assert hasattr(lmp.script.gen_txt, 'main')
assert inspect.isfunction(lmp.script.gen_txt.main)
assert inspect.signature(lmp.script.gen_txt.main) == Signature(
parameters=[
Parameter(
annotation=List[str],
default=Parameter.empty,
kind=Parameter.POSITIONAL_OR_KEYWORD,
name='argv',
),
],
return_annotation=None,
)
|
[
"ProFatXuanAll@gmail.com"
] |
ProFatXuanAll@gmail.com
|
68b259649181c54eea9faebc337711ab016af534
|
5c4289608693609de3d755674cba53b77cbc4c69
|
/Python_Study/2课堂练习/Python基础班/06_名片管理系统/cards_main.py
|
32a8e9caa251e2f2c3000e3de1f3a1e6e5ad5bcf
|
[
"Apache-2.0"
] |
permissive
|
vipliujunjie/HouseCore
|
95892e632f840f22715d08467d6610195d562261
|
e9fa5ebc048cbede7823ac59a011a554bddf8674
|
refs/heads/master
| 2021-02-05T13:09:43.962224
| 2020-02-28T14:46:26
| 2020-02-28T14:46:26
| 243,783,276
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,298
|
py
|
#! /Library/Frameworks/Python.framework/Versions/3.7/bin/python3
import cards_tools
# Infinite loop; the user decides when to exit
while True:
    # TODO(刘俊杰) show the function menu
cards_tools.show_menu()
action_str = input("请输入希望执行的操作:")
print("您选择的操作是【%s】" % action_str)
    # [1,2,3] operations on business cards
    if action_str in ["1", "2", "3"]:  # check that the choice is in the given list
        # add a new card
if action_str == "1":
cards_tools.new_card()
# pass
        # show all cards
if action_str == "2":
cards_tools.show_all()
# pass
        # search for a card
if action_str == "3":
cards_tools.search_card()
# pass
# pass
    # 0: exit the system
elif action_str == "0":
        # If, while developing, you do not want to write a branch body yet,
        # you can use the pass keyword as a placeholder so the code structure stays valid!
        # At run time the pass keyword does nothing
print("\n欢迎再次使用【名片管理系统】")
break
# pass
    # any other input: tell the user it is invalid
else:
print("您输入的不正确,请从新选择")
|
[
"1520997065@qq.com"
] |
1520997065@qq.com
|
468ec6b362681d9a3018b5f0182ef31622ef30b1
|
1b0a729f6e20c542a6370785a49c181c0675e334
|
/main.py
|
35fb3f77ad0ea393411e9e0c57d85315d85bd310
|
[] |
no_license
|
fans656/mint-dev
|
68125c4b41ab64b20d54a2b19e8bf0179dc4636b
|
408f6f055670b15a3f3ee9c9ec086b1090cce372
|
refs/heads/master
| 2021-05-04T11:43:44.740116
| 2016-09-07T13:43:44
| 2016-09-07T13:43:44
| 45,515,119
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 239
|
py
|
from mint import *
from mint.protocols.test import Retransmit
a, b, c = Host(), Host(), Host()
s = Switch()
link(a, s.tips[0], 1)
link(b, s.tips[1], 2)
#link(c, s.tips[2], 3)
a += Retransmit()
a.send('hi')
#b.send('me').at(5)
start()
|
[
"fans656@yahoo.com"
] |
fans656@yahoo.com
|
17edec3a0cbd5397bc360dc2289f7aa23fef2f2b
|
02122ec38633c178ced34d8a027addc919b4c200
|
/Nutrients/api/urls.py
|
757826e0b86fe90b0ab82e9e332d35f5dd0ee419
|
[] |
no_license
|
SIBU99/serverCVKM
|
07907b3c416892bcc432b9317506927112750a93
|
8182f2274216016a15a2a98ea5a31d7e05222ed5
|
refs/heads/master
| 2023-01-12T10:19:54.966211
| 2020-11-10T08:33:41
| 2020-11-10T08:33:41
| 311,407,784
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 182
|
py
|
from django.urls import path
from .views import NutrientExamination
urlpatterns = [
path("nutrient-examination/", NutrientExamination.as_view(), name="nutrient-examination"),
]
|
[
"kumarmishra678@gmail.com"
] |
kumarmishra678@gmail.com
|
2f42da8393cd536ef56b1a0bef15efe947177b66
|
d83118503614bb83ad8edb72dda7f449a1226f8b
|
/src/dprj/platinumegg/app/cabaret/views/mgr/model_edit/trade_shop.py
|
d402834b28b5ad1f8056bc5d4ec9eec808d29ae6
|
[] |
no_license
|
hitandaway100/caba
|
686fe4390e182e158cd9714c90024a082deb8c69
|
492bf477ac00c380f2b2758c86b46aa7e58bbad9
|
refs/heads/master
| 2021-08-23T05:59:28.910129
| 2017-12-03T19:03:15
| 2017-12-03T19:03:15
| 112,512,044
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,156
|
py
|
# -*- coding: utf-8 -*-
from platinumegg.app.cabaret.views.mgr.model_edit import AdminModelEditHandler,\
AppModelForm, ModelEditValidError, AppModelChoiceField
from defines import Defines
from platinumegg.app.cabaret.util.api import BackendApi
from platinumegg.app.cabaret.models.TradeShop import TradeShopMaster, TradeShopItemMaster
from platinumegg.app.cabaret.models.Schedule import ScheduleMaster
class Handler(AdminModelEditHandler):
"""マスターデータの操作.
"""
class Form(AppModelForm):
class Meta:
model = TradeShopMaster
exclude = (
Defines.MASTER_EDITTIME_COLUMN,
)
schedule = AppModelChoiceField(ScheduleMaster, required=False, label=u'期間')
def setting_property(self):
self.MODEL_LABEL = u'トレードショップ'
def valid_insert(self, master):
self.__valid_master(master)
def valid_update(self, master):
self.__valid_master(master)
def __valid_master(self, master):
model_mgr = self.getModelMgr()
self.__check_schedule(model_mgr, master)
self.__check_trade_shop_item_masetr_ids(model_mgr, master)
model_mgr.write_all()
def __check_schedule(self, model_mgr, master):
model = model_mgr.get_model(ScheduleMaster, master.schedule)
if model is None:
raise ModelEditValidError(u'スケジュールに、存在しないIDが指定されています.id=%d' % master.id)
def __check_trade_shop_item_masetr_ids(self, model_mgr, master):
if not isinstance(master.trade_shop_item_master_ids, (list)):
raise ModelEditValidError(u'trade_shop_item_master_idsのJsonが壊れています.id=%d' % master.id)
for trade_shop_item_master_id in master.trade_shop_item_master_ids:
model = model_mgr.get_model(TradeShopItemMaster, trade_shop_item_master_id)
if model is None:
raise ModelEditValidError(u'trade_shop_item_master_idsで指定されているidがTradeShopItemMasterに存在しません.id=%d' % master.id)
def main(request):
return Handler.run(request)
|
[
"shangye@mail.com"
] |
shangye@mail.com
|
47b2fcaa1e74c97b42be077420a4335f38b24f8d
|
a7ff1ba9437204454c6b8639e99b007393c64118
|
/synapse/tools/aha/enroll.py
|
a643a485268842bbc531afab92dd9b5e8bf84112
|
[
"Apache-2.0"
] |
permissive
|
vishalbelsare/synapse
|
67013933db31ac71a4074b08a46b129774f63e47
|
a418b1354b2f94e32644ede612c271a6c362ccae
|
refs/heads/master
| 2023-09-01T10:45:34.439767
| 2022-05-13T21:07:20
| 2022-05-13T21:07:20
| 164,022,574
| 0
| 0
|
Apache-2.0
| 2022-05-15T07:45:07
| 2019-01-03T21:01:32
|
Python
|
UTF-8
|
Python
| false
| false
| 2,609
|
py
|
import os
import sys
import asyncio
import argparse
import synapse.common as s_common
import synapse.telepath as s_telepath
import synapse.lib.output as s_output
import synapse.lib.certdir as s_certdir
descr = '''
Use a one-time use key to initialize your AHA user environment.
Examples:
python -m synapse.tools.aha.register tcp://aha.loop.vertex.link:27272/b751e6c3e6fc2dad7a28d67e315e1874
'''
async def main(argv, outp=s_output.stdout):
pars = argparse.ArgumentParser(prog='provision', description=descr)
pars.add_argument('onceurl', help='The one-time use AHA user enrollment URL.')
opts = pars.parse_args(argv)
async with s_telepath.withTeleEnv():
certpath = s_common.getSynDir('certs')
yamlpath = s_common.getSynPath('telepath.yaml')
teleyaml = s_common.yamlload(yamlpath)
if teleyaml is None:
teleyaml = {}
teleyaml.setdefault('version', 1)
teleyaml.setdefault('aha:servers', ())
s_common.gendir(certpath)
certdir = s_certdir.CertDir(path=certpath)
async with await s_telepath.openurl(opts.onceurl) as prov:
userinfo = await prov.getUserInfo()
ahaurls = userinfo.get('aha:urls')
ahauser = userinfo.get('aha:user')
ahanetw = userinfo.get('aha:network')
username = f'{ahauser}@{ahanetw}'
capath = certdir.getCaCertPath(ahanetw)
if capath is not None:
                os.unlink(capath)
byts = await prov.getCaCert()
capath = certdir.saveCaCertByts(byts)
outp.printf(f'Saved CA certificate: {capath}')
keypath = certdir.getUserKeyPath(username)
            if keypath is not None:
                os.unlink(keypath)
            crtpath = certdir.getUserCertPath(username)
            if crtpath is not None:
                os.unlink(crtpath)
xcsr = certdir.genUserCsr(username)
byts = await prov.signUserCsr(xcsr)
crtpath = certdir.saveUserCertByts(byts)
outp.printf(f'Saved user certificate: {crtpath}')
ahaurls = s_telepath.modurl(ahaurls, user=ahauser)
if ahaurls not in teleyaml.get('aha:servers'):
outp.printf('Updating known AHA servers')
servers = list(teleyaml.get('aha:servers'))
servers.append(ahaurls)
teleyaml['aha:servers'] = servers
s_common.yamlsave(teleyaml, yamlpath)
if __name__ == '__main__': # pragma: no cover
sys.exit(asyncio.run(main(sys.argv[1:])))
|
[
"noreply@github.com"
] |
vishalbelsare.noreply@github.com
|
ace388a41b74682d643ef7c6c7176d8cf1f6b831
|
3a5d8cdc7ac14c389fd9426f3f39c3b1dc906dda
|
/nautobot/extras/tests/test_jobs.py
|
e04668889b1dffc9a3853d2e190027a5f793514f
|
[
"Apache-2.0"
] |
permissive
|
nammie-punshine/nautobot
|
f3cdb9d269c37a74706c105d237b883650f10465
|
d6227b211ad89f25233a8791937cd75092421c8a
|
refs/heads/main
| 2023-03-08T10:51:29.437859
| 2021-02-24T20:44:32
| 2021-02-24T20:44:32
| 342,080,836
| 0
| 0
|
Apache-2.0
| 2021-02-25T01:01:36
| 2021-02-25T01:01:36
| null |
UTF-8
|
Python
| false
| false
| 1,970
|
py
|
import os
import uuid
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from nautobot.extras.choices import JobResultStatusChoices
from nautobot.extras.jobs import get_job, run_job
from nautobot.extras.models import JobResult
from nautobot.utilities.testing import TestCase
class JobTest(TestCase):
"""
Test basic jobs to ensure importing works.
"""
def test_job_pass(self):
"""
Job test with pass result.
"""
with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR, "extras/tests/dummy_jobs")):
module = "test_pass"
name = "TestPass"
job_class = get_job(f"local/{module}/{name}")
job_content_type = ContentType.objects.get(app_label="extras", model="job")
job_result = JobResult.objects.create(
name=job_class.class_path,
obj_type=job_content_type,
user=None,
job_id=uuid.uuid4(),
)
run_job(data={}, request=None, commit=False, job_result=job_result)
self.assertEqual(job_result.status, JobResultStatusChoices.STATUS_COMPLETED)
def test_job_fail(self):
"""
Job test with fail result.
"""
with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR, "extras/tests/dummy_jobs")):
module = "test_fail"
name = "TestFail"
job_class = get_job(f"local/{module}/{name}")
job_content_type = ContentType.objects.get(app_label="extras", model="job")
job_result = JobResult.objects.create(
name=job_class.class_path,
obj_type=job_content_type,
user=None,
job_id=uuid.uuid4(),
)
run_job(data={}, request=None, commit=False, job_result=job_result)
self.assertEqual(job_result.status, JobResultStatusChoices.STATUS_ERRORED)
|
[
"lampwins@gmail.com"
] |
lampwins@gmail.com
|
0829499a37fc13ac636386433fe887068436789a
|
b8ab0e1ac2634741a05e5fef583585b597a6cdcf
|
/wsltools/utils/faker/providers/date_time/fil_PH/__init__.py
|
42a736439193745ecd672678cc198a9d48ef49e4
|
[
"MIT"
] |
permissive
|
Symbo1/wsltools
|
be99716eac93bfc270a5ef0e47769290827fc0c4
|
0b6e536fc85c707a1c81f0296c4e91ca835396a1
|
refs/heads/master
| 2022-11-06T16:07:50.645753
| 2020-06-30T13:08:00
| 2020-06-30T13:08:00
| 256,140,035
| 425
| 34
|
MIT
| 2020-04-16T14:10:45
| 2020-04-16T07:22:21
|
Python
|
UTF-8
|
Python
| false
| false
| 829
|
py
|
from .. import Provider as DateTimeProvider
class Provider(DateTimeProvider):
"""Provider for datetimes for fil_PH locale"""
DAY_NAMES = {
'0': 'Linggo',
'1': 'Lunes',
'2': 'Martes',
'3': 'Miyerkules',
'4': 'Huwebes',
'5': 'Biyernes',
'6': 'Sabado',
}
MONTH_NAMES = {
'01': 'Enero',
'02': 'Pebrero',
'03': 'Marso',
'04': 'Abril',
'05': 'Mayo',
'06': 'Hunyo',
'07': 'Hulyo',
'08': 'Agosto',
'09': 'Setyembre',
'10': 'Oktubre',
'11': 'Nobyembre',
'12': 'Disyembre',
}
def day_of_week(self):
day = self.date('%w')
return self.DAY_NAMES[day]
def month_name(self):
month = self.month()
return self.MONTH_NAMES[month]
|
[
"tr3jer@gmail.com"
] |
tr3jer@gmail.com
|
b3cffcaaac0bef8d65f8fdbae1aa31e4b48f15ed
|
2a1b8a671aceda6bc446f8ce26400aa84fa444a6
|
/Packs/FiltersAndTransformers/Scripts/JoinIfSingleElementOnly/JoinIfSingleElementOnly.py
|
c91e49454d83bdef53b8f6eeabbd9dcc16b073fc
|
[
"MIT"
] |
permissive
|
demisto/content
|
6d4722d46f0ff0beea2748e9f7de585bf91a78b4
|
890def5a0e0ae8d6eaa538148249ddbc851dbb6b
|
refs/heads/master
| 2023-09-04T00:02:25.618032
| 2023-09-03T21:56:22
| 2023-09-03T21:56:22
| 60,525,392
| 1,023
| 1,921
|
MIT
| 2023-09-14T20:55:24
| 2016-06-06T12:17:02
|
Python
|
UTF-8
|
Python
| false
| false
| 466
|
py
|
import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
def return_first_element_if_single(value):
res = value
if isinstance(value, list):
if len(value) == 1:
res = value[0]
return res
def main(): # pragma: no cover
value = demisto.args()["value"]
res = return_first_element_if_single(value)
demisto.results(res)
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
|
[
"noreply@github.com"
] |
demisto.noreply@github.com
|
40d836471602038f8e490438807b48014491d9e2
|
df97d5b25d40b54e0714ed9c0a6dd7a579011e2e
|
/mikadocms/flikr_grabber.py
|
966050a532ec3be0269d2f1bc60375d21d2ae39b
|
[] |
no_license
|
mikadosoftware/mikadoCMS
|
90ac1910b06f32bc3e808d1df656ba38a30e781c
|
7bb1ca4f66b74d4529a601540e1bf469f44d3b01
|
refs/heads/master
| 2021-01-17T00:20:34.489198
| 2018-06-13T15:27:53
| 2018-06-13T15:27:53
| 8,103,422
| 0
| 0
| null | 2013-05-03T23:07:59
| 2013-02-08T23:27:27
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 2,740
|
py
|
#!/usr/bin/env python
#! -*- coding: utf-8 -*-
### Copyright Paul Brian 2013
# This program is licensed, without under the terms of the
# GNU General Public License version 2 (or later). Please see
# LICENSE.txt for details
###
"""
:author: paul@mikadosoftware.com <Paul Brian>
Flikr.com provides a useful outlet for using photographs on
a website with minimal cost, and importantly, fuss.
1. visit http://www.flickr.com/search/advanced/
Search for a photo (by tag / text) but click "creative commons"
and "commercial" use.
2. Find the right photo URL
3. run ``python flickr_grabber.py <URL>``
4. I will grab the page and make a best guess as to the original photo
URL
5.
"""
import requests
from bs4 import BeautifulSoup
import sys
from bookmaker import lib
import conf
from optparse import OptionParser
import logging
import webbrowser
import urllib
import os
import pprint
class myError(Exception):
pass
#########
PHOTO_STORE = "./photos"
testurl = "http://www.flickr.com/photos/comedynose/4230176889/"
def extract_photo_url(url):
r = requests.get(url)
soup = BeautifulSoup(r.text)
likelicandidate = soup.find(property='og:image')
resultstr = """
From page %s
We have likely candidate of
%s
or these:
"""
resultstr = resultstr % (url, str(likelicandidate))
for imgtag in soup.find_all("img"):
resultstr += str(imgtag)
return (likelicandidate, resultstr)
def get_photo(url):
"""
"""
tgt = os.path.join(PHOTO_STORE, os.path.basename(url))
urllib.urlretrieve(url, tgt)
#########
def parse_args():
parser = OptionParser()
parser.add_option("--config", dest="confpath",
help="path to ini file")
parser.add_option("--flikrpage", dest="flikrpage",
help="url to embedded photo")
parser.add_option("--flikrphoto", dest="flikrphoto",
help="url to stadnalone photo (mutually xlusive with glikrpage")
(options, args) = parser.parse_args()
return (options, args)
def main(opts, args):
"""
"""
if opts.confpath:
confd = conf.get_config(opts.confpath)
        logging.debug(pprint.pformat(confd))
else:
confd = {}
if opts.flikrpage:
likelicandidate, resultstr = extract_photo_url(opts.flikrpage)
print likelicandidate
print resultstr
if opts.flikrphoto:
get_photo(opts.flikrphoto)
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
opts, args = parse_args()
try:
main(opts, args)
except Exception, e:
print "We can trap a lot up here"
raise e
|
[
"paul@mikadosoftware.com"
] |
paul@mikadosoftware.com
|
5dfb79becde51feb01c67400ff548446d6963775
|
0cb38adedbe3a5192076de420e1aa0fd10ae3311
|
/return_merchandise_authorizations/admin.py
|
213dea63a59221b56ba699e6a457f59ff5076d67
|
[] |
no_license
|
fogcitymarathoner/rma
|
73ada816b98f068b6c00b2e1fcf39461259453fa
|
133d6026f99820d0702f0578b8a3b4574671f888
|
refs/heads/master
| 2021-01-11T00:32:47.797673
| 2016-10-10T18:34:54
| 2016-10-10T18:35:11
| 70,516,821
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 756
|
py
|
from django.contrib import admin
from return_merchandise_authorizations.models import Rma
from return_merchandise_authorizations.models import Item
from return_merchandise_authorizations.models import RmaAttachment
class ItemInline(admin.TabularInline):
model = Item
class AttachInline(admin.TabularInline):
model = RmaAttachment
class RmaAdmin(admin.ModelAdmin):
list_display = ('date', 'customer', 'case_number', 'reference_number', 'address')
search_fields = ('case_number', 'reference_number', 'address', 'issue')
inlines = [
ItemInline,
AttachInline
]
#
admin.site.register(Rma, RmaAdmin)
class ItemAdmin(admin.ModelAdmin):
list_display = ('note', 'quantity')
#
admin.site.register(Item, ItemAdmin)
|
[
"marc@fogtest.com"
] |
marc@fogtest.com
|
2e2f74124954a3985bfb08d9d40e0bc56bc5fff2
|
6e373b40393fb56be4437c37b9bfd218841333a8
|
/Level_6/Lecture_9/enroll/forms.py
|
a24e95e08208751aa12e95e489b7e6bdfa3638eb
|
[] |
no_license
|
mahto4you/Django-Framework
|
6e56ac21fc76b6d0352f004a5969f9d4331defe4
|
ee38453d9eceea93e2c5f3cb6895eb0dce24dc2b
|
refs/heads/master
| 2023-01-22T01:39:21.734613
| 2020-12-04T03:01:17
| 2020-12-04T03:01:17
| 318,383,854
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 659
|
py
|
from django.contrib.auth.models import User
from django import forms
from django.contrib.auth.forms import UserCreationForm, UserChangeForm
class SignUpForm(UserCreationForm):
password2 = forms.CharField(label='Confirm Password (again)', widget=forms.PasswordInput)
class Meta:
model = User
fields = ['username', 'first_name', 'last_name', 'email']
labels ={'email':'Email'}
class EditUserProfileForm(UserChangeForm):
password = None
class Meta:
model = User
fields = ['username', 'first_name', 'last_name', 'email', 'date_joined', 'last_login', 'is_active']
labels = {'email':'Email'}
|
[
"mahto4you@gmail.com"
] |
mahto4you@gmail.com
|
bb69649a492b5bb2e5ee249630dca2d8b04e8c78
|
8f1996c1b5a0211474c7fa287be7dc20a517f5f0
|
/batch/batch/cloud/driver.py
|
96349e4c4d578c9209d5ffabef4590256096a62d
|
[
"MIT"
] |
permissive
|
johnc1231/hail
|
9568d6effe05e68dcc7bf398cb32df11bec061be
|
3dcaa0e31c297e8452ebfcbeda5db859cd3f6dc7
|
refs/heads/main
| 2022-04-27T10:51:09.554544
| 2022-02-08T20:05:49
| 2022-02-08T20:05:49
| 78,463,138
| 0
| 0
|
MIT
| 2022-03-01T15:55:25
| 2017-01-09T19:52:45
|
Python
|
UTF-8
|
Python
| false
| false
| 936
|
py
|
from hailtop import aiotools
from gear import Database
from gear.cloud_config import get_global_config
from ..inst_coll_config import InstanceCollectionConfigs
from ..driver.driver import CloudDriver
from .azure.driver.driver import AzureDriver
from .gcp.driver.driver import GCPDriver
async def get_cloud_driver(
app,
db: Database,
machine_name_prefix: str,
namespace: str,
inst_coll_configs: InstanceCollectionConfigs,
credentials_file: str,
task_manager: aiotools.BackgroundTaskManager,
) -> CloudDriver:
cloud = get_global_config()['cloud']
if cloud == 'azure':
return await AzureDriver.create(
app, db, machine_name_prefix, namespace, inst_coll_configs, credentials_file, task_manager
)
assert cloud == 'gcp', cloud
return await GCPDriver.create(
app, db, machine_name_prefix, namespace, inst_coll_configs, credentials_file, task_manager
)
|
[
"noreply@github.com"
] |
johnc1231.noreply@github.com
|
6249e0ffb60185954c5323d646f6ee5e4b97a4cc
|
2be8a9f06d4003d12c0a727fb83d284c31a53050
|
/HoudiniHotBox17.0/lib/PastFbx.py
|
a984bb3fb35778efa1d77ea747bb869b4f43016f
|
[] |
no_license
|
LiuLiangFx/SmileHotBOX
|
7551d9578b2defe612950cb8e3bffdb85024cede
|
8bd8eac69b3c2a9824b9aa4488ca77789bea8d85
|
refs/heads/master
| 2021-01-01T10:22:26.959731
| 2020-02-09T03:16:32
| 2020-02-09T03:16:32
| 239,236,801
| 0
| 0
| null | 2020-02-09T02:47:18
| 2020-02-09T02:47:18
| null |
UTF-8
|
Python
| false
| false
| 3,133
|
py
|
import hou
class PastFbx:
def __init__(self):
pass
def checkNode(self,node, name,temp1 =0):
for childrenNode in node.parent().children():
if childrenNode.name() == name:
temp1 =childrenNode
return temp1
def checkInput(self,qian,hou1,temp=0):
if hou1.inputs() ==():
pass
else:
for node in hou1.inputs():
if node == qian:
temp =hou1
else:
temp =0
return temp
def creatNode(self,node,temp ):
for mergeName in temp:
serachNode = self.checkNode(node, mergeName)
if serachNode :
houNode = self.checkInput(node, serachNode )
if houNode ==0:
serachNode.setInput(100,node)
node = serachNode
else:
node = houNode
else:
merge = node.createOutputNode("merge",mergeName)
node = merge
def run(self):
plane = hou.ui.paneTabOfType(hou.paneTabType.NetworkEditor)
pos = plane.selectPosition()
pos1 = pos
node = plane.currentNode()
fl1=open('list.txt', 'r')
a= len( fl1.readlines())
check = 0
fl1.close()
for index in range(a):
pos[0] +=1
try:
null = node.createNode("object_merge")
except:
b = node.parent()
null =b.createNode("object_merge")
null.setPosition(pos)
fl1=open('list.txt', 'r')
path = fl1.readlines()[index][0:-1]
allPath= path.split("++")
null.parm("objpath1").set(allPath[0])
null.parm("xformtype").set("local")
attNode = null.createOutputNode("attribcreate")
attNode.parm("name1").set("shop_materialpath")
attNode.parm("type1").set("index")
attNode.parm("string1").set("/shop/"+ allPath[-1])
attNode.parm("class1").set("primitive")
catchNode = attNode.createOutputNode("catche_tool_1.0.1")
catchNode.bypass(1)
currentNode =catchNode
self.creatNode(currentNode,allPath[1:-1] )
comping =int((index*1.0/(a-1))*100 )
fl1.close()
print "CreatNode for " + null.name()+","+" Comping: " + str(comping)+"%"
print "\nCopy node success!!!!"
|
[
"change52092@yahoo.com"
] |
change52092@yahoo.com
|
0f6b34fbcc11d1d36e1186122b4196348d01de41
|
15d3a10db27128c06f84c30fa8d64b2e1c629fd9
|
/express/express/api_exception.py
|
50d8121033b83ac36e6070744f39d492bda13465
|
[] |
no_license
|
yiyuhao/exp
|
7cba6650e3113ba05698f90a7baf75b680dd6435
|
866a90b2e6f0d113559b0674f514cdd56020f7d6
|
refs/heads/master
| 2020-03-19T20:20:04.799355
| 2018-07-15T14:55:24
| 2018-07-15T14:55:24
| 136,897,007
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 431
|
py
|
# -*- coding: utf-8 -*
from rest_framework.views import exception_handler
def custom_exception_handler(exc, context):
# Call REST framework's default exception handler first,
# to get the standard error response.
response = exception_handler(exc, context)
# Now add the HTTP status code to the response.
if response is not None:
response.data['status_code'] = response.status_code
return response
|
[
"yiyuhao@mixadx.com"
] |
yiyuhao@mixadx.com
|
3b2ebe81d2835ea42691bb7d5bff97c782a8bc00
|
59ac1d0f09ebfb527701031f3ab2cfbfb8055f51
|
/soapsales/employees/migrations/0003_auto_20200902_1721.py
|
0819efd6aafd9e48e4f311586f2596836d84ff10
|
[] |
no_license
|
DUMBALINYOLO/erpmanu
|
d4eb61b66cfa3704bd514b58580bdfec5639e3b0
|
db979bafcc7481f60af467d1f48d0a81bbbfc1aa
|
refs/heads/master
| 2023-04-28T13:07:45.593051
| 2021-05-12T09:30:23
| 2021-05-12T09:30:23
| 288,446,097
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 543
|
py
|
# Generated by Django 3.0.7 on 2020-09-02 15:21
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('employees', '0002_auto_20200902_0038'),
]
operations = [
migrations.RenameField(
model_name='employee',
old_name='is_staff',
new_name='is_admin',
),
migrations.AlterField(
model_name='employee',
name='is_superuser',
field=models.BooleanField(default=False),
),
]
|
[
"baridzimaximillem@gmail.com"
] |
baridzimaximillem@gmail.com
|
27ca4ceae6de9d605e2bfc5c1fee240d3f1fe145
|
10300363f12e5a6a0ea6a69d0a6d210174499d60
|
/times.py
|
746272f2b9f1a029c66f51b8e55c0ba5edea3241
|
[] |
no_license
|
nedbat/point_match
|
2da5cc12bf3f3866b35ec71ea227a5d21760ca97
|
a6c19ed1d206ec1ad02b13e15b8d761192b32593
|
refs/heads/master
| 2023-06-22T04:16:09.638622
| 2019-04-01T21:25:09
| 2019-04-01T21:25:09
| 100,109,656
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,068
|
py
|
import random
import timeit
if 0:
TRIES = 10000
for z in range(7):
n = int(10**z)
stmt='random.randint(1, 999999) in d'
setup='import random; d = {{random.randint(1, 999999): 1 for _ in xrange({N:d})}}'.format(N=n)
total = timeit.timeit(stmt=stmt, setup=setup, number=TRIES)
print("{N:>9d}: {time:.7f}s".format(time=total/TRIES, N=n))
if 0:
TRIES = 2000
for z in range(7):
n = int(10**z)
stmt='random.randint(1, 999999) in x'
setup='import random; x = [random.randint(1, 999999) for _ in xrange({N:d})]'.format(N=n)
total = timeit.timeit(stmt=stmt, setup=setup, number=TRIES)
print("{N:>9d}: {time:.7f}s".format(time=total/TRIES, N=n))
if 1:
TRIES = 200
for z in range(7):
n = int(10**z)
stmt='sorted(x)'
setup='import random; x = [random.randint(1, 999999) for _ in xrange({N:d})]'.format(N=n)
total = timeit.timeit(stmt=stmt, setup=setup, number=TRIES)
print("{N:>9d}: {time:.7f}s".format(time=total/TRIES, N=n))
|
[
"ned@nedbatchelder.com"
] |
ned@nedbatchelder.com
|
adcb107a99607a4473a99cbe4a62c8ecc5918f4d
|
f71118a9f24e09bba18d021f9c4a43a97dc4dead
|
/codes/scripts/make_gif_video.py
|
fc81e5647ff7ce75b5bb35f226bce946a93a1d56
|
[
"Apache-2.0"
] |
permissive
|
BlueAmulet/BasicSR
|
d7420fd9d7b73bf0cd90a3201d84393f262e63be
|
7040913d8659a05af4c2428feb71c260efbf1e9c
|
refs/heads/lite
| 2021-07-10T14:48:26.037589
| 2020-07-23T01:59:27
| 2020-07-23T01:59:27
| 196,041,187
| 19
| 9
|
Apache-2.0
| 2020-09-01T17:39:00
| 2019-07-09T16:00:14
|
Python
|
UTF-8
|
Python
| false
| false
| 3,311
|
py
|
"""
Add text to images, then make gif/video sequence from images.
Since the created gif has low quality with color issues, use this script to generate image with
text and then use `gifski`.
Call `ffmpeg` to make video.
"""
import os.path
import numpy as np
import cv2
crt_path = os.path.dirname(os.path.realpath(__file__))
# configurations
img_name_list = ['x1', 'x2', 'x3', 'x4', 'x5']
ext = '.png'
text_list = ['1', '2', '3', '4', '5']
h_start, h_len = 0, 576
w_start, w_len = 10, 352
enlarge_ratio = 1
txt_pos = (10, 50) # w, h
font_size = 1.5
font_thickness = 4
color = 'red'
duration = 0.8 # second
use_imageio = False # use imageio to make gif
make_video = False # make video using ffmpeg
is_crop = True
if h_start == 0 or w_start == 0:
is_crop = False # do not crop
img_name_list = [x + ext for x in img_name_list]
input_folder = os.path.join(crt_path, './ori')
save_folder = os.path.join(crt_path, './ori')
color_tb = {}
color_tb['yellow'] = (0, 255, 255)
color_tb['green'] = (0, 255, 0)
color_tb['red'] = (0, 0, 255)
color_tb['magenta'] = (255, 0, 255)
color_tb['matlab_blue'] = (189, 114, 0)
color_tb['matlab_orange'] = (25, 83, 217)
color_tb['matlab_yellow'] = (32, 177, 237)
color_tb['matlab_purple'] = (142, 47, 126)
color_tb['matlab_green'] = (48, 172, 119)
color_tb['matlab_liblue'] = (238, 190, 77)
color_tb['matlab_brown'] = (47, 20, 162)
color = color_tb[color]
img_list = []
# make temp dir
if not os.path.exists(save_folder):
os.makedirs(save_folder)
print('mkdir [{}] ...'.format(save_folder))
if make_video:
# tmp folder to save images for video
tmp_video_folder = os.path.join(crt_path, '_tmp_video')
if not os.path.exists(tmp_video_folder):
os.makedirs(tmp_video_folder)
idx = 0
for img_name, write_txt in zip(img_name_list, text_list):
img = cv2.imread(os.path.join(input_folder, img_name), cv2.IMREAD_UNCHANGED)
base_name = os.path.splitext(img_name)[0]
print(base_name)
# crop image
if is_crop:
print('Crop image ...')
if img.ndim == 2:
img = img[h_start:h_start + h_len, w_start:w_start + w_len]
elif img.ndim == 3:
img = img[h_start:h_start + h_len, w_start:w_start + w_len, :]
else:
raise ValueError('Wrong image dim [{:d}]'.format(img.ndim))
# enlarge img if necessary
if enlarge_ratio > 1:
H, W, _ = img.shape
img = cv2.resize(img, (W * enlarge_ratio, H * enlarge_ratio), \
interpolation=cv2.INTER_CUBIC)
# add text
font = cv2.FONT_HERSHEY_COMPLEX
cv2.putText(img, write_txt, txt_pos, font, font_size, color, font_thickness, cv2.LINE_AA)
cv2.imwrite(os.path.join(save_folder, base_name + '_text.png'), img)
if make_video:
idx += 1
cv2.imwrite(os.path.join(tmp_video_folder, '{:05d}.png'.format(idx)), img)
img = np.ascontiguousarray(img[:, :, [2, 1, 0]])
img_list.append(img)
if use_imageio:
import imageio
imageio.mimsave(os.path.join(save_folder, 'out.gif'), img_list, format='GIF', duration=duration)
if make_video:
os.system('ffmpeg -r {:f} -i {:s}/%05d.png -vcodec mpeg4 -y {:s}/movie.mp4'.format(
1 / duration, tmp_video_folder, save_folder))
if os.path.exists(tmp_video_folder):
os.system('rm -rf {}'.format(tmp_video_folder))
|
[
"wxt1994@126.com"
] |
wxt1994@126.com
|
6747e33efcd4f93c3dbf79fe12368de440154955
|
b45e649b4580692dd1b8bf63ad29befb3daad95a
|
/spark/src/main/python/preprocBinning.py
|
6c21866ee6f9e294698dfe7cff5be5841bf1c7fa
|
[] |
no_license
|
xu-hao/FHIR-PIT
|
21ea0e5b8796d86f3a931b99e3e7a3f1e58b04a2
|
db2fb04e2cc0d9fce2f8043f594f60fdb8f5a8e8
|
refs/heads/master
| 2021-05-25T09:49:48.084629
| 2021-05-19T20:17:11
| 2021-05-19T20:17:11
| 127,015,534
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 660
|
py
|
import os
import sys
import json
from preprocPatient import *
from preprocVisit import *
year_start, year_end, config_file, input_dir, output_dir = sys.argv[1:]
for year in range(int(year_start), int(year_end) + 1):
print(year)
input_file_p = f"{input_dir}/{year}/all_patient"
output_file_p = f"{output_dir}/{year}patient"
preproc_patient(config_file, input_file_p, output_file_p)
input_file_v = f"{input_dir}/{year}/all_visit"
output_file_v = f"{output_dir}/{year}visit"
preproc_visit(config_file, input_file_v, output_file_v)
|
[
"xuh@cs.unc.edu"
] |
xuh@cs.unc.edu
|
f93a39f3c7ce5dc35b811f46c70586ec4a00c270
|
4d93acd63ce2835fcd7ea610fcd412b727a4f03e
|
/08-Markov/decay.py
|
aa454eea1ad7fb4d3765d62e0e5f8e83dfc8525a
|
[] |
no_license
|
jsbarbosa/JuanBarbosa_MCA
|
41ebcc27bb7dd8a886c9b4c1b416bd7e3cad2e57
|
4f49d17282679ae1fa81d7cc892b6560edf93828
|
refs/heads/master
| 2021-01-11T17:53:44.115810
| 2017-04-24T17:58:09
| 2017-04-24T17:58:09
| 79,863,469
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,590
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 13 18:21:00 2017
@author: juan
"""
import numpy as np
import matplotlib.pyplot as plt
obs = np.array([1.5, 1.7, 2])
def rand():
return 2*np.random.random() - 1
def integral(a, b, lm):
return -lm*(func(b, lm) - func(a, lm))
def func(x, lm):
return np.exp(-x/lm)
def probability(x, lm):
p = 1
z = integral(1, 20, lm)
for x_ in x:
p *= func(x_, lm)/z
return p
def bayesian(x, lm):
return probability(x, lm)
def hastings(N, dx = 1):
lambdas = np.ones(N+1)
lambdas[0] = np.random.random()*10.0
for i in range(N):
second = lambdas[i] + dx*rand()
q = bayesian(obs, second)/bayesian(obs, lambdas[i])
alpha = min(q, 1.0)
u = np.random.random()
if u <= alpha and second > 0:
lambdas[i+1] = second
else:
lambdas[i+1] = lambdas[i]
return lambdas
def rubin(N, M, dl):
avs = np.zeros(M)
vas = np.zeros(M)
R = np.zeros(N-2)
chains = np.array([hastings(N, dl) for i in range(M)])
for j in range(2, N):
for i in range(M):
avs[i] = np.mean(chains[i, :j])
vas[i] = np.std(chains[i, :j])**2
total = np.mean(avs)
B = j/(M-1)*np.sum((avs-total)**2)
W = vas.mean()
R[j-2] = (j-1)/j + (B/W)*(M+1)/(j*M)
return R
N = 10000
lm = np.logspace(-3, 3, 5)
for l in lm:
R = rubin(N, 5, l)
plt.plot(R, label="%f"%l)
plt.xscale('log')
plt.yscale('log')
plt.legend()
plt.show()
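# Added note (illustration, not in the original script): the Gelman-Rubin
# statistic R computed in rubin() tends toward 1 as the chains converge, so
# curves that flatten out near 1 on this log-log plot indicate a converged sampler.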
|
[
"js.barbosa10@uniandes.edu.co"
] |
js.barbosa10@uniandes.edu.co
|
d39c8a61833fc2f4123d6803bf8dce614ed0a12a
|
cfc9a8831e5946d738329fad2763d643dec8566f
|
/src/encoded/tests/test_create_mapping.py
|
44d89dea3b1ec2d190ef281061e331a2302547be
|
[
"MIT"
] |
permissive
|
emi80/encoded
|
8e244a66b0d36610dcf8d9a47d385640dfa7987d
|
2fe2c2afbd3be21b65b10a189a3bd623ecdaee37
|
refs/heads/master
| 2021-01-18T12:34:56.060690
| 2015-03-05T21:56:05
| 2015-03-05T21:56:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 299
|
py
|
import pytest
from ..loadxl import ORDER
@pytest.mark.parametrize('item_type', ORDER)
def test_create_mapping(registry, item_type):
from ..commands.create_mapping import type_mapping
from ..contentbase import TYPES
mapping = type_mapping(registry[TYPES], item_type)
assert mapping
|
[
"laurence@lrowe.co.uk"
] |
laurence@lrowe.co.uk
|
8fc10d35f9fa5cced3f4939ab0d2ca50d42ab5cb
|
b5dbf732d26a2a924c85c5a107035be48bfe69cd
|
/2.7.py
|
a41cca6bfe45aaf10f7b7a81df3ea5680c11f318
|
[] |
no_license
|
Beks667/2.7Hw
|
2435bfa58e252357c46819f6987639ca025549be
|
4e03706bdfc70f2f94145a50f493f36995d08cdb
|
refs/heads/main
| 2023-04-19T13:10:24.348768
| 2021-05-07T12:44:27
| 2021-05-07T12:44:27
| 365,230,069
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,341
|
py
|
# class Phone :
# def __init__ (self,brand,model,color):
# self.brand = brand
# self.model = model
# self.color = color
# def show (self):
# print(f"{self.brand},{self.model},{self.color}")
# phone = Phone("Apple", "XS", "black")
# phone.show()
# class Monkey:
# max_age = 12
# loves_bananas = True
# def climb(self):
# print('I am climbing the tree')
# abc = Monkey()
# abc.climb()
# print(abc.max_age)
# abc.climb()
# print(abc.loves_bananas)
# Via input ----------------------------------------------------------------
# class Person:
# def __init__(self,name,age,gender):
# self.name = name
# self.age = age
# self.gender = gender
# def calculate_age(self):
# self.number = int(input('enter year:'))
# print(self.age + self.number)
# p = Person('John', 23, 'male')
# p.calculate_age()
# # Via self -----------------------------------------------------------------------
# class Person:
# def __init__(self,name,age,gender):
# self.name = name
# self.age = age
# self.gender = gender
# def calculate_age(self,year):
# self.year = year
# print(self.age + self.year)
# p = Person('John', 23, 'male')
# p.calculate_age(10)
#
|
[
"you@example.com"
] |
you@example.com
|
e1bccde57c18d31ab7ae91528e51e89563c8c9b2
|
3e7b2ebb64e9e324ce47d19def21ae62cc1e56a6
|
/Problem-solving/HackerRank/p14- sWAP cASE.py
|
5f4f5a0512103085cb85a010c0c4672a7a9a5c87
|
[] |
no_license
|
shuvo14051/python-data-algo
|
9b6622d9260e95ca9ffabd39b02996f13bdf20d1
|
8f66ff6f2bd88a0ae48dac72e4ea6c5382a836ec
|
refs/heads/master
| 2023-02-03T03:04:01.183093
| 2020-12-13T10:13:15
| 2020-12-13T10:13:15
| 274,106,480
| 0
| 0
| null | 2020-07-05T06:33:28
| 2020-06-22T10:24:05
|
Python
|
UTF-8
|
Python
| false
| false
| 194
|
py
|
# n = input()
#
# swaped_n = n.swapcase()
#
# print(swaped_n)
def swap_case(s):
return s.swapcase()
if __name__ == '__main__':
s = input()
result = swap_case(s)
print(result)
|
[
"shuvo1137017@gmail.com"
] |
shuvo1137017@gmail.com
|
25dd87758892b414426ec0e9c48e05fb4ac4a527
|
a4a44ad46cd1306e2da72ff89483b0102fc9787d
|
/SamplePython/Developer Tool/fab_fabric/pengenalan_dr_web/11_settings.py
|
8a018fc9d73ccd0692dcf6cf3d3a40dad2777d35
|
[] |
no_license
|
okipriyadi/NewSamplePython
|
640eb3754de98e6276f0aa1dcf849ecea22d26b1
|
e12aeb37e88ffbd16881a20a3c37cd835b7387d0
|
refs/heads/master
| 2020-05-22T01:15:17.427350
| 2017-02-21T04:47:08
| 2017-02-21T04:47:08
| 30,009,299
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 333
|
py
|
"""
settings (fabric.context_managers.settings)
When you need to override env values temporarily (i.e. for a certain
command chain), you can use the settings statement.
Usage examples:
"""
from fabric.api import settings, sudo
# Perform actions using a different *user*
with settings(user="avionics"):
sudo("cmd")
|
[
"oki.priyadi@pacificavionics.net"
] |
oki.priyadi@pacificavionics.net
|
4e8a125a7458dd004507e648e9417922ad85affe
|
14d7f5f83b6f84871ff6ebfa0af4c17b7115a33f
|
/eco_models/mpb/integration_stop.py
|
f391a20c2a14bae90e14d4ebe8bd5777a3fa7d32
|
[] |
no_license
|
tonychangmsu/Python_Scripts
|
8ca7bc841c94dcab36743bce190357ac2b1698a5
|
036f498b1fc68953d90aac15f0a5ea2f2f72423b
|
refs/heads/master
| 2016-09-11T14:32:17.133399
| 2016-03-28T16:34:40
| 2016-03-28T16:34:40
| 10,370,475
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 736
|
py
|
# Title: integration_stop.py
# Author: Tony Chang
# Date: 10.26.2015
# Abstract: Attempt to find the first index at which the cumulative sum (numerical
# integration) of an array of 2D matrices exceeds one.
import numpy as np
#first suppose we have a 3D matrix of values under 1
G = np.random.uniform(0,.05, (365,500,400))
#now develop a cumulative sum for each step
integral_G = np.cumsum(G, axis =0)
#now find out the index of the first axis where the value is equal to one.
index = np.argmax(integral_G>1, axis = 0)
#if any of these equals to 0 then we have a development that didn't complete, and we have a problem
#need more time to finish (i.e. more years to inspect).
#done!
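# Added sanity check (illustration, not in the original script): for a single
# cell whose increments are all 0.25, the cumulative sum first exceeds 1 at
# step index 4 (0.25, 0.5, 0.75, 1.0, 1.25, ...).
small = np.full((8, 1, 1), 0.25)
assert np.argmax(np.cumsum(small, axis=0) > 1, axis=0)[0, 0] == 4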
|
[
"tony.chang@msu.montana.edu"
] |
tony.chang@msu.montana.edu
|
9ae067e5cd4eccc2e3a324cc2e07669caccf8637
|
6630694f401f6f475dd81bb01ff9368db844ccff
|
/configs/_base_/models/hrnet/hrnet-w48.py
|
f0604958481ba2af277e3a0f9515dc1423def6c6
|
[
"Apache-2.0"
] |
permissive
|
open-mmlab/mmpretrain
|
98a4d6b3bb747efc3d50decebf84fc3ffa41076a
|
d2ccc44a2c8e5d49bb26187aff42f2abc90aee28
|
refs/heads/main
| 2023-08-30T19:11:24.771498
| 2023-08-23T02:45:18
| 2023-08-23T02:45:18
| 278,415,292
| 652
| 186
|
Apache-2.0
| 2023-09-08T08:01:40
| 2020-07-09T16:25:04
|
Python
|
UTF-8
|
Python
| false
| false
| 418
|
py
|
# model settings
model = dict(
    type='ImageClassifier',
    backbone=dict(type='HRNet', arch='w48'),
    neck=[
        dict(type='HRFuseScales', in_channels=(48, 96, 192, 384)),
        dict(type='GlobalAveragePooling'),
    ],
    head=dict(
        type='LinearClsHead',
        in_channels=2048,
        num_classes=1000,
        loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
        topk=(1, 5),
    ))
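
# A brief gloss (editorial note, not part of the upstream config): the HRNet-W48
# backbone emits four multi-resolution feature maps (48/96/192/384 channels),
# HRFuseScales fuses them into a single map matching the head's in_channels=2048,
# which is globally average-pooled before the 1000-way linear classification head.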
|
[
"noreply@github.com"
] |
open-mmlab.noreply@github.com
|
170f4291b543e014fadf954a0e8b37173c22f52f
|
965e1e205bf053d93b32be0dab4d45455b42b3a2
|
/NLP/PartsSpeech.py
|
29aa35ab37a1d1ca416e2d528400a686da8f4ba4
|
[] |
no_license
|
panditdandgule/DataScience
|
9e58867dd960ec554e0bbb8e4ce93baa226ab927
|
3eb59c129d81a6ba6b45e24113e25e63d19c60cb
|
refs/heads/master
| 2021-07-22T21:44:12.700518
| 2020-05-14T12:01:05
| 2020-05-14T12:01:05
| 166,497,260
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 669
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 15 19:59:50 2018
@author: pandit
"""
import nltk
from nltk.corpus import state_union
from nltk.tokenize import PunktSentenceTokenizer
train_text=state_union.raw("2005-GWBush.txt")
sample_text=state_union.raw("2005-GWBush.txt")
custom_sent_tokenizer=PunktSentenceTokenizer(train_text)
tokenized =custom_sent_tokenizer.tokenize(sample_text)
def process_content():
    try:
        for i in tokenized:
            words = nltk.word_tokenize(i)
            tagged = nltk.pos_tag(words)
            print(tagged)
    except Exception as e:
        print(str(e))


process_content()
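
# A tiny standalone illustration (hypothetical sentence, not from the corpus above)
# of the (token, Penn Treebank tag) pairs that nltk.pos_tag produces:
example_tags = nltk.pos_tag(nltk.word_tokenize("The president spoke briefly."))
print(example_tags)  # e.g. [('The', 'DT'), ('president', 'NN'), ('spoke', 'VBD'), ('briefly', 'RB'), ('.', '.')]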
|
[
"panditdandgule777@gmail.com"
] |
panditdandgule777@gmail.com
|
f70d4e2e4894ba7b8637af7ba93f753c0b5faa18
|
e23a4f57ce5474d468258e5e63b9e23fb6011188
|
/115_testing/examples/Github/_Level_1/python_unittests-master/sample_functions.py
|
8d63bc99d2a30ac0321b97976440c0d8474e1244
|
[] |
no_license
|
syurskyi/Python_Topics
|
52851ecce000cb751a3b986408efe32f0b4c0835
|
be331826b490b73f0a176e6abed86ef68ff2dd2b
|
refs/heads/master
| 2023-06-08T19:29:16.214395
| 2023-05-29T17:09:11
| 2023-05-29T17:09:11
| 220,583,118
| 3
| 2
| null | 2023-02-16T03:08:10
| 2019-11-09T02:58:47
|
Python
|
UTF-8
|
Python
| false
| false
| 204
|
py
|
def sum(a, b):
    return a + b


def contains_numbers(alpha_num_str):
    for char in alpha_num_str:
        if char.isdigit():
            return True  # a digit was found
    return False


def div(a, b):
    return a / b
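
# A minimal sketch (an assumed companion test, not part of the original file)
# of how these helpers could be exercised with the standard unittest module:
import unittest


class SampleFunctionTests(unittest.TestCase):
    def test_sum(self):
        self.assertEqual(sum(2, 3), 5)

    def test_contains_numbers(self):
        self.assertTrue(contains_numbers("abc1"))
        self.assertFalse(contains_numbers("abc"))

    def test_div(self):
        self.assertAlmostEqual(div(10, 4), 2.5)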
|
[
"sergejyurskyj@yahoo.com"
] |
sergejyurskyj@yahoo.com
|
d4a278c814384d490f690a077bab77a109b60b57
|
0ad79e7104500b5988f07e9f19f17a540f07605a
|
/Python算法指南/动态规划/67_最小和子数组_灵活运用动态规划.py
|
3273d8c3606054f4d873463057975d507015c93a
|
[] |
no_license
|
tonyyo/PycharmProjects
|
f0ce458ed662e33e75ddffbfcf28b0d1ed638743
|
a28620923336c352103858e0ccfc4117d1c4ea01
|
refs/heads/master
| 2022-09-19T02:02:15.919201
| 2020-06-03T12:57:38
| 2020-06-03T12:57:38
| 263,204,131
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 551
|
py
|
class Solution:
    def minSubArray(self, nums):
        MIN = nums[0]  # start from the first element so single-element arrays are handled
        SUM = nums[0]
        for i in range(1, len(nums)):
            SUM = SUM + nums[i] if SUM < 0 else nums[i]  # a positive SUM only hurts the minimum sum
            MIN = min(MIN, SUM)
        return MIN


if __name__ == '__main__':
    temp = Solution()
    List1 = [1, -1, -2, 1]
    List2 = [3, -2, 2, 1]
    print("Input: " + str(List1))
    print(("Output: " + str(temp.minSubArray(List1))))
    print("Input: " + str(List2))
    print(("Output: " + str(temp.minSubArray(List2))))
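
# Worked trace (values from List1 above) of the running-sum recurrence:
#   nums = [1, -1, -2, 1]
#   start: SUM = 1, MIN = 1
#   i=1: SUM >= 0, so restart      -> SUM = -1, MIN = -1
#   i=2: SUM < 0, keep extending   -> SUM = -3, MIN = -3
#   i=3: SUM < 0, keep extending   -> SUM = -2, MIN stays -3
# The minimum subarray sum is therefore -3, from the subarray [-1, -2].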
|
[
"1325338208@qq.com"
] |
1325338208@qq.com
|
962ad189b3695ad55e5db43027b6e869b2817147
|
fb408595c1edee0be293302c6d7bfc0c77d37c46
|
/python/DP/DP_2096.py
|
a5753e0e8dda2057310f4dee0f056e7940fbb74d
|
[] |
no_license
|
as950118/Algorithm
|
39ad25519fd0e42b90ddf3797a61239862ad79b5
|
739a7d4b569057cdb6b6faa74254512b83d02bb1
|
refs/heads/master
| 2023-07-21T12:38:00.653579
| 2023-07-19T06:57:17
| 2023-07-19T06:57:17
| 125,176,176
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 610
|
py
|
n = int(input())
arr = [0] * 3
dp_max = [0] * 3
dp_min = [0] * 3

arr = list(map(int, input().split()))
temp = arr[:]
dp_max = temp[:]
dp_min = temp[:]

for i in range(1, n):
    arr = list(map(int, input().split()))

    temp[0] = max(dp_max[0], dp_max[1]) + arr[0]
    temp[1] = max(dp_max[0], dp_max[1], dp_max[2]) + arr[1]
    temp[2] = max(dp_max[1], dp_max[2]) + arr[2]
    dp_max = temp[:]

    temp[0] = min(dp_min[0], dp_min[1]) + arr[0]
    temp[1] = min(dp_min[0], dp_min[1], dp_min[2]) + arr[1]
    temp[2] = min(dp_min[1], dp_min[2]) + arr[2]
    dp_min = temp[:]

print(max(dp_max), min(dp_min))
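
# Worked example (made-up 3x3 input, not from the judge's data):
#   1 2 3
#   4 5 6
#   4 9 0
# max path 3 -> 6 -> 9 gives 18, min path 1 -> 5 -> 0 gives 6, so "18 6" is printed.
# Each row only depends on the previous row's dp values, which is why two
# length-3 rolling lists are enough instead of a full n x 3 table.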
|
[
"na_qa@icloud.com"
] |
na_qa@icloud.com
|
f443e27275903b151314c40311f6464aafca1b44
|
72784799e5436e8a96462bdbcb29baeb644dcc7f
|
/utilities/animate.py
|
2c562e41c8ec2e736db293f0f772a55ff0091345
|
[] |
no_license
|
simonsben/undergrad_thesis
|
31dd205cb734f7c876b5053040e5ab0bf8fbd5cb
|
8458d00ae6525602b944279c2c280149a5957cb1
|
refs/heads/master
| 2020-04-02T10:46:55.255322
| 2019-04-08T06:01:48
| 2019-04-08T06:01:48
| 154,354,775
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 979
|
py
|
from matplotlib.pylab import figure, show, savefig, title, axis, draw
from networkx import spring_layout, draw_networkx_edges, draw_networkx_nodes
from matplotlib.animation import FuncAnimation


def add_node(graph, i, plot_layout):
    # draw_networkx_edges(graph, plot_layout, alpha=.3)
    # draw_networkx_nodes(node, plot_layout, node_size=100, edgecolors='k', node_color='w')
    i += 1
    draw()


def animate_creation(network, blocking=True, save_plot=True):
    _title = 'Free-Scale Network'
    fig = figure(_title)
    axis('off')

    graph = network.network_plot
    plot_layout = spring_layout(graph)

    init_nodes = graph.nodes[:3]
    init_edges = graph.edges[:2]

    draw_networkx_nodes(graph, plot_layout, nodelist=init_nodes, node_size=100, edgecolors='k', node_color='w')
    draw_networkx_edges(graph, plot_layout, edgelist=init_edges, alpha=.3)
    draw()
    show()

    i = 3
    animation = FuncAnimation(fig, add_node, fargs=(graph, i, plot_layout))
|
[
"simons.ben0@gmail.com"
] |
simons.ben0@gmail.com
|
52a608c85aa5b18e530c6cb0cae1d8d2f58b7ec4
|
14d8418ca5990217be67aee89fdaa310db03fbba
|
/test_stats_d_graph_display.py
|
fffe014750a15f323e8f39408530e03c6133cae4
|
[
"Apache-2.0"
] |
permissive
|
sachanta/lm-sdk-python
|
3a16457bd2d5b880a0d238a88a9d1d5b8d9675f0
|
e476d415c7279457f79b5d032a73d950af2fe96b
|
refs/heads/master
| 2023-08-03T08:39:42.842790
| 2021-09-13T07:20:56
| 2021-09-13T07:20:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,445
|
py
|
# coding: utf-8
"""
LogicMonitor REST API
LogicMonitor is a SaaS-based performance monitoring platform that provides full visibility into complex, hybrid infrastructures, offering granular performance monitoring and actionable data and insights. logicmonitor_sdk enables you to manage your LogicMonitor account programmatically. Note: For Python SDKs, the REQUEST parameters can contain camelCase or an underscore. However, the RESPONSE parameters will always contain an underscore. For example, the REQUEST parameter can be testLocation or test_location. The RESPONSE parameter will be test_location. # noqa: E501
OpenAPI spec version: 2.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import logicmonitor_sdk
from logicmonitor_sdk.models.stats_d_graph_display import StatsDGraphDisplay # noqa: E501
from logicmonitor_sdk.rest import ApiException
class TestStatsDGraphDisplay(unittest.TestCase):
    """StatsDGraphDisplay unit test stubs"""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def testStatsDGraphDisplay(self):
        """Test StatsDGraphDisplay"""
        # FIXME: construct object with mandatory attributes with example values
        # model = logicmonitor_sdk.models.stats_d_graph_display.StatsDGraphDisplay()  # noqa: E501
        pass


if __name__ == '__main__':
    unittest.main()
|
[
"bamboo@build01.us-west-1.logicmonitor.net"
] |
bamboo@build01.us-west-1.logicmonitor.net
|
ed3cea97ae571dfe1f0a45dba14fc43b93212a84
|
fb21a8f1fc02f5cee6f0a759e336561726d3b184
|
/experiments/lstm-notcut/run.py
|
74e6c8e8a8f1be2abab441657d41651360c17bd5
|
[] |
no_license
|
yamaguchi-milkcocholate/GoogleBrain-VentilatorPressurePrediction
|
76632353ff25a0c9ad8db51ef1f4b728954537b5
|
1996bb81f5b6880a20b8e39c681fecef0bc8a201
|
refs/heads/main
| 2023-09-05T17:25:46.980274
| 2021-11-24T04:40:50
| 2021-11-24T04:40:50
| 410,795,933
| 0
| 0
| null | 2021-11-04T01:28:27
| 2021-09-27T08:06:55
|
Python
|
UTF-8
|
Python
| false
| false
| 6,082
|
py
|
from random import seed
import numpy as np
import pandas as pd
import json
import os
import sys
import gc
import shutil
from pprint import pprint
from pathlib import Path
from typing import *

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint
from sklearn.metrics import mean_absolute_error as mae
from sklearn.preprocessing import RobustScaler, PowerTransformer, MinMaxScaler
from sklearn.model_selection import KFold

import sys

print(str(Path(__file__).resolve().parent.parent.parent))
sys.path.append(str(Path(__file__).resolve().parent.parent.parent))
from src.utils import (
    seed_every_thing,
    fetch_data,
    Config,
    plot_metric,
    reduce_tf_gpu_memory,
    reduce_mem_usage,
    fetch_custom_data,
    CustomL1Loss
)


def build_model(config: Config, n_features) -> keras.models.Sequential:
    model = keras.models.Sequential([keras.layers.Input(shape=(config.cut, n_features))])
    for n_unit in config.n_units:
        model.add(
            keras.layers.Bidirectional(
                keras.layers.LSTM(
                    n_unit,
                    return_sequences=True,
                )
            )
        )
    for n_unit in config.n_dense_units:
        model.add(keras.layers.Dense(n_unit, activation="selu"))
    model.add(keras.layers.Dense(1))
    model.compile(
        optimizer=keras.optimizers.Adam(learning_rate=config.lr),
        loss='mae')
    return model


def main(config: Dict[str, Any]):
    config = Config().update(config)
    seed_every_thing(seed=config.seed)
    reduce_tf_gpu_memory(gpu_id=config.gpu_id)

    basedir = Path(__file__).resolve().parent
    datadir = basedir / ".." / ".." / "data"
    logdir = basedir / ".." / ".." / "logs" / config.dirname
    cachedir = basedir / ".." / ".." / "cache"
    os.makedirs(logdir, exist_ok=True)
    config.to_json(logdir / "config.json")

    _, test_df, submission_df = fetch_custom_data(datadir=datadir, n_splits=config.n_splits)
    test_df["count"] = (np.arange(test_df.shape[0]) % 80).astype(int)
    test_preds_idx = test_df["count"] < config.cut
    test_df = test_df[test_preds_idx].reset_index(drop=True)
    test_df["pressure"] = 0

    train_df = reduce_mem_usage(pd.read_csv(cachedir / f"train-10fold-debug{config.debug}.csv"))
    test_df = reduce_mem_usage(pd.read_csv(cachedir / f"test-10fold-debug{config.debug}.csv"))
    kfolds = train_df.iloc[0::config.cut]['kfold'].values

    features = list(train_df.drop(["kfold", "pressure"], axis=1).columns)
    pprint(features)
    print(len(features))
    cont_features = [f for f in features if ("RC_" not in f) and ("R_" not in f) and ("C_" not in f) and ("u_out" not in f)]
    pprint(cont_features)

    RS = RobustScaler()
    train_df[cont_features] = RS.fit_transform(train_df[cont_features])
    test_df[cont_features] = RS.transform(test_df[cont_features])

    train_data, test_data = train_df[features].values, test_df[features].values
    train_data = train_data.reshape(-1, config.cut, train_data.shape[-1])
    targets = train_df[["pressure"]].to_numpy().reshape(-1, config.cut)
    test_data = test_data.reshape(-1, config.cut, test_data.shape[-1])

    with tf.device(f"/GPU:{config.gpu_id}"):
        valid_preds = np.empty_like(targets)
        test_preds = []
        for fold in range(config.n_splits):
            train_idx, test_idx = (kfolds != fold), (kfolds == fold)
            print("-" * 15, ">", f"Fold {fold+1}", "<", "-" * 15)
            savedir = logdir / f"fold{fold}"
            os.makedirs(savedir, exist_ok=True)

            X_train, X_valid = train_data[train_idx], train_data[test_idx]
            y_train, y_valid = targets[train_idx], targets[test_idx]
            model = build_model(config=config, n_features=len(features))
            # es = EarlyStopping(
            #     monitor="val_loss",
            #     patience=config.es_patience,
            #     verbose=1,
            #     mode="min",
            #     restore_best_weights=True,
            # )
            customL1 = CustomL1Loss(
                X_valid=X_valid,
                y_valid=y_valid,
                u_outs=X_valid[:, :, features.index("u_out")],
                filepath=savedir / "weights_custom_best.h5"
            )
            check_point = ModelCheckpoint(
                filepath=savedir / "weights_best.h5",
                monitor="val_loss",
                verbose=1,
                save_best_only=True,
                mode="min",
                save_weights_only=True,
            )
            schedular = ReduceLROnPlateau(
                mode="min", **config.schedular
            )
            history = model.fit(
                X_train,
                y_train,
                validation_data=(X_valid, y_valid),
                epochs=config.epochs,
                batch_size=config.batch_size,
                callbacks=[check_point, schedular, customL1]
            )
            model.save_weights(savedir / "weights_final.h5")
            model.load_weights(savedir / "weights_custom_best.h5")
            pd.DataFrame(history.history).to_csv(savedir / "log.csv")
            plot_metric(filepath=savedir / "log.csv", metric="loss")
            valid_preds[test_idx, :] = model.predict(X_valid).squeeze()
            test_preds.append(model.predict(test_data).squeeze().reshape(-1, 1).squeeze())

            del model, X_train, X_valid, y_train, y_valid
            keras.backend.clear_session()
            gc.collect()

    pd.DataFrame(valid_preds).to_csv(logdir / "valid_preds.csv")
    if not config.debug:
        submission_df.loc[test_preds_idx, "pressure"] = np.median(test_preds, axis=0)
        submission_df.to_csv(logdir / "submission.csv", index=False)
    shutil.copyfile(Path(__file__), logdir / "script.py")


if __name__ == "__main__":
    cnf_file = sys.argv[1]
    cfg_file_path = Path(__file__).resolve().parent / cnf_file
    with open(cfg_file_path, "rb") as f:
        config = json.load(f)
    main(config=config)
|
[
"zuuuubo.tetsu@outlook.jp"
] |
zuuuubo.tetsu@outlook.jp
|
75b4c345054f9757d6e642ce84b0d8c16a1c82c6
|
eb00755d9d0f2630ffdb21e3ab6685b2fbcb0d9e
|
/tests/bench/bench_scripts/bench_sampleData.py
|
729fcf79af5383d0af68875e3179d971fe99aff2
|
[
"BSD-3-Clause"
] |
permissive
|
mlangill/biom-format
|
aca45518c71b807cf30b0f548ad726880802a2b5
|
4cebfbdba8b6b64ff0d503df33634e3d52de1de0
|
refs/heads/master
| 2021-01-16T21:59:51.218830
| 2013-12-04T16:41:50
| 2013-12-04T16:41:50
| 9,486,201
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 274
|
py
|
#!/usr/bin/env python
from sys import argv
from gzip import open as gzip_open
from biom.parse import parse_biom_table
from random import choice

if __name__ == '__main__':
    table = parse_biom_table(gzip_open(argv[1]))
    foo = table.sampleData(choice(table.SampleIds))
|
[
"mcdonadt@colorado.edu"
] |
mcdonadt@colorado.edu
|
73b01d6e83f15e3b8998e48fde1d8e9a8e9c8657
|
5b7a0d2c364e40581eeff6c592067c954b96aa5b
|
/test_circle_ellipse.py
|
d03fd6ea80484a28a8acc42dbf20a692f6fa80ae
|
[] |
no_license
|
skconan/dice_detection
|
a0f5afbfd1d5e38cf6f5d72872103280690e5ffc
|
da5b065398c0976b90833a10e6dfcde162ce1540
|
refs/heads/master
| 2020-03-18T16:42:32.272709
| 2018-07-05T04:26:47
| 2018-07-05T04:28:03
| 134,981,877
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,445
|
py
|
import cv2 as cv
import math  # used below for math.pi (may also be re-exported by the star imports)
from lib import *
import numpy as np
from dice_detection import *

if __name__ == '__main__':
    cap = cv.VideoCapture(CONST.VDO_PATH + 'dice_01.mp4')
    while True:
        ret, image = cap.read()
        if image is None:
            continue
        # image = cv.resize(image,(0,0),fx=0.5,fy=0.5)
        image = pre_processing(image)
        mask_th = find_mask_threshold(image)
        img = mask_th.copy()
        img.fill(0)
        _, cnts, hierachy = cv.findContours(mask_th, cv.RETR_CCOMP, cv.CHAIN_APPROX_NONE)
        ct = 0
        x_min = 100000
        x_max = -1
        y_min = 100000
        y_max = -1
        for (cnt, hh) in zip(cnts, hierachy[0]):
            if len(cnt) < 5:
                continue
            (x, y), (w, h), angle = ellipse = cv.fitEllipse(cnt)
            x, y, _, _ = cv.boundingRect(cnt)
            area = cv.contourArea(cnt)
            area_ellipse = math.pi * (w/2.0) * (h/2.0)
            hull = cv.convexHull(cnt)
            hull_area = cv.contourArea(hull)
            solidity = float(area)/hull_area
            print(ct, w, h, w/h, solidity, hh)
            ct += 1
            # print()
            if not (list(hh[2:]) == [-1, -1]):
                continue
            if not (w >= 8 and h >= 8):
                continue
            if not 0.35 <= float(w)/h < 1.2:
                continue
            if not solidity >= 0.925 or not area/area_ellipse >= 0.8:
                continue
            if area > 10000:
                continue
            box = cv.boxPoints(ellipse)
            box = np.int0(box)
            cv.ellipse(img, ellipse, (255), -1)
            x, y, w, h = cv.boundingRect(cnt)
            dice_size = max(h/2.0, w/2.0) * 9
            # cv.rectangle(img,(int(x-(w*0.5)),int(y-(h*0.5))),(int(x+(w*4.5)),int(y+(h*4.5))),(155),1)
            cv.rectangle(img, (int(x-(w*2)), int(y-(h*2))), (int(x+(w*2.75)), int(y+(h*2.75))), (155), 1)
            # cv.rectangle(img,(int(x+(w*0.5)),int(y+(h*0.5))),(int(x-(w*4.5)),int(y-(h*4.5))),(155),1)
            cv.rectangle(img, (int(x), int(y)), (int(x+w), int(y+h)), (155), 1)
            # img = cv.drawContours(img,[box],0,(0,0,255),1)
            # img = cv.drawContours(img,cnt,-1,(0,0,255),1)
        cv.imshow('img', img)
        cv.imshow('image', image)
        k = cv.waitKey(-1) & 0xff
        if k == ord('q'):
            break
    cap.release()
    cv.destroyAllWindows()
|
[
"supakit.kr@gmail.com"
] |
supakit.kr@gmail.com
|
75a1c7bfd7129ce55f5eba80d259be9cc3f58c32
|
d4cd2476f8fa8a7d94e183a68bd0678971310c5b
|
/checkio/05_Alice_in_Wonderland/01_Alice_05_DigitDoublets.py
|
93be0ef309f0753e3758c5c296e1049c4e7b3414
|
[] |
no_license
|
gwqw/LessonsSolution
|
b495579f6d5b483c30d290bfa8ef0a2e29515985
|
0b841b1ae8867890fe06a5f0dcee63db9a3319a3
|
refs/heads/master
| 2020-07-05T19:15:53.758725
| 2019-10-01T11:34:44
| 2019-10-01T11:34:44
| 202,744,145
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,207
|
py
|
# check if nums differ only by one digit
def isOneDiff(n1, n2):
    n1 = str(n1)
    n2 = str(n2)
    diffcount = 0
    for i in range(len(n1)):
        if n1[i] != n2[i]: diffcount += 1
        if diffcount > 1: return False
    return (diffcount == 1)


# find next nums in list
def findnext(numbers):
    first_num = numbers[0]
    next_nums = []
    for n in numbers[1:]:
        if isOneDiff(n, first_num):
            next_nums.append(n)
    return next_nums


# move next number to second position
def regroupList(numbers, snum):
    i = numbers.index(snum)
    reslst = numbers[:]
    n = reslst[i]
    reslst[i] = reslst[1]
    reslst[1] = n
    return reslst


# construct all trees
def constrTree(numbers):
    #print("inp_nums= ", numbers)
    res_tree = []
    isFinal = len(numbers) == 2
    finalNum = numbers[-1]
    # find next and form tree
    next_nums = findnext(numbers)
    #print("next_nums= ", next_nums)
    for n in next_nums:
        if n == finalNum:
            #print("find final")
            res_tree.append([numbers[0], n])
            break
        elif not isFinal:
            lst = regroupList(numbers, n)
            tmptree = constrTree(lst[1:])
            for t in tmptree:
                t.insert(0, numbers[0])
                res_tree.append(t)
    return res_tree


# find the shortest tree
def findShortest(trees):
    short_len = 100000
    short_tree = []
    for t in trees:
        if len(t) < short_len:
            short_len = len(t)
            short_tree = t
    return short_tree


def checkio(numbers):
    print("input_tree= ", numbers)
    res_trees = constrTree(numbers)
    print("res_trees= ", res_trees)
    short_tree = findShortest(res_trees)
    print("short_tree= ", short_tree)
    return short_tree


# These "asserts" are used only for self-checking and are not necessary for auto-testing
if __name__ == '__main__':
    assert checkio([123, 991, 323, 321, 329, 121, 921, 125, 999]) == [123, 121, 921, 991, 999], "First"
    assert checkio([111, 222, 333, 444, 555, 666, 121, 727, 127, 777]) == [111, 121, 127, 727, 777], "Second"
    assert checkio([456, 455, 454, 356, 656, 654]) == [456, 454, 654], "Third, [456, 656, 654] is correct too"
|
[
"="
] |
=
|
bf7d221c249a3241ed1caec79c3c80e33dfe5221
|
35fb414cc9f5c408dc5d2c8316a5b6e4de3ccf22
|
/test/templates/analyze_2l_2tau_cfg.py
|
569b94fbe3d5ab083963e3c54bb48fe7dbaef4c9
|
[] |
no_license
|
kartikmaurya/tth-htt
|
abf1abafc9335da9687938f8588550a86631f751
|
8486aa6f33085a7b2d665e9215b828970f6ee8a7
|
refs/heads/master
| 2020-05-05T02:09:31.876729
| 2019-04-05T06:54:50
| 2019-04-05T06:54:50
| 177,517,377
| 0
| 0
| null | 2019-03-25T05:01:21
| 2019-03-25T05:01:21
| null |
UTF-8
|
Python
| false
| false
| 4,412
|
py
|
import FWCore.ParameterSet.Config as cms

import os

from tthAnalysis.HiggsToTauTau.configs.recommendedMEtFilters_cfi import *
from tthAnalysis.HiggsToTauTau.configs.EvtYieldHistManager_cfi import *

process = cms.PSet()

process.fwliteInput = cms.PSet(
    fileNames = cms.vstring(),
    maxEvents = cms.int32(-1),
    outputEvery = cms.uint32(100000)
)

process.fwliteOutput = cms.PSet(
    fileName = cms.string('')
)

process.analyze_2l_2tau = cms.PSet(
    treeName = cms.string('Events'),
    process = cms.string(''),
    histogramDir = cms.string(''),
    era = cms.string(''),
    triggers_1e = cms.vstring(),
    use_triggers_1e = cms.bool(True),
    triggers_2e = cms.vstring(),
    use_triggers_2e = cms.bool(True),
    triggers_1mu = cms.vstring(),
    use_triggers_1mu = cms.bool(True),
    triggers_2mu = cms.vstring(),
    use_triggers_2mu = cms.bool(True),
    triggers_1e1mu = cms.vstring(),
    use_triggers_1e1mu = cms.bool(True),
    apply_offline_e_trigger_cuts_1e = cms.bool(True),
    apply_offline_e_trigger_cuts_2e = cms.bool(True),
    apply_offline_e_trigger_cuts_1mu = cms.bool(True),
    apply_offline_e_trigger_cuts_2mu = cms.bool(True),
    apply_offline_e_trigger_cuts_1e1mu = cms.bool(True),
    electronSelection = cms.string(''),
    muonSelection = cms.string(''),
    lep_mva_cut = cms.double(1.),
    apply_leptonGenMatching = cms.bool(True),
    leptonChargeSelection = cms.string(''),
    hadTauChargeSelection = cms.string(''),
    hadTauGenMatch = cms.string('all'),
    hadTauSelection = cms.string(''),
    apply_hadTauGenMatching = cms.bool(False),
    chargeSumSelection = cms.string(''),
    applyFakeRateWeights = cms.string(""),
    leptonFakeRateWeight = cms.PSet(
        inputFileName = cms.string(""),
        histogramName_e = cms.string(""),
        histogramName_mu = cms.string("")
    ),
    hadTauFakeRateWeight = cms.PSet(
        inputFileName = cms.string(""),
        lead = cms.PSet(
            absEtaBins = cms.vdouble(-1., 1.479, 9.9),
            graphName = cms.string("jetToTauFakeRate/$hadTauSelection/$etaBin/jetToTauFakeRate_mc_hadTaus_pt"),
            applyGraph = cms.bool(True),
            fitFunctionName = cms.string("jetToTauFakeRate/$hadTauSelection/$etaBin/fitFunction_data_div_mc_hadTaus_pt"),
            applyFitFunction = cms.bool(True)
        ),
        sublead = cms.PSet(
            absEtaBins = cms.vdouble(-1., 1.479, 9.9),
            graphName = cms.string("jetToTauFakeRate/$hadTauSelection/$etaBin/jetToTauFakeRate_mc_hadTaus_pt"),
            applyGraph = cms.bool(True),
            fitFunctionName = cms.string("jetToTauFakeRate/$hadTauSelection/$etaBin/fitFunction_data_div_mc_hadTaus_pt"),
            applyFitFunction = cms.bool(True)
        )
    ),
    minNumJets = cms.int32(2),
    isMC = cms.bool(True),
    central_or_shift = cms.string(''),
    lumiScale = cms.double(1.),
    apply_genWeight = cms.bool(True),
    apply_DYMCReweighting = cms.bool(False),
    apply_hlt_filter = cms.bool(False),
    apply_met_filters = cms.bool(True),
    cfgMEtFilter = cms.PSet(),
    apply_hadTauFakeRateSF = cms.bool(False),
    fillGenEvtHistograms = cms.bool(False),
    cfgEvtYieldHistManager = cms.PSet(),
    branchName_electrons = cms.string('Electron'),
    branchName_muons = cms.string('Muon'),
    branchName_hadTaus = cms.string('Tau'),
    branchName_jets = cms.string('Jet'),
    branchName_met = cms.string('MET'),
    branchName_memOutput = cms.string(''),
    branchName_genLeptons = cms.string('GenLep'),
    branchName_genHadTaus = cms.string('GenVisTau'),
    branchName_genPhotons = cms.string('GenPhoton'),
    branchName_genJets = cms.string('GenJet'),
    redoGenMatching = cms.bool(True),
    selEventsFileName_input = cms.string(''),
    selEventsFileName_output = cms.string(''),
    selectBDT = cms.bool(False),
    syncNtuple = cms.PSet(
        tree = cms.string(''),
        output = cms.string(''),
        requireGenMatching = cms.bool(False),
    ),
    useNonNominal = cms.bool(False),
    isDEBUG = cms.bool(False),
    hasLHE = cms.bool(True),
    evtWeight = cms.PSet(
        apply = cms.bool(False),
        histogramFile = cms.string(''),
        histogramName = cms.string(''),
        branchNameXaxis = cms.string(''),
        branchNameYaxis = cms.string(''),
        branchTypeXaxis = cms.string(''),
        branchTypeYaxis = cms.string(''),
    ),
)
|
[
"karlehataht@gmail.com"
] |
karlehataht@gmail.com
|
c47123eb1d1b70624bb34e5b9652c9cf7a8dd2ec
|
99c4d4a6592fded0e8e59652484ab226ac0bd38c
|
/code/batch-2/vse-naloge-brez-testov/DN10-M-123.py
|
0c1eae41abe8c8c3d571897a3c84d3a0b0442dcb
|
[] |
no_license
|
benquick123/code-profiling
|
23e9aa5aecb91753e2f1fecdc3f6d62049a990d5
|
0d496d649247776d121683d10019ec2a7cba574c
|
refs/heads/master
| 2021-10-08T02:53:50.107036
| 2018-12-06T22:56:38
| 2018-12-06T22:56:38
| 126,011,752
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,389
|
py
|
otroci = {
    "Adam": ["Matjaž", "Cilka", "Daniel"],
    "Aleksander": [],
    "Alenka": [],
    "Barbara": [],
    "Cilka": [],
    "Daniel": ["Elizabeta", "Hans"],
    "Erik": [],
    "Elizabeta": ["Ludvik", "Jurij", "Barbara"],
    "Franc": [],
    "Herman": ["Margareta"],
    "Hans": ["Herman", "Erik"],
    "Jožef": ["Alenka", "Aleksander", "Petra"],
    "Jurij": ["Franc", "Jožef"],
    "Ludvik": [],
    "Margareta": [],
    "Matjaž": ["Viljem"],
    "Petra": [],
    "Tadeja": [],
    "Viljem": ["Tadeja"],
}


def premozenje(oseba, denar):
    xs = [denar[oseba]]
    for otrok in otroci[oseba]:
        xs.append(premozenje(otrok, denar))
    return sum(xs)


def najbogatejsi(oseba, denar):
    najvec_denarja = 0
    # print("oseba: ", oseba)
    # if denar[oseba] > najbolj_bogat:
    obdelani = []
    najbolj_bogat = (oseba, denar[oseba])
    for otrok in otroci[oseba]:
        if denar[otrok] >= (denar[oseba] in najbolj_bogat):
            najbolj_bogat = najbogatejsi(otrok, denar)
            # if int(denar[otrok]) > najvec_denarja:
            #     najvec_denarja = denar[otrok]
            # print(najbolj_bogat, "-----1")
        # print(najbolj_bogat, "-----2")
    # print("------------------------------------------------------")
    # print(najvec_denarja)
    # print(otrok, '---', denar[otrok])
    return najbolj_bogat
|
[
"benjamin.fele@gmail.com"
] |
benjamin.fele@gmail.com
|