| column | dtype | range / values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3 – 288 |
| content_id | string | length 40 |
| detected_licenses | list | length 0 – 112 |
| license_type | string | 2 classes |
| repo_name | string | length 5 – 115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 684 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k – 681M (nullable) |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 – 2023-09-14 21:59:50 (nullable) |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 – 2023-08-21 12:35:19 (nullable) |
| gha_language | string | 147 classes |
| src_encoding | string | 25 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 128 – 12.7k |
| extension | string | 142 classes |
| content | string | length 128 – 8.19k |
| authors | list | length 1 |
| author_id | string | length 1 – 132 |
fb06704c09d4561b67183d828d13a1595f5c9985
|
d2eb7bd335175edd844a3e6c1c633ee0dc2dbb25
|
/contests_atcoder/abc176/abc176_d.py
|
5d96807a125631d40ae1146b10f7bee768af4a30
|
[
"BSD-2-Clause"
] |
permissive
|
stdiorion/competitive-programming
|
5020a12b85f1e691ceb0cacd021606a9dc58b72c
|
e7cf8ef923ccefad39a1727ca94c610d650fcb76
|
refs/heads/main
| 2023-03-27T01:13:42.691586
| 2021-03-08T08:05:53
| 2021-03-08T08:05:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,808
|
py
|
from itertools import accumulate,chain,combinations,groupby,permutations,product
from collections import deque,Counter
from bisect import bisect_left,bisect_right
from math import gcd,sqrt,sin,cos,tan,degrees,radians
from fractions import Fraction
from decimal import Decimal
import sys
input = lambda: sys.stdin.readline().rstrip()
#from sys import setrecursionlimit
#setrecursionlimit(10**7)
MOD=10**9+7
INF=float('inf')
h, w = map(int, input().split())
ch, cw = map(lambda x: int(x) - 1, input().split())
dh, dw = map(lambda x: int(x) - 1, input().split())
field = [list(input()) for _ in range(h)]
warp_needed = [[INF] * w for _ in range(h)]
d = deque()
d.append((ch, cw))
warp_needed[ch][cw] = 0
d_afterwarp = deque()
def walk_from(p):
return [(p[0] - 1, p[1]), (p[0], p[1] - 1), (p[0] + 1, p[1]), (p[0], p[1] + 1)]
def warp_from(p):
ret = []
for i in range(-2, 3):
for j in range(-2, 3):
if abs(i) + abs(j) > 1:
ret.append((p[0] + i, p[1] + j))
return ret
warp_count = 0
while True:
if d:
now = d.popleft()
for dst in walk_from(now):
if 0 <= dst[0] < h and 0 <= dst[1] < w and field[dst[0]][dst[1]] != "#" and warp_needed[dst[0]][dst[1]] > warp_count:
warp_needed[dst[0]][dst[1]] = warp_count
d.append(dst)
for dst in warp_from(now):
if 0 <= dst[0] < h and 0 <= dst[1] < w and field[dst[0]][dst[1]] != "#" and warp_needed[dst[0]][dst[1]] > warp_count + 1:
warp_needed[dst[0]][dst[1]] = warp_count + 1
d_afterwarp.append(dst)
elif d_afterwarp:
d = d_afterwarp
d_afterwarp = deque()
warp_count += 1
else:
break
print(warp_needed[dh][dw] if warp_needed[dh][dw] != INF else -1)
|
[
"itkn1900@gmail.com"
] |
itkn1900@gmail.com
|
e2ecca12b421baaa62443f60509bb4cab2d71d1c
|
df3853b41ed05d86f5bcd992fcc265f637c67784
|
/1sem/Lab7/Lab7_Task6.py
|
d6c64f14b12411e13c504f482958f2a1aeaf6f10
|
[] |
no_license
|
KseniaMIPT/Adamasta
|
6ab0121519581dbbbf6ae788d1da85f545f718d1
|
e91c34c80834c3f4bf176bc4bf6bf790f9f72ca3
|
refs/heads/master
| 2021-01-10T16:48:31.141709
| 2016-11-23T21:02:25
| 2016-11-23T21:02:25
| 43,350,507
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 380
|
py
|
import matplotlib.pyplot as plt
f = open('input.txt', 'r')
file_lines = f.readlines()
words_list = []
for i in range(len(file_lines)):
words_list += file_lines[i].split()
words_len = [len(word) for word in words_list]
number_of_words_with_length = [words_len.count(length) for length in range(min(words_len), max(words_len))]
plt.plot(number_of_words_with_length, )
plt.show()
|
[
"ksenia22.11@yandex.ru"
] |
ksenia22.11@yandex.ru
|
7f2dcbafba43ae2baa347247eac3a5cde1f0b8f6
|
3431ace8cae7b804f977a631f231dd1a4cb4200e
|
/1 first draft/sudoku 4.py
|
34e595e3afa22622880ee995df0aee86bdb256e5
|
[] |
no_license
|
rayyan-khan/7-sudoku
|
6f9f0dbf735dc6c8c956db453efae956e9d8fd23
|
e1407e5caa78fb215e6a3818da9b9c448f69f2ea
|
refs/heads/master
| 2022-02-22T10:35:41.806166
| 2018-12-20T14:38:00
| 2018-12-20T14:38:00
| 161,864,475
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,752
|
py
|
import sys, time
# need to fix bug
# input:
INPUT = open(sys.argv[1], 'r') if len(sys.argv) == 2 else open('puzzles.txt', 'r')
# set up global variables:
INP = '.'*81
def setGlobals(pzl):
global PZLSIZE, CSTRSIZE, SUBHEIGHT, SUBWIDTH, SYMSET, ROWCSTR, COLCSTR, SUBCSTR, CSTRS, NBRS
pzl = ''.join([n for n in pzl if n != '.'])
PZLSIZE = len(INP)
CSTRSIZE = int(len(INP) ** .5)
SUBHEIGHT, SUBWIDTH = int(CSTRSIZE ** .5), int(CSTRSIZE ** .5) \
if int(CSTRSIZE ** .5 // 1) == int(CSTRSIZE ** .5) \
else (int(CSTRSIZE ** .5 // 1), int(CSTRSIZE ** .5 // 1 + 1))
SYMSET = {n for n in pzl} - {'.'}
if len(SYMSET) != CSTRSIZE:
otherSyms = [n for n in '123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ0']
while len(SYMSET) < CSTRSIZE:
SYMSET.add(otherSyms.pop(0))
ROWCSTR = [{index for index in range(row*CSTRSIZE, (row + 1)*CSTRSIZE)}
for row in range(CSTRSIZE)]
COLCSTR = [{index for index in range(col, col + PZLSIZE - SUBWIDTH*SUBHEIGHT + 1, SUBWIDTH*SUBHEIGHT)}
for col in range(CSTRSIZE)]
SUBCSTR = [{boxRow + boxColOffset + subRow * CSTRSIZE + subCol
for subRow in range(SUBHEIGHT) for subCol in range(SUBWIDTH)}
for boxRow in range(0, PZLSIZE, SUBHEIGHT * CSTRSIZE) for boxColOffset in range(0, CSTRSIZE, SUBWIDTH)]
CSTRS = ROWCSTR + COLCSTR + SUBCSTR
NBRS = [set().union(*[cset for cset in CSTRS if n in cset]) - {n} for n in range(PZLSIZE)]
setGlobals(INP)
# helper methods
def printPzl(pzl):
cstrsize = int(len(pzl) ** .5)
subheight, subwidth = int(cstrsize ** .5), int(cstrsize ** .5) \
if int(cstrsize ** .5 // 1) == int(cstrsize ** .5) \
else (int(cstrsize ** .5 // 1), int(cstrsize ** .5 // 1 + 1))
rowLen = subwidth*(int(cstrsize/subheight))
for row in range(cstrsize):
print(' '.join(pzl[rowLen*row: rowLen*(row + 1)]))
def checkSum(pzl):
return sum(ord(n) for n in pzl) - PZLSIZE*ord('0')
def getBestPos(pzl):
bestPos = 0 # positions that fewest symbols can go into
mostNbrs = 0
for index in range(PZLSIZE):
if pzl[index] != '.':
continue
nbrSet = set()
for nbrInd in NBRS[index]:
if pzl[nbrInd] != '.':
nbrSet.add(pzl[nbrInd])
if len(nbrSet) > mostNbrs:
mostNbrs = len(nbrSet)
bestPos = index
return bestPos
def getBestSyms(pzl):
bestSyms = set()
mostPlaced = 0
for sym in SYMSET:
placed = pzl.count(sym)
if placed > mostPlaced:
mostPlaced = placed
bestSyms = set()
if placed == mostPlaced:
bestSyms.add(sym)
return bestSyms
# solve
def solve(pzl):
if pzl.find('.') == -1:
return pzl
bestPos = getBestPos(pzl)
for sym in SYMSET - {pzl[n] for n in NBRS[bestPos]}:
#setAllPosForSym = {index for index in range(PZLSIZE) if pzl[index] != sym}
# cs in CSTRS:
# setAllPosForSymInCS = cs & setAllPosForSym
#for pos in setAllPosForSymInCS:
pzlMove = pzl[:bestPos] + sym + pzl[bestPos + 1:]
newPzl = solve(pzlMove)
if newPzl:
return newPzl
return ''
# run
time51 = time.clock()
for line in enumerate(INPUT.readlines()):
start = time.clock()
pzlNum, INP = line
if pzlNum == 50:
print('Time for 51: {}'.format(time.clock() - time51))
INP = INP.strip()
setGlobals(INP)
solution = solve(INP)
print('{}: Time: {} Sum: {} \n'.format(pzlNum + 1, round(time.clock() - start, 3), checkSum(solution)), end='')
if solution == '':
print('No solution -- i.e. theres a bug here')
#else:
# printPzl(solution)
|
[
"rayyan.khan258@gmail.com"
] |
rayyan.khan258@gmail.com
|
4b32b7205890b2163ed6f8361383352233344849
|
b140b104b6de0c8a924db008a48d9798e046919e
|
/byte/module_using_sys.py
|
0098ec94e831916455510b9078702d9bbc2d84fb
|
[] |
no_license
|
saibi/python
|
ad206fbfe752198492c939578607f1c31223d3c3
|
fd94a623241c28dffe60350496a5c858c6f912e8
|
refs/heads/main
| 2023-09-01T08:20:33.379923
| 2023-08-31T01:32:17
| 2023-08-31T01:32:17
| 74,268,329
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 181
|
py
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
import sys
print('the command line arguments are:')
for i in sys.argv:
print(i)
print('\n\nThe PYTHONPATH is', sys.path, '\n')
|
[
"kimyoungmin@gmail.com"
] |
kimyoungmin@gmail.com
|
e99e71df9008cda3e264baeefafc564222bbb05f
|
e92bcb90e3bfb4b6076cd9deffc3e67c9770f122
|
/spk/haproxy/src/app/application/auth.py
|
7cedb692302bd19331b9d202589ec62b2f837eea
|
[
"BSD-3-Clause"
] |
permissive
|
SynoCommunity/spksrc
|
e708e77af58b95259e802229b19495ad011b536b
|
b0e665b8dcc6c3eedd7814cc8e3d957842b9c01d
|
refs/heads/master
| 2023-09-03T13:19:35.212489
| 2023-09-02T11:17:53
| 2023-09-02T11:17:53
| 2,565,137
| 2,602
| 1,388
|
NOASSERTION
| 2023-09-14T21:56:02
| 2011-10-12T20:25:50
|
Makefile
|
UTF-8
|
Python
| false
| false
| 2,088
|
py
|
# -*- coding: utf-8 -*-
from collections import namedtuple
from flask import abort, request
from functools import wraps, partial
from subprocess import check_output
import grp
import os
import pwd
__all__ = ['authenticate', 'requires_auth']
def authenticate():
"""Authenticate a user using Synology's authenticate.cgi
If the user is authenticated, returns a nametuple with the
username and its groups, if not returns None. For example::
>>> authenticate()
User(name='admin', groups=['administrators'])
:rtype: namedtuple or None
"""
User = namedtuple('User', ['name', 'groups'])
with open(os.devnull, 'w') as devnull:
user = check_output(['/usr/syno/synoman/webman/modules/authenticate.cgi'], stderr=devnull).strip()
if not user:
return None
groups = [g.gr_name for g in grp.getgrall() if user in g.gr_mem]
groups.append(grp.getgrgid(pwd.getpwnam(user).pw_gid).gr_name)
return User(user, set(groups))
def requires_auth(f=None, groups=None, users=None):
"""Require a user to be authenticated. If he is not, this aborts
on 403.
The condition to be authorized is for the user to be authenticated
and in one of the listed groups (if any) or one of the listed users
(if any)
:param function f: the decorated function
:param list groups: groups whitelist
:param list users: users whitelist
"""
if f is None:
return partial(requires_auth, groups=groups, users=users)
@wraps(f)
def decorated(*args, **kwargs):
user = authenticate()
if user is None: # Not authenticated
abort(403)
# A user is authorized if he is in the groups whitelist or the users whitelist
authorized = False
if groups is not None and len(set(groups) & user.groups) > 0: # Authorized group
authorized = True
if users is not None and user.name in users: # Authorized user
authorized = True
if not authorized:
abort(403)
return f(*args, **kwargs)
return decorated
|
[
"diaoulael@gmail.com"
] |
diaoulael@gmail.com
|
e3530234a6047e169a09f7a802ba4ee2672cb2e6
|
3705110f5d8fc536b9d6fb8473482babac491dd7
|
/build/src/django-doc-wiki-0.2.0BETA/doc_wiki/models.py
|
68d89f5bc1fb6fec0dd13ae3fc483e6e6fdf2d0d
|
[
"Apache-2.0"
] |
permissive
|
taylanpince/wiki
|
c726933258142b19b226c066f755bbcdb9196498
|
227abb4991a071494394e2bbae25775e4baa6d1d
|
refs/heads/master
| 2020-06-07T10:53:13.479818
| 2012-02-28T16:56:26
| 2012-02-28T16:56:26
| 479,720
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,576
|
py
|
from django.core.cache import cache
from django.db import models
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from doc_wiki import settings
from doc_wiki.managers import WikiPageManager
from doc_wiki.parsers import parse_markdown
class WikiPage(models.Model):
"""
A wiki page based on a document in the file system
"""
slug = models.SlugField(_("Slug"), max_length=255)
path = models.FilePathField(_("Path"), path=settings.DIRECTORY_PATH, recursive=False, max_length=255)
content = models.TextField(_("Content"), blank=True)
timestamp = models.DateTimeField(_("Time Stamp"), auto_now=True)
admin_objects = models.Manager()
objects = WikiPageManager()
class Meta:
verbose_name = _("Wiki Page")
verbose_name_plural = _("Wiki Pages")
def __unicode__(self):
return u"Wiki Page: %s" % self.slug
@models.permalink
def get_absolute_url(self):
return ("doc_wiki_page", (), {
"slug": self.slug,
})
@property
def content_html(self):
"""
Parses the content field using markdown and pygments, caches the results
"""
key = "wiki_pages_content_%d" % self.pk
html = cache.get(key)
if not html:
html = parse_markdown(self.content)
cache.set(key, html, 60 * 60 * 24 * 30)
return mark_safe(html)
def save(self):
if self.pk:
cache.delete("wiki_pages_content_%d" % self.pk)
super(WikiPage, self).save()
|
[
"taylanpince@gmail.com"
] |
taylanpince@gmail.com
|
c051681c4d71382457478f2977678850900a2d9d
|
d2f50124ff3bec70b9b3139ecb063b06e526781d
|
/biable/migrations/0061_cliente_competencia.py
|
1df9f28fe4cf97ab807d908f104f46d2fed7fb1d
|
[] |
no_license
|
odecsarrollo/odecopack-componentes
|
e8d993f089bf53bbf3c53d1265e70ac5c06b59b8
|
b583a115fb30205d358d97644c38d66636b573ff
|
refs/heads/master
| 2022-12-12T00:33:02.874268
| 2020-08-13T18:45:01
| 2020-08-13T18:45:01
| 189,262,705
| 0
| 0
| null | 2022-12-08T11:23:46
| 2019-05-29T16:37:21
|
Python
|
UTF-8
|
Python
| false
| false
| 457
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-07 21:43
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('biable', '0060_auto_20170207_1638'),
]
operations = [
migrations.AddField(
model_name='cliente',
name='competencia',
field=models.BooleanField(default=False),
),
]
|
[
"fabio.garcia.sanchez@gmail.com"
] |
fabio.garcia.sanchez@gmail.com
|
7985931ad924c0fffe6fde629612ec580893d2ec
|
8f48d12b88048e424ebb0d72ca6dfab5cf12ae0f
|
/0600_0999/917.py
|
032ca01c50e412e01c507c9dde9a100310423937
|
[] |
no_license
|
renjieliu/leetcode
|
e1caf13c18a8107ed9252588b339fb76bcb1b246
|
4668b64fcb9320b6c316d8608fc61911ce43b6c7
|
refs/heads/master
| 2023-03-18T18:16:06.187741
| 2023-03-14T20:31:59
| 2023-03-14T20:31:59
| 128,823,819
| 7
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,765
|
py
|
class Solution: # RL 20210914: 2 pointers approach
def reverseOnlyLetters(self, s: str) -> str:
s = list(s)
l = 0
r = len(s)-1
letter = lambda x: 1 if (65<=ord(x)<=90 or 97 <= ord(x)<=122) else 0
while l <= r: #2 pointers, once meet letters on the left and right, just swap them
while l < r and letter(s[l]) == 0:
l+=1
while r > l and letter(s[r]) == 0:
r-=1
s[l], s[r] = s[r], s[l]
l+=1
r-=1
# print(s)
return "".join(s)
# previous approach
# class Solution:
# def reverseOnlyLetters(self, s: str) -> str:
# tmp = ""
# s = list(s)
# for i in range(len(s)):
# curr = s[i]
# if 65 <= ord(curr) <= 90 or 97<=ord(curr) <=122:
# tmp+=curr
# s[i] = "A"
# tmp = list(tmp)
# for i in range(len(s)):
# if s[i] == "A":
# s[i] = tmp.pop() #pop from the tail for reverse
#
# return "".join(s)
# previous approach
# def reverseOnlyLetters(S: 'str'):
# temp = ""
# for i in S:
# if 65<=ord(i)<=90 or 97 <= ord(i) <=122:
# temp += i
#
# temp = temp [::-1] #reverse
# output = ""
# curr = 0
# for i in S:
# if not(65<=ord(i)<=90 or 97 <= ord(i) <=122): #if it's not a letter, then put it to the current position
# output+=i
# else:
# output+=temp[curr] #if it's a letter, then find the letter in the reversed string.
# curr+=1
#
# return output
#
# print(reverseOnlyLetters("ab-cd"))
# print(reverseOnlyLetters("a-bC-dEf-ghIj"))
# print(reverseOnlyLetters("Test1ng-Leet=code-Q!"))
|
[
"anlrj@qq.com"
] |
anlrj@qq.com
|
1696cb9419dcb9e751dbe8da54a9e2b67dc2f20e
|
1f09834b125de2da3060af78d92012a7ddc16c75
|
/gusto/settings.py
|
91b8fd7a35d2c224357d9dae1dbcefa638719188
|
[] |
no_license
|
letsy1337/gusto
|
e8db9459146c49911935fcf467b4046060468af5
|
4dc45449d79674d7e0ed304315e05f9ec9c7d395
|
refs/heads/main
| 2023-03-03T00:43:26.252307
| 2021-02-13T14:39:44
| 2021-02-13T14:39:44
| 330,979,000
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,166
|
py
|
"""
Django settings for gusto project.
Generated by 'django-admin startproject' using Django 3.1.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
DB_NAME = os.environ.get('DB_NAME')
DB_PASSWORD = os.environ.get('DB_PASSWORD')
DB_HOST = os.environ.get('DB_HOST')
DB_USER = os.environ.get('DB_USER')
SECRET_KEY = os.environ.get('SECRET_KEY')
# BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# SECRET_KEY = 'm-cr%^%eeylqe_^dm_w)c=n6m)j^ab8m38z889^7ad00)0#c%y'
SECRET_KEY = SECRET_KEY
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['gusto-stud.herokuapp.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'main_gusto',
'menu_gusto',
'events_gusto',
'users_messages',
'accounts'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
]
ROOT_URLCONF = 'gusto.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [BASE_DIR / 'templates']
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'gusto.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': BASE_DIR / 'db.sqlite3',
# }
# }
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': DB_NAME,
'USER': DB_USER,
'PASSWORD': DB_PASSWORD,
'HOST': DB_HOST,
'PORT': '5432',
}
}
import dj_database_url
db = dj_database_url.config()
DATABASES['default'].update(db)
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
# Extra places for collectstatic to find static files.
|
[
"unconfigured@null.spigotmc.org"
] |
unconfigured@null.spigotmc.org
|
ad58ed10f0a6e5ccb17585b08ed905795c5bdd48
|
1c9999d51cfe4491685ec6eb865c299251f4fd60
|
/1-b-lstm-return-sequences-states/__init__.py
|
3e4ec51525567476778abb17f805e0aa729f2c47
|
[] |
no_license
|
bjbluejita/deep-learning-notebook
|
d7a081443eda570eb6d36fd7d2983f6077b43962
|
0e3598a20214dd78deb4f5e6809f7789722f6f5d
|
refs/heads/master
| 2023-05-27T15:29:32.639505
| 2023-05-14T02:16:59
| 2023-05-14T02:16:59
| 252,683,247
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 147
|
py
|
'''
@Project: deep-learning-with-keras-notebooks
@Package
@author: ly
@date Date: 2019年02月22日 10:19
@Description:
@URL:
@version: V1.0
'''
|
[
"30115682@qq.com"
] |
30115682@qq.com
|
86ad171ff6405302c17d09989ab9eb6063e09ce9
|
52e6310fab09209583c075f42963099858fb1f4f
|
/backend/mobile_8_oct_dev_12930/settings.py
|
ce46b616137bb9169a90c984380402b2c16c074e
|
[] |
no_license
|
crowdbotics-apps/mobile-8-oct-dev-12930
|
01f002d825b9be091d6a736549654ccc8805d3e1
|
35f5acfac8580c158145aeb51f1a42a911bd4417
|
refs/heads/master
| 2022-12-26T14:34:01.544851
| 2020-10-08T14:09:00
| 2020-10-08T14:09:00
| 302,234,786
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,112
|
py
|
"""
Django settings for mobile_8_oct_dev_12930 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
import logging
env = environ.Env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"django.contrib.sites",
]
LOCAL_APPS = [
"home",
"users.apps.UsersConfig",
]
THIRD_PARTY_APPS = [
"rest_framework",
"rest_framework.authtoken",
"rest_auth",
"rest_auth.registration",
"bootstrap4",
"allauth",
"allauth.account",
"allauth.socialaccount",
"allauth.socialaccount.providers.google",
"django_extensions",
"drf_yasg",
# start fcm_django push notifications
"fcm_django",
# end fcm_django push notifications
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "mobile_8_oct_dev_12930.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "mobile_8_oct_dev_12930.wsgi.application"
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": os.path.join(BASE_DIR, "db.sqlite3"),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {"default": env.db()}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = "/static/"
MIDDLEWARE += ["whitenoise.middleware.WhiteNoiseMiddleware"]
AUTHENTICATION_BACKENDS = (
"django.contrib.auth.backends.ModelBackend",
"allauth.account.auth_backends.AuthenticationBackend",
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, "static")]
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = "email"
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "optional"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# start fcm_django push notifications
FCM_DJANGO_SETTINGS = {"FCM_SERVER_KEY": env.str("FCM_SERVER_KEY", "")}
# end fcm_django push notifications
# Swagger settings for api docs
SWAGGER_SETTINGS = {
"DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
if DEBUG or not (EMAIL_HOST_USER and EMAIL_HOST_PASSWORD):
# output email to console instead of sending
if not DEBUG:
logging.warning(
"You should setup `SENDGRID_USERNAME` and `SENDGRID_PASSWORD` env vars to send emails."
)
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
9148d6a0ecbe711f958235eefce04d3ff1b0e9db
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_255/ch129_2020_04_01_18_23_34_386742.py
|
a7848f92f19eb5a4c9eeb1c5bb902fe5b642b7c0
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 156
|
py
|
def verifica_quadrado_perfeito(n):
    x = 1
    while n > 0:
        n -= x
        x += 2
    if n == 0:
        print('True')
    elif n < 0:
        print('False')
    return n
|
[
"you@example.com"
] |
you@example.com
|
fa20b747629dca817cd808fb369cabbc10565862
|
219992b56f8e5cd8b47534d98417dd8ac795110b
|
/com/ibm/testing/dict&file.py
|
74572cc02da9e8ce622b4624a64b89f3d6598af0
|
[] |
no_license
|
haohaixingyun/dig-python
|
63844877de0acad04d07d7119e381b9bb4a97395
|
4e8c3e3cb1ba98f39d65095b4d3b09ba115e586b
|
refs/heads/master
| 2021-01-13T08:45:59.669829
| 2016-10-26T05:54:07
| 2016-10-26T05:54:07
| 71,970,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 553
|
py
|
'''
Created on Mar 30, 2016
@author: yunxinghai
'''
def main():
dicts = {}
dicts['a'] = 'alpha'
dicts['g'] = 'gamma'
dicts['o'] = 'omega'
print dicts
print dicts.keys()
if 'a' in dicts:
print dicts['a']
for key in dicts :
print key
print dicts[key]
f = open('C:\workplacebus\\business\\yunxinghai_Ethan\\PROD_DIMNSN\\rshr1.prod_dimnsn_debug.tbl.sql','rU')
for line in f:
print line
f.close()
if __name__ == '__main__':
main()
|
[
"yunxinghai@hotmail.com"
] |
yunxinghai@hotmail.com
|
494c300b90dcab7df2391d17dba61414da9b7717
|
6f56da8db171d4a6c006b5d944437bf061069faf
|
/XCat.v.0.0.1/source/XCat_Output/XCat_Report/XCat_Report.py
|
0ad012e217548f3517e089f1db38b8443e388b19
|
[] |
no_license
|
afarahi/XCat
|
16819bef7087e994907c413dd6331cdebde72ffb
|
498602eb7f61696d169f071185115345c68bcf86
|
refs/heads/master
| 2021-01-21T01:59:36.907059
| 2013-05-03T05:12:07
| 2013-05-03T05:12:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 628
|
py
|
import subprocess
import os
import shlex
from XCat_tex_Construction import Report_File
def Creat_Report(Halo,Input,Output):
print "Creating report ..."
Report_File(Halo,Input,Output)
proc=subprocess.Popen(shlex.split('pdflatex Report/report.tex'))
proc.communicate()
proc=subprocess.Popen(shlex.split('pdflatex Report/report.tex'))
proc.communicate()
proc=subprocess.Popen(shlex.split('cp report.pdf Report'))
proc.communicate()
os.unlink('report.aux')
os.unlink('report.log')
os.unlink('report.out')
os.unlink('report.pdf')
print "Report is created successfully."
|
[
"aryaf66@gmail.com"
] |
aryaf66@gmail.com
|
9c9a52c1aeac8287ffe746dd8321a6e56ee87c08
|
c65af972b843e4f11a9aa9005104ac54a283032d
|
/practice4/database1.py
|
51320f63d12e8e845b44c63342cb448e4382cbd0
|
[] |
no_license
|
ljeleven/mypython
|
a63438c4246606082f000967a5d47256fa297aeb
|
b652338be3937543f0b35a9111dd0d346eb913b5
|
refs/heads/master
| 2023-05-24T19:30:37.001198
| 2020-04-09T15:40:40
| 2020-04-09T15:40:40
| 240,815,098
| 0
| 0
| null | 2023-05-22T22:41:00
| 2020-02-16T01:46:29
|
Python
|
UTF-8
|
Python
| false
| false
| 282
|
py
|
#__author:"longjin"
#date: 2019/7/16
# -*- coding: UTF-8 -*-
import sys
import pymysql
#建立连接
conn = pymysql.connect(db='test', user='root', passwd='123456', charset='utf8')
cur = conn.cursor()
sql = 'create table a(id int, hh varchar(30));'
cur.execute(sql)
conn.commit()
|
[
"ljeleven@foxmail.com"
] |
ljeleven@foxmail.com
|
f584e020f872e7dd88e708fa1581b0bfb46638b1
|
d3efc82dfa61fb82e47c82d52c838b38b076084c
|
/ETF/Redemption_SA/YW_ETFSS_SZSH_057.py
|
ff2e617be2fb70e66168d46d6d6a8a81aebb76c2
|
[] |
no_license
|
nantongzyg/xtp_test
|
58ce9f328f62a3ea5904e6ed907a169ef2df9258
|
ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f
|
refs/heads/master
| 2022-11-30T08:57:45.345460
| 2020-07-30T01:43:30
| 2020-07-30T01:43:30
| 280,388,441
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,106
|
py
|
#!/usr/bin/python
# -*- encoding: utf-8 -*-
import sys
import time
sys.path.append("/home/yhl2/workspace/xtp_test/ETF")
from import_common import *
sys.path.append("/home/yhl2/workspace/xtp_test/ETF/etf_service")
sys.path.append("/home/yhl2/workspace/xtp_test/utils")
from QueryOrderErrorMsg import queryOrderErrorMsg
class YW_ETFSS_SZSH_057(xtp_test_case):
def test_YW_ETFSS_SZSH_057(self):
# -----------ETF申购-------------
title = '深圳ETF赎回--错误的数量(数量<0)'
# 定义当前测试用例的期待值
# 期望状态:初始、未成交、部成、全成、部撤已报、部撤、已报待撤、已撤、废单、撤废、内部撤单
# xtp_ID和cancel_xtpID默认为0,不需要变动
case_goal = {
'case_ID': 'ATC-204-056',
'期望状态': '废单',
'errorID': 10210301,
'errorMSG': queryOrderErrorMsg(10210301),
'是否生成报单': '是',
'是否是撤废': '否',
'xtp_ID': 0,
'cancel_xtpID': 0,
}
logger.warning(title + ', case_ID=' + case_goal['case_ID'])
unit_info = {
'ticker': '179850', # etf代码
}
# -----------ETF申购-------------
# 定义委托参数信息------------------------------------------
wt_reqs = {
'business_type':
Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_ETF'],
'market':
Api.const.XTP_MARKET_TYPE['XTP_MKT_SZ_A'],
'ticker':
unit_info['ticker'],
'side':
Api.const.XTP_SIDE_TYPE['XTP_SIDE_REDEMPTION'],
'price_type':
Api.const.XTP_PRICE_TYPE['XTP_PRICE_LIMIT'],
'quantity':
-1000000,
}
EtfParmIni(Api, case_goal['期望状态'], wt_reqs['price_type'])
CaseParmInsertMysql(case_goal, wt_reqs)
rs = etfServiceTest(Api, case_goal, wt_reqs)
etf_creation_log(case_goal, rs)
if __name__ == '__main__':
unittest.main()
|
[
"418033945@qq.com"
] |
418033945@qq.com
|
9a03dbf9ee4dcfaac6cc5b242193b5047b594d22
|
75e8f932e1e08c7e71380e6b71d85ddd04f052dd
|
/SDAPythonBasics/list_ex.py
|
bd1f0706f87e8b3dfd0e0535ddd461107f069623
|
[] |
no_license
|
aeciovc/sda_python_ee4
|
fe470a253126ad307c651d252f9f9b489da32835
|
9e1e8be675fcafe4a61c354b55b71f53ad2af0fe
|
refs/heads/master
| 2023-08-29T15:17:34.033331
| 2021-10-31T09:41:57
| 2021-10-31T09:41:57
| 365,678,373
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,278
|
py
|
"""
# Ask the user name/phone cel/phone home/age
# Store those in a dictionary
# Save this dictionary to a list
# and Print out as result this:
[
{
'name': 'Aecio',
'phone_cel': '4535345435',
'phone_home': '34234234',
'age': 22
}
]
"""
list_phone_books = []
name = input("Type your name: ")
phone_cel = input("Type your cel phone number:")
phone_home = input("Type your home number:")
age = input("Type your age:")
phone_book = {} # {}
# the syntax to assign a new key and value ( dict_name[KEY] = VALUE )
phone_book['name'] = name # {'name': 'Aecio'}
phone_book['phone_cel'] = phone_cel # {'name': 'Aecio', 'phone_cel': '345435345'}
phone_book['phone_home'] = phone_home # {'name': 'Aecio', 'phone_cel': '345435345', 'phone_home': '53545435'}
phone_book['age'] = int(age) # {'name': 'Aecio', 'phone_cel': '345435345', 'phone_home': '53545435', 'age': 22}
list_phone_books.append(phone_book) # [{'name': 'Aecio', 'phone_cel': '345435345', 'phone_home': '53545435', 'age': 22}]
print(len(list_phone_books)) # 1
print(list_phone_books) # [{'name': 'Aecio', 'phone_cel': '345435345', 'phone_home': '53545435', 'age': 22}]
|
[
"aeciovc@gmail.com"
] |
aeciovc@gmail.com
|
22fd525a675865729fdcd29d2e652cc86d653d11
|
3e381dc0a265afd955e23c85dce1e79e2b1c5549
|
/hs-S1/icicealtigen.py
|
38ca41f7d89dcab101bac3f89dbd3ba9d5af8a91
|
[] |
no_license
|
serkancam/byfp2-2020-2021
|
3addeb92a3ff5616cd6dbd3ae7b2673e1a1a1a5e
|
c67206bf5506239d967c3b1ba75f9e08fdbad162
|
refs/heads/master
| 2023-05-05T04:36:21.525621
| 2021-05-29T11:56:27
| 2021-05-29T11:56:27
| 322,643,962
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 272
|
py
|
import turtle as t
t.Screen().setup(600,400)
t.shape("turtle")
# çözüm buraya yapılacak
for i in range(3):
for k in range(6):
t.forward(50)
t.right(60)
#ileri atla
t.penup()
t.forward(100)
t.pendown()
t.done()
|
[
"sekocam@gmail.com"
] |
sekocam@gmail.com
|
e4f19edfd0046a2a97ff217f1889c4ca0143aa5c
|
5af5fa981a0a8598b2b031aaf10c6ba6d2f5c28c
|
/images/views.py
|
52a223a2f7a48ace977ebf42c00165721d3e50e9
|
[] |
no_license
|
FMularski/image-house
|
d15b2fe7d0379cd237c5aef3336a0ad2ee5a136c
|
968e7c6a566090d0cf25246e506820dd955b34c0
|
refs/heads/main
| 2023-07-31T18:49:25.705577
| 2021-09-13T18:56:10
| 2021-09-13T18:56:10
| 404,382,258
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,683
|
py
|
from django.shortcuts import render, reverse, redirect
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.core.paginator import Paginator, EmptyPage
from . import forms, models
def filt3r(request, images):
if request.GET.get('category'):
images = images.filter(category__name=request.GET.get('category'))
return images
def sort(request, images):
if request.GET.get('sort-date') == 'asc':
images = images.order_by('created_at')
elif request.GET.get('sort-date') == 'desc':
images = images.order_by('-created_at')
if request.GET.get('sort-views') == 'asc':
images = images.order_by('views')
elif request.GET.get('sort-views') == 'desc':
images = images.order_by('-views')
if request.GET.get('sort-votes') == 'asc':
images = images.order_by('votes')
elif request.GET.get('sort-votes') == 'desc':
images = images.order_by('-votes')
return images
def paginate(request, images, images_per_page):
paginator = Paginator(images, images_per_page)
try:
page = paginator.page(request.GET.get('page', 1))
except EmptyPage:
page = paginator.page(1)
return page
def sign_in(request):
form = forms.SignInForm()
if request.method == 'POST':
form = forms.SignInForm(data=request.POST)
if form.is_valid():
user = form.user_cache
login(request, user)
return redirect(reverse('home', ))
messages.error(request, 'Invalid credentials.')
context = {'form': form}
return render(request, 'images/sign_in.html', context)
def sign_up(request):
form = forms.SignUpForm()
if request.method == 'POST':
form = forms.SignUpForm(request.POST)
if form.is_valid():
form.save()
messages.success(request, f'User \'{form.cleaned_data.get("username")}\' has been signed up.')
return redirect(reverse('sign_in', ))
context = {'form': form}
return render(request, 'images/sign_up.html', context)
@login_required(login_url='sign_in')
def home(request):
images = models.Image.objects.select_related('user', 'category').all()
most_viewed = images.order_by('-views').first()
most_voted = images.order_by('-votes').first()
most_recent = images.order_by('created_at').first()
images = filt3r(request, images)
images = sort(request, images)
images = paginate(request, images, images_per_page=9)
categories = models.Category.objects.all()
context = {'images': images, 'most_viewed': most_viewed,
'most_voted': most_voted, 'most_recent': most_recent,
'categories': categories}
return render(request, 'images/home.html', context)
@login_required(login_url='sign_in')
def my_images(request):
my_images = models.Image.objects.select_related('user', 'category').filter(user=request.user)
my_images = filt3r(request, my_images)
my_images = sort(request, my_images)
my_images = paginate(request, my_images, images_per_page=9)
categories = models.Category.objects.all()
context = {'images': my_images, 'categories': categories}
return render(request, 'images/my_images.html', context)
@login_required(login_url='sign_in')
def delete_img(request, pk):
models.Image.objects.get(pk=pk).delete()
messages.success(request, 'Image has been deleted.')
return redirect(reverse('my_images', ))
@login_required(login_url='sign_in')
def image(request, pk):
image = models.Image.objects.select_related('user', 'category').get(pk=pk)
image.views += 1
image.save()
context = {'image': image}
return render(request, 'images/image.html', context)
@login_required(login_url='sign_in')
def vote(request, pk, vote):
image = models.Image.objects.get(pk=pk)
image.votes += vote if vote else -1
image.save()
return redirect(reverse('image', kwargs={'pk': pk}))
@login_required(login_url='sign_in')
def add_image(request):
form = forms.ImageForm()
if request.method == 'POST':
form = forms.ImageForm(request.POST, request.FILES)
if form.is_valid():
image = form.save(commit=False)
image.user = request.user
image.save()
messages.success(request, 'Image has been added.')
return redirect(reverse('my_images', ))
context = {'form': form}
return render(request, 'images/add.html', context)
def sign_out(request):
logout(request)
return redirect(reverse('sign_in', ))
|
[
"mularskif@gmail.com"
] |
mularskif@gmail.com
|
1db73a5ea084e5e00837a54631bc558892cc61a6
|
efe1546fa1f057cbbbe974bd8478309b6176d641
|
/waf/playground/gtest/tests/test1/wscript_build
|
2af2f0639e6374c08fa8b907bd37213432c364ab
|
[
"Apache-2.0"
] |
permissive
|
yankee14/reflow-oven-atmega328p
|
2df323aba16ac4f3eac446abc633a5d79a1a55cb
|
e6792143576f13f0a3a49edfd54dbb2ef851d95a
|
refs/heads/master
| 2022-12-02T21:32:39.513878
| 2019-05-30T06:25:12
| 2019-05-30T06:25:12
| 188,760,664
| 0
| 1
|
Apache-2.0
| 2022-11-15T18:22:50
| 2019-05-27T02:52:18
|
Python
|
UTF-8
|
Python
| false
| false
| 220
|
#! /usr/bin/env python
# encoding: utf-8
bld.program(
features = 'test',
source = 'AccumulatorTest.cpp',
target = 'unit_test_program',
use = 'unittestmain useless GTEST',
ut_cwd = bld.path.abspath(),
)
|
[
"yankee14.ed@gmail.com"
] |
yankee14.ed@gmail.com
|
|
49e7c471cc04688d4233e45fa2ec38aa03a969bb
|
2b167e29ba07e9f577c20c54cb943861d0ccfa69
|
/numerical_analysis_backup/large-scale-multiobj2/core-arch5-guard0-beta0-hebbe/pareto310.py
|
8c6607739df901db6b14e0cca2e08dcef97dc67e
|
[] |
no_license
|
LiYan1988/kthOld_OFC
|
17aeeed21e195d1a9a3262ec2e67d6b1d3f9ff0f
|
b1237577ea68ad735a65981bf29584ebd889132b
|
refs/heads/master
| 2021-01-11T17:27:25.574431
| 2017-01-23T05:32:35
| 2017-01-23T05:32:35
| 79,773,237
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,489
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 4 15:15:10 2016
@author: li
optimize both throughput and connections
"""
#import sys
#sys.path.insert(0, '/home/li/Dropbox/KTH/numerical_analysis/ILPs')
import csv
from gurobipy import *
import numpy as np
from arch5_decomposition_new import Arch5_decompose
np.random.seed(2010)
num_cores=10
num_slots=320
i = 10
filename = 'traffic_matrix_pod250_load50_'+str(i)+'.csv'
# print filename
tm = []
with open(filename) as f:
reader = csv.reader(f)
for idx, row in enumerate(reader):
row = [float(u) for u in row]
tm.append(row)
tm = np.array(tm)
#%% arch2
corev = [10, 12]
connection_ub = []
throughput_ub = []
obj_ub = []
connection_lb = []
throughput_lb = []
obj_lb = []
connection_he = []
throughput_he = []
obj_he = []
for c in corev:
m = Arch5_decompose(tm, num_slots=num_slots, num_cores=c,
alpha=1,beta=0)
m.create_model_routing(mipfocus=1,timelimit=36000,mipgap=0.01, method=3,
threads=20)
connection_ub.append(m.connection_ub_)
throughput_ub.append(m.throughput_ub_)
obj_ub.append(m.obj_ub_)
np.save('core_usagex_i%d_c%d.npy'%(i,c), m.core_usagex)
# m.create_model_sa(mipfocus=1,timelimit=26000,mipgap=0.01, method=2,
# SubMIPNodes=2000, heuristics=0.8, threads=4, presolve=2)
# connection_lb.append(m.connection_lb_)
# throughput_lb.append(m.throughput_lb_)
# obj_lb.append(m.obj_lb_)
# m.write_result_csv('cnklist_lb_%d_%d.csv'%(i,c), m.cnklist_lb)
connection_lb.append(0)
throughput_lb.append(0)
obj_lb.append(0)
# m.heuristic()
# connection_he.append(m.obj_heuristic_connection_)
# throughput_he.append(m.obj_heuristic_throughput_)
# obj_he.append(m.obj_heuristic_)
# m.write_result_csv('cnklist_heuristic_%d_%d.csv'%(i,c),
# m.cnklist_heuristic_)
connection_he.append(0)
throughput_he.append(0)
obj_he.append(0)
result = np.array([corev,
connection_ub,throughput_ub,obj_ub,
connection_lb,throughput_lb,obj_lb,
connection_he,throughput_he,obj_he]).T
file_name = "result_pareto_arch5_old_3_{}.csv".format(i)
with open(file_name, 'w') as f:
writer = csv.writer(f, delimiter=',')
writer.writerow(['beta', 'connection_ub', 'throughput_ub',
'obj_ub', 'connection_lb', 'throughput_lb', 'obj_lb',
'connection_he', 'throughput_he', 'obj_he'])
writer.writerows(result)
|
[
"li.yan.ly414@gmail.com"
] |
li.yan.ly414@gmail.com
|
830461b71b4a998b1b41c369276838c5086a614f
|
457db67d845d47bf9f65d8c4ae8c781fd9c9c74c
|
/Peaks_detection.py
|
943e20ab6af6c11f234caa76ffdd3656a25adf42
|
[] |
no_license
|
shanonentropy/DiamondNVDataProcessing
|
f5b0d3607a6c9d350f22b970d598c030125396e1
|
0c7b72a879f554e8beaf8b846028587ef66aed36
|
refs/heads/master
| 2021-08-16T14:39:17.239834
| 2017-11-20T01:41:06
| 2017-11-20T01:41:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,499
|
py
|
import sys
from numpy import NaN, Inf, arange, isscalar, asarray, array
def peakdet(v, delta, x = None):
"""
Converted from MATLAB script at http://billauer.co.il/peakdet.html
Returns two arrays
function [maxtab, mintab]=peakdet(v, delta, x)
%PEAKDET Detect peaks in a vector
% [MAXTAB, MINTAB] = PEAKDET(V, DELTA) finds the local
% maxima and minima ("peaks") in the vector V.
% MAXTAB and MINTAB consists of two columns. Column 1
% contains indices in V, and column 2 the found values.
%
% With [MAXTAB, MINTAB] = PEAKDET(V, DELTA, X) the indices
% in MAXTAB and MINTAB are replaced with the corresponding
% X-values.
%
% A point is considered a maximum peak if it has the maximal
% value, and was preceded (to the left) by a value lower by
% DELTA.
% Eli Billauer, 3.4.05 (Explicitly not copyrighted).
% This function is released to the public domain; Any use is allowed.
"""
maxtab = []
mintab = []
if x is None:
x = arange(len(v))
v = asarray(v)
if len(v) != len(x):
sys.exit('Input vectors v and x must have same length')
if not isscalar(delta):
sys.exit('Input argument delta must be a scalar')
if delta <= 0:
sys.exit('Input argument delta must be positive')
mn, mx = Inf, -Inf
mnpos, mxpos = NaN, NaN
lookformax = True
for i in arange(len(v)):
this = v[i]
if this > mx:
mx = this
mxpos = x[i]
if this < mn:
mn = this
mnpos = x[i]
if lookformax:
if this < mx-delta:
maxtab.append((mxpos, mx))
mn = this
mnpos = x[i]
lookformax = False
else:
if this > mn+delta:
mintab.append((mnpos, mn))
mx = this
mxpos = x[i]
lookformax = True
return array(maxtab), array(mintab)
if __name__=="__main__":
from matplotlib.pyplot import plot, scatter, show
series = [0,0,0,2,0,0,0,-2,0,0,0,2,0,0,0,-2,0]
maxtab, mintab = peakdet(series,.3)
plot(series)
scatter(array(maxtab)[:,0], array(maxtab)[:,1], color='blue')
scatter(array(mintab)[:,0], array(mintab)[:,1], color='red')
show()
print array(maxtab)[0]
print series[3]
|
[
"noreply@github.com"
] |
shanonentropy.noreply@github.com
|
6589983858de7fac1bfc6bfab9cfdae6dfa84f4d
|
4e5141121d8b4015db233cbc71946ec3cfbe5fe6
|
/samples/basic/crud/gnmi/models/cisco-ios-xr/Cisco-IOS-XR-aaa-lib-cfg/gn-delete-xr-aaa-lib-cfg-20-ydk.py
|
93a22ad7752d9e1443eea3358f521fd01caad79e
|
[
"Apache-2.0"
] |
permissive
|
itbj/ydk-py-samples
|
898c6c9bad9d6f8072892300d42633d82ec38368
|
c5834091da0ebedbb11af7bbf780f268aad7040b
|
refs/heads/master
| 2022-11-20T17:44:58.844428
| 2020-07-25T06:18:02
| 2020-07-25T06:18:02
| 282,382,442
| 1
| 0
| null | 2020-07-25T06:04:51
| 2020-07-25T06:04:50
| null |
UTF-8
|
Python
| false
| false
| 2,653
|
py
|
#!/usr/bin/env python
#
# Copyright 2016 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Delete all config data for model Cisco-IOS-XR-aaa-lib-cfg.
usage: gn-delete-xr-aaa-lib-cfg-20-ydk.py [-h] [-v] device
positional arguments:
device gNMI device (http://user:password@host:port)
optional arguments:
-h, --help show this help message and exit
-v, --verbose print debugging messages
"""
from argparse import ArgumentParser
from urlparse import urlparse
from ydk.path import Repository
from ydk.services import CRUDService
from ydk.gnmi.providers import gNMIServiceProvider
from ydk.models.cisco_ios_xr import Cisco_IOS_XR_aaa_lib_cfg \
as xr_aaa_lib_cfg
import os
import logging
YDK_REPO_DIR = os.path.expanduser("~/.ydk/")
if __name__ == "__main__":
"""Execute main program."""
parser = ArgumentParser()
parser.add_argument("-v", "--verbose", help="print debugging messages",
action="store_true")
parser.add_argument("device",
help="gNMI device (http://user:password@host:port)")
args = parser.parse_args()
device = urlparse(args.device)
# log debug messages if verbose argument specified
if args.verbose:
logger = logging.getLogger("ydk")
logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
formatter = logging.Formatter(("%(asctime)s - %(name)s - "
"%(levelname)s - %(message)s"))
handler.setFormatter(formatter)
logger.addHandler(handler)
# create gNMI provider
repository = Repository(YDK_REPO_DIR+device.hostname)
provider = gNMIServiceProvider(repo=repository,
address=device.hostname,
port=device.port,
username=device.username,
password=device.password)
# create CRUD service
crud = CRUDService()
aaa = xr_aaa_lib_cfg.Aaa() # create object
# delete configuration on gNMI device
crud.delete(provider, aaa)
exit()
# End of script
|
[
"deom119@gmail.com"
] |
deom119@gmail.com
|
3aa08f7780110a7bdf6645e4e423705b1f892161
|
0cf21c1880d43a9b9384682ce7179897de08508d
|
/AtCoder Beginner Contest/2019_ABC/2019-11-16(ABC145)/ABC145_C.py
|
cb436adb1b407097411873168d3c1dfadd4c48bc
|
[] |
no_license
|
neoneo0106/AtCoder
|
44bc54c82c9c1a0ded396cca54b110bc02ca86ea
|
1ff28a526e6a1b32cf18bd7daa4e33462daea080
|
refs/heads/master
| 2022-03-24T00:35:55.664228
| 2022-02-22T14:44:14
| 2022-02-22T14:44:14
| 246,288,963
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 394
|
py
|
import math
def dis(x1, y1, x2, y2):
return ((x1 - x2)**2 + (y1 - y2)**2) ** (1/2)
def kaijo(n):
return math.factorial(n)
n = int(input())
x = [0] * n
y = [0] * n
for i in range(n):
x[i], y[i] = map(int, input().split())
sum = 0
for j in range(n):
for i in range(n):
if i != j:
sum = sum + (dis(x[i], y[i], x[j], y[j])) * kaijo(n-1)
print(sum/kaijo(n))
|
[
"neo@neonoMacBook-puro.local"
] |
neo@neonoMacBook-puro.local
|
a39592556defdd0e737b3a3010ee24fc4b6a9448
|
ef50bb32ab941f64621ba17b419dd19531da220d
|
/products/migrations/0003_product_price.py
|
7c4097495791fbcc1c6f39e6ee27de4905f04246
|
[] |
no_license
|
OleksandrMyshko/test-site
|
3de69d5a29e2860016f5fdc5d4c510e493c50e1a
|
0bbf84a4eddfa92789f516b52e5b05fa15817262
|
refs/heads/master
| 2021-07-10T07:28:27.769729
| 2017-09-25T17:22:05
| 2017-09-25T17:22:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 480
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-22 16:30
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('products', '0002_productimage_img'),
]
operations = [
migrations.AddField(
model_name='product',
name='price',
field=models.DecimalField(decimal_places=2, default=0, max_digits=10),
),
]
|
[
"sashamushko@gmail.com"
] |
sashamushko@gmail.com
|
191e8c8ebd498df80b64cd84e5b7f15eca56a5f6
|
fb82fdf706863465b1f357cd1fa0447474cd8a70
|
/ServerComponent/venv/Lib/site-packages/pythonrv/test/mock_and_helpers.py
|
296b68023a48690ab41c922fedc65e25c621d2ac
|
[
"MIT"
] |
permissive
|
CDU55/FakeNews
|
d79e2a069b3f1392f779d5b2256cd54c696e789a
|
707bd48dd78851081d98ad21bbdadfc2720bd644
|
refs/heads/main
| 2023-02-20T06:27:18.618837
| 2021-01-17T15:14:27
| 2021-01-17T15:14:27
| 305,167,221
| 0
| 1
|
MIT
| 2020-12-07T19:51:46
| 2020-10-18T18:16:49
|
Python
|
UTF-8
|
Python
| false
| false
| 823
|
py
|
# -*- coding: utf-8 -*-
import unittest
import logging.handlers
class MockLoggingHandler(logging.Handler):
def __init__(self, *args, **kwargs):
self.reset()
super(MockLoggingHandler, self).__init__(*args, **kwargs)
def emit(self, record):
self.messages.append(record)
def reset(self):
self.messages = []
class TestLogging(unittest.TestCase):
def setUp(self):
self.logging_handler = MockLoggingHandler()
logging.getLogger('pythonrv').addHandler(self.logging_handler)
def tearDown(self):
logging.getLogger('pythonrv').removeHandler(self.logging_handler)
def assertLog(self, level, msg):
record = self.logging_handler.messages[-1]
self.assertEquals(record.levelno, level)
self.assertEquals(record.getMessage(), msg)
|
[
"48147775+BiancaChirica@users.noreply.github.com"
] |
48147775+BiancaChirica@users.noreply.github.com
|
cea172f7fc2168b6a992da358c297658f258989b
|
7b7bfbfebd627a3ccfdd52bb7164fa4f94cda7fc
|
/optic_store/optic_store/doctype/group_discount/test_group_discount.py
|
6abaa25f486d759b1930cf23d4f05c363a577971
|
[
"MIT"
] |
permissive
|
f-9t9it/optic_store
|
d117b7ef7c4107ec15d8194fc57d66a18aff5945
|
4682ae99cdb2cbfb1ff99196398d7379b4b6c8f1
|
refs/heads/master
| 2022-07-01T10:29:54.783550
| 2022-06-21T14:34:40
| 2022-06-21T14:34:40
| 171,165,708
| 23
| 43
|
NOASSERTION
| 2022-06-21T14:21:16
| 2019-02-17T19:58:33
|
Python
|
UTF-8
|
Python
| false
| false
| 209
|
py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2019, 9T9IT and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
class TestGroupDiscount(unittest.TestCase):
pass
|
[
"sun@libermatic.com"
] |
sun@libermatic.com
|
bb2c39b81b685aa1f33c7cc8aa706e7e60cb2876
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/414/usersdata/315/80495/submittedfiles/av1_programa2.py
|
736f8af8c79f8c3b81b483e1639ddc5f9b14123c
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 818
|
py
|
# -*- coding: utf-8 -*-
matricula = input('Matricula: ')
nota1 = float(input('Digite nota1 com 1 casa decimal: '))
nota2 = float(input('Digite nota2 com 1 casa decimal: '))
nota3 = float(input('Digite nota3 com 1 casa decimal: '))
ME = float(input('Digite ME com 1 casa decimal: '))
MA = (nota1 + (nota2*2) + nota3*3 +ME)/7
if MA >= 9:
    print(matricula)
    print('%.1f' % MA)
    print('A')
    print('APROVADO')
elif MA >= 7.5 and MA < 9:
    print(matricula)
    print('%.1f' % MA)
    print('B')
    print('APROVADO')
elif MA >= 6 and MA < 7.5:
    print(matricula)
    print('%.1f' % MA)
    print('C')
    print('APROVADO')
elif MA >= 4 and MA < 6:
    print(matricula)
    print('%.1f' % MA)
    print('D')
    print('REPROVADO')
elif MA < 4:
    print(matricula)
    print('%.1f' % MA)
    print('E')
    print('REPROVADO')
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
07306b347c9a516d94e561248d0074b5d9a8a4ba
|
5c484d9ecd194ad31555303aff004b739fc6b564
|
/stats/migrations/0046_offer_last_active_at.py
|
16f9b8d611c0ee397ebab74f2f2c0e76da862727
|
[] |
no_license
|
bloogrox/hasoffers-kpi
|
15b82c9287fc4a62e14e4b637c3d57d03db54233
|
227472f0090856048d1fdb0591ffbb15b575a311
|
refs/heads/master
| 2021-01-01T17:31:14.448122
| 2017-11-08T16:26:40
| 2017-11-08T16:26:40
| 98,095,120
| 0
| 1
| null | 2017-08-12T11:43:31
| 2017-07-23T12:05:31
|
Python
|
UTF-8
|
Python
| false
| false
| 584
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-10-18 00:44
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('stats', '0045_offer_status'),
]
operations = [
migrations.AddField(
model_name='offer',
name='last_active_at',
field=models.DateTimeField(default=datetime.datetime(2010, 1, 1, 0, 0, tzinfo=utc)),
preserve_default=False,
),
]
|
[
"bloogrox@gmail.com"
] |
bloogrox@gmail.com
|
45c71549dbaad7e978eac089619642a8467b3a13
|
6067d10ccf61070ff2f7ec00068a4a138d835b48
|
/analyze_error_rate_book.py
|
fa1ec11f8ee79584a35f6f5e71fdd08a08b89484
|
[] |
no_license
|
Doreenruirui/OCR
|
aa3c7f3bc34a98951288ab6608e8e4c7373a2bda
|
693562e966e2b18b759aabeac23c068cf8c59a7c
|
refs/heads/master
| 2021-07-09T08:41:04.763718
| 2018-08-07T19:34:34
| 2018-08-07T19:34:34
| 91,371,468
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,416
|
py
|
import os
from os.path import join, exists
import numpy as np
import sys
from collections import OrderedDict
from plot_curve import plotBar, plot
# folder_data = '/Users/doreen/Documents/Experiment/dataset/OCR/'
folder_data = '/gss_gpfs_scratch/dong.r/Dataset/OCR'
#folder_data = '/home/rui/Dataset/OCR'
def merge_error_rate(cur_folder):
cur_folder = join(folder_data, cur_folder)
error = [[], [], []]
num_line = []
books = []
for line in file(join(cur_folder, 'book.man_wit.test.ec.txt')):
items = line.strip('\n').split('\t')
error[0].append(float(items[2]))
num_line.append(int(items[1]))
books.append(items[0])
for line in file(join(cur_folder, 'book.man_wit.test.single.ec.txt')):
items = line.strip('\n').split('\t')
error[1].append(float(items[2]))
for line in file(join(cur_folder, 'book.man_wit.test.avg.ec.txt')):
items = line.strip('\n').split('\t')
error[2].append(float(items[2]))
ngroup = len(books)
print 'AVG better than SINGLE:'
num_avg = 0
with open(join(cur_folder, 'error_rate_per_book.txt'), 'w') as f_:
for i in range(ngroup):
if error[2][i] < error[1][i]:
num_avg += 1
f_.write('\t'.join(map(str, [books[i], num_line[i], error[0][i], error[1][i], error[2][i]])) + '\n')
# print books[i], num_line[i], error[0][i], error[1][i], error[2][i]
# f_.write('+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++')
print num_avg
print 'AVG worse than SINGLE:'
num_avg = 0
for i in range(ngroup):
if error[2][i] > error[1][i]:
num_avg += 1
f_.write('\t'.join(map(str, [books[i], num_line[i], error[0][i], error[1][i], error[2][i]])) + '\n')
print num_avg
# stickers = books
# stickers = [ele for ele in range(len(books))]
# lenlabels = ['OCR', 'SINGLE', 'AVG']
# xlabel = 'Book Name'
# ylabel = 'Error Rate'
# title = 'Error Rate Per Book'
# figure_name = 'Results/Error_Rate_Per_Book.png'
# error = [error[0][:10], error[1][:10], error[2][:10]]
# plotBar(ngroup, error, stickers, lenlabels, xlabel, ylabel, title, figure_name, 0.2)
# plot(stickers, error, xlabel, ylabel, [0, 380], [0, 1], lenlabels, title, figure_name)
arg_folder = sys.argv[1]
merge_error_rate(arg_folder)
|
[
"ruiruidong1989@gmail.com"
] |
ruiruidong1989@gmail.com
|
fda84663dd476b25868d4899cb14568ead0f5dad
|
621a40fa363dc0c32c96a4c8fdfe9142877e2ff1
|
/ietf/mailtrigger/admin.py
|
e192a2066c255795a6d46e26d189225602b69f30
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
omunroe-com/ietfdb2
|
d9c40bebe4b25059f810c70dd1370cca30cb3c36
|
aeaae292fbd55aca1b6043227ec105e67d73367f
|
refs/heads/master
| 2020-04-04T21:05:56.067430
| 2018-11-05T09:08:27
| 2018-11-05T09:08:27
| 156,273,382
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 528
|
py
|
from django.contrib import admin
from ietf.mailtrigger.models import MailTrigger, Recipient
class RecipientAdmin(admin.ModelAdmin):
list_display = [ 'slug', 'desc', 'template', 'has_code', ]
def has_code(self, obj):
return hasattr(obj,'gather_%s'%obj.slug)
has_code.boolean = True
admin.site.register(Recipient, RecipientAdmin)
class MailTriggerAdmin(admin.ModelAdmin):
list_display = [ 'slug', 'desc', ]
filter_horizontal = [ 'to', 'cc', ]
admin.site.register(MailTrigger, MailTriggerAdmin)
|
[
"henrik@levkowetz.com"
] |
henrik@levkowetz.com
|
de4237a85539bc2bf65a12af93a1b4f75141497e
|
0e7aed5eef2e1d132a7e75dd8f439ae76c87639c
|
/python/863_All_Nodes_Distance_K_in_Binary_Tree.py
|
b048d616be21233247e4a98d9029d20749033190
|
[
"MIT"
] |
permissive
|
liaison/LeetCode
|
2a93df3b3ca46b34f922acdbc612a3bba2d34307
|
bf03743a3676ca9a8c107f92cf3858b6887d0308
|
refs/heads/master
| 2022-09-05T15:04:19.661298
| 2022-08-19T19:29:19
| 2022-08-19T19:29:19
| 52,914,957
| 17
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,950
|
py
|
"""
We are given a binary tree (with root node root), a target node, and an integer value K.
Return a list of the values of all nodes that have a distance K from the target node. The answer can be returned in any order.
The distance between a node and its child nodes is 1.
Input: root = [3,5,1,6,2,0,8,null,null,7,4], target = 5, K = 2
Output: [7,4,1]
@author: Lisong Guo <lisong.guo@me.com>
"""
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def buildGraph(self, node, parent, graph):
if node is None:
return
if parent is not None:
graph[node].append(parent)
if node.left is not None:
graph[node].append(node.left)
self.buildGraph(node.left, node, graph)
if node.right is not None:
graph[node].append(node.right)
self.buildGraph(node.right, node, graph)
def distanceK(self, root, target, K):
"""
:type root: TreeNode
:type target: TreeNode
:type K: int
:rtype: List[int]
"""
from collections import defaultdict
# vetex: [parent, left, right]
graph = defaultdict(list)
# DFS to build graph
self.buildGraph(root, None, graph)
# BFS to retrieve the nodes with given distance
# Starting from the target node
q = [(target, 0)]
# keep the records, since the graph is all connected
visited = set()
# results
ans = []
while q:
node, distance = q.pop(0)
if node in visited:
continue
visited.add(node)
# we've reached the desired distance/radius
if K == distance:
ans.append(node.val)
# we haven't reached the desired distance, keep going
elif distance < K:
for child in graph[node]:
q.append((child, distance+1))
# exceed the desired distance
# No need to go further
return ans
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution2:
def buildParentMap(self, node, parent, parentMap):
if node is None:
return
parentMap[node] = parent
self.buildParentMap(node.left, node, parentMap)
self.buildParentMap(node.right, node, parentMap)
def distanceK(self, root, target, K):
"""
:type root: TreeNode
:type target: TreeNode
:type K: int
:rtype: List[int]
"""
# node: parent
parentMap = {}
# DFS to build the map that maps a node to its parent.
self.buildParentMap(root, None, parentMap)
# keep the records, since the graph is all connected
visited = set()
# results
ans = []
# Again, DFS to retrieve the nodes within the given distance
# this time with the help of the parentMap.
# Starting from the target node
def dfs(node, distance):
if node is None or node in visited:
return
visited.add(node)
if distance == K:
ans.append(node.val)
elif distance < K:
dfs(node.left, distance+1)
dfs(node.right, distance+1)
dfs(parentMap[node], distance+1)
# else exceed the scope, no need to explore further
dfs(target, 0)
return ans
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def distanceK(self, root: TreeNode, target: TreeNode, k: int) -> List[int]:
graph = defaultdict(list)
# build a non-directional graph, i.e. bi-directional graph
def build_graph(node):
nonlocal graph
if not node:
return
for next_node in [node.left, node.right]:
if next_node:
graph[node.val].append(next_node.val)
graph[next_node.val].append(node.val)
build_graph(next_node)
build_graph(root)
# run a BFS/DFS exploration
queue = [(target.val, 0)]
visited = set([target.val])
output = []
while queue:
curr, distance = queue.pop()
if distance == k:
output.append(curr)
elif distance < k:
for next_val in graph[curr]:
if next_val not in visited:
visited.add(next_val)
queue.append((next_val, distance+1))
return output
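# --- Hypothetical usage sketch (added for illustration; not part of the original file) ---
# Builds the tree from the docstring example [3,5,1,6,2,0,8,null,null,7,4] by hand,
# using the TreeNode shape sketched in the comments above, and runs Solution2
# (the parent-map variant), which needs no extra imports.
class TreeNode:
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None


def _run_docstring_example():
    nodes = {v: TreeNode(v) for v in [3, 5, 1, 6, 2, 0, 8, 7, 4]}
    nodes[3].left, nodes[3].right = nodes[5], nodes[1]
    nodes[5].left, nodes[5].right = nodes[6], nodes[2]
    nodes[1].left, nodes[1].right = nodes[0], nodes[8]
    nodes[2].left, nodes[2].right = nodes[7], nodes[4]
    # Expected output (in any order): [7, 4, 1]
    print(Solution2().distanceK(nodes[3], nodes[5], 2))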
|
[
"lisong.guo@me.com"
] |
lisong.guo@me.com
|
c706f72bc0673621beb535e16fba9c2156cb3234
|
39f13506f0f55856639a77d8d9ff2832e980d577
|
/setup.py
|
e0fd11043af6a8d71832ec0ccfd4bc4f280fba30
|
[] |
no_license
|
HD60Hz-Open/winfspy
|
75e0fd24a6d7edfc00f07c6ecf82f0ad2d0759d3
|
8ea5d2c4f510337ac527eaa8982c3e7c6f4e08c3
|
refs/heads/master
| 2020-09-13T06:42:22.619496
| 2019-11-12T11:15:07
| 2019-11-12T11:15:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,188
|
py
|
#!/usr/bin/env python
import os
import sys
from setuptools import setup, find_packages
os.chdir(os.path.dirname(sys.argv[0]) or ".")
# Awesome hack to load `__version__`
__version__ = None
exec(open("src/winfspy/_version.py", encoding="utf-8").read())
requirements = open("requirements.txt").read().split("\n")
setup(
name="winfspy",
version=__version__,
description="CFFI bindings for WinFSP",
long_description=open("README.rst", "rt").read(),
url="https://github.com/Scille/winfspy",
author="Emmanuel Leblond",
author_email="emmanuel.leblond@gmail.com",
classifiers=[
"Development Status :: 4 - Beta",
"Operating System :: Microsoft :: Windows",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"License :: OSI Approved :: BSD License",
],
package_dir={"": "src"},
packages=find_packages(where="src", exclude=["_cffi_src", "_cffi_src.*"]),
install_requires=requirements,
setup_requires=requirements,
cffi_modules=["./src/_cffi_src/build_bindings.py:ffibuilder"],
# for cffi
zip_safe=False,
)
|
[
"emmanuel.leblond@gmail.com"
] |
emmanuel.leblond@gmail.com
|
33e574f5532767f44ed9bc98132d94893ef78fff
|
17fe32a70be82d9fd6c3268b840226b5567c8b29
|
/pycox/__init__.py
|
d2de313f848c3a8d78f26de71902f3b2887cf0c3
|
[
"MIT",
"BSD-2-Clause"
] |
permissive
|
georgehc/dksa
|
dbb7161a75b8206d3d469bb5b966ed7a0f84d86c
|
bcd9eab6c9ded47f5b166cf1351b06e26e0c8f90
|
refs/heads/master
| 2023-08-02T06:15:12.472386
| 2021-10-01T17:47:25
| 2021-10-01T17:47:25
| 282,355,975
| 11
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 294
|
py
|
# -*- coding: utf-8 -*-
"""Top-level package for pycox."""
__author__ = """Haavard Kvamme"""
__email__ = 'haavard.kvamme@gmail.com'
__version__ = '0.2.1'
import pycox.datasets
import pycox.evaluation
import pycox.preprocessing
import pycox.simulations
import pycox.utils
import pycox.models
|
[
"georgechen@cmu.edu"
] |
georgechen@cmu.edu
|
4ef736fdd3d0ba141bf8f68f9a6b5d5711963d17
|
0fcf4e4b1c61fad0829828fb0294dd5faceb3eaa
|
/app/app/settings.py
|
12b2893abccd49a2589c099d32292f5162f515e3
|
[
"MIT"
] |
permissive
|
MrRezoo/recipe-app-api
|
465127526b7d00fb3b454b18a6bdcf1009e57c83
|
6b4c236490f1dd0a6bcce644bc0ae0ffe376ab8d
|
refs/heads/main
| 2023-07-14T22:48:02.635837
| 2021-08-24T09:27:55
| 2021-08-24T09:27:55
| 395,381,816
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,593
|
py
|
"""
Django settings for app project.
Generated by 'django-admin startproject' using Django 3.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-u(fk8b8qy=v9-em+cgzg_7i6i6kq*_+%0ly))@k6w08)@965c9'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# Local apps
'core.apps.CoreConfig',
'user.apps.UserConfig',
'recipe.apps.RecipeConfig',
# Third party apps
'rest_framework',
'rest_framework.authtoken',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'HOST': os.environ.get('DB_HOST'),
'NAME': os.environ.get('DB_NAME'),
'USER': os.environ.get('DB_USER'),
'PASSWORD': os.environ.get('DB_PASS'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
AUTH_USER_MODEL = 'core.User'
|
[
"rezam578@gmail.com"
] |
rezam578@gmail.com
|
dcc2f0d21fd217cfec6a78e8d4135813fe46aa8c
|
b7f88c6e703358c6bb4072daf407b1969bccabac
|
/stiff/wordnet/base.py
|
b97e58725ff95f7cfd579de28fe244194dbbfd47
|
[
"Apache-2.0",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
frankier/STIFF
|
edf1a34604991a9a23652073b321478e4809697d
|
c69060a1ba8ee36b660def9c5215c74bf5310e0c
|
refs/heads/master
| 2023-03-08T17:03:32.317399
| 2020-10-06T12:16:50
| 2020-10-06T12:16:50
| 133,658,142
| 2
| 1
|
Apache-2.0
| 2023-02-22T23:29:19
| 2018-05-16T11:51:40
|
Python
|
UTF-8
|
Python
| false
| false
| 1,099
|
py
|
from abc import ABC, abstractmethod
from typing import Dict, List, Tuple, Callable, Iterable
from nltk.corpus.reader import Lemma, Synset
from finntk.wordnet.utils import ss2pre
def default_mapper(synset_obj: Synset) -> str:
return ss2pre(synset_obj)
class ExtractableWordnet(ABC):
_synset_mappers: Dict[str, Callable[[Lemma], str]] = {}
@staticmethod
@abstractmethod
def lang() -> str:
pass
@staticmethod
@abstractmethod
def lemma_names() -> Dict[str, List[str]]:
pass
@classmethod
def synset_group_lemmas(
cls, wordnet_lemmas: Dict[str, List[Lemma]]
) -> Iterable[List[Tuple[str, Lemma]]]:
from .utils import synset_group_lemmas
return synset_group_lemmas(wordnet_lemmas, cls)
@classmethod
def canonical_synset_id(cls, wn: str, lemma_obj: Lemma) -> str:
return cls.canonical_synset_id_of_synset(wn, lemma_obj.synset())
@classmethod
def canonical_synset_id_of_synset(cls, wn: str, synset_obj: Synset) -> str:
return cls._synset_mappers.get(wn, default_mapper)(synset_obj)
|
[
"frankie@robertson.name"
] |
frankie@robertson.name
|
0a2badee9e4515c51818ced076629a9d87578423
|
41523dd4871e8ed1043d2b3ddf73417fcbdde209
|
/day05/中国国旗.py
|
1d68bd04ab6bbc59b915825fa15d66adfd4f3000
|
[] |
no_license
|
WayneChen1994/Python1805
|
2aa1c611f8902b8373b8c9a4e06354c25f8826d6
|
a168cd3b7749afc326ec4326db413378fd3677d5
|
refs/heads/master
| 2020-03-30T23:19:00.773288
| 2018-11-02T10:47:40
| 2018-11-02T10:47:40
| 151,697,105
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,630
|
py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# author: Wayne.Chen
'''
Draw the Chinese national flag using the turtle module.
'''
import turtle
turtle.begin_fill()
turtle.fillcolor("red")
for x in range(2):
turtle.forward(300)
turtle.left(90)
turtle.forward(200)
turtle.left(90)
turtle.end_fill()
turtle.up()
turtle.left(90)
turtle.forward(150)
turtle.right(90)
turtle.forward(20)
turtle.down()
turtle.begin_fill()
turtle.fillcolor("yellow")
for x in range(5):
turtle.forward(20)
turtle.left(72)
turtle.forward(20)
turtle.right(144)
turtle.end_fill()
turtle.up()
turtle.forward(60)
turtle.left(90)
turtle.forward(30)
turtle.right(144)
turtle.down()
turtle.begin_fill()
turtle.fillcolor("yellow")
for x in range(5):
turtle.forward(7)
turtle.left(72)
turtle.forward(7)
turtle.right(144)
turtle.end_fill()
turtle.up()
turtle.forward(25)
turtle.left(36)
turtle.down()
turtle.begin_fill()
turtle.fillcolor("yellow")
for x in range(5):
turtle.forward(7)
turtle.left(72)
turtle.forward(7)
turtle.right(144)
turtle.end_fill()
turtle.up()
turtle.right(90)
turtle.forward(20)
turtle.left(90)
turtle.down()
turtle.begin_fill()
turtle.fillcolor("yellow")
for x in range(5):
turtle.forward(7)
turtle.left(72)
turtle.forward(7)
turtle.right(144)
turtle.end_fill()
turtle.up()
turtle.right(90)
turtle.forward(10)
turtle.down()
turtle.begin_fill()
turtle.fillcolor("yellow")
for x in range(5):
turtle.forward(7)
turtle.left(72)
turtle.forward(7)
turtle.right(144)
turtle.end_fill()
turtle.hideturtle()
turtle.done()
|
[
"waynechen1994@163.com"
] |
waynechen1994@163.com
|
0111d066ee2b49d9fedaa8b7b93dce650989fde8
|
e32a75c44ef9c964bc5f97712c8e0e845ee3f6ca
|
/train_vqa_vqg_flt_cand_models.py
|
03a85bba83ea363002c5bfbe143b23c0809319e4
|
[] |
no_license
|
ankita-kalra/ivqa_belief_set
|
29c40ec4076433ac412728aea603e4e69ce530eb
|
6ebba50ff001e1af6695bb3f4d2643e7072ee153
|
refs/heads/master
| 2020-04-05T17:17:00.834303
| 2018-08-27T09:59:16
| 2018-08-27T09:59:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,109
|
py
|
from __future__ import division
import tensorflow as tf
import os
# from models.model_creater import get_model_creation_fn
from config import TrainConfig, ModelConfig
import training_util
from readers.vqa_naive_vqg_flt_cand_data_fetcher import AttentionDataReader as Reader
# from readers.semi_naive_data_fetcher import SemiReader as Reader
# from naive_ensemble_model import NaiveEnsembleModel as model_fn
from models.vqa_base import BaseModel as model_fn
tf.flags.DEFINE_string("model_type", "VQA-BaseNorm",
"Select a model to train.")
tf.flags.DEFINE_string("version", "v1",
"Dataset version used for training, v1 for VQA 1.0, v2 "
"for VQA 2.0.")
tf.flags.DEFINE_string("train_dir", "model/%s_%s_fltcand",
"Directory for saving and loading model checkpoints.")
tf.flags.DEFINE_integer("number_of_steps", 1000000, "Number of training steps.")
tf.flags.DEFINE_integer("log_every_n_steps", 10,
"Frequency at which loss and global step are logged.")
tf.flags.DEFINE_string("model_trainset", "kptrain",
"Which split is the model trained on")
tf.flags.DEFINE_boolean("use_var", True,
"Use variational VQA or VQA.")
FLAGS = tf.flags.FLAGS
tf.logging.set_verbosity(tf.logging.INFO)
def train():
_model_suffix = 'var_' if FLAGS.use_var else ''
model_config = ModelConfig()
training_config = TrainConfig()
# Get model
# model_fn = get_model_creation_fn(FLAGS.model_type)
# Create training directory.
train_dir = FLAGS.train_dir % (FLAGS.model_trainset, FLAGS.model_type)
do_counter_sampling = FLAGS.version == 'v2'
if not tf.gfile.IsDirectory(train_dir):
tf.logging.info("Creating training directory: %s", train_dir)
tf.gfile.MakeDirs(train_dir)
g = tf.Graph()
with g.as_default():
# Build the model.
model = model_fn(model_config,
phase='train')
model.build()
# Set up the learning rate
learning_rate = tf.constant(training_config.initial_learning_rate)
def _learning_rate_decay_fn(learn_rate, global_step):
return tf.train.exponential_decay(
learn_rate,
global_step,
decay_steps=training_config.decay_step,
decay_rate=training_config.decay_factor, staircase=False)
learning_rate_decay_fn = _learning_rate_decay_fn
train_op = tf.contrib.layers.optimize_loss(
loss=model.loss,
global_step=model.global_step,
learning_rate=learning_rate,
optimizer=training_config.optimizer,
clip_gradients=training_config.clip_gradients,
learning_rate_decay_fn=learning_rate_decay_fn)
# Set up the Saver for saving and restoring model checkpoints.
saver = tf.train.Saver(max_to_keep=training_config.max_checkpoints_to_keep)
# setup summaries
summary_op = tf.summary.merge_all()
# create reader
model_name = os.path.split(train_dir)[1]
reader = Reader(batch_size=64,
subset=FLAGS.model_trainset,
model_name=model_name,
feat_type='res5c',
version=FLAGS.version,
counter_sampling=do_counter_sampling,
model_suffix=_model_suffix)
# reader = Reader(batch_size=64,
# known_set='kprestval',
# unknown_set='kptrain', # 'kptrain'
# un_ratio=1,
# hide_label=False)
# Run training.
training_util.train(
train_op,
train_dir,
log_every_n_steps=FLAGS.log_every_n_steps,
graph=g,
global_step=model.global_step,
number_of_steps=FLAGS.number_of_steps,
init_fn=model.init_fn,
saver=saver, reader=reader,
feed_fn=model.fill_feed_dict)
def main(_):
with tf.Graph().as_default():
train()
if __name__ == '__main__':
tf.app.run()
|
[
"liufeng@seu.edu.cn"
] |
liufeng@seu.edu.cn
|
10aca41decc98ad1133655ae582bd8a46dab90e5
|
660c4ba43d91999872953ec27c6a72673e3239a0
|
/anrg/test_cleaning.py
|
a2e4168afcf7e7f5cbeb6843274636eed8205397
|
[] |
no_license
|
zaxliu/howmuchrainii
|
4a50c93be82b15bdab47043a7f5bf17224f277b3
|
3804ba4c6b412aadd400ab793d4245d041338fba
|
refs/heads/master
| 2021-01-09T21:55:47.185470
| 2016-03-15T04:52:07
| 2016-03-15T04:52:07
| 53,915,578
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,470
|
py
|
from cleaning import TargetThresholdFilter, LogPlusOne
import numpy as np
import pandas as pd
from sklearn.datasets import load_boston
from sklearn.pipeline import Pipeline
from sklearn.linear_model import LinearRegression
# # Check basic functionality
# # only works for pandas DataFrame and Series because we are modifying shape in-place
# X1 = pd.DataFrame(np.array([[1, 2], [3, 4], [5, 6], [7, 8]]))
# X2 = pd.DataFrame(np.array([[1, 2], [3, 4], [5, 6], [7, 8]]))
# y1 = pd.Series(np.array([1, 2, 3, 4]))
# y2 = pd.Series(np.array([1, 2, 3, 4]))
# ttf = TargetThresholdFilter(threshold=3)
# ttf.fit_transform(X1, y1)
# ttf.transform(X2, y2)
# print X1
# print y1
# print X2
# print y2
#
# # sklearn pipe compatability
# print "==================="
# X1 = pd.DataFrame(np.array([[1, 2], [3, 4], [5, 6], [7, 8]]))
# X2 = pd.DataFrame(np.array([[1, 2], [3, 4], [5, 6], [7, 8]]))
# y1 = pd.Series(np.array([1, 2, 3, 4]))
# y2 = pd.Series(np.array([1, 2, 3, 4]))
# steps = [('ttf', TargetThresholdFilter(threshold=1)), ('lr', LinearRegression())]
# pip = Pipeline(steps)
# pip.fit(X1, y1)
# print 'X1'
# print X1
# print 'y1'
# print y1
# print 'X2'
# print X2
# print 'predict2'
# print pip.predict(X2)
# log(1+y)
X1 = pd.DataFrame(np.array([[1, 2], [3, 4], [5, 6], [7, 8]]))
y1 = pd.Series(np.array([1, 2, 3, 4]))
y2 = pd.Series(np.array([1, 2, 3, 4]))
lpo = LogPlusOne()
lpo.fit_transform(X1, y1)
print X1
print y1
print lpo.transform(X1)
print lpo.metric(y2, y1)
|
[
"liujingchu@gmail.com"
] |
liujingchu@gmail.com
|
f83f9b3a5f09f2fd6c191894b8d4e267df122003
|
8be217fe977aa0bcd9e375c75b0fb522f5bf0101
|
/univaluedbinarytree965.py
|
d9f96d60ffccf5ba0e578cdfe667701be77a564f
|
[] |
no_license
|
blueones/LeetcodePractices
|
c63a5e773bebea17e988e8bb4962e012d7d402ba
|
194375ba0c07e420f420aafec98aede2f9f5d8fa
|
refs/heads/master
| 2021-07-14T14:21:55.389334
| 2021-01-24T22:13:21
| 2021-01-24T22:13:21
| 230,814,709
| 0
| 1
| null | 2020-02-25T02:58:04
| 2019-12-29T23:18:25
|
Python
|
UTF-8
|
Python
| false
| false
| 1,585
|
py
|
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def isUnivalTree(self, root: TreeNode) -> bool:
def dfs(node,value):
if node == None:
return True
if node.val == value:
return dfs(node.left,value) and dfs(node.right,value)
else:
return False
if root == None:
return True
else:
valueN = root.val
return dfs(root,valueN)
class Solution2:
    # True recursion: check that for every node, node.val == node.left.val == node.right.val
def isUnivalTree(self,root):
if root == None:
return True
if root.right:
if root.right.val!= root.val:
return False
if root.left:
if root.left.val!=root.val:
return False
return self.isUnivalTree(root.left) and self.isUnivalTree(root.right)
class Solution3:
#BFS
def isUnivalTree(self,root):
def bfs(node,valueN):
queueList = [node]
while queueList != []:
currentN = queueList.pop(0)
if currentN != None:
if currentN.val != valueN:
return False
queueList.append(currentN.left)
queueList.append(currentN.right)
return True
if root == None:
return True
return bfs(root,root.val)
|
[
"yiq.shang@gmail.com"
] |
yiq.shang@gmail.com
|
068b03e6832ce421b83bd23c56f5f42c8e3c05c0
|
9a1f105ce6385633e7da47fb13eb2e8db66dbddb
|
/awswrangler/__metadata__.py
|
8ec6474ad28c7b7000ec80205d2f87d6c15e164d
|
[
"Apache-2.0"
] |
permissive
|
datacoral/aws-data-wrangler
|
c47e2d45f2e643b62479f6b0b8f5fdbd8367af9b
|
bb9eb52baf374c616289daa932dc855dcd384994
|
refs/heads/master
| 2021-05-27T04:27:23.700657
| 2020-05-18T13:45:30
| 2020-05-18T13:45:30
| 254,217,334
| 0
| 0
| null | 2020-04-08T22:45:28
| 2020-04-08T22:45:27
| null |
UTF-8
|
Python
| false
| false
| 266
|
py
|
"""Metadata Module.
Source repository: https://github.com/awslabs/aws-data-wrangler
Documentation: https://aws-data-wrangler.readthedocs.io/
"""
__title__ = "awswrangler"
__description__ = "Pandas on AWS."
__version__ = "1.1.2"
__license__ = "Apache License 2.0"
|
[
"igorborgest@gmail.com"
] |
igorborgest@gmail.com
|
1de069266182493d06adf2a86d6e505eff242889
|
2b5dfacdb7389aefff64c67fac863e3f82d3723e
|
/source/tygame-sdk/src/tysdk/entity/paythird/paycattuyouweixin.py
|
049b97dab1b518acc7e1109f6d54bd64be7e7a9e
|
[] |
no_license
|
hi-noikiy/hall0
|
54ef76c715f7ac7fec4c9ca175817e12f60fbd6a
|
21ea94c5b048bc611fb1557ac0b6e3ef4fdbbc09
|
refs/heads/master
| 2020-04-08T21:58:55.239106
| 2018-01-15T14:58:32
| 2018-01-15T14:58:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 920
|
py
|
# -*- coding=utf-8 -*-
import copy
from tyframework.context import TyContext
class TuYouPayCatTuyouWeixin(object):
@classmethod
def charge_data(cls, chargeinfo):
try:
del chargeinfo['chargeType']
except:
pass
more_categories = TyContext.Configure.get_global_item_json('more_categories_tuyou_weixin')
charge_cats = copy.deepcopy(more_categories)
price = chargeinfo['chargeTotal']
if price > 500:
for i in xrange(len(charge_cats)):
if 'CAT_PHONECHARGE_CARD' == charge_cats[i]['category']:
del charge_cats[i]
break
for cat in charge_cats:
# cat['desc'] = ''
cat['summary'] = chargeinfo['diamondName']
if 'ali' in cat['paytype']:
cat['tag'] = 'TAG_CHAOZHI'
chargeinfo['chargeCategories'] = charge_cats
|
[
"cg@ibenxi.com"
] |
cg@ibenxi.com
|
8de1cdc5e429d2262b6e0aa7345e4f26efe3ec7e
|
ff68cde9ba7196dee310d8e0a62810cbaf285e08
|
/fresh_shop/user/views.py
|
d5adde24c627a4552ac824bd9fe75c80e537c6a1
|
[] |
no_license
|
guilinxians/fresh_shop
|
1fc3d269212652f229c51385ca654c94ce0c580e
|
8e6692c9737643bc7202ece9054a95cde88435ab
|
refs/heads/master
| 2020-04-17T21:41:46.670901
| 2019-01-22T09:27:37
| 2019-01-22T09:27:37
| 166,961,820
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,389
|
py
|
from django.contrib.auth.hashers import make_password
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.shortcuts import render
from user.forms import RegisterForm, LoginForm, AddressForm
from user.models import User, UserAddress
def register(request):
if request.method == 'GET':
return render(request, 'register.html')
if request.method == 'POST':
        # Validate the input with the form
form = RegisterForm(request.POST)
if form.is_valid():
            # The account is not already in the database, the password matches its confirmation, and the email format is valid
username = form.cleaned_data['user_name']
password = make_password(form.cleaned_data['pwd'])
email = form.cleaned_data['email']
User.objects.create(username=username,
password=password,
email=email)
return HttpResponseRedirect(reverse('user:login'))
else:
            # Collect the error messages from the failed form validation
errors = form.errors
return render(request, 'register.html', {'errors': errors})
def login(request):
if request.method == 'GET':
return render(request, 'login.html')
if request.method == 'POST':
form = LoginForm(request.POST)
if form.is_valid():
            # The username exists and the password matches
username = form.cleaned_data['username']
user = User.objects.filter(username=username).first()
request.session['user_id'] = user.id
return HttpResponseRedirect(reverse('goods:index'))
else:
errors = form.errors
return render(request, 'login.html', {'errors': errors})
def logout(request):
if request.method == 'GET':
        # Remove the user_id key from the session
del request.session['user_id']
        # Remove the goods info from the session
if request.session.get('goods'):
del request.session['goods']
return HttpResponseRedirect(reverse('goods:index'))
def user_site(request):
if request.method == 'GET':
user_id = request.session.get('user_id')
user_address = UserAddress.objects.filter(user_id=user_id)
activate = 'site'
return render(request, 'user_center_site.html', {'user_address':user_address, 'activate': activate})
if request.method == 'POST':
form = AddressForm(request.POST)
if form.is_valid():
username = form.cleaned_data['username']
address = form.cleaned_data['address']
postcode = form.cleaned_data['postcode']
mobile = form.cleaned_data['mobile']
user_id = request.session.get('user_id')
UserAddress.objects.create(user_id=user_id,
address=address,
signer_name=username,
signer_mobile=mobile,
signer_postcode=postcode)
return HttpResponseRedirect(reverse('user:user_site'))
else:
errors = form.errors
return render(request, 'user_center_site.html', {'errors': errors})
def user_info(request):
if request.method == 'GET':
activate = 'info'
return render(request, 'user_center_info.html', {'activate':activate})
|
[
"you@example.com"
] |
you@example.com
|
9406135cec1ba7dadac85f8e9357dd6eec506674
|
2565b3edfee1dbacbe0f20440337641c99be3356
|
/czaSpider/dump/socket/client_for_test2.py
|
c22844fec9f64084b07f410d9e105e11fc23102f
|
[] |
no_license
|
hi-noikiy/ScrapyLearning
|
9f2cd3c7a1404ec06f4db9c958542b36abbeba09
|
1dd4c946f133d461dfe3fe21e31e5ba57adfd18a
|
refs/heads/master
| 2022-04-27T11:24:28.748495
| 2020-04-28T00:30:34
| 2020-04-28T00:30:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 422
|
py
|
import socket, json, time
HOST = '127.0.0.1' ##
PORT = 8022
tcpCliSock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
tcpCliSock.connect((HOST, PORT))
tcpCliSock.send(json.dumps({'cookie': 'test', 'user':'cza'}).encode())
while True:
print(tcpCliSock.recv(1024).decode())
    # message = input('Enter the chat message')
# tcpCliSock.send(json.dumps({'state': 11, 'message': message, 'to': 'test'}).encode())
|
[
"czasg0.0"
] |
czasg0.0
|
a49cbab98aa7a3ff2cb68295ad3348109ef1e1bd
|
7a454567a3de8b9cc399c73320ac803284cbf3ca
|
/ch17/02_imag.py
|
645f4b643476631b8ff2af991aaca4aea0755a64
|
[] |
no_license
|
uberman4740/Practical-Deep-Reinforcement-Learning
|
86fb84fca52a0ad46d92cb681f95768689b99519
|
4e2b22ecb10485ddb12f910959ee32718cf9d124
|
refs/heads/master
| 2020-03-10T12:23:51.686942
| 2018-04-12T19:46:08
| 2018-04-12T19:46:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,525
|
py
|
#!/usr/bin/env python3
import os
import gym
import ptan
import argparse
import numpy as np
from tensorboardX import SummaryWriter
import torch
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
from lib import common, i2a
LEARNING_RATE = 5e-4
NUM_ENVS = 16
BATCH_SIZE = 64
SAVE_EVERY_BATCH = 1000
OBS_WEIGHT = 10.0
REWARD_WEIGHT = 1.0
def get_obs_diff(prev_obs, cur_obs):
prev = np.array(prev_obs)[-1]
cur = np.array(cur_obs)[-1]
prev = prev.astype(np.float32) / 255.0
cur = cur.astype(np.float32) / 255.0
return cur - prev
def iterate_batches(envs, net, cuda=False):
act_selector = ptan.actions.ProbabilityActionSelector()
mb_obs = np.zeros((BATCH_SIZE, ) + common.IMG_SHAPE, dtype=np.uint8)
mb_obs_next = np.zeros((BATCH_SIZE, ) + i2a.EM_OUT_SHAPE, dtype=np.float32)
mb_actions = np.zeros((BATCH_SIZE, ), dtype=np.int32)
mb_rewards = np.zeros((BATCH_SIZE, ), dtype=np.float32)
obs = [e.reset() for e in envs]
total_reward = [0.0] * NUM_ENVS
total_steps = [0] * NUM_ENVS
batch_idx = 0
done_rewards = []
done_steps = []
while True:
obs_v = ptan.agent.default_states_preprocessor(obs, cuda=cuda)
logits_v, values_v = net(obs_v)
probs_v = F.softmax(logits_v)
probs = probs_v.data.cpu().numpy()
actions = act_selector(probs)
for e_idx, e in enumerate(envs):
o, r, done, _ = e.step(actions[e_idx])
mb_obs[batch_idx] = obs[e_idx]
mb_obs_next[batch_idx] = get_obs_diff(obs[e_idx], o)
mb_actions[batch_idx] = actions[e_idx]
mb_rewards[batch_idx] = r
total_reward[e_idx] += r
total_steps[e_idx] += 1
batch_idx = (batch_idx + 1) % BATCH_SIZE
if batch_idx == 0:
yield mb_obs, mb_obs_next, mb_actions, mb_rewards, done_rewards, done_steps
done_rewards.clear()
done_steps.clear()
if done:
o = e.reset()
done_rewards.append(total_reward[e_idx])
done_steps.append(total_steps[e_idx])
total_reward[e_idx] = 0.0
total_steps[e_idx] = 0
obs[e_idx] = o
pass
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--cuda", default=False, action="store_true", help="Enable cuda")
parser.add_argument("-n", "--name", required=True, help="Name of the run")
parser.add_argument("-m", "--model", required=True, help="File with model to load")
args = parser.parse_args()
saves_path = os.path.join("saves", "02_env_" + args.name)
os.makedirs(saves_path, exist_ok=True)
envs = [common.make_env() for _ in range(NUM_ENVS)]
writer = SummaryWriter(comment="-02_env_" + args.name)
net = common.AtariA2C(envs[0].observation_space.shape, envs[0].action_space.n)
net_em = i2a.EnvironmentModel(envs[0].observation_space.shape, envs[0].action_space.n)
net.load_state_dict(torch.load(args.model, map_location=lambda storage, loc: storage))
if args.cuda:
net.cuda()
net_em.cuda()
print(net_em)
optimizer = optim.Adam(net_em.parameters(), lr=LEARNING_RATE)
step_idx = 0
best_loss = np.inf
with ptan.common.utils.TBMeanTracker(writer, batch_size=100) as tb_tracker:
for mb_obs, mb_obs_next, mb_actions, mb_rewards, done_rewards, done_steps in iterate_batches(envs, net, cuda=args.cuda):
if len(done_rewards) > 0:
m_reward = np.mean(done_rewards)
m_steps = np.mean(done_steps)
print("%d: done %d episodes, mean reward=%.2f, steps=%.2f" % (
step_idx, len(done_rewards), m_reward, m_steps))
tb_tracker.track("total_reward", m_reward, step_idx)
tb_tracker.track("total_steps", m_steps, step_idx)
obs_v = Variable(torch.from_numpy(mb_obs))
obs_next_v = Variable(torch.from_numpy(mb_obs_next))
actions_t = torch.LongTensor(mb_actions.tolist())
rewards_v = Variable(torch.from_numpy(mb_rewards))
if args.cuda:
obs_v = obs_v.cuda()
actions_t = actions_t.cuda()
obs_next_v = obs_next_v.cuda()
rewards_v = rewards_v.cuda()
optimizer.zero_grad()
out_obs_next_v, out_reward_v = net_em(obs_v.float()/255, actions_t)
loss_obs_v = F.mse_loss(out_obs_next_v, obs_next_v)
loss_rew_v = F.mse_loss(out_reward_v, rewards_v)
loss_total_v = OBS_WEIGHT * loss_obs_v + REWARD_WEIGHT * loss_rew_v
loss_total_v.backward()
optimizer.step()
tb_tracker.track("loss_em_obs", loss_obs_v, step_idx)
tb_tracker.track("loss_em_reward", loss_rew_v, step_idx)
tb_tracker.track("loss_em_total", loss_total_v, step_idx)
loss = loss_total_v.data.cpu().numpy()
if loss < best_loss:
print("Best loss updated: %.4e -> %.4e" % (best_loss, loss))
best_loss = loss
fname = os.path.join(saves_path, "best_%.4e_%05d.dat" % (loss, step_idx))
torch.save(net_em.state_dict(), fname)
step_idx += 1
if step_idx % SAVE_EVERY_BATCH == 0:
fname = os.path.join(saves_path, "em_%05d_%.4e.dat" % (step_idx, loss))
torch.save(net_em.state_dict(), fname)
|
[
"max.lapan@gmail.com"
] |
max.lapan@gmail.com
|
b116d5975037b3274e9f1ee7859e7ef06fcd7eea
|
b501a5eae1018c1c26caa96793c6ee17865ebb2d
|
/data_persistence_and_exchange/sqlite3/sqlite3_argument_named.py
|
3a6f8243124bee44d70b39d7bcb3084eb7a03c3b
|
[] |
no_license
|
jincurry/standard_Library_Learn
|
12b02f9e86d31ca574bb6863aefc95d63cc558fc
|
6c7197f12747456e0f1f3efd09667682a2d1a567
|
refs/heads/master
| 2022-10-26T07:28:36.545847
| 2018-05-04T12:54:50
| 2018-05-04T12:54:50
| 125,447,397
| 0
| 1
| null | 2022-10-02T17:21:50
| 2018-03-16T01:32:50
|
Python
|
UTF-8
|
Python
| false
| false
| 567
|
py
|
import sqlite3
import sys
db_filename = 'todo.db'
project_name = sys.argv[1]
with sqlite3.connect(db_filename) as conn:
cursor = conn.cursor()
query = """
select id, priority, details, status, deadline from task
where project = :project_name
order by deadline, priority
"""
cursor.execute(query, {'project_name': project_name})
for row in cursor.fetchall():
task_id, priority, details, status, deadline = row
print('{:2d}[{:d}] {:<25}[{:<8}]({})'.format(
task_id, priority, details, status, deadline))
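# Hypothetical setup helper (added for illustration; not part of the original script):
# creates a minimal 'task' table with the columns the query above expects, plus one
# sample row, so the script can be tried against a fresh todo.db. The column types
# and the sample values are assumptions inferred from the query, not taken from an
# original schema.
def create_sample_task_table(filename=db_filename, project='pymotw'):
    with sqlite3.connect(filename) as conn:
        conn.execute("""
            create table if not exists task (
                id       integer primary key,
                priority integer,
                details  text,
                status   text,
                deadline date,
                project  text
            )
        """)
        conn.execute(
            "insert into task (priority, details, status, deadline, project) "
            "values (1, 'write up results', 'active', '2018-06-01', ?)",
            (project,))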
|
[
"jintao422516@gmail.com"
] |
jintao422516@gmail.com
|
49f1da2b299d4c6d36fdfab3fc635f8ea386801c
|
46ac0965941d06fde419a6f216db2a653a245dbd
|
/sdks/python/test/test_BillingResourceUsage.py
|
2c2f2c05a4b8f5ccc8c439600f882e25773cb0bd
|
[
"MIT",
"Unlicense"
] |
permissive
|
b3nab/appcenter-sdks
|
11f0bab00d020abb30ee951f7656a3d7ed783eac
|
bcc19c998b5f648a147f0d6a593dd0324e2ab1ea
|
refs/heads/master
| 2022-01-27T15:06:07.202852
| 2019-05-19T00:12:43
| 2019-05-19T00:12:43
| 187,386,747
| 0
| 3
|
MIT
| 2022-01-22T07:57:59
| 2019-05-18T17:29:21
|
Python
|
UTF-8
|
Python
| false
| false
| 958
|
py
|
# coding: utf-8
"""
App Center Client
Microsoft Visual Studio App Center API # noqa: E501
OpenAPI spec version: preview
Contact: benedetto.abbenanti@gmail.com
Project Repository: https://github.com/b3nab/appcenter-sdks
"""
from __future__ import absolute_import
import unittest
import appcenter_sdk
from BillingResourceUsage.clsBillingResourceUsage import BillingResourceUsage # noqa: E501
from appcenter_sdk.rest import ApiException
class TestBillingResourceUsage(unittest.TestCase):
"""BillingResourceUsage unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testBillingResourceUsage(self):
"""Test BillingResourceUsage"""
# FIXME: construct object with mandatory attributes with example values
# model = appcenter_sdk.models.clsBillingResourceUsage.BillingResourceUsage() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
[
"b3nab@users.noreply.github.com"
] |
b3nab@users.noreply.github.com
|
8afca52e50ab53f3827973dcd8962b2d96ed9f4b
|
adf195dd2740bf38c50e4182d6c262518f5a71f2
|
/ugestor_dto/EmpleadoDTO.py
|
e97d6c55ad9431f52b2f7bd8d19c359eada3a682
|
[] |
no_license
|
jhonex1234/ugestor
|
665706a895d5895b6ab08347c63543c3f14614dc
|
1d2a8c04c2feb772426666ccf64c003f4014a76f
|
refs/heads/master
| 2020-07-10T18:36:56.378511
| 2019-09-05T04:12:10
| 2019-09-05T04:12:10
| 204,337,475
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 813
|
py
|
# -*- coding: utf-8 -*-
from wtforms import Form
from wtforms import StringField, IntegerField
from wtforms import validators
from wtforms.fields.html5 import DateField
from wtforms.fields import SelectField
from wtforms import BooleanField
from wtforms.validators import DataRequired
from com_dao import ConnectionDB
def validateNotNUll(form, field):
if len(field.data) <= 0:
raise validators.ValidationError('El campo No debe ser Nulo')
class EmpleadoDTO(Form):
idpersona = IntegerField('', [validateNotNUll])
salario = IntegerField('', [validateNotNUll])
cargo = SelectField(label='Cargo', choices=[('Seleccione','Seleccione'),('Gerente','Gerente') ,('Auxiliar','Auxiliar'),('Contratista','contratista')])
fechaIngreso = DateField('Fecha de Registro', [])
|
[
"efnaranjo6@misena.edu.co"
] |
efnaranjo6@misena.edu.co
|
d600e630f8fd18cc349b5a1786e92e29c475071e
|
1b764845ceab76ab91d12a4a067cb49fa3296001
|
/pyfirstweek/第一课时/栈.py
|
f69a62e8bd29bd81bcb4a6ad8c7dbcaf49691dd9
|
[
"Apache-2.0"
] |
permissive
|
mychristopher/test
|
c5e11aef178d025d25d54afde4fb836a18001a23
|
9977d36bab3fcc47f0e1dd42bbf5a99b39112a2f
|
refs/heads/master
| 2023-07-31T14:58:22.303817
| 2020-09-05T04:26:07
| 2020-09-05T04:26:07
| 276,136,931
| 0
| 0
|
Apache-2.0
| 2023-07-14T16:39:16
| 2020-06-30T15:21:29
|
HTML
|
UTF-8
|
Python
| false
| false
| 408
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Simulate a stack structure
stack =[]
# Push (store data onto the stack)
stack.append("A")
print(stack)
stack.append("b")
print(stack)
stack.append("c")
print(stack)
# Pop (take data off the stack): last in, first out
res1 = stack.pop()
print("res1 = ",res1)
print(stack)
res2 = stack.pop()
print("res2 = ",res2)
print(stack)
res3 = stack.pop()
print("res3 = ",res3)
print(stack)
|
[
"1366254420@qq.com"
] |
1366254420@qq.com
|
5be919a211ae050b93ec8ce0dfcd2335ca02457a
|
4616331c3763ec13393f3b79dbddbb568f1c6008
|
/pattern/web/api.py
|
1ab50632fd8a5c3acf32a29bdfbf7927ac32ee3e
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
owlguy/pattern
|
b6df40f2acb0a30e5a3956fc814481532d462ff9
|
d9ba1a0ed307402f4a09d495f35b5426c124e391
|
refs/heads/master
| 2021-01-18T08:56:56.258275
| 2013-06-20T22:17:17
| 2013-06-20T22:17:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,166
|
py
|
#--- API LICENSE CONFIGURATION -----------------------------------------------------------------------
# Default license keys used by pattern.web.SearchEngine to contact different API's.
# Google and Yahoo are paid services for which you need a personal license + payment method.
# The default Google license is for testing purposes (= 100 daily queries).
# Wikipedia, Twitter and Facebook are free.
# Bing, Flickr and ProductsWiki use licenses shared among all Pattern users.
license = {}
license["Google"] = \
"AIzaSyBxe9jC4WLr-Rry_5OUMOZ7PCsEyWpiU48"
license["Bing"] = \
"VnJEK4HTlntE3SyF58QLkUCLp/78tkYjV1Fl3J7lHa0="
license["Yahoo"] = \
("", "") # OAuth (key, secret)
license["Wikipedia"] = \
None
license["Twitter"] = (
"p7HUdPLlkKaqlPn6TzKkA", # OAuth (key, secret, token)
"R7I1LRuLY27EKjzulutov74lKB0FjqcI2DYRUmsu7DQ", (
"14898655-TE9dXQLrzrNd0Zwf4zhK7koR5Ahqt40Ftt35Y2qY",
"q1lSRDOguxQrfgeWWSJgnMHsO67bqTd5dTElBsyTM"))
license["Facebook"] = \
"332061826907464|jdHvL3lslFvN-s_sphK1ypCwNaY"
license["Flickr"] = \
"787081027f43b0412ba41142d4540480"
license["Products"] = \
"64819965ec784395a494a0d7ed0def32"
|
[
"tom@organisms.be"
] |
tom@organisms.be
|
c3f2f4fa4864008fc97bb05aa002d2582d844489
|
7037e16b5ee5f2cdff8f759d4ffcbed4cad3d3f5
|
/more-about-strings/isOnlyLikeMethods.py
|
e9224f71d2824d1adbbb26e28755451c069032da
|
[] |
no_license
|
michalkasiarz/automate-the-boring-stuff-with-python
|
1fe0d3af7c5e57746d2d37aa110a5f1bd45ecf30
|
8fdd4c6945f116c3889000f2ad7357cacdf6ed16
|
refs/heads/master
| 2021-05-17T00:19:31.454433
| 2020-04-03T12:38:12
| 2020-04-03T12:38:12
| 250,532,982
| 6
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,812
|
py
|
# Is only-like methods
import pyperclip
text = """
Then that knight, whom neither captivity nor wounds nor grief nor the
terrible Burdabut could bring down, was brought down by happiness. His
hands dropped at his side, great drops of sweat came out on his
forehead; he fell on his knees, covered his face with his hands, and
leaning his head against the wall of the ravine, remained in silence,
evidently thanking God."""
only_whitespace = " "
only_numbers = "21143"
only_text = "Hello"
title = "The Title Style"
# isalpha method
print(only_text.isalpha()) # True
print(text.isalpha()) # False
# isspace method
print(only_whitespace.isspace()) # True
print("The surely are some spaces".isspace()) # True
# isdecimal method
print(only_numbers.isdecimal()) # True, although it is a String
# istitle method
print(title.istitle()) # True
# isalnum method
print("razdwatrzy123".isalnum()) # True
# startswith and endswith methods
print("Hello World".startswith("Hello")) # True
print("Hello World".endswith("World")) # True
# join method
to_be_joined = ["cats", "dogs", "hot-dogs", "avengers"]
joined_text = ", ".join(to_be_joined)
print(joined_text)
# split method
print(text.split())
# ljust and rjust method
print("Hello".rjust(100, "-"))
print("Hello".ljust(100, "*"))
# center method
print("Hello".center(100, "="))
# strip, rstrip, lstrip methods
rjusted = "Hello".rjust(30, "/")
print(rjusted)
rjusted_stripped = rjusted.strip("/")
print(rjusted_stripped)
text_with_spaces = " Hello "
print(text_with_spaces)
print(text_with_spaces.strip())
# replace method
spam = "Hello there!"
spam = spam.replace("e", "XYZ")
print(spam)
# pyperclip module
pyperclip.copy(spam)
copied_stuff = pyperclip.paste()
print(copied_stuff)
|
[
"michal.kasiarz@post.com"
] |
michal.kasiarz@post.com
|
aef68596d8afec518ee50b95d443faae27762db6
|
6ed01f4503fc9de234a561c945adff7cf4b1c81b
|
/uconn/migration_tools.py
|
d42f9e5ff91b758c8a1d07ddb196b820ab3cb806
|
[] |
no_license
|
ostwald/python-lib
|
b851943c913a68424a05ce3c7b42878ff9519f68
|
9acd97ffaa2f57b3e9e632e1b75016549beb29e5
|
refs/heads/master
| 2021-10-28T06:33:34.156095
| 2021-10-21T23:54:49
| 2021-10-21T23:54:49
| 69,060,616
| 0
| 1
| null | 2018-06-21T16:05:30
| 2016-09-23T21:04:46
|
Roff
|
UTF-8
|
Python
| false
| false
| 4,503
|
py
|
"""
Collection config and Collection Record Mover
We are migrating all DLESE collections ("adn" and "dlese_anno") to NSDL.
The metadata and dcs_data records can be moved by hand.
This file contains tools to migrate the other collection components:
- collection config file
- collection record
NOTE: this module has nothing to do with the CollectionOfCollections
"""
import sys, os, time, shutil
from masterCollection import MasterCollection
from bscs.collection_tool import CollectionTool
from JloXml import DleseCollectRecord, XmlUtils
dowrites = 0
dlese_base = '/Users/ostwald/Desktop/DLESE_MIGRATION/DLESE' #DLESE
nsdl_base = '/Users/ostwald/Desktop/DLESE_MIGRATION/NSDL' # NSDL
dlese_records = os.path.join (dlese_base, 'records') #DLESE
nsdl_records = os.path.join (nsdl_base, 'records') #NSDL
dlese_collection_configs = os.path.join(dlese_base, 'dcs_conf/collections')
nsdl_collection_configs = os.path.join(nsdl_base, 'dcs_conf/collections')
def get_nsdl_collection_keys():
nsdl_collection_records = get_nsdl_collection_records()
return nsdl_collection_records.getValues('key')
def get_nsdl_collection_records():
"""
returns MasterCollection instance containing NSDL collection records
"""
nsdl_collect_dir = os.path.join (nsdl_records, 'dlese_collect', 'collect')
return MasterCollection(nsdl_collect_dir, DleseCollectRecord)
def findDleseCollectionRecord(field, value):
"""
returns first DleseCollectRecord having the specified value for specified field
"""
dlese_collect_dir = os.path.join (dlese_records, 'dlese_collect', 'collect')
for filename in filter (lambda x:x.endswith('xml'), os.listdir(dlese_collect_dir)):
path = os.path.join (dlese_collect_dir, filename)
rec = DleseCollectRecord (path=path)
if (rec.get(field) == value):
return rec
nsdl_keys = get_nsdl_collection_keys()
def copyDleseCollectionRecord(key):
"""
copies DLESE collection record for specified key
into NSDL collection records
"""
record = findDleseCollectionRecord ('key', key)
if not record:
raise KeyError, 'deleseCollectionRecord not found for %s' % key
#now we want to rename the record
record.setId(key)
# create the dest path in nsdl collections
nsdl_collect_dir = os.path.join (nsdl_records, 'dlese_collect', 'collect')
dest = os.path.join (nsdl_collect_dir, key+'.xml')
# check to see if file exists
if os.path.exists(dest):
raise KeyError, "nsdl collection record already exists for %s" % key
# check to see if collection key exists!!
if key in nsdl_keys:
raise KeyError, "nsdl key already exists for %s" % key
if dowrites:
record.write (path=dest)
print "wrote to", dest
else:
print 'Would have written record to %s' % dest
# print record
# for EACH collection
# find the collection record
# copy it into nsdl repo
# find collection config
# copy it into nsdl collection config
def findCollectionConfig (key):
"""
finds DLESE collection config for given key
"""
filename = key+'.xml'
path = os.path.join (dlese_collection_configs, filename)
if not os.path.exists(path):
raise KeyError, "dlese collection config not found for %s" % path
return path
def moveCollectionConfig(key):
"""
copies DLESE collection config for given key into NSDL collection configs
"""
filename = key+'.xml'
collection_config = findCollectionConfig (key)
newpath = os.path.join (nsdl_collection_configs, filename)
if os.path.exists(newpath):
raise KeyError, 'nsdl collection config already exists for %s' % key
if dowrites:
return shutil.copyfile (collection_config, newpath)
else:
print 'Would have copied %s to %s' % (filename, newpath)
# copy collection dir into dest rep
# copy collection dcs_data dir into dest rep
def testGet_nsdl_collection_keys():
for key in get_nsdl_collection_keys():
print '-', key
def testFindCollectionRecord():
foo = findDleseCollectionRecord('key', 'dcc')
if foo:
print foo
else:
print 'not found'
def main():
"""
for each collection key for adn and dlese_anno xmlFormats,
- copy the DLESE collection record to NSDL
- copy the DLESE collection config file to NSDL
"""
for xmlFormat in ['adn', 'dlese_anno']:
print '\n', xmlFormat
dlese_format_dir = os.path.join (dlese_records, xmlFormat)
for key in os.listdir(dlese_format_dir):
print '-', key
copyDleseCollectionRecord(key)
moveCollectionConfig(key)
if __name__ == '__main__':
# moveCollectionConfig ("dcc")
# copyDleseCollectionRecord("dcc")
# testGet_nsdl_collection_keys()
main()
|
[
"ostwald@ucar.edu"
] |
ostwald@ucar.edu
|
af49827c4a4802709049af532cd2171713a24035
|
2655d38647240d8698e2d8d92d81fdc474e6b8c7
|
/attention-analysis/naacl2019/poster_conll/samples/devow.py
|
bbf073bcc01b11c411c8128c35d660ce04695b90
|
[] |
no_license
|
ufal/lsd
|
2cc72b2e86c170967f33110030a8dd22e6e25591
|
1fe8d68c97b7efb6889fb1ca1fceec1c0cb139e8
|
refs/heads/master
| 2023-06-19T19:02:22.020684
| 2020-05-05T12:21:37
| 2020-05-05T12:21:37
| 127,886,734
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 604
|
py
|
#!/usr/bin/env python3
#coding: utf-8
import sys
from unidecode import unidecode
for line in sys.stdin:
line = line.strip()
forms = line.split(' ')
forms_devow = list()
for form in forms:
form = unidecode(form)
form = form.lower()
form = form.replace("a", "")
form = form.replace("e", "")
form = form.replace("i", "")
form = form.replace("o", "")
form = form.replace("u", "")
form = form.replace("y", "")
if form == "":
form = "_"
forms_devow.append(form)
print(*forms_devow, sep=' ')
|
[
"rosa@ufal.mff.cuni.cz"
] |
rosa@ufal.mff.cuni.cz
|
139590a0c54be5f68071cae5f2c2a249ccf0060e
|
3de2a746243ad1cb000994a06a0f9699db9a901f
|
/jsc2019b.py
|
89f0910336866197d45cfa3f3d1332e18c433413
|
[] |
no_license
|
takumi152/atcoder
|
71d726ffdf2542d8abac0d9817afaff911db7c6c
|
ebac94f1227974aa2e6bf372e18605518de46441
|
refs/heads/master
| 2022-10-30T12:14:41.742596
| 2022-09-29T19:49:32
| 2022-09-29T19:49:32
| 181,502,518
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 498
|
py
|
mod = 1000000007
def main():
n, k = map(int, input().split())
a = list(map(int, input().split()))
ans = 0
for i in range(n):
for j in range(n):
if a[i] > a[j]:
inv = 0
if (i < j):
inv = (((k + 1) * k) // 2) % mod
else:
inv = ((k * (k - 1)) // 2) % mod
ans = (ans + inv) % mod
print(ans)
if __name__ == '__main__':
main()
|
[
"takumi152@hotmail.com"
] |
takumi152@hotmail.com
|
fc392e3854daabe6445ca4420543b0deb2a18396
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/6DppMcokmzJ3TtNNB_18.py
|
4f78b679d2153a5d1ab0cf45e1470064d88fea3d
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 756
|
py
|
"""
Create a function which takes every letter in every word, and puts it in
alphabetical order. Note how the **original word lengths must stay the same**.
### Examples
true_alphabetic("hello world") ➞ "dehll loorw"
true_alphabetic("edabit is awesome") ➞ "aabdee ei imosstw"
true_alphabetic("have a nice day") ➞ "aaac d eehi nvy"
### Notes
* All sentences will be in lowercase.
* No punctuation or numbers will be included in the **Tests**.
"""
def true_alphabetic(txt):
s = ''
for x in txt:
if x != ' ':
s += x
s = sorted(s)
ans = ''
j = 0
for x in txt:
if x == ' ':
ans += x
else:
ans += s[j]
j += 1
return ans
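# Hypothetical quick check (added for illustration; not part of the original file):
# exercises the three examples from the docstring above.
if __name__ == '__main__':
    assert true_alphabetic("hello world") == "dehll loorw"
    assert true_alphabetic("edabit is awesome") == "aabdee ei imosstw"
    assert true_alphabetic("have a nice day") == "aaac d eehi nvy"
    print("all docstring examples pass")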
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
a1a946620d9626abff33659e812cd8405867a69b
|
0fa55a3150ebda33cf252e1915b0d3a1fd9474b2
|
/setup.py
|
089382e864463f497a51304a51e2d89a68848fdc
|
[
"MIT"
] |
permissive
|
michalc/lowhaio-aws-sigv4
|
8dff5e58faceae4b892e8fa29f1331cd3378bbcf
|
c802126f2ce13cb88e7f695b86484637840fd464
|
refs/heads/master
| 2020-05-24T22:45:13.397252
| 2019-06-15T18:08:16
| 2019-06-15T18:08:16
| 187,502,903
| 0
| 0
|
MIT
| 2019-05-19T18:17:33
| 2019-05-19T16:44:34
|
Python
|
UTF-8
|
Python
| false
| false
| 828
|
py
|
import setuptools
def long_description():
with open('README.md', 'r') as file:
return file.read()
setuptools.setup(
name='lowhaio_aws_sigv4',
version='0.0.4',
author='Michal Charemza',
author_email='michal@charemza.name',
description='AWS Signature Version 4 signing for lowhaio',
long_description=long_description(),
long_description_content_type='text/markdown',
url='https://github.com/michalc/lowhaio-aws-sigv4',
py_modules=[
'lowhaio_aws_sigv4',
],
python_requires='>=3.6.0',
test_suite='test',
tests_require=[
'lowhaio~=0.0.61',
],
classifiers=[
'Programming Language :: Python :: 3',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Framework :: AsyncIO',
],
)
|
[
"michal@charemza.name"
] |
michal@charemza.name
|
9bc366600d2d561b6f0b040a3d7c62a0d11fb15f
|
e8bf00dba3e81081adb37f53a0192bb0ea2ca309
|
/domains/explore/problems/training/problem274_EE.py
|
c63ddf6da5997ae0f79a3d1c91553002a27dcf11
|
[
"BSD-3-Clause"
] |
permissive
|
patras91/rae_release
|
1e6585ee34fe7dbb117b084df982ca8a8aed6795
|
0e5faffb7eb732fdb8e3bbf2c6d2f2cbd520aa30
|
refs/heads/master
| 2023-07-13T20:09:41.762982
| 2021-08-11T17:02:58
| 2021-08-11T17:02:58
| 394,797,515
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,694
|
py
|
__author__ = 'patras'
from domain_exploreEnv import *
from timer import DURATION
from state import state, rv
DURATION.TIME = {
'survey': 5,
'monitor': 5,
'screen': 5,
'sample': 5,
'process': 5,
'fly': 3,
'deposit': 1,
'transferData': 1,
'take': 2,
'put': 2,
'move': 10,
'charge': 5,
'negotiate': 5,
'handleAlien': 5,
}
DURATION.COUNTER = {
'survey': 5,
'monitor': 5,
'screen': 5,
'sample': 5,
'process': 5,
'fly': 3,
'deposit': 1,
'transferData': 1,
'take': 2,
'put': 2,
'move': 10,
'charge': 5,
'negotiate': 5,
'handleAlien': 5,
}
rv.TYPE = {'e1': 'survey', 'e2': 'monitor', 'e3': 'screen', 'e4': 'sample', 'e5':'process'}
rv.EQUIPMENT = {'survey': 'e1', 'monitor': 'e2', 'screen': 'e3', 'sample': 'e4', 'process': 'e5'}
rv.EQUIPMENTTYPE = {'e1': 'survey', 'e2': 'monitor', 'e3': 'screen', 'e4': 'sample', 'e5':'process'}
rv.LOCATIONS = ['base', 'z1', 'z2', 'z3', 'z4']
rv.EDGES = {'base': {'z1': 20, 'z2': 50, 'z3': 20, 'z4': 50}, 'z1': {'base': 20, 'z2': 30, 'z4': 50}, 'z2': {'base': 50, 'z1': 30, 'z3': 30}, 'z3': {'base': 20, 'z2': 30, 'z4': 30}, 'z4': {'base': 50, 'z3': 30, 'z1': 50}}
def ResetState():
state.loc = {'r1': 'base', 'r2': 'base', 'UAV': 'base'}
state.charge = { 'UAV': 50, 'r1': 80, 'r2': 50}
state.data = { 'UAV': 3, 'r1': 1, 'r2': 1}
state.pos = {'c1': 'base', 'e1': 'r2', 'e2': 'base', 'e3': 'base', 'e4': 'base', 'e5': 'base'}
state.load = {'r1': NIL, 'r2': 'e1', 'UAV': NIL}
state.storm = {'active': False}
tasks = {
6: [['doActivities', 'UAV', [['survey', 'z2'], ['survey', 'z3'], ['survey', 'base']]]],
}
eventsEnv = {
}
|
[
"patras@umd.edu"
] |
patras@umd.edu
|
0de58022fd098cfc9447970c42cfb2c2a68d63d3
|
383a974b225b3d5decf311e6224a14f0e86a14c9
|
/affiliates/banners/models.py
|
00f4f8e7026897464b9c19b05c51285e76522a56
|
[
"MIT",
"BSD-3-Clause"
] |
permissive
|
tub216/affiliates
|
d7b465d1dc9a5d3bcf041cf96741028e9c67625c
|
ffce6c42a6caf73bbedaca429ec0aa9ad70fc7e5
|
refs/heads/master
| 2020-05-29T11:06:33.555351
| 2014-02-18T21:06:24
| 2014-03-20T21:47:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,603
|
py
|
import hashlib
import os
from django.core.exceptions import ValidationError
from django.db import models
from django.template.loader import render_to_string
from mptt.models import MPTTModel, TreeForeignKey
from affiliates.banners import COLOR_CHOICES
from affiliates.links.models import Link
from affiliates.shared.models import LocaleField
class Category(MPTTModel):
"""
Category that groups together either subcategories or banners.
A category tree can only be 2 layers deep, including the roots. This
is only enforced by model validation, so site code could
theoretically create Categories that violate this rule, but in
practice the only place that Categories should be created is the
admin interface.
"""
name = models.CharField(max_length=255)
parent = TreeForeignKey('self', null=True, blank=True, related_name='children')
class MPTTMeta:
order_insertion_by = ['name']
def clean(self):
"""
Validate that this category isn't more than one layer deep.
"""
if self.get_level() > 1:
raise ValidationError('Categories cannot be more than one level deep.')
class Banner(models.Model):
"""A type of banner that a user can generate links from."""
category = TreeForeignKey(Category)
name = models.CharField(max_length=255)
destination = models.URLField(max_length=255)
visible = models.BooleanField(default=False)
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
class Meta:
abstract = True
def generate_banner_code(self, *args, **kwargs):
"""
Generate the HTML that users will copy onto their website to
display this banner. Arguments will vary based on the subclass.
"""
raise NotImplementedError()
def create_link(self, user, *args, **kwargs):
"""
Create a Link based off of this banner. Extra arguments are
passed on to generate_banner_code.
"""
html = self.generate_banner_code(*args, **kwargs)
return Link(user=user, destination=self.destination, html=html)
class ImageBanner(Banner):
"""Banner displayed as an image link."""
def generate_banner_code(self, variation):
return render_to_string('banners/banner_code/image_banner.html', {
'href': self.destination,
'variation': variation
})
class ImageBannerVariation(models.Model):
"""
Variation of an image banner that a user can choose to use for their
link.
"""
banner = models.ForeignKey(ImageBanner)
color = models.CharField(max_length=32, choices=COLOR_CHOICES)
locale = LocaleField()
def _filename(self, filename):
props = '{id}_{width}_{height}_{color}_{locale}'.format(
id=self.banner_id,
width=self.image.width,
height=self.image.height,
color=self.color,
locale=self.locale
)
props_hash = hashlib.sha1(props).hexdigest()
extension = os.path.splitext(filename)[1]
return os.path.join('uploads/banners', props_hash + extension)
image = models.ImageField(upload_to=_filename, max_length=255)
class TextBanner(Banner):
"""
Banner displayed as a string of text with a link.
Text should use Python format syntax to include the link. For
example:
> Value privacy? <a href="{href}">Download Firefox!</a>
"""
text = models.TextField()
def generate_banner_code(self):
return self.text.format(href=self.destination)
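def _text_banner_example():
    # Minimal usage sketch (hypothetical text and URL, and assuming a configured Django
    # project): generate_banner_code substitutes the banner's destination for the {href}
    # placeholder in its text.
    banner = TextBanner(destination='https://www.mozilla.org/firefox/',
                        text='Value privacy? <a href="{href}">Download Firefox!</a>')
    return banner.generate_banner_code()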
|
[
"mkelly@mozilla.com"
] |
mkelly@mozilla.com
|
940914c8add27ca2a6dc0ce9414a4b1d69b2bdc8
|
ef10c3da3b15bfdec0d9b88de753ae3540f72120
|
/utils/inputs/segmentation.py
|
175d7a02a4b9885e9bb5d403134b39cc2dd884b4
|
[
"MIT"
] |
permissive
|
Qoboty/asr_preprocessing
|
3554456364b9ee751298b3378a1a109737c473d4
|
d9cfda36edd9155ef45c6eb9626c42d1ba105bfd
|
refs/heads/master
| 2021-07-06T18:08:59.336598
| 2017-09-26T08:30:53
| 2017-09-26T08:30:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,594
|
py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Segment a htk file into each utterance."""
import numpy as np
from struct import unpack
from utils.inputs.wav2feature_python_speech_features import wav2feature as w2f_psf
from utils.inputs.wav2feature_librosa import wav2feature as w2f_librosa
def segment_htk(audio_path, speaker, utterance_dict, is_training,
sil_duration=0., tool='htk', config=None, mean=None,
dtype=np.float64):
"""Segment each HTK or WAV file into utterances. Normalization will not be
conducted here.
Args:
audio_path (string): path to a HTK or WAV file
speaker (string): speaker name
utterance_dict (dict): dictionary of utterance information
key (string) => utterance index
value (list) => [start_frame, end_frame, transcript (, transcript2)]
sil_duration (float): duration of silence at both ends. Default is 0.
tool (string): htk or python_speech_features or librosa
config (dict): a configuration for feature extraction
mean (np.ndarray): A mean vector over the file
dtype (optional): default is np.float64
Returns:
input_data_dict (dict):
key (string) => utt_index
value (np.ndarray )=> a feature vector of size
`(frame_num, feature_dim)`
input_data_utt_sum (np.ndarray): A sum of feature vectors of a speaker
mean (np.ndarray): A mean vector over the file
stddev (np.ndarray): A stddev vector over the file
total_frame_num_file (int): total frame num of the target speaker's utterances
"""
if tool != 'htk' and config is None:
raise ValueError('Set config dict.')
# Read the HTK or WAV file
if tool == 'htk':
input_data = read_htk(audio_path)
elif tool == 'python_speech_features':
input_data = w2f_psf(audio_path,
feature_type=config['feature_type'],
feature_dim=config['channels'],
use_energy=config['energy'],
use_delta1=config['delta'],
use_delta2=config['deltadelta'],
window=config['window'],
slide=config['slide'])
elif tool == 'librosa':
input_data = w2f_librosa(audio_path,
feature_type=config['feature_type'],
feature_dim=config['channels'],
use_energy=config['energy'],
use_delta1=config['delta'],
use_delta2=config['deltadelta'],
window=config['window'],
slide=config['slide'])
feature_dim = input_data.shape[1]
# Divide into each utterance
input_data_dict = {}
total_frame_num_file = 0
end_frame_pre = 0
utt_num = len(utterance_dict.keys())
utt_dict_sorted = sorted(utterance_dict.items(), key=lambda x: x[0])
input_data_utt_sum = np.zeros((feature_dim,), dtype=dtype)
stddev = np.zeros((feature_dim,), dtype=dtype)
for i, (utt_index, utt_info) in enumerate(utt_dict_sorted):
start_frame, end_frame = utt_info[0], utt_info[1]
# Check timestamp
if start_frame > end_frame:
print(utterance_dict)
print('Warning: time stamp is reversed.')
print('speaker index: %s' % speaker)
print('utterance index: %s & %s' %
(str(utt_index), utt_dict_sorted[i + 1][0]))
# Check the first utterance
if i == 0:
if start_frame >= sil_duration:
start_frame_extend = start_frame - sil_duration
else:
start_frame_extend = 0
start_frame_next = utt_dict_sorted[i + 1][1][0]
if end_frame > start_frame_next:
print('Warning: utterances are overlapping.')
print('speaker index: %s' % speaker)
print('utterance index: %s & %s' %
(str(utt_index), utt_dict_sorted[i + 1][0]))
if start_frame_next - end_frame >= sil_duration * 2:
end_frame_extend = end_frame + sil_duration
else:
end_frame_extend = end_frame + \
int((start_frame_next - end_frame) / 2)
# Check the last utterance
elif i == utt_num - 1:
if start_frame - end_frame_pre >= sil_duration * 2:
start_frame_extend = start_frame - sil_duration
else:
start_frame_extend = start_frame - \
int((start_frame - end_frame_pre) / 2)
if input_data.shape[0] - end_frame >= sil_duration:
end_frame_extend = end_frame + sil_duration
else:
end_frame_extend = input_data.shape[0] # last frame
# Check other utterances
else:
if start_frame - end_frame_pre >= sil_duration * 2:
start_frame_extend = start_frame - sil_duration
else:
start_frame_extend = start_frame - \
int((start_frame - end_frame_pre) / 2)
start_frame_next = utt_dict_sorted[i + 1][1][0]
if end_frame > start_frame_next:
print('Warning: utterances are overlapping.')
print('speaker: %s' % speaker)
print('utt index: %s & %s' %
(str(utt_index), utt_dict_sorted[i + 1][0]))
if start_frame_next - end_frame >= sil_duration * 2:
end_frame_extend = end_frame + sil_duration
else:
end_frame_extend = end_frame + \
int((start_frame_next - end_frame) / 2)
input_data_utt = input_data[start_frame_extend:end_frame_extend]
input_data_utt_sum += np.sum(input_data_utt, axis=0)
total_frame_num_file += (end_frame_extend - start_frame_extend)
input_data_dict[str(utt_index)] = input_data_utt
# For computing stddev over the file
if mean is not None:
stddev += np.sum(
np.abs(input_data_utt - mean) ** 2, axis=0)
# Update
end_frame_pre = end_frame
if is_training:
if mean is not None:
# Compute stddev over the file
stddev = np.sqrt(stddev / (total_frame_num_file - 1))
else:
# Compute mean over the file
mean = input_data_utt_sum / total_frame_num_file
stddev = None
else:
mean, stddev = None, None
return input_data_dict, input_data_utt_sum, mean, stddev, total_frame_num_file
def read_htk(audio_path):
"""Read each HTK file.
Args:
audio_path (string): path to a HTK file
Returns:
input_data (np.ndarray): A tensor of size (frame_num, feature_dim)
"""
with open(audio_path, "rb") as fh:
spam = fh.read(12)
frame_num, sampPeriod, sampSize, parmKind = unpack(">IIHH", spam)
# print(frame_num) # frame num
# print(sampPeriod) # 10ms
# print(sampSize) # feature dim * 4 (byte)
# print(parmKind)
veclen = int(sampSize / 4)
fh.seek(12, 0)
input_data = np.fromfile(fh, 'f')
# input_data = input_data.reshape(int(len(input_data) / veclen),
# veclen)
input_data = input_data.reshape(-1, veclen)
input_data.byteswap(True)
return input_data
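if __name__ == '__main__':
    # Minimal usage sketch; the path below is a hypothetical HTK feature file.
    feats = read_htk('./speaker1/utt001.htk')
    print(feats.shape)  # (frame_num, feature_dim)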
|
[
"hiro.mhbc@gmail.com"
] |
hiro.mhbc@gmail.com
|
edd7fe691c2cef36ba433f32c16a17394a35791b
|
254e35ed13abb5670eb664c1b17cb77d6b2d6289
|
/LeetCode/python/_486.PredicttheWinner.py
|
1d78412cd25a05e9eb784c866f68ac48259c106b
|
[] |
no_license
|
bobby20180331/Algorithms
|
475f7b29efcab829bc97b18a088600d406850fc7
|
c56967e292b34162438f86bfc4c76925329105dd
|
refs/heads/master
| 2023-04-23T04:36:26.977179
| 2021-02-04T06:47:41
| 2021-02-04T06:47:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,774
|
py
|
#Using DP... this is as far as I could get with this approach.
#Line 16 is wrong, because you don't get to pick every time: after A picks, B picks,
#and B will likewise choose whatever maximizes B's own total.
#New idea: split it into a "min" step and a "max" step and alternate between them.
#Still fails on [0,0,7,6,5,6,1], though, and I can't see why...
class Solution(object):
def minA(self,nums1):
if len(nums1)==3:
n0 = nums1[:]
n0.sort()
return n0[2]+min(nums1[0],nums1[2])
if len(nums1)==4:
n1 = nums1[1:]
n1.sort()
n2 = nums1[:-1]
n2.sort()
left2min = n1[1]
right2min =n2[1]
return min((nums1[0]+left2min),(nums1[0]+nums1[2]),(nums1[-1]+right2min),(nums1[-1]+nums1[1]))
minASum = min((nums1[0]+self.maxA(nums1[1:])),(nums1[-1]+self.maxA(nums1[:-1])))
return minASum
def maxA(self,nums2):
if len(nums2)==3:
n0 = nums2[:]
n0.sort()
return n0[0]+max(nums2[0],nums2[2])
if len(nums2)==4:
n1 = nums2[1:]
n1.sort()
n2 = nums2[:-1]
n2.sort()
left2max = n1[1]
right2max =n2[1]
return max((nums2[0]+left2max),(nums2[0]+nums2[2]),(nums2[-1]+right2max),(nums2[-1]+nums2[1]))
maxASum = max((nums2[0]+self.minA(nums2[1:])),(nums2[-1]+self.minA(nums2[:-1])))
return maxASum
def PredictTheWinner(self, nums):
"""
:type nums: List[int]
:rtype: bool
"""
        #Idea: use DP to find the maximum total A can get, then compare it with B's total to decide.
if len(nums)<3:
return True
sumA = self.maxA(nums)
if sumA >sum(nums)-sumA:
return True
else:
return False
#AC版本
class Solution(object):
def PredictTheWinner(self, nums):
        def check(left, right, memo): #recurse on the two end indices so results can be memoised and compared
if left > right:
return 0
            if left == right: #stop once the two indices meet
return nums[left]
if not (left, right) in memo:
ss = sum(nums[left: right + 1])
l, r = ss - check(left + 1, right, memo) + nums[left], ss - check(left, right - 1, memo) + nums[right]
                #the players alternate turns, so subtract the best the opponent can get after we take one end
                memo[(left, right)] = max(l, r) #store the best value obtainable for this pair of indices
return memo[(left, right)]
s = sum(nums)
c1 = check(0, len(nums) - 1, {})
        return c1 >= s - c1  #the comparison already yields a boolean, no if needed
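if __name__ == '__main__':
    # Sanity checks against the two standard LeetCode 486 examples.
    assert not Solution().PredictTheWinner([1, 5, 2])
    assert Solution().PredictTheWinner([1, 5, 233, 7])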
|
[
"noreply@github.com"
] |
bobby20180331.noreply@github.com
|
746cd2a1f61ed05f5602945dce3850c3f41e5e4a
|
f0b741f24ccf8bfe9bd1950425d83b6291d21b10
|
/components/google-cloud/google_cloud_pipeline_components/v1/dataproc/__init__.py
|
c23660af80759e169b3e288ffbefe38ceaaff513
|
[
"Apache-2.0"
] |
permissive
|
kubeflow/pipelines
|
e678342b8a325559dec0a6e1e484c525fdcc8ce8
|
3fb199658f68e7debf4906d9ce32a9a307e39243
|
refs/heads/master
| 2023-09-04T11:54:56.449867
| 2023-09-01T19:07:33
| 2023-09-01T19:12:27
| 133,100,880
| 3,434
| 1,675
|
Apache-2.0
| 2023-09-14T20:19:06
| 2018-05-12T00:31:47
|
Python
|
UTF-8
|
Python
| false
| false
| 1,424
|
py
|
# Copyright 2023 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create [Google Cloud Dataproc](https://cloud.google.com/dataproc) jobs from within Vertex AI Pipelines."""
from google_cloud_pipeline_components.v1.dataproc.create_pyspark_batch.component import dataproc_create_pyspark_batch as DataprocPySparkBatchOp
from google_cloud_pipeline_components.v1.dataproc.create_spark_batch.component import dataproc_create_spark_batch as DataprocSparkBatchOp
from google_cloud_pipeline_components.v1.dataproc.create_spark_r_batch.component import dataproc_create_spark_r_batch as DataprocSparkRBatchOp
from google_cloud_pipeline_components.v1.dataproc.create_spark_sql_batch.component import dataproc_create_spark_sql_batch as DataprocSparkSqlBatchOp
__all__ = [
'DataprocPySparkBatchOp',
'DataprocSparkBatchOp',
'DataprocSparkRBatchOp',
'DataprocSparkSqlBatchOp',
]
|
[
"nobody@google.com"
] |
nobody@google.com
|
2c593f0c394889b99375a489bc508ced79208a18
|
f56e4bb2d3a91b068292d698388ac5e82a40f078
|
/inkshop/apps/utils/backends.py
|
cda1f8bb15efed905ac26e68ae9fc2365ccc9452
|
[] |
no_license
|
inkandfeet/inkshop
|
979064eb902c86dc95a6399e79ac753efbe547d1
|
691187b3eb4435782f8054e6404f1203e7d0c383
|
refs/heads/master
| 2022-12-13T01:26:02.361970
| 2021-11-18T23:01:50
| 2021-11-18T23:01:50
| 175,481,726
| 1
| 1
| null | 2022-12-08T04:59:16
| 2019-03-13T18:59:17
|
Python
|
UTF-8
|
Python
| false
| false
| 957
|
py
|
from django import forms
from clubhouse.models import StaffMember
from people.models import Person
from utils.encryption import lookup_hash, encrypt, decrypt, create_unique_hashid
class EncryptedEmailBackend(object):
def authenticate(self, request, username=None, password=None):
try:
try:
user = StaffMember.objects.get(hashed_email=lookup_hash(username))
if user.check_password(password):
return user
except StaffMember.DoesNotExist:
user = Person.objects.get(hashed_email=lookup_hash(username))
if user.check_password(password):
return user
except:
pass
return None
def get_user(self, user_id):
try:
return StaffMember.objects.get(pk=user_id)
except StaffMember.DoesNotExist:
return Person.objects.get(pk=user_id)
return None
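# Typical wiring (sketch): list this backend in the Django settings so it is tried before
# the default ModelBackend. The dotted path assumes this module lives at utils/backends.py.
#
#   AUTHENTICATION_BACKENDS = [
#       'utils.backends.EncryptedEmailBackend',
#       'django.contrib.auth.backends.ModelBackend',
#   ]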
|
[
"steven@quantumimagery.com"
] |
steven@quantumimagery.com
|
dc12fddc12f7b8c7565e0765a8b65e59f29aae67
|
6e57aed6e283e155a7db4c55edd9c07d5640af40
|
/astetik/plots/overlap.py
|
57ca0a188eb84fe9e149bca87586e52dbe9fa17d
|
[
"MIT"
] |
permissive
|
meirm/astetik
|
78c3c52e7f4a0fd2521fe1b123dfff30bae46a99
|
ea05ce57a0bf1e8bd7ef18c4d5ca8d7ad3fb4be7
|
refs/heads/master
| 2022-12-12T06:18:36.229953
| 2020-08-30T15:31:43
| 2020-08-30T15:31:43
| 291,494,532
| 0
| 0
|
MIT
| 2020-08-30T15:13:00
| 2020-08-30T15:12:59
| null |
UTF-8
|
Python
| false
| false
| 5,049
|
py
|
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from ..style.titles import _titles
from ..style.template import _header
from ..utils.transform import _groupby
from ..style.formats import _thousand_sep
def overlap(data,
x,
y,
label_col,
sort=None,
limit=None,
transform_func=False,
palette='default',
style='astetik',
dpi=72,
title='',
sub_title='',
x_label='',
y_label='',
legend=True,
x_scale='linear',
y_scale='linear',
x_limit=None,
y_limit=None,
save=False):
'''OVERLAP BAR PLOT
Useful for the cases where you have a categorical
feature, and then you want to compare two overlapping
continuous features (e.g. all days and rainy days) with
each other per category. Each category will have its own
bar, where the 'x' and 'y' features will be overlapping.
Inputs: 3
Features: 2 continuous and 1 categorical
NOTE: 'y' should be a subset of 'x'.
1. USE
======
ast.overlap(data=patients,
x='hospital_days',
y='icu_days',
label_col='insurance',
sort=False,
transform=True,
transform_func='sum',
palette='colorblind')
2. PARAMETERS
=============
2.1 INPUT PARAMETERS
--------------------
data :: pandas dataframe
x :: x-axis data (continuous)
y :: x-axis overlap data (continuous)
label_col :: the column with the label values
--------------------
2.2. PLOT PARAMETERS
--------------------
sort :: either True or False for ascending sort based on the
x-axis data.
limit :: limit the number of items to be shown
transform_func :: If not False, the selected function such as
'mean' will be used to group by the label_col.
Available functions:
- 'median'
- 'mean'
- 'first'
- 'last',
- 'std',
- 'mode',
- 'max',
- 'min',
- 'sum',
- 'random'
----------------------
2.3. COMMON PARAMETERS
----------------------
palette :: One of the hand-crafted palettes:
'default'
'colorblind'
'blue_to_red'
'blue_to_green'
'red_to_green'
'green_to_red'
'violet_to_blue'
'brown_to_green'
'green_to_marine'
Or use any cmap, seaborn or matplotlib
color or palette code, or hex value.
style :: Use one of the three core styles:
'astetik' # white
'538' # grey
'solarized' # sepia
Or alternatively use any matplotlib or seaborn
style definition.
dpi :: the resolution of the plot (int value)
title :: the title of the plot (string value)
sub_title :: a secondary title to be shown below the title
x_label :: string value for x-axis label
y_label :: string value for y-axis label
x_scale :: 'linear' or 'log' or 'symlog'
y_scale :: 'linear' or 'log' or 'symlog'
x_limit :: int or list with two ints
y_limit :: int or list with two ints
outliers :: Remove outliers using either 'zscore' or 'iqr'
'''
if transform_func != False:
data = _groupby(data, label_col, transform_func)
if sort != None:
data = data.sort_values(x, ascending=sort)
fig_height = len(data[label_col].unique()) * 0.6
p, ax = plt.subplots(figsize=(6, fig_height))
# HEADER STARTS >>>
palette = _header(palette,
style,
n_colors=2,
dpi=dpi,
fig_height=None,
fig_width=None)
# <<< HEADER ENDS
# # # # PLOT STARTS # # # #
sns.barplot(data=data,
x=x,
y=label_col,
orient='h',
color=palette[0])
sns.barplot(data=data,
x=y,
y=label_col,
orient='h',
color=palette[1])
# # # # PLOT ENDS # # # #
if legend != False:
x_patch = mpatches.Patch(color=palette[0], label=x)
y_patch = mpatches.Patch(color=palette[1], label=y)
ax.legend(handles=[x_patch, y_patch], ncol=1, loc="upper right", frameon=True)
ax.set(ylabel=y_label, xlabel=x_label)
sns.despine(bottom=True)
ax.xaxis.set_major_locator(plt.MaxNLocator(5))
_thousand_sep(p, ax, data, x, y)
    if len(title) + len(sub_title) > 0:
_titles(title, sub_title=sub_title)
|
[
"mailme@mikkokotila.com"
] |
mailme@mikkokotila.com
|
c07ce663afb1257e042f97cbbb71ec660097c870
|
d85d2cc40f074ab22905f23279aca4e6ebcd017c
|
/service/reduction_service.py
|
dcf1c1e234a9a6ab258f8e271df7a93a3385a6b6
|
[] |
no_license
|
bprajwal1/ML3-UnsupervisedLearning
|
c870bc6fce73701f9eee282b0cbc41ee312948ab
|
f2421395425836e4f8f4f2f9c25b100b3e352b80
|
refs/heads/master
| 2020-04-30T14:45:16.509608
| 2019-01-19T22:03:31
| 2019-01-19T22:03:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,459
|
py
|
from sklearn.decomposition import PCA, FastICA
from sklearn.random_projection import GaussianRandomProjection
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
def reduce_train_test_split(reduction_algo, feature_data_train, feature_data_text, labels_train, n_components):
reduction_model = build_reduction_model(reduction_algo, n_components)
x_train_reduced = reduction_model.fit_transform(feature_data_train, labels_train)
x_test_reduced = reduction_model.transform(feature_data_text)
return x_train_reduced, x_test_reduced
def reduce(reduction_algo, data_to_reduce, labels, n_components):
reduction_model = build_reduction_model(reduction_algo, n_components)
# transform stuff, but don't transform the ownership of this file, which is Boyko Todorov's
x_train_reduced = reduction_model.fit_transform(data_to_reduce, labels)
return x_train_reduced
def build_reduction_model(reduction_algo, n_components):
reduction_model = None
if reduction_algo == 'PCA':
reduction_model = PCA(n_components=n_components, whiten=True)
elif reduction_algo == 'ICA':
reduction_model = FastICA(n_components=n_components, whiten=True)
elif reduction_algo == 'RCA':
reduction_model = GaussianRandomProjection(n_components=n_components)
elif reduction_algo == 'LDA':
reduction_model = LinearDiscriminantAnalysis(n_components=n_components)
return reduction_model
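if __name__ == '__main__':
    # Usage sketch on toy data; LDA is left out here because it is supervised and its
    # result depends on the label structure rather than the data alone.
    import numpy as np
    features = np.random.rand(50, 10)
    labels = np.random.randint(0, 3, size=50)
    for algo in ('PCA', 'ICA', 'RCA'):
        print(algo, reduce(algo, features, labels, 2).shape)  # -> (50, 2)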
|
[
"boyko11@gmail.com"
] |
boyko11@gmail.com
|
27fbafb603bc55974240015ac7068d73f3830b7a
|
67b7e6d2c08f08403ec086c510622be48b8d26d8
|
/src/test/tinc/tincrepo/mpp/gpdb/tests/queries/basic/cursors/test_cursors.py
|
09b0d6eeda1c7838486e2d6139bbe7739c73c3b8
|
[
"Apache-2.0",
"PostgreSQL",
"LicenseRef-scancode-rsa-md4",
"OLDAP-2.8",
"HPND-sell-variant",
"BSD-4-Clause-UC",
"BSD-3-Clause",
"Zlib",
"LicenseRef-scancode-zeusbench",
"LicenseRef-scancode-mit-modification-obligations",
"OpenSSL",
"MIT",
"LicenseRef-scancode-other-copyleft",
"bzip2-1.0.6",
"NTP",
"W3C",
"metamail",
"Beerware",
"RSA-MD",
"LicenseRef-scancode-rsa-1990",
"LicenseRef-scancode-stream-benchmark",
"LicenseRef-scancode-openssl",
"X11-distribute-modifications-variant",
"LicenseRef-scancode-pcre",
"LicenseRef-scancode-ssleay-windows",
"Spencer-94",
"ISC",
"LicenseRef-scancode-other-permissive",
"BSD-2-Clause",
"Python-2.0",
"curl",
"LicenseRef-scancode-sun-bcl-sdk-5.0",
"MIT-CMU",
"W3C-19980720"
] |
permissive
|
sshyran/gpdb
|
41012411d22b0294204dfb0fe67a1f4c8d1ecaf6
|
2d065ecdd2b5535cb42474f17a0ee6592b4e6837
|
refs/heads/master
| 2023-04-09T14:05:44.030212
| 2016-11-12T08:33:33
| 2016-11-12T08:34:36
| 73,544,159
| 0
| 0
|
Apache-2.0
| 2023-04-04T00:30:10
| 2016-11-12T09:43:54
|
PLpgSQL
|
UTF-8
|
Python
| false
| false
| 2,317
|
py
|
"""
Copyright (C) 2004-2015 Pivotal Software, Inc. All rights reserved.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import time
from mpp.models import MPPTestCase
from mpp.lib.PSQL import PSQL
import pygresql.pg
from struct import *
import os
class CursorTests(MPPTestCase):
def test_mpp24119(self):
"""
@description postgres process crashed when running cursor with hold
@product_version gpdb: [4.2.8.1-4.2.99.99], [4.3.3.0-]
"""
start_time = time.time()
conn = pygresql.pg.connect()
conn.query("drop table if exists mpp24119")
conn.query("create table mpp24119(i int, j int)")
conn.query("insert into mpp24119 select i , i from generate_series(1, 10) i")
conn.query("begin; declare c cursor with hold for select * from mpp24119;")
conn.query("commit")
# The crash happens when exec_execute_message is triggered after a tx with cursor
# with hold is committed. Since there was no way to trigger this, we send a protocol
# message directly to the socket on which the connection is established
sockno = conn.fileno()
msg = pack('!sbi', 'c', 0, 0)
l = len(msg) + 4
res = os.write(sockno, pack('!c', 'E'))
res = os.write(sockno, pack('!i', l))
res = os.write(sockno, msg)
format_start_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(start_time))
output = PSQL.run_sql_command("SELECT logmessage FROM gp_toolkit.gp_log_system WHERE " + \
"logtime >= '%s' and logdatabase is null and logseverity = 'PANIC'" %(format_start_time), flags= '-q -t')
self.assertFalse(output.strip())
conn.close()
|
[
"jyih@pivotal.io"
] |
jyih@pivotal.io
|
d31c776481fd364a69f142b802d07b602b554709
|
6a77b42871d1996b9037e3be4fea31436315c8ab
|
/ex010.py
|
3b6f1fbb97495408980a0eb09e2484890909cee1
|
[] |
no_license
|
RaphaelMolina/Curso_em_video_Python3
|
4e1e299681695db69a0439107b32ae90cdea32f5
|
f22100865d7137bc420b677f118c848d963ee90a
|
refs/heads/master
| 2022-01-10T09:48:25.777665
| 2022-01-06T16:23:52
| 2022-01-06T16:23:52
| 244,028,670
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 614
|
py
|
limpar = str('\033[m')
texto = {'vermelho': '\033[31m',
'roxo': '\033[35m',
'verde2': '\033[36m'}
e = str('{} Exercício 10 {}'.format(texto['vermelho'], texto['verde2']))
print('{}{:=^30}{}\n'.format(texto['verde2'], e, limpar))
real = float(input('{}Informe o valor que você tem: R${}'.format(texto['roxo'], limpar)))
dolar = float(real / 3.27)
print('Você tem {}R${:.2f}{}, e com esse valor você pode comprar {}US${:.2f}{}.'.format(texto['vermelho'], real, limpar,
texto['verde2'], dolar, limpar))
|
[
"manson1307@gmail.com"
] |
manson1307@gmail.com
|
595c56fcc014bb1788d24e282c081e6312b22474
|
6f05f7d5a67b6bb87956a22b988067ec772ba966
|
/data/train/python/e75506567100fd23fb9cfac67ee2e5a327bd5106svnsync.py
|
e75506567100fd23fb9cfac67ee2e5a327bd5106
|
[
"MIT"
] |
permissive
|
harshp8l/deep-learning-lang-detection
|
93b6d24a38081597c610ecf9b1f3b92c7d669be5
|
2a54293181c1c2b1a2b840ddee4d4d80177efb33
|
refs/heads/master
| 2020-04-07T18:07:00.697994
| 2018-11-29T23:21:23
| 2018-11-29T23:21:23
| 158,597,498
| 0
| 0
|
MIT
| 2018-11-21T19:36:42
| 2018-11-21T19:36:41
| null |
UTF-8
|
Python
| false
| false
| 759
|
py
|
from django.core import management
from django.utils.translation import ugettext as _
from cobra.core.loading import get_model
Repository = get_model('svnkit', 'Repository')
class Command(management.BaseCommand):
help = _('Get repository changes')
args = _('<repository repository ...>')
def handle(self, *args, **options):
if args:
try:
rlist = map(
lambda r: Repository.objects.get(label=r), args)
except Repository.DoesNotExist, error:
raise management.CommandError(error)
else:
rlist = Repository.objects.all()
for r in rlist:
print _('Syncing %(label)s...') % {'label': r.label}
r.sync()
|
[
"aliostad+github@gmail.com"
] |
aliostad+github@gmail.com
|
15f042bfdb833b3208d5ce43dd07ced63845ca7d
|
e2df44c68460fb29084ba0ed6f3266d8802f6cb1
|
/catkin_ws/build/sweep-ros/catkin_generated/pkg.installspace.context.pc.py
|
3a4d51f308fd00dd98a43315afe661999fdaabaf
|
[] |
no_license
|
harshilpatel312/TSRT10
|
879bfcbb882d6bc32111f3145f3637c1df8e2b55
|
be8d3a90c75a6655fb82392a43c75135686a7eb8
|
refs/heads/master
| 2020-03-22T15:03:35.347270
| 2017-12-14T08:22:26
| 2017-12-14T08:22:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 393
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "sensor_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "sweep_ros"
PROJECT_SPACE_DIR = "/home/fregu856/TSRT10/catkin_ws/install"
PROJECT_VERSION = "0.2.0"
|
[
"fregu856@student.liu.se"
] |
fregu856@student.liu.se
|
e0a1077fbd17413ed11b60a19920e236e5011c79
|
b63142e8540cb30bb0c663332e29a4112721073e
|
/991_bulb_switcher.py
|
b0edfd386263155bb770ccc464f73ea73b088d90
|
[] |
no_license
|
HaydenInEdinburgh/LintCode
|
025bb2f0d75686097061de324c0fd292536dbb14
|
dbeae2bf631e57667d1415164d452d5ca2df7447
|
refs/heads/master
| 2023-08-18T19:52:54.561623
| 2021-10-06T21:46:50
| 2021-10-06T21:46:50
| 370,733,548
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 212
|
py
|
import math
class Solution:
"""
@param n: a Integer
@return: how many bulbs are on after n rounds
"""
def bulbSwitch(self, n):
# Write your code here
return int(math.sqrt(n))
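# Why sqrt works: bulb i is toggled once for every divisor of i, so it ends up on only
# when i has an odd number of divisors, which happens exactly when i is a perfect square.
# For n = 9 the bulbs left on are 1, 4 and 9, so bulbSwitch(9) == 3.
def _brute_force_check(limit=50):
    # Small brute-force cross-check of the closed form (illustrative sketch).
    for n in range(1, limit):
        bulbs = [False] * (n + 1)
        for step in range(1, n + 1):
            for i in range(step, n + 1, step):
                bulbs[i] = not bulbs[i]
        assert sum(bulbs) == Solution().bulbSwitch(n)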
|
[
"bony960323@gmail.com"
] |
bony960323@gmail.com
|
26e22b6839aef35f5e82a48054dc5008d70a29de
|
82a63effc9886c749b349bb0672238dc4a68e66d
|
/2019/007_Pixel_3a_low_level_checker/blog_img/blog_sample.py
|
f051aede8dc403684662dc5bc6450cd5aad8e342
|
[
"BSD-3-Clause"
] |
permissive
|
toru-ver4/sample_code
|
6250f4bf7b437b7ee394cbc198d48fd2de209c3f
|
58e9925af645e110e4a4a306001ba73599d0b192
|
refs/heads/develop
| 2023-07-22T00:52:48.568631
| 2023-07-15T07:03:00
| 2023-07-15T07:03:00
| 201,731,599
| 32
| 4
|
BSD-3-Clause
| 2023-09-10T14:46:41
| 2019-08-11T07:19:29
|
Python
|
UTF-8
|
Python
| false
| false
| 1,453
|
py
|
import numpy as np
from colour import xyY_to_XYZ, XYZ_to_RGB, RGB_to_XYZ, XYZ_to_xyY
from colour.models import BT709_COLOURSPACE
# xyY data lying outside the BT.709 gamut. Y is matched to the green primary.
xyY_bt2020 = np.array([[0.26666249, 0.47998497, 0.67799807],
[0.25055208, 0.5328208, 0.67799807],
[0.23444166, 0.58565664, 0.67799807],
[0.21833125, 0.63849248, 0.67799807],
[0.20222083, 0.69132832, 0.67799807],
[0.18611042, 0.74416416, 0.67799807],
[0.17, 0.797, 0.67799807]])
d65 = np.array([0.3127, 0.3290])
if __name__ == '__main__':
    # First, convert to XYZ
large_xyz_bt2020 = xyY_to_XYZ(xyY_bt2020)
    # Convert from BT.2020 to BT.709 RGB values
rgb_linear_bt709 = XYZ_to_RGB(
XYZ=large_xyz_bt2020, illuminant_XYZ=d65, illuminant_RGB=d65,
XYZ_to_RGB_matrix=BT709_COLOURSPACE.XYZ_to_RGB_matrix)
    # Clip into the BT.709 gamut
print(rgb_linear_bt709)
rgb_linear_bt709_clipped = np.clip(rgb_linear_bt709, 0.0, 1.0)
    # Convert back to xyY for the final output
large_xyz_bt709_clipped = RGB_to_XYZ(
RGB=rgb_linear_bt709_clipped, illuminant_RGB=d65, illuminant_XYZ=d65,
RGB_to_XYZ_matrix=BT709_COLOURSPACE.RGB_to_XYZ_matrix)
xyY_bt709_clipped = XYZ_to_xyY(large_xyz_bt709_clipped)
print(xyY_bt709_clipped)
|
[
"toru.ver.11@gmail.com"
] |
toru.ver.11@gmail.com
|
c15e6f6228c343e9f14a01e55ca845ac24416893
|
c30ea40903626c46d0ffdbd18fa29caf15b60633
|
/setup.py
|
b11b9328ca3779182467af8051f79b2e085f5cb8
|
[
"MIT"
] |
permissive
|
zcutlip/WithTimer
|
d5c36d3e25d0abd0002ed23cc26cd060b42690e5
|
f35e0c20656d782ceb03b0839f24c58441a2d4f2
|
refs/heads/master
| 2020-06-26T17:16:07.998239
| 2019-10-02T23:04:53
| 2019-10-02T23:04:53
| 199,696,970
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 666
|
py
|
from __future__ import absolute_import
from setuptools import setup
about = {}
with open("withtimer/__about__.py") as fp:
exec(fp.read(), about)
with open("README.md", "r") as fp:
long_description = fp.read()
setup(name=about["__title__"],
version=about["__version__"],
description=about["__summary__"],
long_description=long_description,
license="MIT",
author="Zachary Cutlip",
long_description_content_type="text/markdown",
url="https://github.com/zcutlip/withtimer",
packages=['withtimer'],
python_requires='>=2.7',
install_requires=[],
package_data={'withtimer': ['config/*']},
)
|
[
"uid000@gmail.com"
] |
uid000@gmail.com
|
c4bae031a577fcea75547d1ba262bda935942fca
|
971e0efcc68b8f7cfb1040c38008426f7bcf9d2e
|
/tests/artificial/transf_Anscombe/trend_LinearTrend/cycle_0/ar_/test_artificial_128_Anscombe_LinearTrend_0__100.py
|
973499cfb993888a51e2d2621daa4fe908a9f9dd
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
antoinecarme/pyaf
|
a105d172c2e7544f8d580d75f28b751351dd83b6
|
b12db77cb3fa9292e774b2b33db8ce732647c35e
|
refs/heads/master
| 2023-09-01T09:30:59.967219
| 2023-07-28T20:15:53
| 2023-07-28T20:15:53
| 70,790,978
| 457
| 77
|
BSD-3-Clause
| 2023-03-08T21:45:40
| 2016-10-13T09:30:30
|
Python
|
UTF-8
|
Python
| false
| false
| 266
|
py
|
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "LinearTrend", cycle_length = 0, transform = "Anscombe", sigma = 0.0, exog_count = 100, ar_order = 0);
|
[
"antoine.carme@laposte.net"
] |
antoine.carme@laposte.net
|
8b5efe98fef0e5252e6c998598fe9ecaf19a706e
|
651a296c8f45b5799781fd78a6b5329effe702a0
|
/polpak/eulerian.py
|
3ef0e9fa8c13e3e673239e4e69f7e2443d391722
|
[] |
no_license
|
pdhhiep/Computation_using_Python
|
095d14370fe1a01a192d7e44fcc81a52655f652b
|
407ed29fddc267950e9860b8bbd1e038f0387c97
|
refs/heads/master
| 2021-05-29T12:35:12.630232
| 2015-06-27T01:05:17
| 2015-06-27T01:05:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,376
|
py
|
#!/usr/bin/env python
#
def eulerian ( n ):
#*****************************************************************************80
#
## EULERIAN computes the Eulerian number E(N,K).
#
# Definition:
#
# A run in a permutation is a sequence of consecutive ascending values.
#
# E(N,K) is the number of permutations of N objects which contain
# exactly K runs.
#
# Examples:
#
# N = 7
#
# 1 0 0 0 0 0 0
# 1 1 0 0 0 0 0
# 1 4 1 0 0 0 0
# 1 11 11 1 0 0 0
# 1 26 66 26 1 0 0
# 1 57 302 302 57 1 0
# 1 120 1191 2416 1191 120 1
#
# Recursion:
#
# E(N,K) = K * E(N-1,K) + (N-K+1) * E(N-1,K-1).
#
# Properties:
#
# E(N,1) = E(N,N) = 1.
# E(N,K) = 0 if K <= 0 or N < K.
# sum ( 1 <= K <= N ) E(N,K) = N!.
# X^N = sum ( 0 <= K <= N ) COMB(X+K-1, N ) E(N,K)
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 04 February 2015
#
# Author:
#
# John Burkardt
#
# Reference:
#
# Dennis Stanton and Dennis White,
# Constructive Combinatorics,
# Springer Verlag, 1986
#
# Parameters:
#
# Input, integer N, the number of rows desired.
#
# Output, integer E(N,N), the first N rows of Eulerian numbers.
#
import numpy as np
e = np.zeros ( ( n, n ) )
#
# Construct rows 1, 2, ..., N of the Eulerian triangle.
#
e[0,0] = 1
for j in range ( 1, n ):
e[0,j] = 0
for i in range ( 1, n ):
e[i,0] = 1
for j in range ( 1, n ):
e[i,j] = ( j + 1 ) * e[i-1,j] + ( i - j + 1 ) * e[i-1,j-1]
return e
def eulerian_test ( ):
#*****************************************************************************80
#
## EULERIAN_TEST tests EULERIAN.
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 04 February 2015
#
# Author:
#
# John Burkardt
#
print ''
print 'EULERIAN_TEST'
print ' EULERIAN computes Eulerian numbers.'
n = 7
e = eulerian ( n )
for i in range ( 0, n ):
for j in range ( 0, n ):
print ' %4d' % ( e[i,j] ),
print ''
print ''
print 'EULERIAN_TEST'
print ' Normal end of execution.'
return
if ( __name__ == '__main__' ):
from timestamp import timestamp
timestamp ( )
eulerian_test ( )
timestamp ( )
|
[
"siplukabir@gmail.com"
] |
siplukabir@gmail.com
|
f088c65f4fa33a034de7b94bd162a19cf66086a6
|
18bf8cc986763d381c35ae2a38560cce655020b1
|
/week10/01-Vehicle-Repair-Manager/example/database_layer/database.py
|
766313fd3c4a3e42afb3d74a228069ff65644fd0
|
[] |
no_license
|
markomatke/Programming101-Python-2018
|
3d2e467bec283469a372a55d66a911e231ca780c
|
9e3181a98f8df2e2a0801faca04d4e41acb9b6f4
|
refs/heads/master
| 2020-04-08T11:50:36.277683
| 2018-06-13T13:50:21
| 2018-06-13T13:50:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,915
|
py
|
import sqlite3
class Column:
def __init__(self, name, column_type, is_nullable=False,
is_fk=False, ref_table=None):
self.name = name
self.column_type = column_type
self.is_nullable = is_nullable
self.is_fk = is_fk
self.ref_table = ref_table
@property
def nullable(self):
return 'NOT NULL' if not self.is_nullable else ''
@property
def foreign_key(self):
if not self.is_fk:
return ''
return ",\n FOREIGN KEY ({name}) REFERENCES {ref_table}(ID)".format(
name=self.name,
ref_table=self.ref_table
)
@property
def to_sql_string(self):
return "{name} {column_type} {nullable} {foreign_key}".format(
name=self.name,
column_type=self.column_type,
nullable=self.nullable,
foreign_key=self.foreign_key
)
class Database:
def __init__(self, db_name):
self.db_name = db_name
@classmethod
def create_table(cls, db_name, table_name, columns):
CREATE_TABLE_SQL = """
CREATE TABLE IF NOT EXISTS {table_name} (
ID INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
{columns}
)
""".format(table_name=table_name,
columns=",\n ".join([c.to_sql_string for c in columns]))
db = sqlite3.connect(db_name)
crs = db.cursor()
crs.execute(CREATE_TABLE_SQL)
db.commit()
db.close()
if __name__ == '__main__':
DB_NAME = 'vehicle_system2.db'
db = Database(DB_NAME)
db.create_table(db_name=DB_NAME,
table_name='base_user',
columns=[
Column('user_name', 'text'),
Column('email', 'text'),
Column('phone_number', 'text'),
Column('address', 'text', True)
])
|
[
"ross_zz@mail.bg"
] |
ross_zz@mail.bg
|
9367da03a729c28a42db9948dbd97391e306a02b
|
6e6103826c37d26ffefdebee1fcd5b4a580c8b5e
|
/정리/20180206/urllib function 2.py
|
8c53e80aebcc13997a9975103941c1df75c26a22
|
[] |
no_license
|
ljg95924/multicampus_bigdata
|
34023a1bb121aa0f96910805bd70d7206534c738
|
75178c65fd2000dd1181860f5f7166c7635a5715
|
refs/heads/master
| 2021-05-09T05:34:54.657655
| 2018-02-26T08:00:35
| 2018-02-26T08:00:35
| 119,315,338
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 370
|
py
|
import os
print(os.path.split("C:\\python36\\NEWS.txt"))
#function that splits a path into its directory part and its file name
join_1=os.path.split("C:\\python36\\news.txt")
print(join_1)
print(os.path.join(join_1[0],join_1[1]))
#function that joins a directory name and a file name back together
print(os.path.dirname("C:\\python36\\news.txt"))
print(os.path.basename("C:\\python36\\news.txt"))
|
[
"ljg95924@naver.com"
] |
ljg95924@naver.com
|
921e4f26abedf4bc46920425a197ff9ff3695829
|
e7e53dfb57f1ad027c70c82a6d5bfc2bff7fe381
|
/vis_imagine_static_voxels/lib/modules/kld.py
|
03919ed0593994178677af5314e509af48396f92
|
[
"Apache-2.0"
] |
permissive
|
standardgalactic/EmbLang
|
555855dcfc858d856159997c5425bd5099a67761
|
169b0468ccda554896973bcc226afb3e762a70e7
|
refs/heads/master
| 2023-07-09T16:46:01.900556
| 2021-07-24T11:53:36
| 2021-07-24T11:53:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 291
|
py
|
import tensorflow as tf
import numpy as np
from keras import backend as K
class KLD(tf.keras.Model):
def __init__(self):
super(KLD, self).__init__()
@tf.function
def call(self, mu, var):
kld = 0.5 * K.sum(K.exp(var) + K.square(mu) - 1. - var)
return kld
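if __name__ == '__main__':
    # Usage sketch (assumes an eager TF2 setup where Keras uses the TensorFlow backend):
    # a standard-normal posterior (zero mean, zero log-variance) gives a KL term of 0.
    mu = tf.zeros((4, 8))
    log_var = tf.zeros((4, 8))
    print(float(KLD().call(mu, log_var)))  # 0.0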
|
[
"mihirp1998.mp@gmail.com"
] |
mihirp1998.mp@gmail.com
|
a7d6092aab9b8be864eedc389fddfad246976898
|
b84ff2535544ffe11b81332a99fd6b1c73a9a04b
|
/01/taokecheng/work2.py
|
f07d5f956bd897ab03e554265c2a2c9a039eafca
|
[] |
no_license
|
imsilence/actual_05_homework_mage
|
ea9170fbae181c524c606e87a86882735c6741a1
|
66c7b581c5f396083318d451c8e066b04569ba32
|
refs/heads/master
| 2021-05-11T09:24:17.095398
| 2018-01-19T04:05:46
| 2018-01-19T04:05:46
| 118,074,930
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 925
|
py
|
#!/bin/evn python
#encoding:utf-8
import random
random_num = random.randint(0,100)
sum = 0
print('游戏开始,有5次机会猜数字...')
guest_num = input('请输入你猜测的数字(0-100): ')
while True:
if len(guest_num.strip()) == 0 or not guest_num.strip().isdigit():
print('-'*40)
guest_num = input('错误! 只能输入数字,请重新输入,范围是(0-100):')
continue
else:
sum += 1
guest_num = int(guest_num)
if guest_num < random_num:
print('\t猜小了!你还有 ',5-sum,' 次机会!')
elif guest_num > random_num:
print('\t猜大了!你还有 ',5-sum,' 次机会!')
elif guest_num == random_num:
print('\t猜对了,程序结束')
break
if sum == 5:
print('-' * 40)
print('5 次机会已经用完,游戏结束!')
break
else:
guest_num = input('请输入你猜测的数字(0-100): ')
'''
Works as intended; keep it up
'''
|
[
"imsilence@outlook.com"
] |
imsilence@outlook.com
|
20c6867b8db3f9a1eb5100ddb0bec47224a0c8fb
|
285e244795836b9ab725383f34bac5f1d1fd08e7
|
/Competitive Coding/DCL2015A.py
|
32747d852a762a10fc53358621496e38c1f2a394
|
[] |
no_license
|
Swarajk7/Programs
|
2ef5d98d9d1fddefcde6147af7e9404f229f759f
|
9fb7224058c6d20963d3562a8bc16124dc2e7742
|
refs/heads/master
| 2020-05-16T08:41:50.251925
| 2017-05-19T11:29:41
| 2017-05-19T11:29:41
| 32,454,166
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 583
|
py
|
val=10005
primes=[True]*val
primes[0]=primes[1]=-1
for i in range(2,val):
if not primes[i]: continue
j=i+i
while j<val:
primes[j]=False
j+=i
def get(i):
if i>='a': return ord(i)-ord('a')
else: return ord(i)-ord('A')+26
t=int(input())
for i in range(t):
#print i,
a=raw_input()
hsh=[0]*52
for i in a:
hsh[get(i)]+=1
z=max(hsh)
ans=0
for ii in hsh:
#print ii,primes[ii],primes[z]
if primes[z]!=-1 and primes[ii]==primes[z]:
ans+=ii-(ii/2)
else: ans+=ii
print ans
|
[
"swarajk7@gmail.com"
] |
swarajk7@gmail.com
|
daefeef00873041839c6900ac98b2dfb959857c0
|
779af350dd09ac5fa685d1d94e9576240d5c4fdf
|
/env_var.py
|
4b424fa10df92cc96d7fe8986a70e19ef1abe350
|
[] |
no_license
|
RaenonX-DL/dragalia-site-back
|
543227245f37174eaff88adb46609cad7e34a00d
|
d15a4821c8249ec2ab80ca6f1ce91066e4f93b48
|
refs/heads/main
| 2023-04-30T04:02:18.447944
| 2021-05-11T21:52:04
| 2021-05-11T21:52:04
| 305,549,653
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 372
|
py
|
"""Convenient functions to extract information from the environment variables."""
import os
__all__ = ("is_testing",)
def is_testing() -> bool:
"""
Check if the environment variable ``TEST`` has been set to ``1`` to indicate it's testing.
:return: if the environment variables indicates it's testing
"""
return bool(int(os.environ.get("TEST", 0)))
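if __name__ == "__main__":
    # Usage sketch: the flag is read from the TEST environment variable.
    os.environ["TEST"] = "1"
    assert is_testing()
    os.environ["TEST"] = "0"
    assert not is_testing()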
|
[
"raenonx0710@gmail.com"
] |
raenonx0710@gmail.com
|
b0436ffaab15913c99d36cd1257f032d228d8b8a
|
d8c1f119d1349dd8ad2e48619a8c258967cd9a31
|
/PS_vsCode/1080. 행렬.py
|
1d3b82b7881ffb630ce862db8db6ce0f3e3d3600
|
[] |
no_license
|
Seonghyeony/DataStructure-Algorithm
|
c7c006ee705b68fc4d2d04dc6baaf0aeb80fc83e
|
4121289cafd0050bda408934fcb14d88052c956f
|
refs/heads/master
| 2023-04-12T16:30:17.039109
| 2021-05-08T10:31:05
| 2021-05-08T10:31:05
| 286,371,604
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 407
|
py
|
N, M = map(int, input().split())
def input_str():
return [list(map(int, list(input()))) for _ in range(N)]
A, B = input_str(), input_str()
def flip(x, y, A):
for i in range(3):
for j in range(3):
A[x+i][y+j] ^= 1
ans = 0
for i in range(N-2):
for j in range(M-2):
if A[i][j] != B[i][j]:
flip(i, j, A)
ans += 1
print(ans if A == B else -1)
|
[
"sunghyun7949@naver.com"
] |
sunghyun7949@naver.com
|
f3e2c539d1e61d68557b3c7e88d64a39aeae697a
|
b3b066a566618f49ae83c81e963543a9b956a00a
|
/Introduction to Data Visualization with Matplotlib/04_Sharing visualizations with others/02_Switching between styles.py
|
bf6b64d0240f4478453438edd343b540448f1297
|
[] |
no_license
|
ahmed-gharib89/DataCamp_Data_Scientist_with_Python_2020
|
666c4129c3f0b5d759b511529a365dfd36c12f1a
|
f3d20b788c8ef766e7c86c817e6c2ef7b69520b8
|
refs/heads/master
| 2022-12-22T21:09:13.955273
| 2020-09-30T01:16:05
| 2020-09-30T01:16:05
| 289,991,534
| 2
| 0
| null | 2020-08-24T17:15:43
| 2020-08-24T17:15:42
| null |
UTF-8
|
Python
| false
| false
| 1,231
|
py
|
"""Switching between styles
Selecting a style to use affects all of the visualizations that are created after this style is selected.
Here, you will practice plotting data in two different styles. The data you will use is the same weather data we used in the first lesson: you will have available to you the DataFrame seattle_weather and the DataFrame austin_weather, both with records of the average temperature in every month.
Instructions 1/2
50 XP
1
Select the 'ggplot' style, create a new Figure called fig, and a new Axes object called ax with plt.subplots."""
# Use the "ggplot" style and create new Figure/Axes
plt.style.use('ggplot')
fig, ax = plt.subplots()
ax.plot(seattle_weather["MONTH"], seattle_weather["MLY-TAVG-NORMAL"])
plt.show()
"""Select the 'Solarize_Light2' style, create a new Figure called fig, and a new Axes object called ax with plt.subplots."""
# Use the "Solarize_Light2" style and create new Figure/Axes
plt.style.use('Solarize_Light2')
fig, ax = plt.subplots()
ax.plot(austin_weather["MONTH"], austin_weather["MLY-TAVG-NORMAL"])
plt.show()
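# Note: a selected style stays active for every figure created afterwards; matplotlib's
# built-in "default" style can be re-selected to restore the standard appearance, e.g.
# plt.style.use('default')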
"""DEVELOPER"""
"""BasitAminBhatti"""
"""Github"""
"""https://github.com/basitaminbhatti"""
|
[
"Your-Email"
] |
Your-Email
|
6ac3c62e2a4226a5f65d4d9dfc0d061c2efcf72d
|
700bbd29624158c86402933b43f863e794e0ab7e
|
/register/migrations/0008_auto_20180218_2047.py
|
eb49eb56e7d02d34db8ffbabb2d1227a826e7dc1
|
[] |
no_license
|
gitanjali1077/volunteer
|
ab0391ab3319c88b5ecd40e6ff480ae15d30fdf4
|
bd9dbd056624b6ebde3c46c8640b0b5786f0665f
|
refs/heads/master
| 2021-04-27T08:16:22.889332
| 2018-02-23T17:29:38
| 2018-02-23T17:29:38
| 122,651,909
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 469
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2018-02-18 15:17
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('register', '0007_auto_20180218_1221'),
]
operations = [
migrations.AlterField(
model_name='managers',
name='username',
field=models.CharField(max_length=60, unique=True),
),
]
|
[
"gitanjali1077@gmail.com"
] |
gitanjali1077@gmail.com
|
183f84f9f7d40335c152e532c4984ba51ff94712
|
45fdc51cf264bbd50e59655440eefc91451c50ea
|
/data_compression/gzip_write.py
|
f3b6aff0f49da10c94b226d6b5f66cb97e318bb9
|
[] |
no_license
|
blindij/python3_stl
|
2163043f3a9113eac21a48a35685a4a01987e926
|
ea138e25f8b5bbf7d8f78e4b1b7e2ae413de4735
|
refs/heads/master
| 2021-12-24T20:37:54.055116
| 2021-09-29T13:37:38
| 2021-09-29T13:37:38
| 191,508,648
| 0
| 0
| null | 2019-08-27T15:45:53
| 2019-06-12T06:10:30
|
Python
|
UTF-8
|
Python
| false
| false
| 349
|
py
|
import gzip
import io
import os
outfilename = 'example.txt.gz'
with gzip.open(outfilename,'wb') as output:
with io.TextIOWrapper(output, encoding='utf-8') as enc:
enc.write('Contents of the example file go here.\n')
print(outfilename,'contains',os.stat(outfilename).st_size, 'bytes')
os.system('file -b --mime {}'.format(outfilename))
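# Reading the compressed file back (follow-up sketch using the same filename):
with gzip.open(outfilename, 'rt', encoding='utf-8') as infile:
    print(infile.read())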
|
[
"blindij@users.noreply.github.com"
] |
blindij@users.noreply.github.com
|
6f10dd392a12325ac3735eafb29f3c589fdb6589
|
b106753e5abe4a72f669b4c25f1770fa2a2b2eb8
|
/students/views/demo.py
|
b974a5fd30e6a79fb223444d10a2ca6429d50e4c
|
[] |
no_license
|
ofisser86/last_chance
|
1f614634b93ca2267c31679f052b0b32e9699738
|
a808cc7b632ea4545e8a5628fec02a5bb1bc6ac1
|
refs/heads/master
| 2021-03-27T20:28:03.970568
| 2017-11-18T16:31:10
| 2017-11-18T16:31:10
| 100,600,192
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 219
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.http import HttpResponse
from django.shortcuts import render
def demo(request):
return render(request, 'students/demo.html', {})
|
[
"ofisser86@gmail.com"
] |
ofisser86@gmail.com
|
f461f4dbc105464039c3e003fd8a2673ea8b1da6
|
9aabbb426a3382017e99f9199e23bd0dd0133734
|
/natlas-server/migrations/versions/997bbd9a505a_.py
|
77fe1fc4b5b945c73ed8c1d4e6a177f01f5ef31f
|
[
"Apache-2.0"
] |
permissive
|
thesubtlety/natlas
|
71702da863a8086ce4a7b21a91d143dfab96b869
|
c7e6afc44244c5e0bb6dcadb3d11a45bd445540d
|
refs/heads/master
| 2020-07-24T12:16:42.352028
| 2019-10-08T21:37:38
| 2019-10-08T21:37:38
| 207,923,055
| 0
| 0
|
Apache-2.0
| 2019-09-11T23:16:54
| 2019-09-11T23:16:53
| null |
UTF-8
|
Python
| false
| false
| 869
|
py
|
"""empty message
Revision ID: 997bbd9a505a
Revises: aeb6c660a13a
Create Date: 2018-07-17 13:21:47.150960
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '997bbd9a505a'
down_revision = 'aeb6c660a13a'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_index(op.f('ix_scope_item_blacklist'), 'scope_item', ['blacklist'], unique=False)
op.create_index(op.f('ix_scope_item_target'), 'scope_item', ['target'], unique=True)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_scope_item_target'), table_name='scope_item')
op.drop_index(op.f('ix_scope_item_blacklist'), table_name='scope_item')
# ### end Alembic commands ###
|
[
"0xdade@users.noreply.github.com"
] |
0xdade@users.noreply.github.com
|
849cac2c6ffc592edc45a4f17cc1627b144adc08
|
d21071464bef4f3fd51e554f280418d06975a77e
|
/leetcode/187 Repeated DNA Sequence.py
|
2b569f7f04327396e11419362f64688b1bd117ae
|
[] |
no_license
|
DeshErBojhaa/sports_programming
|
ec106dcc24e96231d447cdcac494d76a94868b2d
|
96e086d4ee6169c0f83fff3819f38f32b8f17c98
|
refs/heads/master
| 2021-06-13T19:43:40.782021
| 2021-03-27T14:21:49
| 2021-03-27T14:21:49
| 164,201,394
| 1
| 0
| null | 2019-08-27T22:21:26
| 2019-01-05T09:39:41
|
C++
|
UTF-8
|
Python
| false
| false
| 814
|
py
|
from collections import defaultdict
from typing import List
class Solution:
def findRepeatedDnaSequences(self, s: str) -> List[str]:
if len(s) < 11:
return []
ss = s
s = [ord(c)-64 for c in s]
ans = set()
alphabates = 4
mp = 4 ** 9
_hash = 0
for i in range(10):
_hash = (alphabates * _hash + s[i])
all_hash = defaultdict(set)
all_hash[_hash].add(ss[:10])
for i in range(10, len(s)): # Go from 11th char to the last char
_hash = (alphabates * (_hash - s[i-10] * mp) + s[i] )
cur_str = ss[i-9:i+1]
if _hash in all_hash:
if cur_str in all_hash[_hash]:
ans.add(cur_str)
all_hash[_hash].add(cur_str)
return list(ans)
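if __name__ == '__main__':
    # Check against the standard LeetCode 187 example.
    found = Solution().findRepeatedDnaSequences("AAAAACCCCCAAAAACCCCCCAAAAAGGGTTT")
    assert sorted(found) == ["AAAAACCCCC", "CCCCCAAAAA"]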
|
[
"noreply@github.com"
] |
DeshErBojhaa.noreply@github.com
|
7ccd59634356c252bf6755d8585b8d418f3c4fe8
|
4eeb40dcc265caf4a2b84bc90a28d481930d6a8a
|
/templatelistviewsproject/sampleproj/settings.py
|
844c3cb74d437a2b1c1d20edbd32f7641cccf8be
|
[] |
no_license
|
mprasu/Sample-Projects
|
eb7fc46e81b09d7c97c238047e3c93b6fff3fb8d
|
7363baf630900ab2babb4af2afe77911d8a548b2
|
refs/heads/master
| 2020-04-16T06:43:16.345750
| 2019-01-12T07:07:34
| 2019-01-12T07:07:34
| 165,358,055
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,245
|
py
|
import pymysql
pymysql.install_as_MySQLdb()
import MySQLdb
"""
Django settings for sampleproj project.
Generated by 'django-admin startproject' using Django 2.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '&wb@k6@sq#n^$*7ey2u@@&bz2!m^v4vzcais)66u1dve9(@pqb'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'myapp',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'sampleproj.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR,'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'sampleproj.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'hoteldb1',
'HOST': 'localhost',
'USER': 'root',
'PASSWORD': 'root',
'PORT': 3306,
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
|
[
"muppuriprasanna5@gmail.com"
] |
muppuriprasanna5@gmail.com
|
eabefdbf9bf558615032877bb94b86116d7e60e4
|
b39d9ef9175077ac6f03b66d97b073d85b6bc4d0
|
/Savene_WC500049102.py
|
1fca99424e88a51f6a725a26ec3e053abee89057
|
[] |
no_license
|
urudaro/data-ue
|
2d840fdce8ba7e759b5551cb3ee277d046464fe0
|
176c57533b66754ee05a96a7429c3e610188e4aa
|
refs/heads/master
| 2021-01-22T12:02:16.931087
| 2013-07-16T14:05:41
| 2013-07-16T14:05:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,020
|
py
|
{'_data': [['Very common',
[['Infections', u'Postoperativ infektion'],
['GI', u'Illam\xe5ende'],
['General',
u'Sm\xe4rta vid injektionsst\xe4llet Pyrexi Flebit vid injektionsst\xe4llet Erytem vid injektionsst\xe4llet Utmattning Induration vid injektionsst\xe4llet']]],
['Common',
[['Infections', u'Infektion Neutropen infektion'],
['Metabolism', u'Minskad aptit'],
['Nervous system', u'Yrsel Sensorisk f\xf6rlust'],
['Vascular', u'Flebit Ytlig tromboflebit Ven\xf6s trombos i extremiteter'],
['Respiratory', u'Dyspn\xe9 Pneumoni'],
['GI', u'Kr\xe4kningar Diarr\xe9 Stomatit Muntorrhet'],
['Skin', u'Alopeci Pruritus'],
['Musculoskeletal', u''],
['General', u'Svullnad vid injektionsst\xe4llet Perifert \xf6dem S\xf6mnighet'],
['Investigations', u'Viktminskning']]]],
'_pages': [5, 7],
u'_rank': 12,
u'_type': u'TSFU'}
|
[
"urudaro@gmail.com"
] |
urudaro@gmail.com
|
c8a18cb50e5375ab0e677a1fd6fe0d03669e9644
|
5c2e4266abf6d2be9102d5309bf94071a1eae1db
|
/cp 习题课练习/进阶练习/advance 15.3.py
|
7609871116204497122b379e87dc17030c11979a
|
[] |
no_license
|
13834319675/python
|
8176d5da47136b9b3ec290eaa0b699c6b1e7a8ab
|
3e6f04670f6f01006f827794865488dd40bca380
|
refs/heads/master
| 2021-07-11T18:29:12.894401
| 2021-07-05T08:29:27
| 2021-07-05T08:29:27
| 171,112,613
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 545
|
py
|
"""
假设,我们用一组tuple来表示学生的名字和成绩,
L = [("Bob",75),("Adam",92),("Bart",66),("List",88)]
用sorted()对上述列表按照名字排序
"""
l = [("Bob",75),("Adam",92),("Bart",66),("List",88)]
sorted1 = sorted(l,key=lambda x:x[1])
print(sorted1)
def By_name(n):
    #key function: sort by name, case-insensitively
    return n[0].lower()
l2 = sorted(l,key=By_name)
print(l2)
def by_score(t):
    #key function: sort by score
    return t[1]
L2 = sorted(l,key=by_score)
print(L2)
l3 = sorted(l,key=lambda x:x[1],reverse=True)
print(l3)
|
[
"1134876981@qq.com"
] |
1134876981@qq.com
|
74e05cab947be624208275e976fd92220ecc9323
|
f9d564f1aa83eca45872dab7fbaa26dd48210d08
|
/huaweicloud-sdk-ief/huaweicloudsdkief/v1/model/probe_detail.py
|
164248ef166385abad271c9e4dec0ea7902be286
|
[
"Apache-2.0"
] |
permissive
|
huaweicloud/huaweicloud-sdk-python-v3
|
cde6d849ce5b1de05ac5ebfd6153f27803837d84
|
f69344c1dadb79067746ddf9bfde4bddc18d5ecf
|
refs/heads/master
| 2023-09-01T19:29:43.013318
| 2023-08-31T08:28:59
| 2023-08-31T08:28:59
| 262,207,814
| 103
| 44
|
NOASSERTION
| 2023-06-22T14:50:48
| 2020-05-08T02:28:43
|
Python
|
UTF-8
|
Python
| false
| false
| 6,416
|
py
|
# coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ProbeDetail:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'exec_command': 'str',
'http_get': 'HttpGetDetail',
'initial_delay_seconds': 'int',
'timeout_seconds': 'int'
}
attribute_map = {
'exec_command': 'exec_command',
'http_get': 'http_get',
'initial_delay_seconds': 'initial_delay_seconds',
'timeout_seconds': 'timeout_seconds'
}
def __init__(self, exec_command=None, http_get=None, initial_delay_seconds=None, timeout_seconds=None):
"""ProbeDetail
The model defined in huaweicloud sdk
        :param exec_command: Command-line command that runs the probe; a string of length 1 to 10240
        :type exec_command: str
        :param http_get:
        :type http_get: :class:`huaweicloudsdkief.v1.HttpGetDetail`
        :param initial_delay_seconds: Delay after the workload starts before probing begins; an integer greater than 0 and at most 3600, default 10
        :type initial_delay_seconds: int
        :param timeout_seconds: Probe timeout in seconds; an integer greater than 0 and at most 3600, default 1
        :type timeout_seconds: int
"""
self._exec_command = None
self._http_get = None
self._initial_delay_seconds = None
self._timeout_seconds = None
self.discriminator = None
if exec_command is not None:
self.exec_command = exec_command
if http_get is not None:
self.http_get = http_get
if initial_delay_seconds is not None:
self.initial_delay_seconds = initial_delay_seconds
if timeout_seconds is not None:
self.timeout_seconds = timeout_seconds
@property
def exec_command(self):
"""Gets the exec_command of this ProbeDetail.
        Command-line command that runs the probe; a string of length 1 to 10240
:return: The exec_command of this ProbeDetail.
:rtype: str
"""
return self._exec_command
@exec_command.setter
def exec_command(self, exec_command):
"""Sets the exec_command of this ProbeDetail.
        Command-line command that runs the probe; a string of length 1 to 10240
:param exec_command: The exec_command of this ProbeDetail.
:type exec_command: str
"""
self._exec_command = exec_command
@property
def http_get(self):
"""Gets the http_get of this ProbeDetail.
:return: The http_get of this ProbeDetail.
:rtype: :class:`huaweicloudsdkief.v1.HttpGetDetail`
"""
return self._http_get
@http_get.setter
def http_get(self, http_get):
"""Sets the http_get of this ProbeDetail.
:param http_get: The http_get of this ProbeDetail.
:type http_get: :class:`huaweicloudsdkief.v1.HttpGetDetail`
"""
self._http_get = http_get
@property
def initial_delay_seconds(self):
"""Gets the initial_delay_seconds of this ProbeDetail.
        Delay after the workload starts before probing begins; an integer greater than 0 and at most 3600, default 10
:return: The initial_delay_seconds of this ProbeDetail.
:rtype: int
"""
return self._initial_delay_seconds
@initial_delay_seconds.setter
def initial_delay_seconds(self, initial_delay_seconds):
"""Sets the initial_delay_seconds of this ProbeDetail.
        Delay after the workload starts before probing begins; an integer greater than 0 and at most 3600, default 10
:param initial_delay_seconds: The initial_delay_seconds of this ProbeDetail.
:type initial_delay_seconds: int
"""
self._initial_delay_seconds = initial_delay_seconds
@property
def timeout_seconds(self):
"""Gets the timeout_seconds of this ProbeDetail.
        Probe timeout in seconds; an integer greater than 0 and at most 3600, default 1
:return: The timeout_seconds of this ProbeDetail.
:rtype: int
"""
return self._timeout_seconds
@timeout_seconds.setter
def timeout_seconds(self, timeout_seconds):
"""Sets the timeout_seconds of this ProbeDetail.
        Probe timeout in seconds; an integer greater than 0 and at most 3600, default 1
:param timeout_seconds: The timeout_seconds of this ProbeDetail.
:type timeout_seconds: int
"""
self._timeout_seconds = timeout_seconds
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ProbeDetail):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
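A minimal usage sketch for this model follows; the import path from huaweicloudsdkief.v1 is an assumption based on the type hints above, and the probe values are illustrative:
from huaweicloudsdkief.v1 import ProbeDetail  # assumed export path

# Build a probe that runs a command 10s after the workload starts, with a 1s timeout.
probe = ProbeDetail(
    exec_command='cat /tmp/healthy',
    initial_delay_seconds=10,
    timeout_seconds=1,
)
print(probe.to_dict())  # -> {'exec_command': 'cat /tmp/healthy', 'http_get': None, 'initial_delay_seconds': 10, 'timeout_seconds': 1}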
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
67a3294885373c7c6f4cffd3913b003c7454a49c
|
9e9ce86ef979a179a5b99a349452765f7f6e5f8d
|
/src/sentry_opsgenie/plugin.py
|
b2d975efc69b9dc11980741702c811bf3df39148
|
[
"Apache-2.0"
] |
permissive
|
zsjohny/sentry-opsgenie
|
49b273544544ee852396b984793f9c5319e849b4
|
bc74d82a971da63d8faec4f03949d967851d7e7b
|
refs/heads/master
| 2021-01-15T21:02:44.629556
| 2015-05-11T21:45:13
| 2015-05-11T21:45:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,474
|
py
|
"""
sentry_opsgenie.plugin
~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2015 by Sentry Team, see AUTHORS for more details.
:license: Apache 2.0, see LICENSE for more details.
"""
from __future__ import absolute_import
import logging
import sentry_opsgenie
from django import forms
from django.utils.html import escape
from sentry import http
from sentry.plugins.bases import notify
from sentry.utils import json
class OpsGenieOptionsForm(notify.NotificationConfigurationForm):
api_key = forms.CharField(
max_length=255,
help_text='OpsGenie API key used for authenticating API requests',
required=True,
)
recipients = forms.CharField(
max_length=255,
        help_text='The user names of individual users or groups (comma separated)',
required=False,
)
alert_url = forms.CharField(
max_length=255,
label='OpsGenie Alert URL',
widget=forms.TextInput(attrs={'class': 'span6', 'placeholder': 'e.g. https://api.opsgenie.com/v1/json/alert'}),
help_text='It must be visible to the Sentry server',
required=True,
)
class OpsGeniePlugin(notify.NotificationPlugin):
author = 'Sentry Team'
author_url = 'https://github.com/getsentry'
resource_links = (
('Bug Tracker', 'https://github.com/getsentry/sentry-opsgenie/issues'),
('Source', 'https://github.com/getsentry/sentry-opsgenie'),
)
title = 'OpsGenie'
slug = 'opsgenie'
description = 'Create OpsGenie alerts out of notifications.'
conf_key = 'opsgenie'
version = sentry_opsgenie.VERSION
project_conf_form = OpsGenieOptionsForm
logger = logging.getLogger('sentry.plugins.opsgenie')
def is_configured(self, project):
return all((
self.get_option(k, project)
for k in ('api_key', 'alert_url')
))
def get_form_initial(self, project=None):
return {
'alert_url': 'https://api.opsgenie.com/v1/json/alert',
}
# TODO(dcramer): this is duplicated from sentry-webhooks
def get_group_data(self, group, event):
data = {
'id': str(group.id),
'checksum': group.checksum,
'project': group.project.slug,
'project_name': group.project.name,
'logger': group.logger,
'level': group.get_level_display(),
'culprit': group.culprit,
'message': event.message,
'url': group.get_absolute_url(),
}
data['event'] = dict(event.data or {})
data['event']['tags'] = event.get_tags()
return data
def notify_users(self, group, event, fail_silently=False):
if not self.is_configured(group.project):
return
api_key = self.get_option('api_key', group.project)
recipients = self.get_option('recipients', group.project)
alert_url = self.get_option('alert_url', group.project)
message = getattr(group, 'message_short', group.message).encode('utf-8')
payload = {
'apiKey': api_key,
'message': message,
'source': 'Sentry',
'details': self.get_group_data(group, event)
}
if recipients:
payload['recipients'] = recipients
req = http.safe_urlopen(alert_url, json=payload)
resp = req.json()
if resp.get('status') != 'successful':
raise Exception('Unsuccessful response from OpsGenie: %s' % resp)
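For reference, a sketch of the alert payload notify_users posts to the configured alert_url; the field names come from the code above, while the concrete values are invented:
payload = {
    'apiKey': 'YOUR-OPSGENIE-API-KEY',                    # project option 'api_key'
    'message': "ValueError: invalid literal for int()",   # group.message_short or group.message
    'source': 'Sentry',
    'details': {'project': 'backend', 'level': 'error'},  # trimmed get_group_data() output
    'recipients': 'ops-team',                             # only included when configured
}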
|
[
"dcramer@gmail.com"
] |
dcramer@gmail.com
|
a7da8a97cddff33a27a682c204eedaf50d183a61
|
778633cb4adc0172015ad53452ff66d757a07fd8
|
/src/collective/documentgenerator/content/merge_templates.py
|
c494702ffecfae0ab655fedcf406700fe3d37c4c
|
[] |
no_license
|
malikrohit16/collective.documentgenerator
|
0d2db10a6047ba208f2dadba9759eb50b22b76e6
|
1836b94f8eae17528d93f6cdee5bc245377ede96
|
refs/heads/master
| 2021-04-27T17:07:23.812486
| 2018-02-21T08:59:40
| 2018-02-21T08:59:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 350
|
py
|
# -*- coding: utf-8 -*-
from collective.documentgenerator.interfaces import ITemplatesToMerge
from zope.interface import implements
class TemplatesToMergeForPODTemplate(object):
"""
"""
implements(ITemplatesToMerge)
def __init__(self, pod_template):
self.pod_template = pod_template
def get(self):
return {}
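A short sketch of how the adapter is meant to be used; the pod_template stand-in below is a placeholder, not a real PODTemplate object:
pod_template = object()  # placeholder for a real PODTemplate content object
adapter = TemplatesToMergeForPODTemplate(pod_template)
print(adapter.get())  # {} by default; concrete adapters return the templates to merge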
|
[
"delcourt.simon@gmail.com"
] |
delcourt.simon@gmail.com
|
9b5ee6608d5f2fa48e239a47432b99fa77288040
|
d6f31bf476f4ea4a810cf67b668246303ca0d8e3
|
/python/data/price.py
|
10b73fccba1a4eab0e626b69ec0bbe5627522a0b
|
[
"MIT"
] |
permissive
|
sslab-gatech/ACon2
|
b8201610a62ac71fefd92f876877dd169db783a6
|
17b5967b90bf43dae24ae52080b8df2fcbf7be49
|
refs/heads/main
| 2023-08-17T06:32:10.371013
| 2023-08-06T13:43:43
| 2023-08-06T13:43:43
| 607,729,188
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,781
|
py
|
import os, sys
import numpy as np
import warnings
import pickle
import glob
class NoObservation(Exception):
pass
class SinglePriceDataset:
def __init__(self, path):
file_name = glob.glob(path + '*.pk')
if len(file_name) == 0:
path_split = path.split('/')
pair_name = path_split[-2]
pair_name_split = pair_name.split('_')
pair_name = '_'.join([pair_name_split[0], pair_name_split[2], pair_name_split[1]])
path_split[-2] = pair_name
path = '/'.join(path_split)
file_name = glob.glob(path + '*.pk')
assert(len(file_name) == 1)
self.inverse_price = True
else:
assert(len(file_name) == 1)
self.inverse_price = False
file_name = file_name[0]
self.data = pickle.load(open(file_name, 'rb'))
# check if data is sorted
timestamps = [d['time'] for d in self.data]
for t1, t2 in zip(timestamps[:-1], timestamps[1:]):
assert t1 <= t2, f'data is not sorted: {t1} > {t2}'
# time type conversion
for i in range(len(self.data)):
self.data[i]['time'] = self.data[i]['time'].astype('datetime64[s]')
self.data[i]['price'] = float(self.data[i]['price']) if type(self.data[i]['price']) is not float else self.data[i]['price']
self.reset()
def __getitem__(self, index):
time, price = self.data[index]['time'], self.data[index]['price']
if self.inverse_price:
price = 1 / price
return {'timestamp': time.astype('int'), 'price': price}
def __len__(self):
return len(self.data)
def reset(self):
self.index = 0
def read(self, timestamp):
if self.index + 1 == len(self):
raise StopIteration
index = None
for i, d in enumerate(self.data[self.index:]):
if d['time'].astype('int') > timestamp:
break
else:
index = i + self.index
if index is None:
raise NoObservation
else:
self.index = index
return self[index]['price']
class PriceDataset:
def __init__(
self, data_path,
):
self.seq = {}
for p in data_path:
seq = SinglePriceDataset(p)
print(f'[price data, data_path = {p}] sequence length = {len(seq)}')
self.seq[p] = seq
def reset(self):
for k in self.seq.keys():
self.seq[k].reset()
def read(self, time):
out = {}
for k in self.seq.keys():
try:
out[k] = self.seq[k].read(time.astype('int'))
except NoObservation:
out[k] = None
return out
class RandomPriceDataset:
def __init__(self, path, sig=5, seed=None):
assert(len(path) == 1)
self.path = path[0] #TODO: dummy
self.sig = sig
self.price = 0.0
np.random.seed(seed)
def _read(self):
self.price = np.random.normal(loc=self.price, scale=self.sig)
#self.price += 2.0
return self.price
def reset(self):
self.price = 0.0
def read(self, time):
return {self.path: self._read()}
class ZeroPriceDataset:
def __init__(self, path, sig=5, seed=None):
self.path = path[0] #TODO: dummy
self.reset()
def _read(self):
return self.price
def reset(self):
self.price = 0.0
def read(self, time):
return {self.path: self._read()}
if __name__ == '__main__':
    dsld = PriceDataset(['price_ETH_USD/coinbase'])
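A minimal usage sketch for the dataset classes above; the directory layout (one pickled list of {'time', 'price'} records per pair) is assumed rather than shown here:
import numpy as np

ds = PriceDataset(['data/price_ETH_USD/coinbase/'])  # each path must contain exactly one *.pk file
ds.reset()
now = np.datetime64('2021-01-01T00:00:00')
print(ds.read(now))  # {'data/price_ETH_USD/coinbase/': <last price observed at or before `now`, or None>}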
|
[
"ggdons@gmail.com"
] |
ggdons@gmail.com
|
aea7154f5e4850a4eadaf03473dd5273450c16dc
|
4e353bf7035eec30e5ad861e119b03c5cafc762d
|
/QtGui/QSplashScreen.py
|
b4f657ddc37548ddd2e716cf42fa2f6ccf8ad729
|
[] |
no_license
|
daym/PyQt4-Stubs
|
fb79f54d5c9a7fdb42e5f2506d11aa1181f3b7d5
|
57d880c0d453641e31e1e846be4087865fe793a9
|
refs/heads/master
| 2022-02-11T16:47:31.128023
| 2017-10-06T15:32:21
| 2017-10-06T15:32:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,226
|
py
|
# encoding: utf-8
# module PyQt4.QtGui
# from C:\Python27\lib\site-packages\PyQt4\QtGui.pyd
# by generator 1.145
# no doc
# imports
import PyQt4.QtCore as __PyQt4_QtCore
from QWidget import QWidget
class QSplashScreen(QWidget):
"""
QSplashScreen(QPixmap pixmap=QPixmap(), Qt.WindowFlags flags=0)
QSplashScreen(QWidget, QPixmap pixmap=QPixmap(), Qt.WindowFlags flags=0)
"""
def actionEvent(self, *args, **kwargs): # real signature unknown
pass
def changeEvent(self, *args, **kwargs): # real signature unknown
pass
def childEvent(self, *args, **kwargs): # real signature unknown
pass
def clearMessage(self): # real signature unknown; restored from __doc__
""" QSplashScreen.clearMessage() """
pass
def closeEvent(self, *args, **kwargs): # real signature unknown
pass
def connectNotify(self, *args, **kwargs): # real signature unknown
pass
def contextMenuEvent(self, *args, **kwargs): # real signature unknown
pass
def create(self, *args, **kwargs): # real signature unknown
pass
def customEvent(self, *args, **kwargs): # real signature unknown
pass
def destroy(self, *args, **kwargs): # real signature unknown
pass
def disconnectNotify(self, *args, **kwargs): # real signature unknown
pass
def dragEnterEvent(self, *args, **kwargs): # real signature unknown
pass
def dragLeaveEvent(self, *args, **kwargs): # real signature unknown
pass
def dragMoveEvent(self, *args, **kwargs): # real signature unknown
pass
def drawContents(self, QPainter): # real signature unknown; restored from __doc__
""" QSplashScreen.drawContents(QPainter) """
pass
def dropEvent(self, *args, **kwargs): # real signature unknown
pass
def enabledChange(self, *args, **kwargs): # real signature unknown
pass
def enterEvent(self, *args, **kwargs): # real signature unknown
pass
def event(self, QEvent): # real signature unknown; restored from __doc__
""" QSplashScreen.event(QEvent) -> bool """
return False
def finish(self, QWidget): # real signature unknown; restored from __doc__
""" QSplashScreen.finish(QWidget) """
pass
def focusInEvent(self, *args, **kwargs): # real signature unknown
pass
def focusNextChild(self, *args, **kwargs): # real signature unknown
pass
def focusNextPrevChild(self, *args, **kwargs): # real signature unknown
pass
def focusOutEvent(self, *args, **kwargs): # real signature unknown
pass
def focusPreviousChild(self, *args, **kwargs): # real signature unknown
pass
def fontChange(self, *args, **kwargs): # real signature unknown
pass
def hideEvent(self, *args, **kwargs): # real signature unknown
pass
def inputMethodEvent(self, *args, **kwargs): # real signature unknown
pass
def keyPressEvent(self, *args, **kwargs): # real signature unknown
pass
def keyReleaseEvent(self, *args, **kwargs): # real signature unknown
pass
def languageChange(self, *args, **kwargs): # real signature unknown
pass
def leaveEvent(self, *args, **kwargs): # real signature unknown
pass
def messageChanged(self, *args, **kwargs): # real signature unknown
""" QSplashScreen.messageChanged[QString] [signal] """
pass
def metric(self, *args, **kwargs): # real signature unknown
pass
def mouseDoubleClickEvent(self, *args, **kwargs): # real signature unknown
pass
def mouseMoveEvent(self, *args, **kwargs): # real signature unknown
pass
def mousePressEvent(self, QMouseEvent): # real signature unknown; restored from __doc__
""" QSplashScreen.mousePressEvent(QMouseEvent) """
pass
def mouseReleaseEvent(self, *args, **kwargs): # real signature unknown
pass
def moveEvent(self, *args, **kwargs): # real signature unknown
pass
def paintEvent(self, *args, **kwargs): # real signature unknown
pass
def paletteChange(self, *args, **kwargs): # real signature unknown
pass
def pixmap(self): # real signature unknown; restored from __doc__
""" QSplashScreen.pixmap() -> QPixmap """
return QPixmap
def receivers(self, *args, **kwargs): # real signature unknown
pass
def repaint(self): # real signature unknown; restored from __doc__
""" QSplashScreen.repaint() """
pass
def resetInputContext(self, *args, **kwargs): # real signature unknown
pass
def resizeEvent(self, *args, **kwargs): # real signature unknown
pass
def sender(self, *args, **kwargs): # real signature unknown
pass
def senderSignalIndex(self, *args, **kwargs): # real signature unknown
pass
def setPixmap(self, QPixmap): # real signature unknown; restored from __doc__
""" QSplashScreen.setPixmap(QPixmap) """
pass
def showEvent(self, *args, **kwargs): # real signature unknown
pass
def showMessage(self, QString, int_alignment=None, QColor_color=None): # real signature unknown; restored from __doc__
""" QSplashScreen.showMessage(QString, int alignment=Qt.AlignLeft, QColor color=Qt.black) """
pass
def tabletEvent(self, *args, **kwargs): # real signature unknown
pass
def timerEvent(self, *args, **kwargs): # real signature unknown
pass
def updateMicroFocus(self, *args, **kwargs): # real signature unknown
pass
def wheelEvent(self, *args, **kwargs): # real signature unknown
pass
def windowActivationChange(self, *args, **kwargs): # real signature unknown
pass
def winEvent(self, *args, **kwargs): # real signature unknown
pass
def __init__(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads
pass
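A typical usage sketch for the class this stub describes; it needs a real PyQt4 installation and an image file, both of which are assumptions here:
from PyQt4 import QtCore, QtGui

app = QtGui.QApplication([])
splash = QtGui.QSplashScreen(QtGui.QPixmap('logo.png'))
splash.show()
splash.showMessage('Loading...', QtCore.Qt.AlignBottom | QtCore.Qt.AlignHCenter, QtCore.QColor('white'))
app.processEvents()            # keep the splash painting while start-up work runs
window = QtGui.QMainWindow()
window.show()
splash.finish(window)          # hide the splash once the main window is shown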
|
[
"thekewlstore@gmail.com"
] |
thekewlstore@gmail.com
|
371ae445aebcea943259ea3701f151c082bf13cb
|
cfb2d39ab0ed8603750711698108328ba65152bf
|
/Products/Bitakora/XMLImporter.py
|
4201f2a26782b8bdde10c5157b6078771fbf02a8
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
codesyntax/Products.Bitakora
|
d4fc4ce48d7244d69d986de1c9e70ff7dfbd0ffb
|
555dee0070263d4296b409ae587cbf3693fc8adc
|
refs/heads/master
| 2021-01-10T21:24:17.062053
| 2016-10-13T09:38:32
| 2016-10-13T09:38:32
| 3,788,430
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,125
|
py
|
# -*- coding: utf-8 -*-
# (c) Copyright 2005, CodeSyntax <http://www.codesyntax.com>
# Authors: Mikel Larreategi <mlarreategi@codesyntax.com>
# See also LICENSE.txt
from xml.sax.handler import ContentHandler
from xml.sax import make_parser
class XMLImporter(ContentHandler):
def __init__(self):
self.posts = []
self.comments = []
self.reset()
def reset(self):
self.inpost = 0
self.incomment = 0
self.incomments = 0
self.intitle = 0
self.inauthor = 0
self.infmt = 0
self.inid = 0
self.inbody = 0
self.intags = 0
self.indate = 0
self.title = ''
self.author = ''
self.fmt = ''
self.body = ''
self.tags = ''
self.date = ''
self.id = ''
self.resetComment()
def startElement(self, tag, attrs):
if self.inpost and not self.incomment:
if tag == 'author':
self.inauthor = 1
elif tag == 'body':
self.inbody = 1
elif tag == 'date':
self.indate = 1
elif tag == 'id':
self.inid = 1
elif self.incomment:
if tag == 'author':
self.incommauthor = 1
elif tag == 'body':
self.incommbody = 1
elif tag == 'date':
self.incommdate = 1
if tag == 'post':
self.inpost = 1
elif tag == 'url':
self.inurl = 1
elif tag == 'email':
self.inemail = 1
elif tag == 'tags':
self.intags = 1
elif tag == 'fmt':
self.infmt = 1
elif tag == 'title':
self.intitle = 1
elif tag == 'comment':
self.incomment = 1
elif tag == 'comments':
self.incomments = 1
def endElement(self, tag):
if self.inpost and not self.incomment:
if tag == 'author':
self.inauthor = 0
elif tag == 'body':
self.inbody = 0
elif tag == 'date':
self.indate = 0
elif tag == 'id':
self.inid = 0
elif self.incomment:
if tag == 'author':
self.incommauthor = 0
elif tag == 'body':
self.incommbody = 0
elif tag == 'date':
self.incommdate = 0
if tag == 'post':
self.inpost = 0
self.createPost()
self.reset()
elif tag == 'url':
self.inurl = 0
elif tag == 'email':
self.inemail = 0
elif tag == 'tags':
self.intags = 0
elif tag == 'fmt':
self.infmt = 0
elif tag == 'title':
self.intitle = 0
elif tag == 'comment':
self.incomment = 0
self.createComment()
self.resetComment()
elif tag == 'comments':
self.incomments = 0
def characters(self, chars):
if self.intitle:
self.title += chars
elif self.inauthor:
self.author += chars
elif self.infmt:
self.fmt += chars
elif self.inbody:
self.body += chars
elif self.intags:
self.tags += chars
elif self.indate:
self.date += chars
elif self.incommauthor:
self.commauthor += chars
elif self.incommdate:
self.commdate += chars
elif self.incommbody:
self.commbody += chars
elif self.inurl:
self.url += chars
elif self.inemail:
self.email += chars
elif self.inid:
self.id += chars
def createPost(self):
post = {}
post['id'] = self.id
post['title'] = self.title
post['author'] = self.author
post['fmt'] = self.fmt
post['body'] = self.body
post['tags'] = self.tags.split(';')
post['date'] = self.date
post['comments'] = self.comments[:]
self.comments = []
self.posts.append(post)
def createComment(self):
comment = {}
comment['author'] = self.commauthor
comment['date'] = self.commdate
comment['body'] = self.commbody
comment['url'] = self.url
comment['email'] = self.email
self.comments.append(comment)
def resetComment(self):
self.commauthor = ''
self.commdate = ''
self.commbody = ''
self.url = ''
self.email = ''
self.incommauthor = 0
self.incommdate = 0
self.incommbody = 0
self.inurl = 0
self.inemail = 0
def returnData(self):
return self.posts
def importXML(xml):
from cStringIO import StringIO
xmlimporter = XMLImporter()
parser = make_parser()
parser.setContentHandler(xmlimporter)
parser.parse(StringIO(xml))
return xmlimporter.returnData()
if __name__ == '__main__':
fp = open('/tmp/tolon.xml', 'r')
print importXML(fp.read())
fp.close()
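A small usage sketch for importXML with an inline document; the element names mirror the tags handled above and the values are invented:
sample = """<blog>
  <post>
    <id>1</id>
    <title>Hello</title>
    <author>admin</author>
    <fmt>html</fmt>
    <date>2005-01-01</date>
    <tags>plone;zope</tags>
    <body>First post</body>
    <comments>
      <comment>
        <author>visitor</author>
        <date>2005-01-02</date>
        <url>http://example.org</url>
        <email>visitor@example.org</email>
        <body>Nice blog</body>
      </comment>
    </comments>
  </post>
</blog>"""
posts = importXML(sample)
print(posts[0]['title'])            # Hello
print(len(posts[0]['comments']))    # 1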
|
[
"mlarreategi@codesyntax.com"
] |
mlarreategi@codesyntax.com
|
bdb3737c57abe52174c250ea7e721281ef182918
|
6444622ad4a150993955a0c8fe260bae1af7f8ce
|
/djangoenv/lib/python2.7/site-packages/django/contrib/contenttypes/views.py
|
fd09b8d32057cccdff25bb88a3b5dcf0e0a73b09
|
[] |
no_license
|
jeremyrich/Lesson_RestAPI_jeremy
|
ca965ef017c53f919c0bf97a4a23841818e246f9
|
a44263e45b1cc1ba812059f6984c0f5be25cd234
|
refs/heads/master
| 2020-04-25T23:13:47.237188
| 2019-03-22T09:26:58
| 2019-03-22T09:26:58
| 173,138,073
| 0
| 0
| null | 2019-03-22T09:26:59
| 2019-02-28T15:34:19
|
Python
|
UTF-8
|
Python
| false
| false
| 3,699
|
py
|
from __future__ import unicode_literals
from django import http
from django.apps import apps
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.requests import RequestSite
from django.core.exceptions import ObjectDoesNotExist
from django.utils.translation import ugettext as _
def shortcut(request, content_type_id, object_id):
"""
Redirect to an object's page based on a content-type ID and an object ID.
"""
# Look up the object, making sure it's got a get_absolute_url() function.
try:
content_type = ContentType.objects.get(pk=content_type_id)
if not content_type.model_class():
raise http.Http404(
_("Content type %(ct_id)s object has no associated model")
% {"ct_id": content_type_id}
)
obj = content_type.get_object_for_this_type(pk=object_id)
except (ObjectDoesNotExist, ValueError):
raise http.Http404(
_("Content type %(ct_id)s object %(obj_id)s doesn't exist")
% {"ct_id": content_type_id, "obj_id": object_id}
)
try:
get_absolute_url = obj.get_absolute_url
except AttributeError:
raise http.Http404(
_("%(ct_name)s objects don't have a get_absolute_url() method")
% {"ct_name": content_type.name}
)
absurl = get_absolute_url()
# Try to figure out the object's domain, so we can do a cross-site redirect
# if necessary.
# If the object actually defines a domain, we're done.
if absurl.startswith(("http://", "https://", "//")):
return http.HttpResponseRedirect(absurl)
# Otherwise, we need to introspect the object's relationships for a
# relation to the Site object
object_domain = None
if apps.is_installed("django.contrib.sites"):
Site = apps.get_model("sites.Site")
opts = obj._meta
        # First, look for a many-to-many relationship to Site.
for field in opts.many_to_many:
if field.remote_field.model is Site:
try:
# Caveat: In the case of multiple related Sites, this just
# selects the *first* one, which is arbitrary.
object_domain = getattr(obj, field.name).all()[0].domain
except IndexError:
pass
if object_domain is not None:
break
# Next, look for a many-to-one relationship to Site.
if object_domain is None:
for field in obj._meta.fields:
if field.remote_field and field.remote_field.model is Site:
try:
site = getattr(obj, field.name)
except Site.DoesNotExist:
continue
if site is not None:
object_domain = site.domain
if object_domain is not None:
break
# Fall back to the current site (if possible).
if object_domain is None:
try:
object_domain = Site.objects.get_current(request).domain
except Site.DoesNotExist:
pass
else:
# Fall back to the current request's site.
object_domain = RequestSite(request).domain
# If all that malarkey found an object domain, use it. Otherwise, fall back
# to whatever get_absolute_url() returned.
if object_domain is not None:
protocol = request.scheme
return http.HttpResponseRedirect(
"%s://%s%s" % (protocol, object_domain, absurl)
)
else:
return http.HttpResponseRedirect(absurl)
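A sketch of how this view is commonly wired into a URLconf; the pattern and name below are illustrative choices, not something this module defines:
from django.conf.urls import url
from django.contrib.contenttypes import views as contenttype_views

urlpatterns = [
    # /r/<content_type_id>/<object_id>/ redirects to the object's get_absolute_url().
    url(r'^r/(?P<content_type_id>\d+)/(?P<object_id>.+)/$',
        contenttype_views.shortcut, name='contenttypes-shortcut'),
]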
|
[
"jeremyrich@free.fr"
] |
jeremyrich@free.fr
|
e581f50177c0f06cd565e1b66f82f6d51dbb486e
|
3fe272eea1c91cc5719704265eab49534176ff0d
|
/scripts/field/enter_402000600.py
|
88f1149115492abf3117fdc9ac30cf77dc6fb2e6
|
[
"MIT"
] |
permissive
|
Bratah123/v203.4
|
e72be4843828def05592298df44b081515b7ca68
|
9cd3f31fb2ef251de2c5968c75aeebae9c66d37a
|
refs/heads/master
| 2023-02-15T06:15:51.770849
| 2021-01-06T05:45:59
| 2021-01-06T05:45:59
| 316,366,462
| 1
| 0
|
MIT
| 2020-12-18T17:01:25
| 2020-11-27T00:50:26
|
Java
|
UTF-8
|
Python
| false
| false
| 312
|
py
|
# Created by MechAviv
# Map ID :: 402000600
# Refuge Outskirts : Caravan Refuge
# Unhandled Message [47] Packet: 2F 02 00 00 00 B0 83 08 00 00 00 00 00 2E 02 00 00 00 00 00 80 05 BB 46 E6 17 02 00 00 B8 22 11 00 00 00 00 00 63 04 00 00 00 02 C0 6A 2A F2 79 D6 D4 01 0D 00 66 69 65 6C 64 5F 65 6E 74 65 72 3D 31
|
[
"pokesmurfuwu@gmail.com"
] |
pokesmurfuwu@gmail.com
|
9cfdb317eb64642a51f6b25f90e07311be3c5efd
|
5785d7ed431b024dd910b642f10a6781df50e4aa
|
/revise-daily/june_2021/epi/36_phone_mnemonics.py
|
63ad8507a981dc8f93667cd57cd578a5835d1fea
|
[] |
no_license
|
kashyapa/interview-prep
|
45d77324446da34d99bf8efedb3544b367b5523e
|
7060c090c40602fb9c4778eace2078e1b51e235b
|
refs/heads/master
| 2023-07-28T13:12:49.515299
| 2021-09-06T14:33:25
| 2021-09-06T14:33:25
| 403,706,510
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 476
|
py
|
def phone_mnemonics(nums):
def rec(idx, mnemonic):
if idx == len(nums):
res.append(''.join(mnemonic.copy()))
return
str = mapping[int(nums[idx])]
for c in str:
mnemonic.append(c)
rec(idx+1, mnemonic)
mnemonic.pop()
return
res = []
    mapping = ['0', '1', 'ABC', 'DEF', 'GHI', 'JKL', 'MNO', 'PQRS', 'TUV', 'WXYZ']
rec(0, [])
return res
print(phone_mnemonics("2345"))
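With the standard keypad mapping (2 -> ABC, 3 -> DEF), a shorter input keeps the expected output easy to check by hand:
print(phone_mnemonics("23"))
# ['AD', 'AE', 'AF', 'BD', 'BE', 'BF', 'CD', 'CE', 'CF']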
|
[
"schandra2@godaddy.com"
] |
schandra2@godaddy.com
|
763c6f160fd22c8c58e34381dbfbaa07c2188275
|
397c28c703bd3c3a015f87ccf55b75fa18304b8b
|
/test/testRssEditor.py
|
1cadc3c4426b8531282fb368439990880c84e0b8
|
[] |
no_license
|
jbarciauskas/rsseditor
|
7f3f958d50e1c40ae433a0227cbd3eed9287c50e
|
0e028d2de8eef7fbeb56727bbd47f8b68bd38b4e
|
refs/heads/master
| 2021-01-20T04:32:23.294303
| 2011-04-19T04:59:13
| 2011-04-19T04:59:13
| 1,628,110
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 507
|
py
|
import unittest
from rsseditor import RssEditor
from rsseditor import FeedLoader
class RssEditorTest(unittest.TestCase):
def setUp(self):
self.feedLoader = FeedLoader('http://news.ycombinator.com/rss')
self.classUnderTest = RssEditor(self.feedLoader)
def testEditTitle(self):
self.assertEqual(self.feedLoader.retrieve().find("asdf"), -1)
self.classUnderTest.edit("[1].title.string", "asdf")
self.assertNotEqual(self.feedLoader.retrieve().find("asdf"), -1)
|
[
"barciajo@gmail.com"
] |
barciajo@gmail.com
|
b5e0858061319961de905ea6f567ecfdadd1c942
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2410/47937/267383.py
|
6a19290da4da1af5ceca5da1b99f25f26281d7c3
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 664
|
py
|
a=input().split(",")
b=input()
#print(len(a))
c=[]
i=0
while i<len(a):
c.append(int(a[i]))
i=i+1
#print(c)
d=int(b)
i=0
end=1
while i<len(a):
length=1
i2=i+1
i3=i
while i2<len(a):
if(c[i2]-c[i3]==d):
i3=i2
i2=i2+1
length=length+1
continue
i2=i2+1
if(length>end):
end=length
i=i+1
print(end)
#if(a=="1,2,3,4" and b=="1"):
#print(4)
#elif(a=="1,5,7,8,5,3,4,2,1" and b=="2"):
#print(2)
#elif(a=="1,3,5,7,9" and b=="2"):
#print(5)
#elif(a=="1,2,3,4,5,6,7" and b=="2"):
#print(4)
#elif(a=="1,3,5,6,9" and b=="2"):
#print(3)
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|