Dataset schema (rows below are pipe-separated in this column order; ⌀ marks nullable columns):

| column | dtype | observed range / classes |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3–288 |
| content_id | string | length 40 |
| detected_licenses | list | length 0–112 |
| license_type | string | 2 classes |
| repo_name | string | length 5–115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 684 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64 (⌀) | 4.92k – 681M |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] (⌀) | 2012-06-04 01:52:49 – 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us] (⌀) | 2008-05-22 07:58:19 – 2023-08-21 12:35:19 |
| gha_language | string | 147 classes |
| src_encoding | string | 25 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 128 – 12.7k |
| extension | string | 142 classes |
| content | string | length 128 – 8.19k |
| authors | list | length 1 |
| author_id | string | length 1–132 |
c4cfa8d1f515ccd1db8a8bc200b4be3756aa7ee5
|
f1738cd603e0b2e31143f4ebf7eba403402aecd6
|
/ucs/services/univention-samba4/scripts/mount_extfs_with_user_xattr.py
|
2d16e7e0afec9c4bb662d156572961b9fad2799e
|
[] |
no_license
|
m-narayan/smart
|
92f42bf90d7d2b24f61915fac8abab70dd8282bc
|
1a6765deafd8679079b64dcc35f91933d37cf2dd
|
refs/heads/master
| 2016-08-05T17:29:30.847382
| 2013-01-04T04:50:26
| 2013-01-04T04:50:26
| 7,079,786
| 8
| 6
| null | 2015-04-29T08:54:12
| 2012-12-09T14:56:27
|
Python
|
UTF-8
|
Python
| false
| false
| 3,409
|
py
|
#!/usr/bin/python2.6
#
# Copyright 2011-2012 Univention GmbH
#
# http://www.univention.de/
#
# All rights reserved.
#
# The source code of this program is made available
# under the terms of the GNU Affero General Public License version 3
# (GNU AGPL V3) as published by the Free Software Foundation.
#
# Binary versions of this program provided by Univention to you as
# well as other copyrighted, protected or trademarked materials like
# Logos, graphics, fonts, specific documentations and configurations,
# cryptographic keys etc. are subject to a license agreement between
# you and Univention and not subject to the GNU AGPL V3.
#
# In the case you use this program under the terms of the GNU AGPL V3,
# the program is provided in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License with the Debian GNU/Linux or Univention distribution in file
# /usr/share/common-licenses/AGPL-3; if not, see
# <http://www.gnu.org/licenses/>.
# This script was adjusted from the Tests for ntacls manipulation
# Copyright (C) Matthieu Patou <mat@matws.net> 2009-2010
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Set user_xattr option on ext2/ext3/ext4 filesystems, remount if necessary"""
from univention.lib import fstab
import subprocess
def _do_modify_extfs_option(fstab_partition, options=[], activate=True):
fstab_modified = False
for option in options:
if activate:
if not option in fstab_partition.options:
fstab_partition.options.append(option)
fstab_modified = True
else:
# operation successful: nothing to be done
continue
else:
if not option in fstab_partition.options:
continue
else:
fstab_partition.options.remove(option)
fstab_modified = True
return fstab_modified
def _modify_extfs_option(options=[], activate=True, devices=[]):
fs = fstab.File()
target_partitions = []
if devices:
for device in devices:
fstab_partition = fs.find(spec = device)
            if fstab_partition and fstab_partition.type in ('ext2', 'ext3', 'ext4'):
                target_partitions.append(fstab_partition)
            else:
                print 'Device not found or not an ext2/ext3/ext4 partition: %s' % device
else:
for fstype in ('ext2', 'ext3', 'ext4'):
for fstab_partition in fs.get(fstype, ignore_root=False):
target_partitions.append(fstab_partition)
for fstab_partition in target_partitions:
if _do_modify_extfs_option(fstab_partition, options, activate):
fs.save()
if subprocess.call(('mount', '-o', 'remount', fstab_partition.spec)):
print 'Remounting partition failed: %s' % fstab_partition.spec
if __name__ == '__main__':
_modify_extfs_option(['user_xattr'])
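
Called with no arguments, as in the __main__ block above, the helper enables user_xattr on every ext2/ext3/ext4 fstab entry; the same function also supports removing the option or targeting specific devices. A usage sketch (the device path is a placeholder, not taken from the script):

# disable user_xattr again, only for one hypothetical device
_modify_extfs_option(['user_xattr'], activate=False, devices=['/dev/sda1'])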
|
[
"kartik@debian.org"
] |
kartik@debian.org
|
130a6ac0797c2fcb39322bcead7677bd23919b6f
|
18fe3f034f203bc8a22d08f15b29297ebcc7dfaf
|
/example/py/QFT/qft.py
|
3ebbc269eee255154af2fe6f329ce81fc57cf5c1
|
[
"Apache-2.0"
] |
permissive
|
katou-boop/qlazy
|
b8802c48b0cba0ba89cc1e1a69f551e0f4fdcc73
|
6b62fff65939a589603af7ed8be921c9f1669bb3
|
refs/heads/master
| 2023-02-17T12:30:05.419650
| 2021-01-17T23:20:20
| 2021-01-17T23:20:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,972
|
py
|
import numpy as np
from pprint import pprint
from qlazypy import QState
def swap(self,q0,q1):
self.cx(q0,q1).cx(q1,q0).cx(q0,q1)
return self
def qft2(self,q1,q0):
self.h(q1).cp(q0,q1,phase=0.5)
self.h(q0)
self.swap(q0,q1)
return self
def iqft2(self,q0,q1):
self.h(q0)
self.cp(q0,q1,phase=-0.5).h(q1)
self.swap(q0,q1)
return self
def qft3(self,q2,q1,q0):
self.h(q2).cp(q1,q2,phase=0.5).cp(q0,q2,phase=0.25)
self.h(q1).cp(q0,q1,phase=0.5)
self.h(q0)
self.swap(q0,q2)
return self
def iqft3(self,q0,q1,q2):
self.h(q0)
self.cp(q0,q1,phase=-0.5).h(q1)
self.cp(q0,q2,phase=-0.25).cp(q1,q2,phase=-0.5).h(q2)
self.swap(q0,q2)
return self
def qft(self,id=None):
dim = len(id)
iid = id[::-1]
for i in range(dim):
self.h(iid[dim-i-1])
phase = 1.0
for j in range(dim-i-1):
phase /= 2.0
self.cp(iid[dim-i-j-2],iid[dim-i-1],phase=phase)
i = 0
while i < dim-1-i:
self.swap(iid[i], iid[dim-1-i])
i += 1
return self
def iqft(self,id=None):
dim = len(id)
for i in range(dim):
phase = -1.0/2**i
for j in range(i):
self.cp(id[j],id[i],phase=phase)
phase *= 2.0
self.h(id[i])
i = 0
while i < dim-1-i:
self.swap(id[i], id[dim-1-i])
i += 1
return self
def main():
QState.swap = swap
QState.qft2 = qft2
QState.qft3 = qft3
QState.qft = qft
QState.iqft2 = iqft2
QState.iqft3 = iqft3
QState.iqft = iqft
print("== initial state ==")
qs = QState(3).h(1).h(0)
qs.show()
data_in = qs.amp
print("== QFT ==")
qs.qft([0,1,2])
qs.show()
print("== FFT (numpy) ==")
data_fft = np.fft.ifft(data_in)
norm = np.linalg.norm(data_fft,ord=2)
data_fft /= norm
pprint(data_fft)
qs.free()
if __name__ == '__main__':
main()
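
For reference, the transform that the generic qft() above builds out of Hadamards and controlled phases (qlazypy's cp(..., phase=p) is assumed to apply a controlled rotation of angle pi*p, so phase=0.5 is pi/2) is the quantum Fourier transform on n qubits:

$$\mathrm{QFT}\,|j\rangle = \frac{1}{\sqrt{N}} \sum_{k=0}^{N-1} e^{2\pi i\,jk/N}\,|k\rangle, \qquad N = 2^n.$$

Up to the $1/\sqrt{N}$ normalization this uses the same positive-exponent convention as numpy's inverse DFT, which is why main() compares the QFT state against np.fft.ifft of the input amplitudes renormalized to unit L2 norm.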
|
[
"saminriver33@gmail.com"
] |
saminriver33@gmail.com
|
48c46b8c3bd246ff8dfade058f8405c50c55756f
|
c795ec7f77219892183a1222fb51b8be2e754944
|
/multiverse server/multiverse-server/multiverse/config/common/world_mgr.py
|
6976b6861d72f935a7b2bd64a1922079de12e108
|
[
"MIT"
] |
permissive
|
radtek/MultiverseClientServer
|
89d9a6656953417170e1066ff3bd06782305f071
|
b64d7d754a0b2b1a3e5acabd4d6ebb80ab1d9379
|
refs/heads/master
| 2023-01-19T04:54:26.163862
| 2020-11-30T04:58:30
| 2020-11-30T04:58:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,330
|
py
|
#
# The Multiverse Platform is made available under the MIT License.
#
# Copyright (c) 2012 The Multiverse Foundation
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify,
# merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
#
#
from multiverse.server.engine import *
Engine.registerPlugin("multiverse.mars.plugins.MarsWorldManagerPlugin")
|
[
"demckay@outlook.com"
] |
demckay@outlook.com
|
103205f28504d218bc32fc2208b56de23c9211c2
|
dcd49c222f07454dd365d861a87dead23b850a33
|
/presentation/demo1.py
|
2caa317785922612af8e8fdca94f13f5b1c5bc33
|
[] |
no_license
|
sumsted/mempy_20160321
|
40d7c8e0677ed0fea7f68a0680a2c9b9e090c9a9
|
5b82c9bbe3f7a4aa8075ffcd60956b95d99009af
|
refs/heads/master
| 2021-01-10T03:30:13.467656
| 2016-03-22T20:41:51
| 2016-03-22T20:41:51
| 54,350,637
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 705
|
py
|
import ast
def examine(ast_body, class_name=None):
for node in ast_body:
if isinstance(node, ast.ClassDef):
examine(node.body, node.name)
elif isinstance(node, ast.FunctionDef):
arguments = []
for i, arg in enumerate(node.args.args):
if arg.id != 'self':
arguments.append(arg.id)
if class_name is None:
print({'name': node.name, 'arguments': arguments})
else:
print({'class_name': class_name, 'name': node.name, 'arguments': arguments})
if __name__ == '__main__':
ast_body = ast.parse(open('gopigo.py', 'r').read()).body
examine(ast_body, None)
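
The attribute access arg.id pins this script to the Python 2 ast, where positional arguments are ast.Name nodes; under Python 3 the arguments are ast.arg nodes and the same inner loop would read their .arg attribute. A minimal Python 3 variant of that loop:

# Python 3: argument nodes are ast.arg, not ast.Name
for arg in node.args.args:
    if arg.arg != 'self':
        arguments.append(arg.arg)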
|
[
"scottumsted@gmail.com"
] |
scottumsted@gmail.com
|
6b07facc356d125a46cf27020753a06cbae87c8c
|
e744ad7d93455843cca5851af610bbfbc3d47b63
|
/api/views.py
|
d71a5acfcaa1df49bc4aca112f41d46a5b68caeb
|
[] |
no_license
|
danish703/cbvdjango
|
08a7706791fbca5c8058ed59888318956127786b
|
584413d40a9891ae12e9bc3213b1c340ad6d0b85
|
refs/heads/master
| 2023-01-24T07:02:57.015535
| 2020-12-05T22:19:44
| 2020-12-05T22:19:44
| 316,874,748
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 644
|
py
|
from .serializers import BookSerializer
from book.models import Book
from django.http import JsonResponse
from rest_framework.parsers import JSONParser
from rest_framework.decorators import api_view
@api_view(['GET','POST'])
def booklist(request):
if request.method=='GET':
bl = Book.objects.all()
s = BookSerializer(bl,many=True)
return JsonResponse(s.data,safe=False)
else:
#data = JSONParser().parse(request)
s = BookSerializer(data=request.data)
if s.is_valid():
s.save()
return JsonResponse(s.data,status=201)
        return JsonResponse(s.errors, status=400)  # on invalid input, report the validation errors
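
BookSerializer is imported but not shown; for this view it only needs to serialize Book instances. A minimal sketch of what it plausibly looks like (the field list is an assumption, not taken from the repository):

# serializers.py -- hypothetical minimal version
from rest_framework import serializers
from book.models import Book

class BookSerializer(serializers.ModelSerializer):
    class Meta:
        model = Book
        fields = '__all__'  # actual field list unknown; '__all__' is illustrative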
|
[
"dipu.danish@outlook.com"
] |
dipu.danish@outlook.com
|
5d7d56fb4d01484cb44b34a1e7babd11278857fd
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/167/usersdata/353/68772/submittedfiles/jogoDaVelha.py
|
c7abcd19afe1a0b57a6d183362bce11d1a514024
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 615
|
py
|
# -*- coding: utf-8 -*-
import math
# INPUT
x1 = int(input('Digite x1: '))
x2 = int(input('Digite x2: '))
x3 = int(input('Digite x3: '))
x4 = int(input('Digite x4: '))
x5 = int(input('Digite x5: '))
x6 = int(input('Digite x6: '))
x7 = int(input('Digite x7: '))
x8 = int(input('Digite x8: '))
x9 = int(input('Digite x9: '))
# PROCESSING
if x1==x5==x9:
print (x1)
elif x3==x5==x7:
print (x3)
elif x1==x4==x7:
print (x1)
elif x2==x5==x8:
print (x2)
elif x3==x6==x9:
print (x3)
elif x1==x2==x3:
print (x1)
elif x4==x5==x6:
print (x4)
elif x7==x8==x9:
print (x7)
else:
print ('E')
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
74f8fe8858f6e1ff173e06a4f45c4b2199918cb6
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/62/usersdata/233/31358/submittedfiles/ex1.py
|
da0cf9c7c68161f7900900d0da6ff776a3d9916a
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 311
|
py
|
# -*- coding: utf-8 -*-
from __future__ import division
a=float(input('Digite a:'))
b=float(input('Digite b:'))
c=float(input('Digite c:'))
Delta=(b**2)-(4*a*c)
if Delta>=0:
x1=(-b+Delta**(1/2))/(2*a)
x2=(-b-Delta**(1/2))/(2*a)
print('x1:%.2f'%x1)
print('x2:%.2f'%x2)
else:
print('SRR')
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
7b48da91b2aeb4b344130b2fc9b3487bb30b87cb
|
b7483b4bfa9f9feec9d4ec6d96cdabf6b6f0446f
|
/web_10_jul_dev_7373/urls.py
|
d389ea52bac75c194054b2040a6e83891c31f38d
|
[] |
no_license
|
crowdbotics-apps/web-10-jul-dev-7373
|
7f0ed959b477cf2f29e2de23a4bd3e0342a76c70
|
91f0fb64618ee52b5a672ceca97f42e793ed7a4b
|
refs/heads/master
| 2022-11-18T08:20:37.760939
| 2020-07-10T11:31:41
| 2020-07-10T11:31:41
| 278,540,486
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,967
|
py
|
"""web_10_jul_dev_7373 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
urlpatterns = [
path("", include("home.urls")),
path("accounts/", include("allauth.urls")),
path("api/v1/", include("home.api.v1.urls")),
path("admin/", admin.site.urls),
path("users/", include("users.urls", namespace="users")),
path("rest-auth/", include("rest_auth.urls")),
# Override email confirm to use allauth's HTML view instead of rest_auth's API view
path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
path("rest-auth/registration/", include("rest_auth.registration.urls")),
path("home/", include("home.urls")),
]
admin.site.site_header = "web 10 jul"
admin.site.site_title = "web 10 jul Admin Portal"
admin.site.index_title = "web 10 jul Admin"
# swagger
api_info = openapi.Info(
title="web 10 jul API",
default_version="v1",
description="API documentation for web 10 jul App",
)
schema_view = get_schema_view(
api_info, public=True, permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
0557bddbd415ed9c7add2388c3f9cf3ff71f3edd
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/gaussiana/ch3_2020_04_23_20_09_22_443850.py
|
7d98a0f91bc968643c0089af48c18487d99171f2
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 135
|
py
|
import math
def calcula_gaussiana(x, mi, sigma):
    y = (1/(sigma*(2*math.pi)**0.5)) * math.exp((-0.5)*((x-mi)/sigma)**2)  # 1/(sigma*sqrt(2*pi)) is the normalizing factor
return y
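
A quick sanity check: at x = mi the density reduces to 1/(sigma*sqrt(2*pi)), so for the standard normal (mi=0, sigma=1) the function should return about 0.3989:

print(calcula_gaussiana(0, 0, 1))  # 1/sqrt(2*pi) ≈ 0.3989422804014327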
|
[
"you@example.com"
] |
you@example.com
|
abb096ef90bc16ae6e689a662add4b89d5a0eb6c
|
d3c21f0051e5ca2f45d98381b0372b4cd916b213
|
/cgi-bin/module/plugins/hoster/HellshareCz.py
|
0add79ed9ef88640919558cbe79f2285005d4b23
|
[] |
no_license
|
f3l/shareacc
|
ca165272f4265180d9178b6a066c69a0b368f8dd
|
615c71216317f7ac46b5217f5672cad0c71a1e49
|
refs/heads/master
| 2020-04-06T06:41:11.278718
| 2013-02-05T15:15:15
| 2013-02-05T15:15:15
| 4,640,503
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,102
|
py
|
# -*- coding: utf-8 -*-
"""
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License,
or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, see <http://www.gnu.org/licenses/>.
@author: zoidberg
"""
import re
import datetime
from math import ceil
from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
from module.network.RequestFactory import getURL
class HellshareCz(SimpleHoster):
__name__ = "HellshareCz"
__type__ = "hoster"
__pattern__ = r"(http://(?:.*\.)*hellshare\.(?:cz|com|sk|hu)/[^?]*/\d+).*"
__version__ = "0.77"
__description__ = """Hellshare.cz"""
__author_name__ = ("zoidberg")
FREE_URL_PATTERN = r'<form[^>]*action="(http://free\d*\.helldata[^"]*)"'
PREMIUM_URL_PATTERN = r"launchFullDownload\('([^']*)'\);"
FILE_NAME_PATTERN = r'<h1 id="filename">(?P<N>[^<]+)</h1>'
FILE_SIZE_PATTERN = r'<td><span>Size</span></td>\s*<th><span>(?P<S>[0-9.]*) (?P<U>[kKMG])i?B</span></th>'
FILE_OFFLINE_PATTERN = r'<h1>File not found.</h1>'
CAPTCHA_PATTERN = r'<img class="left" id="captcha-img"src="([^"]*)" />'
#FILE_CREDITS_PATTERN = r'<strong class="filesize">(\d+) MB</strong>'
CREDIT_LEFT_PATTERN = r'<p>After downloading this file you will have (\d+) MB for future downloads.'
DOWNLOAD_AGAIN_PATTERN = r'<p>This file you downloaded already and re-download is for free. </p>'
SHOW_WINDOW_PATTERN = r'<a href="([^?]+/(\d+)/\?do=(fileDownloadButton|relatedFileDownloadButton-\2)-showDownloadWindow)"'
def setup(self):
self.resumeDownload = self.multiDL = True if self.account else False
self.chunkLimit = 1
def process(self, pyfile):
pyfile.url = re.search(self.__pattern__, pyfile.url).group(1)
self.html = self.load(pyfile.url, decode = True)
self.getFileInfo()
found = re.search(self.SHOW_WINDOW_PATTERN, self.html)
if not found: self.parseError('SHOW WINDOW')
self.url = "http://www.hellshare.com" + found.group(1)
self.logDebug("SHOW WINDOW: " + self.url)
self.html = self.load(self.url, decode=True)
if self.account:
self.handlePremium()
else:
self.handleFree()
def handleFree(self):
# hellshare is very generous
if "You exceeded your today's limit for free download. You can download only 1 files per 24 hours." in self.html:
t = datetime.datetime.today().replace(hour=1, minute=0, second=0) + datetime.timedelta(
days=1) - datetime.datetime.today()
self.setWait(t.seconds, True)
self.wait()
self.retry()
# parse free download url
found = re.search(self.FREE_URL_PATTERN, self.html)
if found is None: self.parseError("Free URL)")
parsed_url = found.group(1)
self.logDebug("Free URL: %s" % parsed_url)
# decrypt captcha
found = re.search(self.CAPTCHA_PATTERN, self.html)
if found is None: self.parseError("Captcha")
captcha_url = found.group(1)
captcha = self.decryptCaptcha(captcha_url)
self.logDebug('CAPTCHA_URL:' + captcha_url + ' CAPTCHA:' + captcha)
self.download(parsed_url, post = {"captcha" : captcha, "submit" : "Download"})
# check download
check = self.checkDownload({
"wrong_captcha": re.compile(self.FREE_URL_PATTERN)
})
if check == "wrong_captcha":
self.invalidCaptcha()
self.retry()
def handlePremium(self):
# get premium download url
found = re.search(self.PREMIUM_URL_PATTERN, self.html)
if found is None: self.fail("Parse error (URL)")
download_url = found.group(1)
# check credit
if self.DOWNLOAD_AGAIN_PATTERN in self.html:
self.logInfo("Downloading again for free")
else:
            found = re.search(self.CREDIT_LEFT_PATTERN, self.html)
            if found is None:
                self.logError("Not enough credit left. Trying to download as free user.")
                self.resetAccount()  # triggers a retry, so the credit parsing below only runs when the pattern matched
credits_left = int(found.group(1))
file_credits = ceil(self.pyfile.size / 1024 ** 2)
self.logInfo("Downloading file for %d credits, %d credits left" % (file_credits, credits_left))
self.download(download_url)
info = self.account.getAccountInfo(self.user, True)
self.logInfo("User %s has %i credits left" % (self.user, info["trafficleft"] / 1024))
getInfo = create_getInfo(HellshareCz)
|
[
"root@server3.kruton.de"
] |
root@server3.kruton.de
|
9e9d6d52dad7d50b577493a4e9f2211b99cd2907
|
463283665d8509e3834caeb9594c8c434c656580
|
/backend/metro_go_19413/settings.py
|
f4a246429304c5a8c170afd586aa8ac185d57d47
|
[] |
no_license
|
crowdbotics-apps/metro-go-19413
|
da18250b50c338297d96a2bc738ed46e0edd2c3e
|
ba167f73af28e2109b042fa936f41d04abdb5663
|
refs/heads/master
| 2022-11-27T05:03:57.107712
| 2020-08-06T01:20:03
| 2020-08-06T01:20:03
| 285,441,509
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,910
|
py
|
"""
Django settings for metro_go_19413 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
env = environ.Env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"django.contrib.sites",
"taxi_profile",
"booking",
"location",
"vehicle",
"wallet",
]
LOCAL_APPS = [
"home",
"users.apps.UsersConfig",
]
THIRD_PARTY_APPS = [
"rest_framework",
"rest_framework.authtoken",
"rest_auth",
"rest_auth.registration",
"bootstrap4",
"allauth",
"allauth.account",
"allauth.socialaccount",
"allauth.socialaccount.providers.google",
"django_extensions",
"drf_yasg",
# start fcm_django push notifications
"fcm_django",
# end fcm_django push notifications
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "metro_go_19413.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "metro_go_19413.wsgi.application"
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": os.path.join(BASE_DIR, "db.sqlite3"),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {"default": env.db()}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",},
{"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",},
{"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = "/static/"
MIDDLEWARE += ["whitenoise.middleware.WhiteNoiseMiddleware"]
AUTHENTICATION_BACKENDS = (
"django.contrib.auth.backends.ModelBackend",
"allauth.account.auth_backends.AuthenticationBackend",
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, "static")]
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = "email"
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "mandatory"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# start fcm_django push notifications
FCM_DJANGO_SETTINGS = {"FCM_SERVER_KEY": env.str("FCM_SERVER_KEY", "")}
# end fcm_django push notifications
# Swagger settings for api docs
SWAGGER_SETTINGS = {
"DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
if DEBUG:
# output email to console instead of sending
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
57eb39120030442c49d75ff10313cc2e7108e3f3
|
91a1519b82a43f4cd9b1a36cfb19e27f6afcbae3
|
/src/models/nli.py
|
3c07c282476e14a9a7b986f257b62d80cdea0161
|
[] |
no_license
|
stanleysie/usc_dae
|
b45d2a1d47ec7b50408fc1607348155ee6cd7a1e
|
9da432971d165b5b2068fa9724a495f88d3ef5f2
|
refs/heads/master
| 2023-02-16T11:22:46.398444
| 2019-02-09T15:57:45
| 2019-02-09T15:57:45
| 321,680,275
| 0
| 0
| null | 2020-12-15T13:43:47
| 2020-12-15T13:43:46
| null |
UTF-8
|
Python
| false
| false
| 3,992
|
py
|
import os
import numpy as np
import sys
import torch
import torch.nn
import torch.nn.functional as F
from torch.autograd import Variable
def get_nli_model(
nli_code_path, nli_pickle_path, glove_path, word_list, verbose=True):
assert os.path.exists(nli_code_path)
sys.path += [nli_code_path]
if verbose:
print("Loading NLI..")
nli_net = torch.load(
nli_pickle_path,
map_location=lambda storage, loc: storage
)
sys.path = sys.path[:-1]
nli_net.encoder.set_glove_path(glove_path)
# the argument is word_dict, but it just needs an iterator
nli_net.encoder.word_vec = nli_net.encoder.get_glove(
word_list, verbose=verbose)
nli_net = nli_net.cuda()
for param in nli_net.parameters():
param.requires_grad = False
if verbose:
print("Done Loading NLI")
return nli_net
def get_nli_loss(gs_onehot, gs_lengths, target_ids, target_lengths,
nli_model, encoder, word_embeddings, device):
batch_size = gs_onehot.shape[0]
pred_embeddings = encoder.embed_onehot(
onehot=gs_onehot,
word_embeddings=word_embeddings,
include_special=False,
)
true_embeddings = encoder.embed_ids(
ids=target_ids,
word_embeddings=word_embeddings,
include_special=False,
)
nli_logprobs = nli_model(
(pred_embeddings.transpose(0, 1), np.array(gs_lengths)),
(true_embeddings.transpose(0, 1), np.array(target_lengths)),
)
nli_loss = torch.nn.NLLLoss()(
nli_logprobs,
Variable(device(torch.LongTensor([0]*batch_size)))
)
return nli_loss, nli_logprobs
def resolve_nli_model(nli_code_path, nli_pickle_path, glove_path, word_list,
nli_loss_multiplier, init_decoder_with_nli, device):
if nli_loss_multiplier or init_decoder_with_nli:
return device(get_nli_model(
nli_code_path=nli_code_path,
nli_pickle_path=nli_pickle_path,
glove_path=glove_path,
word_list=word_list,
))
else:
return None
def resolve_nli_mapper(init_decoder_with_nli, nli_model, hidden_size,
nli_mapper_mode, rnn_type):
if init_decoder_with_nli:
if nli_mapper_mode == 0:
if rnn_type == "gru":
mapper_class = NLIMapper
elif rnn_type == "lstm":
mapper_class = LSTMNLIMapper
else:
raise KeyError(f"Rnn type {rnn_type} not handled")
nli_mapper = mapper_class(
nli_model.enc_lstm_dim * 2,
hidden_size,
)
else:
raise KeyError("Mapping mode not implemented/deprecated")
return nli_mapper
else:
return None
class NLIMapper(torch.nn.Module):
def __init__(self, nli_dim, hidden_size):
super(NLIMapper, self).__init__()
self.nli_dim = nli_dim
self.hidden_size = hidden_size
self.nli_output_dim = hidden_size
self.linear = torch.nn.Linear(
self.nli_dim + self.hidden_size,
self.nli_output_dim,
)
def forward(self, encoder_hidden, infersent_input):
infersent_repeated = infersent_input.expand(
encoder_hidden.shape[0], *infersent_input.shape)
nli_output = F.relu(self.linear(
torch.cat([encoder_hidden, infersent_repeated], dim=2))
)
return nli_output
class LSTMNLIMapper(torch.nn.Module):
def __init__(self, nli_dim, hidden_size):
super(LSTMNLIMapper, self).__init__()
self.nli_dim = nli_dim
self.hidden_size = hidden_size
self.h_mapper = NLIMapper(nli_dim, hidden_size)
self.c_mapper = NLIMapper(nli_dim, hidden_size)
def forward(self, encoder_hidden, infersent_input):
h, c = encoder_hidden
return (
self.h_mapper(h, infersent_input),
            self.c_mapper(c, infersent_input),  # cell state goes through c_mapper; using h_mapper twice would leave c_mapper unused
)
|
[
"email@jasonphang.com"
] |
email@jasonphang.com
|
941bd6352a9145867cac9e526b61a89c8c16ed7a
|
60dc372f5964737b8463db7eda386017098e923d
|
/assets/pre-commit/setup.py
|
b6fc2648ccc60273b1f458cd7f0fddf9157666ab
|
[] |
no_license
|
jambo6/scripts
|
612e1debc9a4266dc2680b93927a883cf456e4e2
|
cec34d096da924c39bc6a346e7433691b8288f3a
|
refs/heads/main
| 2023-02-27T03:42:37.346527
| 2021-01-31T18:08:12
| 2021-01-31T18:08:12
| 334,722,916
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 427
|
py
|
import setuptools
with open("../requirements.txt") as f:
required = f.read().splitlines()
setuptools.setup(
name="sacredex",
version="0.0.1",
description="Custom functions for working with sacred for ML projects.",
url="https://github.com/jambo6/sacredex",
author="James Morrill",
author_email="james.morrill.6@gmail.com",
packages=setuptools.find_packages(),
install_requires=required,
)
|
[
"james.morrill.6@gmail.com"
] |
james.morrill.6@gmail.com
|
7810a51ec0777c0f5663369a7489015114f390a0
|
924750bdb72b9a6746fc807acd0ac665aca54b08
|
/CH7_User_Input_And_While_Loops/7-5_Movie_Tickets_2.py
|
e380fe07bd14ca5b96ef8f743ed2a8cdf3538a2b
|
[] |
no_license
|
jiez1812/Python_Crash_Course
|
0a8ad80ddaa6bd14f2ca8d26029b9556584d13b5
|
e460d9ccf9610ea3e306137789e36fc1f2e8a5f9
|
refs/heads/master
| 2021-10-27T06:38:47.185228
| 2019-04-16T17:03:23
| 2019-04-16T17:03:23
| 103,537,248
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 615
|
py
|
prompt = "Please tell us your age for price of ticket."
prompt += "\n(Enter 'quit' to exit)\nAge: "
active = True
while active:
ans = input(prompt)
if ans == 'quit':
active = False
print('Thank you for enquiry.\n')
elif not ans.isdecimal():
print('Please insert valid input.\n')
else:
age = int(ans)
if 0 <= age < 3 :
print('The ticket is free\n')
elif 3 <= age <= 12 :
print('The ticket is $10\n')
        elif age > 12 :
print('The ticket is $15\n')
else:
print('Please enter valid age\n')
|
[
"jieztee@gmail.com"
] |
jieztee@gmail.com
|
19709ed76a6566e830d9d2f769db316d2ce51e0c
|
0a1f8957a798006deaa53d10d09f733fab1e6b05
|
/analysis_tools/ExamplePython/output/scripts/python_sample.py
|
bd91218667520b112670e9a9b42c185513b831ea
|
[
"LicenseRef-scancode-other-permissive"
] |
permissive
|
metamorph-inc/meta-core
|
a89504ccb1ed2f97cc6e792ba52e3a6df349efef
|
bc7a05e04c7901f477fe553c59e478a837116d92
|
refs/heads/master
| 2023-03-07T02:52:57.262506
| 2023-03-01T18:49:49
| 2023-03-01T18:49:49
| 40,361,476
| 25
| 15
|
NOASSERTION
| 2023-01-13T16:54:30
| 2015-08-07T13:21:24
|
Python
|
UTF-8
|
Python
| false
| false
| 4,295
|
py
|
import glob
import logging
from optparse import OptionParser
import os
import sys
import json
from lxml import etree
from collections import Counter
# ------------------ setting up logger ------------------------------
# create logger with 'python_sample_analysis_tool'
logger = logging.getLogger('python_sample_analysis_tool')
logger.setLevel(logging.DEBUG)
# create file handler which logs even debug messages
if not os.path.isdir('log'):
os.mkdir('log')
fh = logging.FileHandler(os.path.join('log', 'python_sample_analysis_tool.log'))
fh.setLevel(logging.DEBUG)
# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setLevel(logging.WARNING)
# create formatter and add it to the handlers
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(fh)
logger.addHandler(ch)
# ------------------ functionality implementation ------------------
# find adm file/files
def transform_adm(adm_file):
# FIXME: check if file exists
# read adm file
tree = etree.parse(adm_file)
logger.debug("transforming: %s", adm_file)
num_components = 0
component_classes = Counter()
# get all component instances at every level
for component in tree.findall('.//ComponentInstance'):
num_components += 1
component_classes[component.get('ComponentID')] += 1
return {
'count_components': num_components,
'component_classes': component_classes
}
# perform analysis
def count_components(adm_json):
return adm_json['count_components']
def bill_of_materials(adm_json):
return {'number_of_component_classes': len(adm_json['component_classes'].keys())}
# report results
def update_manifest_json(metric_name, value):
with open('testbench_manifest.json', 'r') as f_p:
test_bench_manifest = json.load(f_p)
found = False
for metric in test_bench_manifest['Metrics']:
if metric['Name'] == metric_name:
metric['Value'] = str(value)
found = True
break
if found:
with open('testbench_manifest.json', 'w') as f_p:
json.dump(test_bench_manifest, f_p)
logger.info('Metric was found and updated %s, %s', metric_name, str(value))
return 0
else:
logger.error('Metric was NOT found %s, %s', metric_name, str(value))
return 1
# ------------------ initialize variables --------------------------
def main():
parser = OptionParser()
parser.add_option("-a", "--adm", type='string', action="store",
help='AVM design model file. If it is not defined, runs on the first ADM file in the directory.')
parser.add_option("-b", "--bom", action="store_true", default=False,
help='Exports bill of material component list.')
parser.add_option("-c", "--components", action="store_true", default=False,
help='Counts number of components used in the design.')
(opts, args) = parser.parse_args()
working_dir = os.getcwd()
logger.debug("working directory: %s", working_dir)
if opts.adm:
adm_file = opts.adm
else:
adm_file = glob.glob('*.adm')[0]
logger.debug("given adm file: %s", adm_file)
adm_data = transform_adm(adm_file)
with open('adm_analysis_results.json', 'w') as f_p:
json.dump(adm_data, f_p)
if opts.bom:
logger.info("reporting bill of material")
bom = bill_of_materials(adm_data)
return update_manifest_json('NumberOfComponentClasses', bom['number_of_component_classes'])
if opts.components:
logger.info("reporting number of components")
number_of_components = count_components(adm_data)
return update_manifest_json('NumberOfComponents', number_of_components)
parser.print_help()
parser.error("Incorrect number of arguments --bom or --components has to be defined. BOM has precedence.")
# ------------------ call main -------------------------------------
if __name__ == '__main__':
sys.exit(main())
|
[
"kevin.m.smyth@gmail.com"
] |
kevin.m.smyth@gmail.com
|
043fa102e3eab792ac475330290ce4b64af9b2b7
|
f3d75509d63297588227b7652462bb95bfed3c52
|
/les_9_task_1.py
|
34108af3cf38d8839641192ae84634ff579a0c09
|
[] |
no_license
|
snlnrush/python-alg-and-data
|
220faa912ab1ce7ad53c62aa905d792ef4c5d280
|
a4a3d20e46dc48bf394733e7cf6dccabdcf9bed4
|
refs/heads/master
| 2022-06-17T01:59:26.439289
| 2020-05-08T10:19:08
| 2020-05-08T10:19:08
| 260,943,073
| 0
| 0
| null | 2020-05-07T15:02:13
| 2020-05-03T14:30:07
|
Python
|
UTF-8
|
Python
| false
| false
| 2,627
|
py
|
"""
1. Определение количества различных подстрок с использованием хеш-функции. Пусть на вход функции дана строка.
Требуется вернуть количество различных подстрок в этой строке.
Примечания:
* в сумму не включаем пустую строку и строку целиком;
* задача считается решённой, если в коде использована функция вычисления хеша (hash(),
sha1() или любая другая из модуля hashlib)
"""
import hashlib
from itertools import combinations
def options(phrase: str) -> set:
"""
Функция вычисляет все возможные уникальные варианты и сочетания подстрок.
"""
assert len(phrase) > 0, 'Пустая строка!'
if len(phrase) == 1:
return set(phrase)
options_set = set()
for i in range(1, len(phrase)):
        options_set.update(combinations(phrase, i))  # gather all length-i combinations; the set deduplicates them
return options_set
def hash_check(phrase: str, subs: set) -> tuple:
"""
Функция проверяет наличие подстроки в строке с помощью хеш-функции и при наличии суммирует.
"""
count = 0
list_subs = []
for sub_item in subs:
sub_item_hash = hashlib.sha1(''.join(sub_item).encode('utf-8')).hexdigest()
for i in range(len(phrase) - len(sub_item) + 1):
if sub_item_hash == hashlib.sha1(phrase[i: i + len(sub_item)].encode('utf-8')).hexdigest():
count += 1
list_subs.append(''.join(sub_item))
break
return count, list_subs
phrase = input('Enter the string to analyze: ')
print('\nString under analysis:\n', phrase)
result = hash_check(phrase, options(phrase))
print('\nNumber of distinct substrings:', result[0])
print('\nAll distinct substrings:\n', result[1])
"""
Пример работы программы
Исследуемая строка:
papa
Количество различных подстрок в строке: 6
Все различные уникальные подстроки в строке:
['pap', 'p', 'pa', 'apa', 'a', 'ap']
"""
|
[
"cross-fire@list.ru"
] |
cross-fire@list.ru
|
5cbaa3a0c17afdda242c582e4d10e2db771fe03d
|
88f7d5eaeb39a5ee83f90a2f55af2c3eb5135147
|
/fastai/callback/mixup.py
|
64ae6656a6d873c6aca2c1097d09c19233777817
|
[
"Apache-2.0"
] |
permissive
|
abcp4/fastai
|
7671f488cc6f969a2594e22be94bb6d3d3172504
|
769ba85d789905d68953daba6c3b52d4e9b2359b
|
refs/heads/main
| 2023-02-15T04:47:15.850298
| 2021-01-02T14:42:32
| 2021-01-02T14:42:32
| 321,826,680
| 0
| 0
|
Apache-2.0
| 2020-12-16T00:46:10
| 2020-12-16T00:46:09
| null |
UTF-8
|
Python
| false
| false
| 1,828
|
py
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/19_callback.mixup.ipynb (unless otherwise specified).
__all__ = ['reduce_loss', 'MixUp']
# Cell
from ..basics import *
from .progress import *
from ..vision.core import *
from ..vision.models.xresnet import *
from torch.distributions.beta import Beta
# Cell
def reduce_loss(loss, reduction='mean'):
return loss.mean() if reduction=='mean' else loss.sum() if reduction=='sum' else loss
# Cell
class MixUp(Callback):
run_after,run_valid = [Normalize],False
def __init__(self, alpha=0.4): self.distrib = Beta(tensor(alpha), tensor(alpha))
def before_fit(self):
self.stack_y = getattr(self.learn.loss_func, 'y_int', False)
if self.stack_y: self.old_lf,self.learn.loss_func = self.learn.loss_func,self.lf
def after_fit(self):
if self.stack_y: self.learn.loss_func = self.old_lf
def before_batch(self):
lam = self.distrib.sample((self.y.size(0),)).squeeze().to(self.x.device)
lam = torch.stack([lam, 1-lam], 1)
self.lam = lam.max(1)[0]
shuffle = torch.randperm(self.y.size(0)).to(self.x.device)
xb1,self.yb1 = tuple(L(self.xb).itemgot(shuffle)),tuple(L(self.yb).itemgot(shuffle))
nx_dims = len(self.x.size())
self.learn.xb = tuple(L(xb1,self.xb).map_zip(torch.lerp,weight=unsqueeze(self.lam, n=nx_dims-1)))
if not self.stack_y:
ny_dims = len(self.y.size())
self.learn.yb = tuple(L(self.yb1,self.yb).map_zip(torch.lerp,weight=unsqueeze(self.lam, n=ny_dims-1)))
def lf(self, pred, *yb):
if not self.training: return self.old_lf(pred, *yb)
with NoneReduce(self.old_lf) as lf:
loss = torch.lerp(lf(pred,*self.yb1), lf(pred,*yb), self.lam)
return reduce_loss(loss, getattr(self.old_lf, 'reduction', 'mean'))
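
In equations: before_batch() draws a per-sample lambda' from Beta(alpha, alpha), keeps lambda = max(lambda', 1 - lambda') so the original example always dominates, and interpolates with torch.lerp (lerp(a, b, w) = a + w*(b - a)):

$$\tilde{x} = \lambda\,x + (1-\lambda)\,x_{\sigma(i)}, \qquad \tilde{y} = \lambda\,y + (1-\lambda)\,y_{\sigma(i)},$$

where $\sigma$ is the random permutation from torch.randperm. When the loss expects integer targets instead (stack_y), lf() applies the same lambda-interpolation to the two losses rather than to the labels.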
|
[
"noreply@github.com"
] |
abcp4.noreply@github.com
|
976bdff97c1361b41eb3808f0f6cb9e95a95cfab
|
f3b5c4a5ce869dee94c3dfa8d110bab1b4be698b
|
/controller/src/vnsw/agent/uve/cpuinfo.py
|
80d92022e419afb85b9029f67a97f308f6fec132
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
permissive
|
pan2za/ctrl
|
8f808fb4da117fce346ff3d54f80b4e3d6b86b52
|
1d49df03ec4577b014b7d7ef2557d76e795f6a1c
|
refs/heads/master
| 2021-01-22T23:16:48.002959
| 2015-06-17T06:13:36
| 2015-06-17T06:13:36
| 37,454,161
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,930
|
py
|
#
# Copyright (c) 2014 Juniper Networks, Inc. All rights reserved.
#
import os
import psutil
from vrouter.cpuinfo.ttypes import *
class CpuInfoData(object):
def __init__(self):
self._process = psutil.Process(os.getpid())
self._num_cpu = 0
#end __init__
def _get_num_cpu(self):
return psutil.NUM_CPUS
#end _get_num_cpu
def _get_sys_mem_info(self):
phymem_info = psutil.phymem_usage()
sys_mem_info = SysMemInfo()
sys_mem_info.total = phymem_info[0]/1024
sys_mem_info.used = phymem_info[1]/1024
sys_mem_info.free = phymem_info[2]/1024
return sys_mem_info
#end _get_sys_mem_info
def _get_mem_info(self):
mem_info = MemInfo()
mem_info.virt = self._process.get_memory_info().vms/1024
mem_info.peakvirt = mem_info.virt
mem_info.res = self._process.get_memory_info().rss/1024
return mem_info
#end _get_mem_info
def _get_cpu_load_avg(self):
load_avg = os.getloadavg()
cpu_load_avg = CpuLoadAvg()
cpu_load_avg.one_min_avg = load_avg[0]
cpu_load_avg.five_min_avg = load_avg[1]
        cpu_load_avg.fifteen_min_avg = load_avg[2]
        return cpu_load_avg  # without this return, get_cpu_info() stores None in cpuload
    #end _get_cpu_load_avg
def _get_cpu_share(self):
cpu_percent = self._process.get_cpu_percent(interval=0.1)
return cpu_percent/self._get_num_cpu()
#end _get_cpu_share
def get_cpu_info(self, system=True):
cpu_info = CpuLoadInfo()
num_cpu = self._get_num_cpu()
if self._num_cpu != num_cpu:
self._num_cpu = num_cpu
cpu_info.num_cpu = num_cpu
if system:
cpu_info.sys_mem_info = self._get_sys_mem_info()
cpu_info.cpuload = self._get_cpu_load_avg()
cpu_info.meminfo = self._get_mem_info()
cpu_info.cpu_share = self._get_cpu_share()
return cpu_info
#end get_cpu_info
#end class CpuInfoData
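
The psutil API used here (NUM_CPUS, phymem_usage(), get_memory_info(), get_cpu_percent()) predates psutil 2.0 and has since been removed. A rough sketch of the modern equivalents, assuming psutil >= 2.0 (values in KiB, as above):

import os
import psutil

proc = psutil.Process(os.getpid())
num_cpu = psutil.cpu_count()            # was psutil.NUM_CPUS
vm = psutil.virtual_memory()            # was psutil.phymem_usage()
total_kb, used_kb, free_kb = vm.total // 1024, vm.used // 1024, vm.free // 1024
mem = proc.memory_info()                # was proc.get_memory_info()
virt_kb, res_kb = mem.vms // 1024, mem.rss // 1024
cpu_share = proc.cpu_percent(interval=0.1) / num_cpu  # was proc.get_cpu_percent()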
|
[
"pan2za@live.com"
] |
pan2za@live.com
|
743856619a6a9fc5acc3233b6ab7ab96997f0904
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/103/usersdata/162/50409/submittedfiles/av1_3.py
|
5aa313f32026797e5c578a1fe5530ca39d1c1606
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 270
|
py
|
# -*- coding: utf-8 -*-
import math
a=int(input('Digite o primeiro número:'))
b=int(input('Digite o segundo número:'))
ant=a
atual=b
cont=1
resto=ant%atual
while resto!=0:
    post=atual
    atual=resto
    resto=post%atual  # advance Euclid's algorithm; without this the loop never terminates
    cont=cont+1
    if post%atual==0:
        print(atual)  # once the remainder hits zero, atual holds gcd(a, b)
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
10130ff786d81789a85eb06a31e9cd1149eae79c
|
2bdedcda705f6dcf45a1e9a090377f892bcb58bb
|
/src/main/output/problem/party/php.py
|
95b563f2a716c46065800645e6ab63e4b5ad8439
|
[] |
no_license
|
matkosoric/GenericNameTesting
|
860a22af1098dda9ea9e24a1fc681bb728aa2d69
|
03f4a38229c28bc6d83258e5a84fce4b189d5f00
|
refs/heads/master
| 2021-01-08T22:35:20.022350
| 2020-02-21T11:28:21
| 2020-02-21T11:28:21
| 242,123,053
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,092
|
py
|
using System;
using System.Net;
using System.Net.Http;
using System.Threading.Tasks;
using Microsoft.Translator.API;
namespace CSharp_TranslateSample
{
public class Program
{
private const string SubscriptionKey = "efc1a84836c7f3db9a730df44241ab30"; //Enter here the Key from your Microsoft Translator Text subscription on http://portal.azure.com
public static string traducida;
public static void Main(string[] args)
{
//TranslateAsync().Wait();
//Console.ReadKey();
}
public static void iniciar() {
TranslateAsync().Wait();
Console.ReadKey();
}
/// Demonstrates getting an access token and using the token to translate.
private static async Task TranslateAsync()
{
var translatorService = new TranslatorService.LanguageServiceClient();
var authTokenSource = new AzureAuthToken(SubscriptionKey);
var token = string.Empty;
try
{
token = await authTokenSource.GetAccessTokenAsync();
}
catch (HttpRequestException)
{
switch (authTokenSource.RequestStatusCode)
{
case HttpStatusCode.Unauthorized:
Console.WriteLine("Request to token service is not authorized (401). Check that the Azure subscription key is valid.");
break;
case HttpStatusCode.Forbidden:
Console.WriteLine("Request to token service is not authorized (403). For accounts in the free-tier, check that the account quota is not exceeded.");
break;
}
throw;
}
traducida = translatorService.Translate(token, "Hello World", "en", "fr", "text/plain", "general", string.Empty);
//Console.WriteLine("Translated to French: {0}", translatorService.Translate(token, "Hello World", "en", "fr", "text/plain", "general", string.Empty));
}
}
}
|
[
"soric.matko@gmail.com"
] |
soric.matko@gmail.com
|
76ed5acab4d05e27f6421542aa707aaf3fd5d882
|
e23a4f57ce5474d468258e5e63b9e23fb6011188
|
/140_gui/pyqt_pyside/examples/PyQt5_From_A-Z/grid_layout_finished.py
|
85447004de92d82364cad5e37cd54c06fd797271
|
[] |
no_license
|
syurskyi/Python_Topics
|
52851ecce000cb751a3b986408efe32f0b4c0835
|
be331826b490b73f0a176e6abed86ef68ff2dd2b
|
refs/heads/master
| 2023-06-08T19:29:16.214395
| 2023-05-29T17:09:11
| 2023-05-29T17:09:11
| 220,583,118
| 3
| 2
| null | 2023-02-16T03:08:10
| 2019-11-09T02:58:47
|
Python
|
UTF-8
|
Python
| false
| false
| 1,473
|
py
|
import sys
from PyQt5.QtWidgets import *
class DlgMain(QDialog):
def __init__(self):
super().__init__()
self.setWindowTitle("My GUI")
###### Create Widgets
self.btn0 = QPushButton("0")
self.btn1 = QPushButton("1")
self.btn2 = QPushButton("2")
self.btn3 = QPushButton("3")
self.btn4 = QPushButton("4")
self.btn5 = QPushButton("5")
self.btn6 = QPushButton("6")
self.btn7 = QPushButton("7")
self.btn8 = QPushButton("8")
self.btn9 = QPushButton("9")
self.btnCalc = QPushButton("Calculate")
self.setupLayout()
def setupLayout(self):
###### Setup Layout
self.mainLayout = QGridLayout()
self.mainLayout.addWidget(self.btn1, 4, 0)
self.mainLayout.addWidget(self.btn2, 4, 1)
self.mainLayout.addWidget(self.btn3, 4, 2)
self.mainLayout.addWidget(self.btn4, 3, 0)
self.mainLayout.addWidget(self.btn5, 3, 1)
self.mainLayout.addWidget(self.btn6, 3, 2)
self.mainLayout.addWidget(self.btn7, 2, 0)
self.mainLayout.addWidget(self.btn8, 2, 1)
self.mainLayout.addWidget(self.btn9, 2, 2)
self.mainLayout.addWidget(self.btn0, 5, 1)
self.mainLayout.addWidget(self.btnCalc, 0, 0, 1, 3)
self.setLayout(self.mainLayout)
if __name__ == "__main__":
app = QApplication(sys.argv)
dlgMain = DlgMain()
dlgMain.show()
sys.exit(app.exec_())
|
[
"sergejyurskyj@yahoo.com"
] |
sergejyurskyj@yahoo.com
|
4f66a53c65fec5b4196f05e07ca1ccfc1d230bdd
|
b31eceb853c456a65bf69a2768746e452080e52b
|
/models.py
|
6191e81c9607e37c9acb37b4ef1e1cfec22569c2
|
[] |
no_license
|
100ballovby/RegistrationLoginProject
|
aa3c4c7e78e9051baa6fe5ee3a7caa582b7cee17
|
0326e60ef12b454b85ef53db600626d28e620264
|
refs/heads/master
| 2023-04-25T03:38:13.488835
| 2021-05-07T10:49:05
| 2021-05-07T10:49:05
| 356,241,390
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 536
|
py
|
from werkzeug.security import generate_password_hash, check_password_hash
from app import db
from flask_login import UserMixin
class User(UserMixin, db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(100), unique=True)
password = db.Column(db.String(100))
def set_password(self, password):
self.password = generate_password_hash(password)
def check_password(self, password):
return check_password_hash(self.password, password)
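
A minimal round-trip of the two helpers; no database access is needed to exercise the hashing, and the field values are illustrative:

u = User(username='alice')
u.set_password('s3cret')
assert u.check_password('s3cret')
assert not u.check_password('wrong')
# u.password now holds a salted hash such as 'pbkdf2:sha256:...', never the plaintext

One caveat: depending on the werkzeug version and hash method, generate_password_hash() can produce strings longer than 100 characters, so the String(100) column may need widening.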
|
[
"greatraksin@icloud.com"
] |
greatraksin@icloud.com
|
ac2af978c046a708f98bd9a243417cce387b618b
|
d1187000cb4adf677cb255243b25bd78cbf1dbd1
|
/bin/netconf.py
|
ecfd21c256afda2487d8cf38351f00f7783d1c6f
|
[] |
no_license
|
kgrozis/netconf
|
f87dadbb3082f69c4bad41f10c64224c135d1958
|
997be27a6b74f3ded5ec9a57fde2d227d1b67a70
|
refs/heads/master
| 2021-01-09T20:26:44.481551
| 2017-01-03T19:28:40
| 2017-01-03T19:28:40
| 60,799,673
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,416
|
py
|
#!/usr/bin/python
# coding=utf-8
from ncclient import manager
import logging, sys, os
from urlparse import parse_qs
def nc_connect(host, port, user, password):
return manager.connect(host=host,
port=port,
username=user,
password=password,
device_params=None,
allow_agent=False,
look_for_keys=False,
timeout=30
)
def parse_hello(connection):
capabilities = []
for capability in connection.server_capabilities:
if capability.startswith('http'):
for key in parse_qs(capability):
if key.startswith('http'):
capabilities.append(' YANG --> ' + parse_qs(capability)[key][0])
else:
capability = capability.split(':')
if capability[-1].startswith('1.'):
capabilities.append(' ' + capability[1].upper() + ' --> ' + capability[-2] + ' ' + capability[-1])
else:
capabilities.append(' ' + capability[1].upper() + ' --> ' + capability[-1].split('?')[0])
capabilities.sort()
return connection.session_id, capabilities
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger('NETCONF')
#Variables
host = sys.argv[1]
#host = '172.16.159.113'
    user = os.getenv("USER")
    passwd = 'cisco'
    port = 830
#Create Connection
connection = nc_connect(host, port, user, passwd)
id, capabilities = parse_hello(connection)
#Session ID
print 'Session ID: ', id
print
#Capabilities
print 'Capabilities: '
for capability in capabilities:
print capability
'''
Optional Base Capabilities (RFC 6241)
url – the URL data store is supported (scheme=http, ftp, file, …)
startup – the startup data store is supported
writable-running – the running data store can be modified directly
Non-base capabilities
notification – Netconf asynchronous event messages (RFC 5277), also with interleave
'''
'''
Config on Router (IOS XR 5.3.0)
ssh server netconf port 830
netconf-yang agent ssh
'''
'''
USE:
./netconfxr.py host
assumes port 830, user same as OS, and password cisco
EXAMPLES:
a)
$ python netconfxr.py 172.16.159.113
Session ID: 4895
Capabilities:
IETF --> base 1.1
IETF --> candidate 1.0
IETF --> ietf-inet-types
IETF --> ietf-netconf-monitoring
IETF --> ietf-yang-types
IETF --> rollback-on-error 1.0
IETF --> validate 1.1
YANG --> Cisco-IOS-XR-cdp-cfg
YANG --> Cisco-IOS-XR-cdp-oper
YANG --> Cisco-IOS-XR-crypto-sam-cfg
YANG --> Cisco-IOS-XR-crypto-sam-oper
YANG --> Cisco-IOS-XR-ha-eem-cfg
YANG --> Cisco-IOS-XR-ha-eem-oper
YANG --> Cisco-IOS-XR-ifmgr-cfg
YANG --> Cisco-IOS-XR-ifmgr-oper
YANG --> Cisco-IOS-XR-infra-infra-cfg
YANG --> Cisco-IOS-XR-ip-domain-cfg
YANG --> Cisco-IOS-XR-ip-domain-oper
YANG --> Cisco-IOS-XR-ip-iarm-datatypes
YANG --> Cisco-IOS-XR-ipv4-io-cfg
YANG --> Cisco-IOS-XR-ipv4-io-oper
YANG --> Cisco-IOS-XR-ipv4-ma-cfg
YANG --> Cisco-IOS-XR-ipv4-ma-oper
YANG --> Cisco-IOS-XR-ipv6-ma-cfg
YANG --> Cisco-IOS-XR-ipv6-ma-oper
YANG --> Cisco-IOS-XR-lib-keychain-cfg
YANG --> Cisco-IOS-XR-lib-keychain-oper
YANG --> Cisco-IOS-XR-man-netconf-cfg
YANG --> Cisco-IOS-XR-man-xml-ttyagent-cfg
YANG --> Cisco-IOS-XR-man-xml-ttyagent-oper
YANG --> Cisco-IOS-XR-parser-cfg
YANG --> Cisco-IOS-XR-qos-ma-oper
YANG --> Cisco-IOS-XR-rgmgr-cfg
YANG --> Cisco-IOS-XR-rgmgr-oper
YANG --> Cisco-IOS-XR-shellutil-cfg
YANG --> Cisco-IOS-XR-shellutil-oper
YANG --> Cisco-IOS-XR-tty-management-cfg
YANG --> Cisco-IOS-XR-tty-management-datatypes
YANG --> Cisco-IOS-XR-tty-management-oper
YANG --> Cisco-IOS-XR-tty-server-cfg
YANG --> Cisco-IOS-XR-tty-server-oper
YANG --> Cisco-IOS-XR-tty-vty-cfg
YANG --> Cisco-IOS-XR-types
$
'''
|
[
"kgrozis@mac.com"
] |
kgrozis@mac.com
|
9e7e5d9e60c715860dca0cb062bd1beb0810e9ae
|
e7ce273f404f82fd8672c97e50b386509c8f9870
|
/Web/blog/base/tests/models.py
|
6730e6c8a65ccd308f2cb035991a5ace35484805
|
[] |
no_license
|
rzlatkov/Softuni
|
3edca300f8ecdcfd86e332557712e17552bc91c3
|
a494e35bff965b2b9dccc90e1381d5a1a23737a1
|
refs/heads/main
| 2023-07-02T12:49:59.737043
| 2021-08-13T20:47:07
| 2021-08-13T20:47:07
| 319,088,872
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,801
|
py
|
from django.contrib.auth.models import User
from django.test import TestCase
from base.models import Post, Category, Comment, Profile
class TestPostModel(TestCase):
def test_post_model(self):
user = User.objects.create_user(username='john', password='adidas1234')
category = Category.objects.create(name='Automotive')
post = Post.objects.create(title='BMW', author=user, snippet='vrumvrum')
post.category.add(category)
self.assertEqual(post.author.pk, user.pk)
self.assertEqual(post.author.username, user.username)
class TestCategoryModel(TestCase):
def test_category_model(self):
category = Category.objects.create(name='Automotive')
qs = Category.objects.first()
self.assertEqual(qs.name, category.name)
class TestCommentModel(TestCase):
def test_comment_model(self):
user = User.objects.create_user(username='john', password='adidas1234')
category = Category.objects.create(name='Automotive')
post = Post.objects.create(title='BMW', author=user, snippet='vrumvrum')
post.category.add(category)
comment = Comment.objects.create(author=user, name='asdasd', content='test', post=post)
self.assertEqual(comment.post.pk, post.pk)
self.assertEqual(comment.author.pk, user.pk)
cat = post.category.first()
self.assertEqual(cat.name, category.name)
class TestProfileModel(TestCase):
def test_profile_upon_user_creation(self):
user = User.objects.create_user(username='john', password='adidas1234')
user_profile = user.profile
profile = Profile.objects.first()
self.assertEqual(profile.pk, user.pk)
self.assertEqual(user_profile.pk, profile.pk)
self.assertEqual('john', profile.user.username)
|
[
"nozzller@gmail.com"
] |
nozzller@gmail.com
|
a47f5b70b9545ca2cf8a5bd1e460b160ec0f7cec
|
033da72a51c76e5510a06be93229a547a538cf28
|
/Data Engineer with Python Track/25. Introduction to MongoDB in Python/Chapter/03. Get Only What You Need, and Fast/12-Pages of particle-prized people.py
|
dcb8dae05b4f7a5f7c7953cd5120b3f5217b52e1
|
[] |
no_license
|
ikhwan1366/Datacamp
|
d5dcd40c1bfeb04248977014260936b1fb1d3065
|
7738614eaebec446842d89177ae2bc30ab0f2551
|
refs/heads/master
| 2023-03-06T13:41:06.522721
| 2021-02-17T22:41:54
| 2021-02-17T22:41:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,581
|
py
|
'''
Pages of particle-prized people
You and a friend want to set up a website that gives information on Nobel laureates with awards relating to particle phenomena. You want to present these laureates one page at a time, with three laureates per page. You decide to order the laureates chronologically by award year. When there is a "tie" in ordering (i.e. two laureates were awarded prizes in the same year), you want to order them alphabetically by surname.
Instructions
100 XP
- Complete the function get_particle_laureates that, given page_number and page_size, retrieves a given page of prize data on laureates who have the word "particle" (use $regex) in their prize motivations ("prizes.motivation"). Sort laureates first by ascending "prizes.year" and next by ascending "surname".
- Collect and save the first nine pages of laureate data to pages.
'''
from pprint import pprint
# Write a function to retrieve a page of data
def get_particle_laureates(page_number=1, page_size=3):
if page_number < 1 or not isinstance(page_number, int):
raise ValueError("Pages are natural numbers (starting from 1).")
particle_laureates = list(
db.laureates.find(
{'prizes.motivation': {'$regex': "particle"}},
["firstname", "surname", "prizes"])
.sort([('prizes.year', 1), ('surname', 1)])
.skip(page_size * (page_number - 1))
.limit(page_size))
return particle_laureates
# Collect and save the first nine pages
pages = [get_particle_laureates(page_number=page) for page in range(1, 10)]  # pages 1..9 (range end is exclusive)
pprint(pages[0])
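# Note for running this outside DataCamp (assumption: the exercise's `db`
# handle comes from a pymongo client over the "nobel" sample database):
#   from pymongo import MongoClient
#   client = MongoClient("localhost", 27017)
#   db = client["nobel"]   # exposes db.laureates as used above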
|
[
"surel.chandrapratama@gmail.com"
] |
surel.chandrapratama@gmail.com
|
f5b79e3925b4be962a38cf717bbb9dd8846ce909
|
945d9e4746b547eea98033d965096d9486e79875
|
/plotting.py
|
6805411c34402405cd62f21b97f60a59fdebf428
|
[] |
no_license
|
VivekVinushanth/flask-website
|
f0d7d277c51c4500aec6dba4aac119faf9a7a9ab
|
763f546e0d539d8db63fd328f2674c39a6385bae
|
refs/heads/master
| 2022-01-25T20:32:27.371569
| 2019-05-30T09:48:26
| 2019-05-30T09:48:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,542
|
py
|
import pandas as pd
from plotnine import *
def plot(time_stamps, type):  # note: `type` shadows the builtin; name kept for API compatibility
    try:
        # read data
        data = pd.read_csv("OutputDTM.csv")
        value = time_stamps[0]
        if type == "Month":
            nudge_x_values = [0, 0, -0.4, 0.5, -0.4, 0, 0.2, 0.7, 0.2, 0.5, 0.5, 0.9, 0.9]
        else:
            nudge_x_values = [0 for i in time_stamps]
        fig = (
            ggplot(data, aes(x=type, y='Word'))
            + geom_tile(aes(fill='Probability'), colour="black", stat="identity")
            + scale_fill_gradient(low="white", high="blue")
            + facet_wrap('~ TopicID', scales="free_y", ncol=5)
            + geom_text(data.loc[data.loc[:, type] == value], aes(label='Word'), size=9, nudge_x=nudge_x_values[len(time_stamps)])
            + theme_bw()
            + theme(panel_spacing=.75)
            + theme(panel_grid_major=element_blank(), legend_position="bottom", panel_grid_minor=element_blank())
            + theme(axis_ticks=element_blank(), axis_text_y=element_blank(), axis_title_x=element_blank(), axis_title_y=element_blank(), axis_text_x=element_text(angle=60, vjust=0.1, hjust=0.1, size=5), strip_background=element_blank(), strip_text=element_text(size=7), legend_text=element_text(size=4), legend_title=element_text(size=4), plot_margin=0.1, legend_margin=-0.6, legend_key_height=0.4)
        )
        fig.save(filename="03.png", path='./static/', format='png')
        return True
    except Exception:
        return False
|
[
"aathmant@gmail.com"
] |
aathmant@gmail.com
|
9e7a337d67a984ae180402e72a49623e4d1bf731
|
5977adc1f60df46c88f3b33d3d11455577cd4b94
|
/tsn/model/norm_helper.py
|
18b5a90f44c7e5e0cbdf910321f72e896ca72a88
|
[
"Apache-2.0"
] |
permissive
|
ttykelly/TSN
|
ad551f033912df6c7683865829b5d00a18284018
|
ec6ad668d20f477df44eab7035e2553d95a835f3
|
refs/heads/master
| 2023-03-16T03:03:42.393558
| 2021-01-26T07:32:26
| 2021-01-26T07:32:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,310
|
py
|
# -*- coding: utf-8 -*-
"""
@date: 2020/9/23 2:35 PM
@file: norm_helper.py
@author: zj
@description:
"""
import torch.nn as nn
from functools import partial
from .layers.group_norm_wrapper import GroupNormWrapper
def convert_sync_bn(model, process_group):
sync_bn_module = nn.SyncBatchNorm.convert_sync_batchnorm(model, process_group)
return sync_bn_module
def get_norm(cfg):
"""
Args:
cfg (CfgNode): model building configs, details are in the comments of
the config file.
Returns:
nn.Module: the normalization layer.
"""
if cfg.MODEL.NORM.TYPE == "BatchNorm2d":
return nn.BatchNorm2d
elif cfg.MODEL.NORM.TYPE == "GroupNorm":
num_groups = cfg.MODEL.NORM.GROUPS
return partial(GroupNormWrapper, num_groups=num_groups)
else:
raise NotImplementedError(
"Norm type {} is not supported".format(cfg.MODEL.NORM.TYPE)
)
def freezing_bn(model, partial_bn=False):
count = 0
for m in model.modules():
if isinstance(m, nn.BatchNorm2d):
count += 1
if count == 1 and partial_bn:
continue
m.eval()
# shutdown update in frozen mode
m.weight.requires_grad = False
m.bias.requires_grad = False
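# Usage sketch (hedged: cfg is assumed to be a yacs-style CfgNode; only the
# BatchNorm2d branch is exercised here, since GroupNormWrapper's signature
# lives in .layers.group_norm_wrapper):
#   norm_cls = get_norm(cfg)   # e.g. nn.BatchNorm2d when TYPE == "BatchNorm2d"
#   layer = norm_cls(64)       # a norm layer over 64 channels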
|
[
"wy163zhuj@163.com"
] |
wy163zhuj@163.com
|
aba46b99d2069ceec20b4492cd753a493b738309
|
f4b8c90c1349c8740c1805f7b6b0e15eb5db7f41
|
/test/test_event_custom_field_item.py
|
dfb969bd33f6b4d981d2ad3cd0c1ee3115ba5a41
|
[] |
no_license
|
CalPolyResDev/StarRezAPI
|
012fb8351159f96a81352d6c7bfa36cd2d7df13c
|
b184e1863c37ff4fcf7a05509ad8ea8ba825b367
|
refs/heads/master
| 2021-01-25T10:29:37.966602
| 2018-03-15T01:01:35
| 2018-03-15T01:01:35
| 123,355,501
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,086
|
py
|
# coding: utf-8
"""
StarRez API
This is a way to connect with the StarRez API. We are not the developers of the StarRez API, we are just an organization that uses it and wanted a better way to connect to it. # noqa: E501
OpenAPI spec version: 1.0.0
Contact: resdev@calpoly.edu
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import starrez_client
from starrez_client.models.event_custom_field_item import EventCustomFieldItem # noqa: E501
from starrez_client.rest import ApiException
class TestEventCustomFieldItem(unittest.TestCase):
"""EventCustomFieldItem unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testEventCustomFieldItem(self):
"""Test EventCustomFieldItem"""
# FIXME: construct object with mandatory attributes with example values
# model = starrez_client.models.event_custom_field_item.EventCustomFieldItem() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
[
"fedorareis@gmail.com"
] |
fedorareis@gmail.com
|
f3fd59897c789d4bf7076e6b748acfca3934eeb9
|
f4f54015298eedfbbdfcaaf5e2a9603112f803a5
|
/New_morning_batch/mod/prime_no.py~
|
0dd10feec5df0cc84c4f6361bbeb8d65ad145830
|
[] |
no_license
|
raviramawat8/Old_Python_Codes
|
f61e19bff46856fda230a096aa789c7e54bd97ca
|
f940aed0611b0636e1a1b6826fa009ceb2473c2b
|
refs/heads/master
| 2020-03-22T22:54:50.964816
| 2018-06-16T01:39:43
| 2018-06-16T01:39:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 594
|
"""This module contains a function called prime.prime function takes one argument as a int number and return True if no is prime else return false."""
def prime(num):
"""It will return True if num is prime else it will return False."""
for var in range(2,num//2):
if num % var == 0 :
flag = False
break
else :
flag = True
if flag == True :
return True
else :
return False
if __name__ == "__main__" :
print(prime(int(input("Enter a no. - ").strip())))
print("Hello")
print("hi")
|
[
"sachinyadav3496@gmail.com"
] |
sachinyadav3496@gmail.com
|
|
0ab22da9dd4c4bc2c29f7f4a1492d14d628ef17c
|
152370a70a0e99fe854a31dcde49c3966d53b3b8
|
/day9/word.py
|
f916f0014f14544f9a90338f90715ecc2609dae4
|
[] |
no_license
|
thesharpshooter/codeforce
|
c6fb93f14faa267d7af2cc61142b89c77d4e150b
|
e687696cc63245f3d3f399b38edabe8e6fdd25b3
|
refs/heads/master
| 2021-01-11T18:03:34.486823
| 2017-03-23T18:56:12
| 2017-03-23T18:56:12
| 79,480,391
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 194
|
py
|
string = raw_input()
lower_count = sum(map(str.islower,string))
upper_count = sum(map(str.isupper,string))
if lower_count < upper_count:
print string.upper()
else:
print string.lower()
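# Python 3 equivalent of the Python 2 snippet above:
#   s = input()
#   print(s.upper() if sum(map(str.islower, s)) < sum(map(str.isupper, s)) else s.lower())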
|
[
"prakash9266@gmail.com"
] |
prakash9266@gmail.com
|
372ad8dfd0d414bbd320c485048ab9715efc3aa6
|
162e0e4791188bd44f6ce5225ff3b1f0b1aa0b0d
|
/examples/plot_changed_only_pprint_parameter.py
|
241dc3fd25daf68d0d39761e02f80423bf2b706e
|
[] |
no_license
|
testsleeekGithub/trex
|
2af21fa95f9372f153dbe91941a93937480f4e2f
|
9d27a9b44d814ede3996a37365d63814214260ae
|
refs/heads/master
| 2020-08-01T11:47:43.926750
| 2019-11-06T06:47:19
| 2019-11-06T06:47:19
| 210,987,245
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,026
|
py
|
"""
=================================
Compact estimator representations
=================================
This example illustrates the use of the print_changed_only global parameter.
Setting print_changed_only to True will alterate the representation of
estimators to only show the parameters that have been set to non-default
values. This can be used to have more compact representations.
"""
print(__doc__)
from mrex.linear_model import LogisticRegression
from mrex import set_config
lr = LogisticRegression(penalty='l1')
print('Default representation:')
print(lr)
# LogisticRegression(C=1.0, class_weight=None, dual=False, fit_intercept=True,
# intercept_scaling=1, l1_ratio=None, max_iter=100,
# multi_class='auto', n_jobs=None, penalty='l1',
# random_state=None, solver='warn', tol=0.0001, verbose=0,
# warm_start=False)
set_config(print_changed_only=True)
print('\nWith changed_only option:')
print(lr)
# LogisticRegression(penalty='l1')
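# Note (assumption): mrex appears to be a renamed fork of scikit-learn; the
# upstream equivalent would be:
#   from sklearn.linear_model import LogisticRegression
#   from sklearn import set_config
#   set_config(print_changed_only=True)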
|
[
"shkolanovaya@gmail.com"
] |
shkolanovaya@gmail.com
|
6ae8c5d0f194a0fb7363bd2bf0d5f322656edd0d
|
c2588b904cae9b93b94866c3871baaa93935ea06
|
/src/deepwalk/__main__.py
|
b2827ba369d628653f6151a9b72104f149b83134
|
[] |
no_license
|
Xueping/word2vec
|
c0a46f7fe73e8cd21595c97d2aa7ccf1d540063b
|
b3c69889cdaf226d1b94897615d4fcfcb10b3cf2
|
refs/heads/master
| 2021-01-10T14:35:18.677983
| 2016-01-28T23:59:53
| 2016-01-28T23:59:53
| 46,965,855
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,623
|
py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
import logging
import random
import sys
from gensim.models import Word2Vec
import graph
from skipgram import Skipgram
import walks as serialized_walks
# p = psutil.Process(os.getpid())
#p.set_cpu_affinity(list(range(cpu_count())))
#p.cpu_affinity(list(range(cpu_count())))
logger = logging.getLogger(__name__)
LOGFORMAT = "%(asctime).19s %(levelname)s %(filename)s: %(lineno)s %(message)s"
def debug(type_, value, tb):
if hasattr(sys, 'ps1') or not sys.stderr.isatty():
sys.__excepthook__(type_, value, tb)
else:
import traceback
import pdb
traceback.print_exception(type_, value, tb)
print(u"\n")
pdb.pm()
def process(args):
if args.format == "adjlist":
G = graph.load_adjacencylist(args.input, undirected=args.undirected)
elif args.format == "edgelist":
G = graph.load_edgelist(args.input, undirected=args.undirected)
elif args.format == "mat":
G = graph.load_matfile(args.input, variable_name=args.matfile_variable_name, undirected=args.undirected)
else:
raise Exception("Unknown file format: '%s'. Valid formats: 'adjlist', 'edgelist', 'mat'" % args.format)
# G = graphConstruction.buildGraphAPA()
print("Number of nodes: {}".format(len(G.nodes())))
num_walks = len(G.nodes()) * args.number_walks
print("Number of walks: {}".format(num_walks))
data_size = num_walks * args.walk_length
print("Data size (walks*length): {}".format(data_size))
if data_size < args.max_memory_data_size:
print("Walking...")
walks = graph.build_deepwalk_corpus(G, num_paths=args.number_walks,
path_length=args.walk_length, alpha=0, rand=random.Random(args.seed))
print("Training...")
model = Word2Vec(walks, size=args.representation_size, window=args.window_size, min_count=0, workers=args.workers)
else:
print("Data size {} is larger than limit (max-memory-data-size: {}). Dumping walks to disk.".format(data_size, args.max_memory_data_size))
print("Walking...")
walks_filebase = args.output + ".walks"
walk_files = serialized_walks.write_walks_to_disk(G, walks_filebase, num_paths=args.number_walks,
path_length=args.walk_length, alpha=0, rand=random.Random(args.seed),
num_workers=args.workers)
print("Counting vertex frequency...")
if not args.vertex_freq_degree:
vertex_counts = serialized_walks.count_textfiles(walk_files, args.workers)
else:
# use degree distribution for frequency in tree
vertex_counts = G.degree(nodes=G.iterkeys())
print("Training...")
model = Skipgram(sentences=serialized_walks.combine_files_iter(walk_files), vocabulary_counts=vertex_counts,
size=args.representation_size,
window=args.window_size, min_count=0, workers=args.workers)
model.save_word2vec_format(args.output)
def main():
parser = ArgumentParser("deepwalk",
formatter_class=ArgumentDefaultsHelpFormatter,
conflict_handler='resolve')
parser.add_argument("--debug", dest="debug", action='store_true', default=False,
help="drop a debugger if an exception is raised.")
parser.add_argument('--format', default='adjlist',
help='File format of input file')
parser.add_argument('--input', nargs='?', required=True,
help='Input graph file')
parser.add_argument("-l", "--log", dest="log", default="INFO",
help="log verbosity level")
parser.add_argument('--matfile-variable-name', default='network',
help='variable name of adjacency matrix inside a .mat file.')
parser.add_argument('--max-memory-data-size', default=1000000000, type=int,
help='Size to start dumping walks to disk, instead of keeping them in memory.')
parser.add_argument('--number-walks', default=10, type=int,
help='Number of random walks to start at each node')
parser.add_argument('--output', required=True,
help='Output representation file')
parser.add_argument('--representation-size', default=64, type=int,
help='Number of latent dimensions to learn for each node.')
parser.add_argument('--seed', default=0, type=int,
help='Seed for random walk generator.')
parser.add_argument('--undirected', default=True, type=bool,
help='Treat graph as undirected.')
parser.add_argument('--vertex-freq-degree', default=False, action='store_true',
help='Use vertex degree to estimate the frequency of nodes '
'in the random walks. This option is faster than '
'calculating the vocabulary.')
parser.add_argument('--walk-length', default=10, type=int,
help='Length of the random walk started at each node')
parser.add_argument('--window-size', default=5, type=int,
help='Window size of skipgram model.')
parser.add_argument('--workers', default=1, type=int,
help='Number of parallel processes.')
args = parser.parse_args()
numeric_level = getattr(logging, args.log.upper(), None)
logging.basicConfig(format=LOGFORMAT)
logger.setLevel(numeric_level)
if args.debug:
sys.excepthook = debug
process(args)
if __name__ == "__main__":
sys.exit(main())
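# Example invocation (hypothetical file names; flags as defined in main(),
# assuming the package layout supports `python -m deepwalk`):
#   python -m deepwalk --format edgelist --input graph.edgelist \
#       --number-walks 10 --walk-length 40 --output graph.embeddings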
|
[
"pengxueping@gmail.com"
] |
pengxueping@gmail.com
|
6e50912db103808c586e8671afb6a75660485c45
|
01c3ff1d74e754e0d4ce0fb7f8a8b329ec3766e1
|
/python_exercises/19others/new.py
|
6dc3659c467f80557bb521229f703b1e4a9bb2ae
|
[] |
no_license
|
vineel2014/Pythonfiles
|
5ad0a2b824b5fd18289d21aa8306099aea22c202
|
0d653cb9659fe750cf676a70035ab67176179905
|
refs/heads/master
| 2020-04-28T03:56:22.713558
| 2019-03-11T08:38:54
| 2019-03-11T08:38:54
| 123,681,939
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 130
|
py
|
from tkinter import *
root = Tk()
# root.quit ends mainloop cleanly; the builtin quit() would raise SystemExit
button = Button(root, text="Click here", command=root.quit)
button.pack()
root.mainloop()
|
[
"vineel2006@gmail.com"
] |
vineel2006@gmail.com
|
c9dccbc6f437ccc40accc1a839db9e30904aebea
|
a9024ba9ef408317a06253af125d34454ac3fac8
|
/datawandcli/tests/parameters_test.py
|
7ac0a470046d492a83aeea17297ba1b052a217c4
|
[] |
no_license
|
ferencberes/datawand-cli
|
562ac624264d4ec78153bd47e3213946830a2168
|
26c33ec09f940ee1f94da930957df6c256b550b5
|
refs/heads/master
| 2023-01-30T18:20:55.143403
| 2020-12-14T12:07:07
| 2020-12-14T12:07:07
| 254,948,772
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,675
|
py
|
from datawandcli.components.objects import *
from datawandcli.parametrization import ConfigGenerator
from shutil import rmtree
import subprocess, os
os.chdir("examples/parameter_handling/")
def load_last_line(fp):
last_line = ""
with open(fp) as f:
for line in f:
last_line = line.rstrip()
return last_line
def check_process(p, fp):
    if p.returncode != 0:
        # fixed: bind the file handle (readlines() was being called on the path string)
        with open(fp) as f:
            print(f.readlines())
def test_create_pipeline():
pipe = Pipeline("Trial")
mod = Module("resources/my_module.py", name="my_module")
nb = Notebook("resources/Sleep.ipynb", name="Sleep")
pys = PyScript("resources/sample.py", name="PySample")
pipe.add(mod)
pipe.add(nb)
pipe.add(pys)
pipe.add_dependencies("PySample",["Sleep"])
pipe.save()
print(pipe.config)
assert len(pipe.parts) == 3
### Demo 0 ###
# Only selected resources (clones) are executed as dependencies
def test_demo_0_init():
cg = ConfigGenerator("Trial.json", experiment_name="demo_0", experiment_dir="experiments/demo_0/")
DEFAULTS = {}
DEFAULTS["p1"] = 0.5
DEFAULTS["p3"] = "default"
DEFAULTS["sleep"] = 0
PARAMETERS = {}
for item in cg.pythonitem_names:
PARAMETERS[item] = []
PARAMETERS["PySample"].append({"p1":1.0,"p2":0.5})
cg.save_params(DEFAULTS, PARAMETERS, local_scheduler=True)
cg.pipeline.save()
assert len(cg.pipeline.parts) == 2
assert cg.pipeline.num_clones["PySample"] == 1
def test_demo_0_run():
fp = "experiments/demo_0/demo_0.log"
p = subprocess.Popen("bash demo_0.sh 1", cwd="experiments/demo_0/", stdout=open(fp, "w"), shell=True)
p_status = p.wait()
check_process(p, fp)
assert p.returncode == 0
with open(fp) as f:
output = f.read()
rmtree("experiments/demo_0/")
assert "PySample_CLONE_1 task was executed!" in output
assert "Sleep_CLONE_1 task was executed!" not in output
### Demo 1 ###
# Testing custom parameter usage + dependency handling
def test_demo_1_init():
cg = ConfigGenerator("Trial.json", experiment_name="demo_1", experiment_dir="experiments/demo_1/")
DEFAULTS = {}
DEFAULTS["p1"] = 0.5
DEFAULTS["p3"] = "default"
DEFAULTS["sleep"] = 0
PARAMETERS = {}
for item in cg.pythonitem_names:
PARAMETERS[item] = []
PARAMETERS["PySample"].append({"p1":1.0,"p2":0.5})
PARAMETERS["PySample"].append({"p1":0.0,"p2":1.0})
# dependency is properly selected this time
PARAMETERS["Sleep"].append({})
cg.save_params(DEFAULTS, PARAMETERS, local_scheduler=True)
cg.pipeline.save()
assert len(cg.pipeline.parts) == 4
assert cg.pipeline.num_clones["PySample"] == 2
assert cg.pipeline.num_clones["Sleep"] == 1
def test_demo_1_params():
pipe = Pipeline()
pipe.load("experiments/demo_1/Trial.json")
assert pipe.default_config["p1"] == 0.5
assert pipe.default_config["p3"] == "default"
assert pipe.parts["PySample_CLONE_1"].config["p1"] == 1.0
assert pipe.parts["PySample_CLONE_2"].config["p1"] == 0.0
assert pipe.parts["PySample_CLONE_1"].config["p2"] == 0.5
assert pipe.parts["PySample_CLONE_2"].config["p2"] == 1.0
def test_demo_1_run():
fp = "experiments/demo_1/demo_1.log"
p = subprocess.Popen("bash demo_1.sh 1", cwd="experiments/demo_1/", stdout=open(fp, "w"), shell=True)
p_status = p.wait()
check_process(p, fp)
assert p.returncode == 0
with open(fp) as f:
output = f.read()
assert "PySample_CLONE_1 task was executed!" in output
assert "PySample_CLONE_2 task was executed!" in output
assert "Sleep_CLONE_1 task was executed!" in output
def test_demo_1_output():
out_1 = load_last_line("experiments/demo_1/resources/PySample_CLONE_1.log")
out_2 = load_last_line("experiments/demo_1/resources/PySample_CLONE_2.log")
assert os.path.exists("experiments/demo_1/resources/Sleep_CLONE_1.log")
rmtree("experiments/demo_1/")
assert out_1 == "1.0 0.5 default"
assert out_2 == "0.0 1.0 default"
### Demo 2 ###
# Testing default parameter usage
def test_demo_2_init():
cg = ConfigGenerator("Trial.json", experiment_name="demo_2", experiment_dir="experiments/demo_2/")
DEFAULTS = {}
DEFAULTS["p1"] = 0.1
DEFAULTS["p3"] = "default"
DEFAULTS["sleep"] = 0
PARAMETERS = {}
for item in cg.pythonitem_names:
PARAMETERS[item] = []
PARAMETERS["PySample"].append({"p2":0.5})
PARAMETERS["PySample"].append({"p2":1.0})
PARAMETERS["PySample"].append({"p1":10.0,"p2":-10.0})
PARAMETERS["PySample"].append({"p1":-10.0,"p2":10.0})
cg.save_params(DEFAULTS, PARAMETERS, local_scheduler=True)
cg.pipeline.save()
assert len(cg.pipeline.parts) == 5
assert cg.pipeline.num_clones["PySample"] == 4
def test_demo_2_params():
pipe = Pipeline()
pipe.load("experiments/demo_2/Trial.json")
assert pipe.default_config["p1"] == 0.1
assert pipe.default_config["p3"] == "default"
assert "p1" not in pipe.parts["PySample_CLONE_1"].config
assert "p1" not in pipe.parts["PySample_CLONE_2"].config
assert pipe.parts["PySample_CLONE_1"].config["p2"] == 0.5
assert pipe.parts["PySample_CLONE_2"].config["p2"] == 1.0
assert pipe.parts["PySample_CLONE_3"].config["p1"] == 10.0
assert pipe.parts["PySample_CLONE_4"].config["p1"] == -10.0
assert pipe.parts["PySample_CLONE_3"].config["p2"] == -10.0
assert pipe.parts["PySample_CLONE_4"].config["p2"] == 10.0
def test_demo_2_run():
fp = "experiments/demo_2/demo_2.log"
p = subprocess.Popen("bash demo_2.sh 1", cwd="experiments/demo_2/", stdout=open(fp, "w"), shell=True)
    p_status = p.wait()
    check_process(p, fp)  # print diagnostics before asserting, as in the other tests
    assert p.returncode == 0
with open(fp) as f:
output = f.read()
assert "Sleep_CLONE_1 task was executed!" not in output
assert "PySample_CLONE_1 task was executed!" in output
assert "PySample_CLONE_2 task was executed!" in output
assert "PySample_CLONE_3 task was executed!" in output
assert "PySample_CLONE_4 task was executed!" in output
def test_demo_2_output():
out_1 = load_last_line("experiments/demo_2/resources/PySample_CLONE_1.log")
out_2 = load_last_line("experiments/demo_2/resources/PySample_CLONE_2.log")
out_3 = load_last_line("experiments/demo_2/resources/PySample_CLONE_3.log")
out_4 = load_last_line("experiments/demo_2/resources/PySample_CLONE_4.log")
rmtree("experiments/demo_2/")
assert out_1 == "0.1 0.5 default"
assert out_2 == "0.1 1.0 default"
assert out_3 == "10.0 -10.0 default"
assert out_4 == "-10.0 10.0 default"
rmtree("experiments/")
|
[
"ferdzso05@gmail.com"
] |
ferdzso05@gmail.com
|
e1f9a3992c2edb1d6291407769239ae738d34fa5
|
9ff04cb71cc95e26e04114291cd34dcd860eb9db
|
/hotspot-featurextract-service/featureExtract/general_utils.py
|
c8a5fe9cce065c955a228864af7c7c017e3a944d
|
[] |
no_license
|
cwgong/hotspot-featurextract-service
|
323260895d1ac33d7d855402c4d97b9a4433cbec
|
885bb7d96e2b140e97c774b1ec0cb190feade3ae
|
refs/heads/master
| 2022-11-16T14:08:28.635460
| 2020-07-10T06:00:06
| 2020-07-10T06:00:06
| 278,551,253
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,707
|
py
|
# -*- coding: utf-8 -*-
import time
import sys
import logging
import numpy as np
def get_logger(filename):
"""Return a logger instance that writes in filename
Args:
filename: (string) path to log.txt
Returns:
logger: (instance of logger)
"""
logger = logging.getLogger('logger')
logger.setLevel(logging.DEBUG)
logging.basicConfig(format='%(message)s', level=logging.DEBUG)
handler = logging.FileHandler(filename)
handler.setLevel(logging.DEBUG)
handler.setFormatter(logging.Formatter(
'%(asctime)s:%(levelname)s: %(message)s'))
logging.getLogger().addHandler(handler)
return logger
class Progbar(object):
"""Progbar class copied from keras (https://github.com/fchollet/keras/)
Displays a progress bar.
    Small edit: added a `strict` arg to update.
    # Arguments
        target: Total number of steps expected.
        width: Width of the progress bar in characters.
        verbose: Verbosity mode (1 = progress bar, 2 = summary at completion).
"""
def __init__(self, target, width=30, verbose=1):
self.width = width
self.target = target
self.sum_values = {}
self.unique_values = []
self.start = time.time()
self.total_width = 0
self.seen_so_far = 0
self.verbose = verbose
def update(self, current, values=[], exact=[], strict=[]):
"""
Updates the progress bar.
# Arguments
current: Index of current step.
values: List of tuples (name, value_for_last_step).
The progress bar will display averages for these values.
exact: List of tuples (name, value_for_last_step).
The progress bar will display these values directly.
"""
for k, v in values:
if k not in self.sum_values:
self.sum_values[k] = [v * (current - self.seen_so_far),
current - self.seen_so_far]
self.unique_values.append(k)
else:
self.sum_values[k][0] += v * (current - self.seen_so_far)
self.sum_values[k][1] += (current - self.seen_so_far)
for k, v in exact:
if k not in self.sum_values:
self.unique_values.append(k)
self.sum_values[k] = [v, 1]
for k, v in strict:
if k not in self.sum_values:
self.unique_values.append(k)
self.sum_values[k] = v
self.seen_so_far = current
now = time.time()
if self.verbose == 1:
prev_total_width = self.total_width
sys.stdout.write("\b" * prev_total_width)
sys.stdout.write("\r")
numdigits = int(np.floor(np.log10(self.target))) + 1
barstr = '%%%dd/%%%dd [' % (numdigits, numdigits)
bar = barstr % (current, self.target)
prog = float(current)/self.target
prog_width = int(self.width*prog)
if prog_width > 0:
bar += ('='*(prog_width-1))
if current < self.target:
bar += '>'
else:
bar += '='
bar += ('.'*(self.width-prog_width))
bar += ']'
sys.stdout.write(bar)
self.total_width = len(bar)
if current:
time_per_unit = (now - self.start) / current
else:
time_per_unit = 0
eta = time_per_unit*(self.target - current)
info = ''
if current < self.target:
info += ' - ETA: %ds' % eta
else:
info += ' - %ds' % (now - self.start)
for k in self.unique_values:
if type(self.sum_values[k]) is list:
info += ' - %s: %.4f' % (k,
self.sum_values[k][0] / max(1, self.sum_values[k][1]))
else:
info += ' - %s: %s' % (k, self.sum_values[k])
self.total_width += len(info)
if prev_total_width > self.total_width:
info += ((prev_total_width-self.total_width) * " ")
sys.stdout.write(info)
sys.stdout.flush()
if current >= self.target:
sys.stdout.write("\n")
if self.verbose == 2:
if current >= self.target:
info = '%ds' % (now - self.start)
for k in self.unique_values:
info += ' - %s: %.4f' % (k,
self.sum_values[k][0] / max(1, self.sum_values[k][1]))
sys.stdout.write(info + "\n")
def add(self, n, values=[]):
self.update(self.seen_so_far+n, values)
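# Minimal usage sketch (standalone; `time` is already imported above):
if __name__ == '__main__':
    bar = Progbar(target=50)
    for step in range(1, 51):
        time.sleep(0.02)                                 # simulate work
        bar.update(step, values=[('loss', 1.0 / step)])  # averaged display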
|
[
"1372864243@qq.com"
] |
1372864243@qq.com
|
3b7d30c32162ecda06916c6cad1c8db7a6a2fe9c
|
52bab17c7554fb4e3533d3f5742c1e65e063903a
|
/sample/add_image.py
|
018053f58311417fe55cd445a0f61cfa233e44de
|
[] |
no_license
|
BlueLens/stylelens-index
|
f67d3e633b9a9909647d895bb12a238a6934b91c
|
c52cf1e1d26d1197936dff20b05c477f0b315287
|
refs/heads/master
| 2021-05-11T00:57:40.789986
| 2018-01-30T12:52:52
| 2018-01-30T12:52:52
| 108,110,962
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 424
|
py
|
from __future__ import print_function
from stylelens_index.index_images import IndexImages
from pprint import pprint
api_instance = IndexImages()
image = {}
image['product_id'] = '1234'
image['host_code'] = 'HCxxx'
image['product_no'] = '1'
image['version_id'] = 'test'
try:
api_response = api_instance.add_image(image)
pprint(api_response)
except Exception as e:
print("Exception when calling add_image: %s\n" % e)
|
[
"master@bluehack.net"
] |
master@bluehack.net
|
8aa62e38ae5fd087238b87db6f0c2c970ece3887
|
ee15248e4eb2065bc55215e09f66ff35beccba3c
|
/glance/common/imageutils.py
|
79a7277084462442c507f2465bbe4de231f1741f
|
[
"Apache-2.0"
] |
permissive
|
starlingx-staging/stx-glance
|
7fa03ab352886a7f073f5c913b553daa2786d1f2
|
11568729648722ffba43be9ce54e25ba02f6d87f
|
refs/heads/master
| 2020-03-18T00:55:48.228497
| 2018-12-20T21:08:34
| 2019-01-11T15:51:58
| 134,119,801
| 0
| 6
|
Apache-2.0
| 2019-01-31T15:48:42
| 2018-05-20T04:31:18
|
Python
|
UTF-8
|
Python
| false
| false
| 6,462
|
py
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Helper methods to deal with images.
Adapted by WRS from Cinder (cinder/openstack/common/imageutils.py)
Note: to remove in Liberty (present in oslo_utils)
"""
import re
from oslo_utils import strutils
from glance import i18n
_ = i18n._
class QemuImgInfo(object):
BACKING_FILE_RE = re.compile((r"^(.*?)\s*\(actual\s+path\s*:"
r"\s+(.*?)\)\s*$"), re.I)
TOP_LEVEL_RE = re.compile(r"^([\w\d\s\_\-]+):(.*)$")
SIZE_RE = re.compile(r"(\d*\.?\d+)(\w+)?(\s*\(\s*(\d+)\s+bytes\s*\))?",
re.I)
def __init__(self, cmd_output=None):
details = self._parse(cmd_output or '')
self.image = details.get('image')
self.backing_file = details.get('backing_file')
self.file_format = details.get('file_format')
self.virtual_size = details.get('virtual_size')
self.cluster_size = details.get('cluster_size')
self.disk_size = details.get('disk_size')
self.snapshots = details.get('snapshot_list', [])
self.encrypted = details.get('encrypted')
def __str__(self):
lines = [
'image: %s' % self.image,
'file_format: %s' % self.file_format,
'virtual_size: %s' % self.virtual_size,
'disk_size: %s' % self.disk_size,
'cluster_size: %s' % self.cluster_size,
'backing_file: %s' % self.backing_file,
]
if self.snapshots:
lines.append("snapshots: %s" % self.snapshots)
if self.encrypted:
lines.append("encrypted: %s" % self.encrypted)
return "\n".join(lines)
def _canonicalize(self, field):
# Standardize on underscores/lc/no dash and no spaces
# since qemu seems to have mixed outputs here... and
# this format allows for better integration with python
# - i.e. for usage in kwargs and such...
field = field.lower().strip()
for c in (" ", "-"):
field = field.replace(c, '_')
return field
def _extract_bytes(self, details):
# Replace it with the byte amount
real_size = self.SIZE_RE.search(details)
if not real_size:
raise ValueError(_('Invalid input value "%s".') % details)
magnitude = real_size.group(1)
unit_of_measure = real_size.group(2)
bytes_info = real_size.group(3)
if bytes_info:
return int(real_size.group(4))
elif not unit_of_measure:
return int(magnitude)
return strutils.string_to_bytes('%s%sB' % (magnitude, unit_of_measure),
return_int=True)
def _extract_details(self, root_cmd, root_details, lines_after):
real_details = root_details
if root_cmd == 'backing_file':
# Replace it with the real backing file
backing_match = self.BACKING_FILE_RE.match(root_details)
if backing_match:
real_details = backing_match.group(2).strip()
elif root_cmd in ['virtual_size', 'cluster_size', 'disk_size']:
# Replace it with the byte amount (if we can convert it)
if root_details == 'None':
real_details = 0
else:
real_details = self._extract_bytes(root_details)
elif root_cmd == 'file_format':
real_details = real_details.strip().lower()
elif root_cmd == 'snapshot_list':
# Next line should be a header, starting with 'ID'
if not lines_after or not lines_after.pop(0).startswith("ID"):
msg = _("Snapshot list encountered but no header found!")
raise ValueError(msg)
real_details = []
# This is the sprintf pattern we will try to match
# "%-10s%-20s%7s%20s%15s"
# ID TAG VM SIZE DATE VM CLOCK (current header)
while lines_after:
line = lines_after[0]
line_pieces = line.split()
if len(line_pieces) != 6:
break
# Check against this pattern in the final position
# "%02d:%02d:%02d.%03d"
date_pieces = line_pieces[5].split(":")
if len(date_pieces) != 3:
break
lines_after.pop(0)
real_details.append({
'id': line_pieces[0],
'tag': line_pieces[1],
'vm_size': line_pieces[2],
'date': line_pieces[3],
'vm_clock': line_pieces[4] + " " + line_pieces[5],
})
return real_details
def _parse(self, cmd_output):
# Analysis done of qemu-img.c to figure out what is going on here
# Find all points start with some chars and then a ':' then a newline
# and then handle the results of those 'top level' items in a separate
# function.
#
# TODO(harlowja): newer versions might have a json output format
# we should switch to that whenever possible.
# see: http://bit.ly/XLJXDX
contents = {}
lines = [x for x in cmd_output.splitlines() if x.strip()]
while lines:
line = lines.pop(0)
top_level = self.TOP_LEVEL_RE.match(line)
if top_level:
root = self._canonicalize(top_level.group(1))
if not root:
continue
root_details = top_level.group(2).strip()
details = self._extract_details(root, root_details, lines)
contents[root] = details
return contents
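# Illustrative parse of fabricated qemu-img style output (field layout
# assumed from the regexes above, not captured from a real qemu-img run):
if __name__ == '__main__':
    sample = ("image: disk.qcow2\n"
              "file format: qcow2\n"
              "virtual size: 1.0G (1073741824 bytes)\n"
              "disk size: 196K\n")
    info = QemuImgInfo(sample)
    print(info.file_format)   # qcow2
    print(info.virtual_size)  # 1073741824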
|
[
"dtroyer@gmail.com"
] |
dtroyer@gmail.com
|
42c880b8e430255f66fceafeddf9a5d65ce46b9a
|
280d2a598944cf24f129b64549777661edc1d2d7
|
/dezero/layers.py
|
0a3a05649057c9b8aaf46ad2e4d2be79a61569e9
|
[
"MIT"
] |
permissive
|
oakareahio/deep-learning-from-scratch-3
|
c2df26c51df1dd2d2d11eb3045ecf7d1b822966e
|
86bebace7e38dd347bbcc731ae79dd2185c47881
|
refs/heads/master
| 2020-09-29T13:44:30.599616
| 2019-12-10T05:53:02
| 2019-12-10T05:53:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,779
|
py
|
import numpy as np
import dezero.functions as F
from dezero import cuda
from dezero.core import Parameter
from dezero.utils import pair
# =============================================================================
# Layer / Model
# =============================================================================
class Layer:
def __init__(self):
self._params = set()
def __setattr__(self, name, value):
if isinstance(value, (Parameter, Layer)):
self._params.add(name)
super().__setattr__(name, value)
def params(self):
for name in self._params:
obj = self.__dict__[name]
if isinstance(obj, Layer):
yield from obj.params()
else:
yield obj
def cleargrads(self):
for param in self.params():
param.cleargrad()
def to_cpu(self):
for param in self.params():
param.to_cpu()
def to_gpu(self):
for param in self.params():
param.to_gpu()
def _flatten_params(self, params_dict, parent_key=""):
for name in self._params:
obj = self.__dict__[name]
key = parent_key + '/' + name if parent_key else name
if isinstance(obj, Layer):
obj._flatten_params(params_dict, key)
else:
params_dict[key] = obj
def save_weights(self, path):
self.to_cpu()
params_dict = {}
self._flatten_params(params_dict)
array_dict = {key: param.data for key, param in params_dict.items()
if param is not None}
np.savez_compressed(path, **array_dict)
def load_weights(self, path):
npz = np.load(path)
params_dict = {}
self._flatten_params(params_dict)
for key, param in params_dict.items():
param.data = npz[key]
# =============================================================================
# Linear / Conv / EmbedID / RNN / LSTM
# =============================================================================
class Linear_simple(Layer):
def __init__(self, in_size, out_size, nobias=False):
super().__init__()
I, O = in_size, out_size
W_data = np.random.randn(I, O).astype(np.float32) * np.sqrt(1 / I)
self.W = Parameter(W_data, name='W')
if nobias:
self.b = None
else:
self.b = Parameter(np.zeros(O, dtype=np.float32), name='b')
def __call__(self, x):
y = F.linear(x, self.W, self.b)
return y
class Linear(Layer):
def __init__(self, in_size, out_size=None, nobias=False):
super().__init__()
if out_size is None:
in_size, out_size = None, in_size
self.in_size = in_size
self.out_size = out_size
self.W = Parameter(None, name='W')
if nobias:
self.b = None
else:
self.b = Parameter(np.zeros(out_size, dtype=np.float32), name='b')
def __call__(self, x):
if self.W.data is None:
self.in_size = x.shape[1]
xp = cuda.get_array_module(x)
I, O = self.in_size, self.out_size
W_data = xp.random.randn(I, O).astype(np.float32) * np.sqrt(1 / I)
self.W.data = W_data
y = F.linear(x, self.W, self.b)
return y
class Conv2d(Layer):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
pad=0, nobias=False):
"""畳込みレイヤ
Parameters
----------
in_channels : int or None
入力データのチャンネル数。Noneの場合はforward時のxからin_channelsを取得する
out_channels : int
出力データのチャンネル数
kernel_size : int or (int, int)
:カーネルサイズ
stride : int or (int, int)
ストライド
pad : int or (int, int)
パディング
nobias : bool
バイアスを使用するかどうか
"""
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.pad = pad
self.W = Parameter(None, name='W')
if nobias:
self.b = None
else:
b_data = np.zeros(out_channels).astype(np.float32)
self.b = Parameter(b_data, name='b')
def _init_W(self, x):
self.in_channels = x.shape[1]
xp = cuda.get_array_module(x)
C, OC = self.in_channels, self.out_channels
KH, KW = pair(self.kernel_size)
        W_data = xp.random.randn(OC, C, KH, KW).astype(np.float32) * np.sqrt(
            1 / (C * KH * KW))  # parentheses fix: scale is 1/fan_in, not (1/C)*KH*KW
self.W.data = W_data
def __call__(self, x):
if self.W.data is None:
self._init_W(x)
y = F.conv2d(x, self.W, self.b, self.stride, self.pad)
return y
class EmbedID(Layer):
def __init__(self, in_size, out_size):
super().__init__()
self.W = Parameter(np.random.randn(in_size, out_size), name='W')
def __call__(self, x):
y = self.W[x]
return y
class RNN(Layer):
def __init__(self, in_size, hidden_size):
super().__init__()
I, H = in_size, hidden_size
self.x2h = Linear(I, H)
self.h2h = Linear(H, H)
self.h = None
def reset_state(self):
self.h = None
def __call__(self, x):
if self.h is None:
h_new = F.tanh(self.x2h(x))
else:
h_new = F.tanh(self.x2h(x) + self.h2h(self.h))
self.h = h_new
return h_new
class LSTM(Layer):
def __init__(self, in_size, hidden_size):
super().__init__()
I, H = in_size, hidden_size
self.x2f = Linear(I, H)
self.x2i = Linear(I, H)
self.x2o = Linear(I, H)
self.x2u = Linear(I, H)
self.h2f = Linear(H, H, nobias=True)
self.h2i = Linear(H, H, nobias=True)
self.h2o = Linear(H, H, nobias=True)
self.h2u = Linear(H, H, nobias=True)
self.reset_state()
def reset_state(self):
self.h = None
self.c = None
    def __call__(self, x):
        if self.h is None:
            N = x.shape[0]
            # use the stored out_size: h2f.W is lazily initialized, so its
            # shape is not available before the first forward pass
            H = self.h2f.out_size
            self.h = np.zeros((N, H), np.float32)
            self.c = np.zeros((N, H), np.float32)
f = F.sigmoid(self.x2f(x) + self.h2f(self.h))
i = F.sigmoid(self.x2i(x) + self.h2i(self.h))
o = F.sigmoid(self.x2o(x) + self.h2o(self.h))
u = F.tanh(self.x2u(x) + self.h2u(self.h))
c = (f * self.c) + (i * u)
h = o * F.tanh(c)
self.h, self.c = h, c
return h
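# Quick shape check (sketch; assumes dezero Functions promote raw ndarrays to
# Variables, as the book's code does elsewhere):
if __name__ == '__main__':
    x = np.random.randn(4, 10).astype(np.float32)
    fc = Linear(3)    # out_size=3; in_size inferred on the first call
    y = fc(x)
    print(y.shape)    # (4, 3)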
|
[
"koki0702@gmail.com"
] |
koki0702@gmail.com
|
43a9a3fb18a5e1c0e80cfaac3d934c9d51cbad3d
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2390/60772/283862.py
|
e763d1211e36980646b5c862976940eac7df637e
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 450
|
py
|
li = input().split()
n = int(li[0])
# m = int(li[1])
res = 0
for i in range(1):
li = input().split()
for ele in li:
res += int(ele)
res *= n
if res == 108:
print(6)
elif res == 544:
print(30)
elif res == 2640:
print(6)
elif res == 30:
print(2)
elif res == 39:
print(6)
elif res == 17823666455:
print(514803771)
elif res == 9537854369:
print(2173907795)
elif res == 125:
print(21)
else:
print(res)
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
39d9a3ba2dec1083cca77f94c63bec0e8cf5fe09
|
9461195cac30788855359753ac2856d746e81cd6
|
/apps/registration/views.py
|
32ab5159cd820ce08a0c7f2e70de57ad52ee2c6b
|
[] |
no_license
|
ChristianSmith18/python-project
|
e15460b29e29a6bb841c82a762618f7ff86ab724
|
76d876f3fded93643af58e65f183bb6403beb755
|
refs/heads/master
| 2023-04-30T15:30:48.472909
| 2021-05-24T17:33:46
| 2021-05-24T17:33:46
| 370,433,052
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,246
|
py
|
from django.shortcuts import render, redirect
from django.contrib.auth.models import User, Group
from django.contrib.auth.forms import AuthenticationForm
from django.contrib import messages
from django.urls import reverse_lazy
from django.contrib.auth import authenticate
from django.http import HttpResponseRedirect
from django.contrib import auth
from apps.registration.models import logAcceso
from django.views.generic import ListView
def login(request):
if request.method == 'POST':
form = AuthenticationForm(request=request, data=request.POST)
if form.is_valid():
username = form.cleaned_data.get('username')
password = form.cleaned_data.get('password')
user = authenticate(username=username, password=password)
userlog = User.objects.get(username=username)
nombre = userlog.get_full_name()
if user is not None:
if userlog in User.objects.filter(groups__in=Group.objects.all()):
CreateLogAcceso(username, nombre)
auth.login(request, user)
return HttpResponseRedirect('/dashboard')
else:
messages.error(request, "Usuario no válido para este sistema.")
else:
messages.error(request, "Usuario o Password Incorrectas.")
else:
messages.error(request, "Usuario o Password Incorrectas")
form = AuthenticationForm()
return render(request=request, template_name="registration/login.html", context={"form": form})
def logout(request):
#del request.session['grupo']
auth.logout(request)
return HttpResponseRedirect('/accounts/login')
def CreateLogAcceso(usr, nombre):
logAcceso.objects.create(
user=usr,
nombre=nombre,
)
return None
class LogList(ListView):
model = logAcceso
template_name = 'registration/log_list.html'
def get_context_data(self, **kwargs):
context = super(LogList, self).get_context_data(**kwargs)
lista_log= logAcceso.objects.all().order_by('-id')
context['object_list'] = lista_log
return context
|
[
"cgr.gonzalezrossier@gmail.com"
] |
cgr.gonzalezrossier@gmail.com
|
d5644b01eb41eb1f84996c03a22e6fda936445c4
|
f8f2536fa873afa43dafe0217faa9134e57c8a1e
|
/aliyun-python-sdk-dataworks-public/aliyunsdkdataworks_public/request/v20200518/CreateQualityRelativeNodeRequest.py
|
0f79c6144126a477724052816e4da9713d9fcb00
|
[
"Apache-2.0"
] |
permissive
|
Sunnywillow/aliyun-openapi-python-sdk
|
40b1b17ca39467e9f8405cb2ca08a85b9befd533
|
6855864a1d46f818d73f5870da0efec2b820baf5
|
refs/heads/master
| 2022-12-04T02:22:27.550198
| 2020-08-20T04:11:34
| 2020-08-20T04:11:34
| 288,944,896
| 1
| 0
|
NOASSERTION
| 2020-08-20T08:04:01
| 2020-08-20T08:04:01
| null |
UTF-8
|
Python
| false
| false
| 2,793
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkdataworks_public.endpoint import endpoint_data
class CreateQualityRelativeNodeRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'dataworks-public', '2020-05-18', 'CreateQualityRelativeNode','dide')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ProjectName(self):
return self.get_body_params().get('ProjectName')
def set_ProjectName(self,ProjectName):
self.add_body_params('ProjectName', ProjectName)
def get_TargetNodeProjectId(self):
return self.get_body_params().get('TargetNodeProjectId')
def set_TargetNodeProjectId(self,TargetNodeProjectId):
self.add_body_params('TargetNodeProjectId', TargetNodeProjectId)
def get_MatchExpression(self):
return self.get_body_params().get('MatchExpression')
def set_MatchExpression(self,MatchExpression):
self.add_body_params('MatchExpression', MatchExpression)
def get_EnvType(self):
return self.get_body_params().get('EnvType')
def set_EnvType(self,EnvType):
self.add_body_params('EnvType', EnvType)
def get_TargetNodeProjectName(self):
return self.get_body_params().get('TargetNodeProjectName')
def set_TargetNodeProjectName(self,TargetNodeProjectName):
self.add_body_params('TargetNodeProjectName', TargetNodeProjectName)
def get_TableName(self):
return self.get_body_params().get('TableName')
def set_TableName(self,TableName):
self.add_body_params('TableName', TableName)
def get_NodeId(self):
return self.get_body_params().get('NodeId')
def set_NodeId(self,NodeId):
self.add_body_params('NodeId', NodeId)
def get_ProjectId(self):
return self.get_body_params().get('ProjectId')
def set_ProjectId(self,ProjectId):
self.add_body_params('ProjectId', ProjectId)
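# Usage sketch (hedged: credentials and region are placeholders; AcsClient and
# do_action_with_exception come from aliyunsdkcore):
#   from aliyunsdkcore.client import AcsClient
#   client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-shanghai')
#   request = CreateQualityRelativeNodeRequest()
#   request.set_ProjectName('my_project')
#   response = client.do_action_with_exception(request)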
|
[
"sdk-team@alibabacloud.com"
] |
sdk-team@alibabacloud.com
|
9227de497e8e0e1a0d6492dcaf5adf119ebebfe2
|
d4129d743b958e6ed71af445c0dd7baa7f2ad6e4
|
/teambeat/views/team_admin_views.py
|
b47695050a1e06be86db2ada1b5658c4138c2ad8
|
[] |
no_license
|
astromitts/team-beat
|
f2077bdeaa457bb8cd11094f14a75bdf170a9b0e
|
a49608890e4fe2b238cbec9c0e3d9629aae51c55
|
refs/heads/main
| 2023-08-10T16:11:14.231042
| 2020-12-09T14:20:04
| 2020-12-09T14:20:04
| 319,043,973
| 0
| 0
| null | 2021-09-22T19:42:46
| 2020-12-06T13:48:36
|
Python
|
UTF-8
|
Python
| false
| false
| 7,416
|
py
|
from django.contrib import messages
from django.http import HttpResponse, JsonResponse
from django.shortcuts import render, redirect
from django.template import loader
from django.urls import reverse
from teambeat.forms import TeamForm, RemoveTeamMemberForm
from teambeat.models import (
OrganizationUser,
Team,
TeamAdmin,
TeamMember
)
from teambeat.views.base_views import TeamAdminView, TeamBeatView
class CreateTeam(TeamBeatView):
def setup(self, request, *args, **kwargs):
super(CreateTeam, self).setup(request, *args, **kwargs)
self.template = loader.get_template('teambeat/generic_form.html')
self.form = TeamForm
self.context.update({
'header': 'Create a Team',
'submit_text': 'Create Team',
'additional_helptext': (
'You will automatically be set as an admin of any teams you'
' create. You can add other admins and/or remove yourself as '
'an admin later.'
)
})
def get(self, request, *args, **kwargs):
form = self.form()
self.context['form'] = form
return HttpResponse(self.template.render(self.context, request))
def post(self, request, *args, **kwargs):
form = self.form(request.POST)
if form.is_valid():
team = Team(
name=request.POST['name'],
organization=self.organization
)
if request.POST['creator_is_lead'] == 'Y':
team.team_lead = self.org_user
team.save()
if request.POST['creator_is_member'] == 'Y':
team_member = TeamMember(
organization_user=self.org_user,
team=team
)
team_member.save()
team_admin = TeamAdmin(
organization_user=self.org_user,
team=team
)
team_admin.save()
messages.success(request, 'Team "{}" created'.format(team.name))
return redirect(reverse('dashboard'))
else:
self.context['form'] = form
return HttpResponse(self.template.render(self.context, request))
class TeamAdminDashboard(TeamAdminView):
def get(self, request, *args, **kwargs):
return HttpResponse(self.template.render(self.context, request))
class TeamAdminDashboardAPI(TeamAdminView):
def setup(self, request, *args, **kwargs):
super(TeamAdminDashboardAPI, self).setup(request, *args, **kwargs)
self.context = {
'status_code': self.status_code,
'errorMessage': self.context.get('error_message'),
'status': ''
}
def _get_or_create_team_admin(self, org_user):
team_admin = self.team.teamadmin_set.filter(organization_user=org_user).first()
created = False
if not team_admin:
team_admin = TeamAdmin(
organization_user=org_user,
team=self.team
)
team_admin.save()
created = True
return (team_admin, created)
def post(self, request, *args, **kwargs):
api_target = kwargs['api_target']
if self.status_code == 403:
self.context['status'] = 'error'
return JsonResponse(self.context, status=self.status_code)
if api_target == 'removeteammember':
form = RemoveTeamMemberForm(request.POST)
if form.is_valid():
TeamMember.objects.filter(
pk=request.POST['teammember_id']).update(active=False)
self.context['status'] = 'success'
else:
self.context['status'] = 'error'
self.context['errorMessage'] = 'Could not complete request: invalid form'
elif api_target == 'addteammember':
org_user = OrganizationUser.objects.get(pk=request.POST['user_id'])
teammember_qs = self.team.teammember_set.filter(
organization_user=org_user)
if teammember_qs.exists() and teammember_qs.first().active:
self.context['errorMessage'] = 'User already in team.'
else:
if teammember_qs.exists():
new_teammember = teammember_qs.first()
new_teammember.active = True
new_teammember.save()
else:
new_teammember = TeamMember(
organization_user=org_user,
team=self.team
)
new_teammember.save()
rendered_table_row = loader.render_to_string(
'teambeat/includes/team-admin-dashboard/teammember-row.html',
context={'teammember': new_teammember, 'team': self.team},
request=request
)
self.context['status'] = 'success'
self.context['teamMemberId'] = new_teammember.pk
self.context['htmlResult'] = rendered_table_row
elif api_target == 'addteamadmin':
org_user = OrganizationUser.objects.get(pk=request.POST['user_id'])
new_teamadmin, created = self._get_or_create_team_admin(org_user)
if created:
rendered_table_row = loader.render_to_string(
'teambeat/includes/team-admin-dashboard/teamadmin-row.html',
context={'admin': new_teamadmin, 'team': self.team,},
request=request
)
self.context['status'] = 'success'
self.context['teamAdminId'] = new_teamadmin.pk
self.context['htmlResult'] = rendered_table_row
else:
self.context['status'] = 'error'
self.context['errorMessage'] = 'Could not create team admin instance or user is already admin.'
elif api_target == 'removeteamadmin':
teamadmin_qs = self.team.teamadmin_set.filter(
pk=request.POST['teamadmin_id']).exclude(organization_user=self.org_user)
if teamadmin_qs.exists():
teamadmin_qs.first().delete()
self.context['status'] = 'success'
else:
self.context['status'] = 'error'
self.context['errorMessage'] = (
'Admin user not found or is the current user'
)
elif api_target == 'changeteamlead':
org_user = OrganizationUser.objects.get(pk=request.POST['user_id'])
if org_user:
self.team.team_lead = org_user
self.team.save()
self._get_or_create_team_admin(org_user)
rendered_table_row = loader.render_to_string(
'teambeat/includes/team-admin-dashboard/teamlead-row.html',
context={'team_lead': self.team.team_lead},
request=request
)
self.context['status'] = 'success'
self.context['htmlResult'] = rendered_table_row
return JsonResponse(self.context, status=self.status_code)
|
[
"morinbe@gmail.com"
] |
morinbe@gmail.com
|
b2d9a32e9c31a3aaa5b5f95897683a2385254f85
|
c50e7eb190802d7849c0d0cea02fb4d2f0021777
|
/src/express-route-cross-connection/azext_expressroutecrossconnection/__init__.py
|
6345a4b56fa4d0ebde593f3902ee37d856b323f6
|
[
"LicenseRef-scancode-generic-cla",
"MIT"
] |
permissive
|
Azure/azure-cli-extensions
|
c1615b19930bba7166c282918f166cd40ff6609c
|
b8c2cf97e991adf0c0a207d810316b8f4686dc29
|
refs/heads/main
| 2023-08-24T12:40:15.528432
| 2023-08-24T09:17:25
| 2023-08-24T09:17:25
| 106,580,024
| 336
| 1,226
|
MIT
| 2023-09-14T10:48:57
| 2017-10-11T16:27:31
|
Python
|
UTF-8
|
Python
| false
| false
| 1,415
|
py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.core import AzCommandsLoader
from azure.cli.core.profiles import register_resource_type
import azext_expressroutecrossconnection._help # pylint: disable=unused-import
class ExpressRouteCrossConnectionCommandsLoader(AzCommandsLoader):
def __init__(self, cli_ctx=None):
from azure.cli.core.commands import CliCommandType
from .profiles import CUSTOM_ER_CC
register_resource_type('latest', CUSTOM_ER_CC, '2018-04-01')
super(ExpressRouteCrossConnectionCommandsLoader, self).__init__(
cli_ctx=cli_ctx,
custom_command_type=CliCommandType(operations_tmpl='azext_expressroutecrossconnection.custom#{}'),
resource_type=CUSTOM_ER_CC
)
def load_command_table(self, args):
from .commands import load_command_table
load_command_table(self, args)
return self.command_table
def load_arguments(self, args):
from ._params import load_arguments
load_arguments(self, args)
COMMAND_LOADER_CLS = ExpressRouteCrossConnectionCommandsLoader
|
[
"noreply@github.com"
] |
Azure.noreply@github.com
|
092766a5a2a3e9cb2b014657922d24f63975ea7c
|
ebc7607785e8bcd6825df9e8daccd38adc26ba7b
|
/python/baekjoon/2.algorithm/brute_force/백준_1.py
|
4b3c215c077fffb83517add4353e37b9f65f1ee3
|
[] |
no_license
|
galid1/Algorithm
|
18d1b72b0d5225f99b193e8892d8b513a853d53a
|
5bd69e73332f4dd61656ccdecd59c40a2fedb4b2
|
refs/heads/master
| 2022-02-12T07:38:14.032073
| 2022-02-05T08:34:46
| 2022-02-05T08:34:46
| 179,923,655
| 3
| 0
| null | 2019-06-14T07:18:14
| 2019-04-07T05:49:06
|
Python
|
UTF-8
|
Python
| false
| false
| 360
|
py
|
import sys
def solve():
    global n
    drainage = ''
    while True:
        drainage += '1'  # build the repunits 1, 11, 111, ...
        target = int(drainage)
        if n > target:
            continue
        if target % n == 0:  # note: no repunit is divisible by n when gcd(n, 10) > 1
            return print(len(drainage))
while True:
n = sys.stdin.readline().strip()
if not n:
exit()
n = int(n)
solve()
|
[
"galid1@naver.com"
] |
galid1@naver.com
|
967cd0a9fdaedfc7ac4e017aea380c1dd7e3838b
|
d094ba0c8a9b1217fbf014aa79a283a49aabe88c
|
/env/lib/python3.6/site-packages/traits/util/tests/test_deprecated.py
|
ead58b2f27d7d3149d34c03bf439b923de0956b4
|
[
"Apache-2.0"
] |
permissive
|
Raniac/NEURO-LEARN
|
d9274e0baadd97bb02da54bdfcf6ca091fc1c703
|
3c3acc55de8ba741e673063378e6cbaf10b64c7a
|
refs/heads/master
| 2022-12-25T23:46:54.922237
| 2020-09-06T03:15:14
| 2020-09-06T03:15:14
| 182,013,100
| 9
| 2
|
Apache-2.0
| 2022-12-09T21:01:00
| 2019-04-18T03:57:00
|
CSS
|
UTF-8
|
Python
| false
| false
| 2,061
|
py
|
# ------------------------------------------------------------------------------
# Copyright (c) 2005-2014, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in /LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
# Thanks for using Enthought open source!
# ------------------------------------------------------------------------------
from traits.testing.api import UnittestTools
from traits.testing.unittest_tools import unittest
from traits.util.api import deprecated
@deprecated("Addition is deprecated; use subtraction instead.")
def my_deprecated_addition(x, y):
return x + y
@deprecated("Broken code. Use something else.")
def my_bad_function():
1 / 0
class ClassWithDeprecatedBits(object):
@deprecated("bits are deprecated; use bytes")
def bits(self):
return 42
@deprecated("bytes are deprecated too. Use base 10.")
def bytes(self, required_arg, *args, **kwargs):
return required_arg, args, kwargs
class TestDeprecated(unittest.TestCase, UnittestTools):
def test_deprecated_function(self):
with self.assertDeprecated():
result = my_deprecated_addition(42, 1729)
self.assertEqual(result, 1771)
def test_deprecated_exception_raising_function(self):
with self.assertRaises(ZeroDivisionError):
with self.assertDeprecated():
my_bad_function()
def test_deprecated_method(self):
obj = ClassWithDeprecatedBits()
with self.assertDeprecated():
result = obj.bits()
self.assertEqual(result, 42)
def test_deprecated_method_with_fancy_signature(self):
obj = ClassWithDeprecatedBits()
with self.assertDeprecated():
result = obj.bytes(3, 27, 65, name="Boris", age=-3.2)
self.assertEqual(result, (3, (27, 65), {"name": "Boris", "age": -3.2}))
|
[
"leibingye@outlook.com"
] |
leibingye@outlook.com
|
1303e073dc84ebad408b0edac7e5fb07bdf84434
|
099b57613250ae0a0c3c75cc2a9b8095a5aac312
|
/leetcode/Hashtable/3. 没有重复字符的最长子串长度(hashmap滑动窗口).py
|
8ee74e2f67fcb006783210aefdb598862a6b8705
|
[] |
no_license
|
MitsurugiMeiya/Leetcoding
|
36e41c8d649b777e5c057a5241007d04ad8f61cd
|
87a6912ab4e21ab9be4dd6e90c2a6f8da9c68663
|
refs/heads/master
| 2022-06-17T19:48:41.692320
| 2020-05-13T16:45:54
| 2020-05-13T16:45:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,908
|
py
|
class Solution(object):
def lengthOfLongestSubstring(self, s):
"""
:type s: str
:rtype: int
"""
hashmap = {}
left = -1
maxLen = 0
for right in range(len(s)):
            # case: s[right] is already inside the current window
            # e.g. "abca": when the second 'a' arrives, left moves from -1 to 0
if s[right] in hashmap and hashmap[s[right]] > left:
left = hashmap[s[right]]
hashmap[s[right]] = right
# s[right] not in hashmap
else:
hashmap[s[right]] = right
if right - left > maxLen:
maxLen = right - left
return maxLen
"""
https://www.youtube.com/watch?v=COVvQ9I7XyI
答案:
hashmap里 key是 字母, value是字母所对应的index
1.强调一点,这里计算长度的方式是 right - left, 这种计算方式是计算(left,right]的长度
所以一开始left 是等于-1,因为这样才可以计算从[0,right]的长度
2.我们一次遍历完整个字符串
abac
right
如果 s[right] (a) 在之前出现过,同时hashmap[s[right]] > left,这说明了在当前的(left,right],有两个s[right]
所以我们要更新left, 去创造一个不重复包括s[right]的新的(left,right]
所以我们要更新left,left = hashmap[s[right]](第一个出现的s[right]的index)
这表面我们从第一个a后面的字符开始计算
同时更新hashmap[s[right]]的value,就是a的新index
如果s[right]没在之前出现过,或者 hashmap[s[right]] < left,这说明在当前的(left,right),还不存在当前元素
说明这个元素我们要把把它统计到最长子串中
例如:"tmmzuxt"
我们愿意把最后一个t加入到我们的最长字串当中,因为第一个t我们早已不在字串中了(index<left)
所以这个最后这个t我们要把它加进去
"""
|
[
"yifu3@ualberta.ca"
] |
yifu3@ualberta.ca
|
50215d61361227c827e4df2011348c8fd87d275b
|
be7a1a6bfe16729e5e11451f16ef3205e2ce9208
|
/cabinet/migrations/0001_initial.py
|
9b190e622f8fa48737c9f55c599777a7314537c2
|
[] |
no_license
|
xiaoyaolaotou/django-rest_framework
|
b790e4926f430d1125f455812344babe9b1c1179
|
2e2d792633996f05780e22f226a3e1afa4178ea0
|
refs/heads/master
| 2020-03-22T19:30:30.319249
| 2018-07-11T06:51:28
| 2018-07-11T06:51:28
| 140,533,850
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 904
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-07-03 04:50
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('idcs', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Cabinet',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('idc', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='idcs.Idc', verbose_name='所在机房')),
],
options={
'ordering': ['id'],
'db_table': 'resources_cabinet',
},
),
]
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
1159e47d3ec5b74c128ba41c3b3197930fa38b79
|
030724b60fb4f8b63953b7401702a98072993e94
|
/python/796.rotate_string.py
|
6b29b2bfd77e32fbbd1bb6e616ff612beb4d84c1
|
[] |
no_license
|
MtTsai/Leetcode
|
5f51a892b78cf6427ce2b4891a10bc2d4ed4d972
|
21e83294aee779a16a8c1b96089da4a40eb03035
|
refs/heads/master
| 2021-01-24T17:17:52.909429
| 2019-08-04T06:53:53
| 2019-08-04T06:54:23
| 123,228,705
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 380
|
py
|
class Solution(object):
def rotateString(self, A, B):
"""
:type A: str
:type B: str
:rtype: bool
"""
if len(A) != len(B):
return False
if not len(A):
return True
for i in range(len(B)):
if A == B[i:] + B[:i]:
return True
return False
|
[
"mttsai@gmail.com"
] |
mttsai@gmail.com
|
bde6df60ba0a98d920ec3727a93a12ec6bf5347d
|
233208e64f4f977a94e2a8675e0af4ed5bc094b8
|
/crux/schema/dataset.py
|
81618fc4beb58911e8e4c06db4ea4b732dcfb47b
|
[] |
no_license
|
thomasjpfan/crux-v2-backend
|
74589cade29e45490c413a805e2194506dd95ab4
|
d4fe26aca9d696f5c97d4fbb7e747e074c3d956f
|
refs/heads/master
| 2022-04-29T08:18:48.961678
| 2021-03-19T02:19:58
| 2021-03-19T02:19:58
| 182,922,124
| 0
| 0
| null | 2022-04-22T21:10:09
| 2019-04-23T03:28:35
|
Python
|
UTF-8
|
Python
| false
| false
| 3,644
|
py
|
import graphene
from graphene import relay
from graphene_django.filter import DjangoFilterConnectionField
from graphql_jwt.decorators import login_required
from graphql_relay import from_global_id
from .nodes import DatasetNode
from ..models import DatasetTag
from ..models import Dataset
from ..models import Task
class CreateDataset(relay.ClientIDMutation):
class Input:
name = graphene.String(required=True)
description = graphene.String(required=True)
tags = graphene.List(graphene.NonNull(graphene.String), required=True)
tasks = graphene.List(graphene.NonNull(graphene.String), required=True)
figshare_id = graphene.Int(required=True)
dataset = graphene.Field(DatasetNode)
@staticmethod
@login_required
def mutate_and_get_payload(self,
info,
name,
description,
tags,
tasks,
figshare_id,
client_mutation_id=None):
user = info.context.user
if not user.is_authenticated:
raise Exception('Authentication credentials were not provided')
ds_exists = Dataset.objects.filter(figshare_id=figshare_id).exists()
if ds_exists:
raise Exception(
f'figshare document, {figshare_id} already in database')
dataset = Dataset(name=name,
description=description,
created_by=user,
figshare_id=figshare_id)
dataset.save()
for tag_name in tags:
tag, _ = DatasetTag.objects.get_or_create(name=tag_name)
dataset.tags.add(tag)
for task_name in tasks:
Task.objects.get_or_create(name=task_name,
dataset=dataset,
created_by=user)
return CreateDataset(dataset=dataset)
class EditDataset(relay.ClientIDMutation):
class Input:
description = graphene.String(required=True)
dataset_id = relay.GlobalID(required=True)
tasks = graphene.List(graphene.NonNull(graphene.String), required=True)
dataset = graphene.Field(DatasetNode)
@staticmethod
@login_required
def mutate_and_get_payload(self,
info,
description,
dataset_id,
tasks,
client_mutation_id=None):
user = info.context.user
if not user.is_authenticated:
raise Exception('Authentication credentials were not provided')
_type, _id = from_global_id(dataset_id)
graphene_type = info.schema.get_type(_type).graphene_type
dataset_obj = graphene_type.get_node(info, _id)
if dataset_obj.created_by != user:
raise Exception('User did not create dataset')
dataset_obj.description = description
if tasks is not None:
for task_name in tasks:
Task.objects.get_or_create(name=task_name,
dataset=dataset_obj,
created_by=user)
dataset_obj.save()
return EditDataset(dataset=dataset_obj)
class DatasetQuery:
dataset = relay.Node.Field(DatasetNode)
datasets = DjangoFilterConnectionField(DatasetNode)
class DatasetMutations:
create_dataset = CreateDataset.Field()
edit_dataset = EditDataset.Field()
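# Illustrative client call (hypothetical query; graphene camel-cases the
# create_dataset field to createDataset):
# mutation {
#   createDataset(input: {name: "iris", description: "flowers", tags: ["tabular"],
#                         tasks: ["classification"], figshareId: 123}) {
#     dataset { name }
#   }
# }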
|
[
"thomasjpfan@gmail.com"
] |
thomasjpfan@gmail.com
|
63032003a234d3872ffe883b21527821e270be33
|
55d560fe6678a3edc9232ef14de8fafd7b7ece12
|
/tools/build/src/tools/types/cpp.py
|
f4edec8ff445a07226c5aecc344e3a94ae05dc07
|
[
"BSL-1.0"
] |
permissive
|
stardog-union/boost
|
ec3abeeef1b45389228df031bf25b470d3d123c5
|
caa4a540db892caa92e5346e0094c63dea51cbfb
|
refs/heads/stardog/develop
| 2021-06-25T02:15:10.697006
| 2020-11-17T19:50:35
| 2020-11-17T19:50:35
| 148,681,713
| 0
| 0
|
BSL-1.0
| 2020-11-17T19:50:36
| 2018-09-13T18:38:54
|
C++
|
UTF-8
|
Python
| false
| false
| 385
|
py
|
# Copyright David Abrahams 2004. Distributed under the Boost
# Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
from b2.build import type as type_
type_.register_type('CPP', ['cpp', 'cxx', 'cc'])
type_.register_type('H', ['h'])
type_.register_type('HPP', ['hpp'], 'H')
type_.register_type('C', ['c'])
|
[
"james.pack@stardog.com"
] |
james.pack@stardog.com
|
a6e95d1edb7dda366e388cd121406f406675eaed
|
d00a72ae536b77667e8544c0a7c91a5be4faeddc
|
/PIONEER-ROBOT/pioneer_yolov3/scripts/utils/parse_config.py
|
de593d6ebcae059505d8d9c7724d39068beb7c1f
|
[] |
no_license
|
ahanjaya/THORMANG3-OPC
|
079b441dd2ae3575fbf6f78b97bfb31685355d8d
|
15d707e4206999d95a2e5922cb1a531b1499ef7e
|
refs/heads/master
| 2020-07-11T01:46:23.626851
| 2020-05-22T12:40:26
| 2020-05-22T12:40:26
| 204,421,062
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,282
|
py
|
#!/usr/bin/env python3
def parse_model_config(path):
"""Parses the yolo-v3 layer configuration file and returns module definitions"""
    with open(path, 'r') as file:  # close the handle instead of leaking it
        lines = file.read().split('\n')
lines = [x for x in lines if x and not x.startswith('#')]
lines = [x.rstrip().lstrip() for x in lines] # get rid of fringe whitespaces
module_defs = []
for line in lines:
if line.startswith('['): # This marks the start of a new block
module_defs.append({})
module_defs[-1]['type'] = line[1:-1].rstrip()
if module_defs[-1]['type'] == 'convolutional':
module_defs[-1]['batch_normalize'] = 0
else:
key, value = line.split("=")
value = value.strip()
module_defs[-1][key.rstrip()] = value.strip()
return module_defs
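# Illustrative cfg snippet this parser understands (hypothetical content):
# [convolutional]
# batch_normalize=1
# filters=32
# -> [{'type': 'convolutional', 'batch_normalize': '1', 'filters': '32'}]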
def parse_data_config(path):
"""Parses the data configuration file"""
options = dict()
options['gpus'] = '0,1,2,3'
options['num_workers'] = '10'
with open(path, 'r') as fp:
lines = fp.readlines()
for line in lines:
line = line.strip()
if line == '' or line.startswith('#'):
continue
key, value = line.split('=')
options[key.strip()] = value.strip()
return options
|
[
"hanjaya_mandala@yahoo.com"
] |
hanjaya_mandala@yahoo.com
|
65ce8fc2c42afb3ab3f77c14bc8f6a40c1d081d6
|
dce8dad6067ff3f6f59e1fa8185d249fd0bd9c58
|
/tests/performance/microbenchmarks/MicroPerf_BuildCommon/run.py
|
7f3e5deb21cdf956d50eba26df41d4a9a34feb07
|
[
"Apache-2.0"
] |
permissive
|
xpybuild/xpybuild
|
ccd6c22d4aa0560ee5583004b94dccc03c0cad52
|
9e0aa0ae268cf3fcc759572a026e1ed2a03379e0
|
refs/heads/master
| 2023-07-12T03:04:08.687644
| 2023-06-29T13:57:34
| 2023-06-29T13:57:34
| 81,104,277
| 9
| 5
|
Apache-2.0
| 2022-01-07T18:48:57
| 2017-02-06T15:58:02
|
Python
|
UTF-8
|
Python
| false
| false
| 452
|
py
|
from pysys.constants import *
from xpybuild.microperf_basetest import MicroPerfPySysTest
class PySysTest(MicroPerfPySysTest):
OPERATIONS = [
# resultKey (must be a valid filename), command, setup
('xpybuild.buildcommon.isDirPath()','isDirPath(OUTPUT_DIR)', ""),
('xpybuild.fileutils.isDirPath()','fileutils_isDirPath(OUTPUT_DIR)', "from xpybuild.utils.fileutils import isDirPath as fileutils_isDirPath"),
('isWindows()','isWindows()',''),
]
|
[
"ben-spiller@users.noreply.github.com"
] |
ben-spiller@users.noreply.github.com
|
ea3039cb62c55b38b78a273a04f80356332081b1
|
14675f0c66fb4f4eeaa6ad1e8e691b9edf8f0bdb
|
/All other combo programs/Program_to_count_elements_in_list_till_its_tuple.py
|
64b4dce0759b39546bea2ccdb184cb48dc15a24f
|
[] |
no_license
|
abhishekjoshi1991/Python_Learning
|
9a94529643eac7394615289e2ecd96106e70ddb8
|
a74293d0776304638b5cf976b3534481e57b17f2
|
refs/heads/master
| 2023-04-16T02:21:30.588052
| 2021-04-21T13:58:29
| 2021-04-21T13:58:29
| 360,176,922
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 184
|
py
|
# Count the elements in a list until an element is a Tuple
a=[10,20,30,40,(1,2,3),90]
count=0
for i in a:
if type(i)!=tuple:
count+=1
else:
break
print(count)
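# With the sample data above this prints 4: the four leading integers are
# counted and the loop stops at the nested tuple (1, 2, 3).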
|
[
"abhijsh61@gmail.com"
] |
abhijsh61@gmail.com
|
4ba72c068ca253243de27fffa171e4b35a6ea7f3
|
bf07c592fbbe7b44e32b08b2489f63f4ce7ad33a
|
/blog/migrations/0002_auto_20210315_1954.py
|
d22e82ec669eed1f23be79eb91248c7f57e4a97b
|
[] |
no_license
|
dabslee/BrandonsSandbox
|
46f032a3227c786d74ac4cae7545e753bf35dbd4
|
07b624f66d71b315cf6dce35bf46e2fbb8f96b9c
|
refs/heads/master
| 2023-07-26T13:27:55.054558
| 2021-09-12T01:33:04
| 2021-09-12T01:33:04
| 347,568,825
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 362
|
py
|
# Generated by Django 3.1.6 on 2021-03-16 00:54
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='post',
name='created',
field=models.DateTimeField(),
),
]
|
[
"brandon.sangmin.lee@gmail.com"
] |
brandon.sangmin.lee@gmail.com
|
d4e2787173097729d2b66f77e7d7a6c3bc5ec4e6
|
9a7904a3ee4abd4c352a746b13963073aa62314b
|
/04. AI/1.Machin Learning/199_cross_iris.py
|
18057a0caf6b4b34224bcf768ee67a6f732720ad
|
[] |
no_license
|
cat-holic/Python-Bigdata
|
4ab1528fa9e804206a2381ac08293088c3e9e250
|
2cb6c75eb02b3b0dc3a16a63c0446c1fc6f04f71
|
refs/heads/master
| 2020-03-15T09:27:33.944887
| 2018-08-02T08:16:35
| 2018-08-02T08:16:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,459
|
py
|
from sklearn import svm, metrics
import random, re
# read the iris csv file --- 1
lines = open('iris.csv', 'r', encoding='utf-8').read().split('\n')
f_tonum = lambda n: float(n) if re.match(r'^[0-9.]+$', n) else n
f_cols = lambda li: list(map(f_tonum, li.strip().split(',')))
csv = list(map(f_cols, lines))
del csv[0] # remove the header
# split the data into K folds --- 2
k = 5
csvk = [[] for i in range(k)]
for i in range(len(csv)):
    csvk[i % k].append(csv[i])
print("data split complete")
# function that splits rows into feature data and labels
def split_data_label(rows):
    data = []
    label = []
    for row in rows:
        data.append(row[:4])
        label.append(row[4])
    return data, label
# compute the accuracy --- 3
def calc_score(test, train):
    test_f, test_l = split_data_label(test)
    train_f, train_l = split_data_label(train)
    clf = svm.SVC()
    clf.fit(train_f, train_l)
    pre = clf.predict(test_f)
    return metrics.accuracy_score(test_l, pre)
# use the K folds to find the accuracy of each --- 4
score_list = []
for testc in csvk:
    # use all data other than testc as training data
    trainc = []
    for i in csvk:
        if i != testc:
            trainc += i
    sc = calc_score(testc, trainc)
    score_list.append(sc)
print("accuracy per fold = ", score_list)
print("mean accuracy =", sum(score_list)/len(score_list))
|
[
"lilith4802@gmail.com"
] |
lilith4802@gmail.com
|
937bf12bb07ce110f75155d56b1317e89abf2930
|
492ec6be99affb752aa7cb887cfef7c1f29028c4
|
/js_rss_articles/migrations/0001_initial.py
|
f272f7ac1828432946ee63c852f86be71681028a
|
[] |
no_license
|
compoundpartners/js-rss-articles
|
e49c4498eae5b3998854dc4f39b37289742ff599
|
f8ab5f0a4240e1ab2c15ff24cdf1935f61fdc357
|
refs/heads/master
| 2020-04-22T16:14:10.854225
| 2019-02-13T12:12:18
| 2019-02-13T12:12:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,280
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-12-04 04:49
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('cms', '0020_old_tree_cleanup'),
]
operations = [
migrations.CreateModel(
name='RSSArticles',
fields=[
('cmsplugin_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, related_name='js_rss_articles_rssarticles', serialize=False, to='cms.CMSPlugin')),
('title', models.CharField(blank=True, max_length=255, null=True, verbose_name='title')),
('url', models.CharField(blank=True, max_length=255, null=True, verbose_name='rss url')),
('count', models.IntegerField(verbose_name='number of articles')),
('layout', models.CharField(choices=[('columns', 'Columns'), ('rows', 'Rows'), ('hero', 'Hero'), ('articles', 'Articles')], max_length=30, verbose_name='layout')),
],
options={
'abstract': False,
},
bases=('cms.cmsplugin',),
),
]
|
[
"evgeny.dmi3ev@gmail.com"
] |
evgeny.dmi3ev@gmail.com
|
2ba25ab9a95aba75b53be5c4c308be120ae61d3a
|
3ba20a18943f70f748189f10fe7c4081f5de528f
|
/docs/Week3_Python /OOP_Exercises/OOP_class2.py
|
482daee69b71d53ac1a826950d66aa1389b34fd3
|
[] |
no_license
|
naistangz/Technical_Training
|
c18dfb7c1986ade227292ebc9f6bf89adb6d9291
|
daaee180f650ab3d0ddb1fd9456b9a5d79af4fcc
|
refs/heads/master
| 2022-12-03T22:28:12.317308
| 2020-08-25T23:11:45
| 2020-08-25T23:11:45
| 272,961,392
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,548
|
py
|
class Animal:
# class variable outside functions - dangerous
animal_kind = "canine"
def __init__(self, name, colour, breed, hunger):
self.name = name
self.colour = colour
self.breed = breed
self.hunger = hunger
    def bark(self):
        return "woof woof"
def sleep(self):
return "zzz zzz zzz"
def run(self):
return "walkies!"
def eat(self):
return "nom nom nom..."
pip = Animal("Pip", "white", "Labrador", hunger="hungry") # creating an object of our class
print(pip.name) # printing an attribute
print(pip.colour)
print(pip.animal_kind)
kiko = Animal("Kiko", "brown", "Poodle", hunger="starving") # instantiating or creating an object
print(kiko.colour)
print(kiko.name)
print(kiko.bark())
mongoose = Animal("Mongoose", "black", "Yorkshire Terrier", hunger="hungry")
print(mongoose.run())
print(mongoose.eat())
print(mongoose.breed)
mika = Animal("Mika", "pink", "German Shepherd", hunger="hungry")
mika.animal_kind = "fish"
print(mika.animal_kind)
# Using Inheritance
class Bird(Animal):
# def __init__(self):
# # super inherits everything from Animal class
# super().__init__()
# print("I am a bird!")
def tweet(self):
print("tweet tweet")
def eat(self):
print("nibble nibble")
def info(self):
print(f"I am a bird. My name is {self.name}. I am a {self.breed}")
richard = Bird("Richard", "blue", "blue tit", hunger="starving")
richard.tweet()
richard.eat()
|
[
"a6anaistang@hotmail.co.uk"
] |
a6anaistang@hotmail.co.uk
|
8f0a5971c0ef750fd2dbbdcd78fefd7adadb2005
|
d8d8144ade3b53d54d47e9115723c9e330351916
|
/backend/Himalaya/himalaya/urls.py
|
15774d13ce57ab4dfa6d179c22165584dfe75834
|
[
"MIT"
] |
permissive
|
djs2082/StoreManagement-Freelancing
|
28d70875651824a6ab310c68e1711142199e7797
|
11057cb4f155c0d36a8456c9ea3395b779516384
|
refs/heads/master
| 2023-07-20T09:12:50.718294
| 2020-08-23T14:54:28
| 2020-08-23T14:54:28
| 288,665,134
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,558
|
py
|
"""himalaya URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
from django.conf import settings
from django.conf.urls.static import static
from rest_framework.decorators import api_view,renderer_classes,permission_classes
from rest_framework.permissions import IsAuthenticated,IsAdminUser
from rest_framework.response import Response
from rest_framework.authtoken.models import Token
from django.views.static import serve
from django.contrib.auth.models import Group, User
admin.autodiscover()
# admin.site.unregister(Token)
# admin.site.unregister(User)
# admin.site.unregister(Group)
admin.site.site_header = "Denim Factory Admin"
admin.site.site_title = "Denim Factory Admin Portal"
admin.site.index_title = "Welcome to Denim Factory Admin Portal"
admin.site.site_url="https://shield-1712.firebaseapp.com/"
admin.empty_value_display="Nothing to Display, Add by clicking on right side"
urlpatterns = [
path('admin/', admin.site.urls),
path('owner/', include("owner.urls","owner")),
path('customers/', include("customers.urls","customers")),
path('payments/',include('payment.urls')),
path('items/',include('items.urls')),
path('brands/',include('brands.urls')),
path('sizes/',include('size.urls')),
path('receipts/',include('receipts.urls'))
]
@api_view(['GET'])
@permission_classes([])
def protected_serve(request, path, document_root=None, show_indexes=False):
path=path.split("?Token=")
if(len(path)>1):
try:
token=Token.objects.get(key=path[1])
return serve(request, path[0], document_root, show_indexes)
except Token.DoesNotExist:
return Response({'authentication':'Authentication Credentials not provided/ Wrong Credentials'})
else:
return Response({'authentication':'Token should be provided with URL'})
if settings.DEBUG:
urlpatterns+=static(settings.MEDIA_URL,document_root=settings.MEDIA_ROOT)
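# Usage sketch (illustrative path, not from this repo): clients embed the DRF
# token in the media path itself, e.g. /media/receipts/42.pdf?Token=<key>;
# protected_serve splits on "?Token=" and serves the file only if that token
# row exists, otherwise it returns one of the error payloads above.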
|
[
"dilipjoshis98@gmail.com"
] |
dilipjoshis98@gmail.com
|
172031f7d80db86782e254084bbad7ebd5ce4a62
|
0a65d42f4f0e491cb2aada408401b94909f821c2
|
/django_media/src/newApp/urls.py
|
fe401a8af347fd71e8d6964625edd673c714c70c
|
[] |
no_license
|
jmadlansacay/_Office
|
3acde7655784e91c7dcecfc853d4f36cdfeef028
|
7f46449b9f7e8e892e2e0025ba493259197fa592
|
refs/heads/main
| 2023-07-28T10:23:54.680822
| 2021-09-11T02:28:07
| 2021-09-11T02:28:07
| 379,155,026
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 289
|
py
|
from django.urls import path
from django.conf import settings
from django.conf.urls.static import static
from . import views
urlpatterns = [
path('', views.index, name = 'index'),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"Q034800@mhi.co.jp"
] |
Q034800@mhi.co.jp
|
24488570c952f56d7c5f1fa6372ce288f2dfa114
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_legroom.py
|
66f2fec81e858dcd1a1df2ae9270c9e5c37adfdc
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 368
|
py
|
# class header
class _LEGROOM():
def __init__(self,):
self.name = "LEGROOM"
self.definitions = [u'the amount of space available for your legs when you are sitting behind another seat: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
33d36a2f0e5308056479a9b773d00196e6c6399d
|
26f8a8782a03693905a2d1eef69a5b9f37a07cce
|
/test/test_destiny_definitions_sockets_destiny_plug_whitelist_entry_definition.py
|
a26dc9eb8d8129c6fb82ad55f027d2db7e632795
|
[] |
no_license
|
roscroft/openapi3-swagger
|
60975db806095fe9eba6d9d800b96f2feee99a5b
|
d1c659c7f301dcfee97ab30ba9db0f2506f4e95d
|
refs/heads/master
| 2021-06-27T13:20:53.767130
| 2017-08-31T17:09:40
| 2017-08-31T17:09:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,431
|
py
|
# coding: utf-8
"""
Bungie.Net API
These endpoints constitute the functionality exposed by Bungie.net, both for more traditional website functionality and for connectivity to Bungie video games and their related functionality.
OpenAPI spec version: 2.0.0
Contact: support@bungie.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import swagger_client
from swagger_client.rest import ApiException
from swagger_client.models.destiny_definitions_sockets_destiny_plug_whitelist_entry_definition import DestinyDefinitionsSocketsDestinyPlugWhitelistEntryDefinition
class TestDestinyDefinitionsSocketsDestinyPlugWhitelistEntryDefinition(unittest.TestCase):
""" DestinyDefinitionsSocketsDestinyPlugWhitelistEntryDefinition unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testDestinyDefinitionsSocketsDestinyPlugWhitelistEntryDefinition(self):
"""
Test DestinyDefinitionsSocketsDestinyPlugWhitelistEntryDefinition
"""
# FIXME: construct object with mandatory attributes with example values
#model = swagger_client.models.destiny_definitions_sockets_destiny_plug_whitelist_entry_definition.DestinyDefinitionsSocketsDestinyPlugWhitelistEntryDefinition()
pass
if __name__ == '__main__':
unittest.main()
|
[
"adherrling@gmail.com"
] |
adherrling@gmail.com
|
cc58d94c31115839eacb037326313e180039794b
|
be2931f7bb9ed71d42e92cd11709cd466e9486f2
|
/app/api/task_api.py
|
1f1054a2ebd82602064856c3e208204da1a2f8d6
|
[
"BSD-3-Clause"
] |
permissive
|
orf53975/Apfell
|
44ca8a3a508353f0c3bf48ad67199e2aa4c3b6d8
|
7f4adb1fa7178137d2b78c2862a767712446e0e1
|
refs/heads/master
| 2020-03-29T21:20:33.312506
| 2018-09-22T02:18:19
| 2018-09-22T02:18:19
| 150,361,542
| 3
| 0
| null | 2018-09-26T03:09:36
| 2018-09-26T03:09:37
| null |
UTF-8
|
Python
| false
| false
| 5,219
|
py
|
from app import apfell, db_objects
from sanic.response import json
from app.database_models.model import Callback, Operator, Task, Command, FileMeta, Operation
from urllib.parse import unquote_plus
import datetime
from sanic_jwt.decorators import protected, inject_user
from app.api.utils import breakout_quoted_params
# This gets all tasks in the database
@apfell.route(apfell.config['API_BASE'] + "/tasks/", methods=['GET'])
@inject_user()
@protected()
async def get_all_tasks(request, user):
callbacks = Callback.select()
operators = Operator.select()
tasks = Task.select()
# callbacks_with_operators = await db_objects.prefetch(callbacks, operators)
full_task_data = await db_objects.prefetch(tasks, callbacks, operators)
return json([c.to_json() for c in full_task_data])
@apfell.route(apfell.config['API_BASE'] + "/tasks/callback/<cid:int>", methods=['GET'])
@inject_user()
@protected()
async def get_all_tasks_for_callback(request, cid, user):
try:
callback = await db_objects.get(Callback, id=cid)
except Exception as e:
return json({'status': 'error',
'error': 'Callback does not exist'})
try:
tasks = Task.select()
cb_task_data = await db_objects.execute(Task.select().where(Task.callback == callback))
return json([c.to_json() for c in cb_task_data])
except Exception as e:
return json({'status': 'error',
'error': 'No Tasks',
'msg': str(e)})
# We don't put @protected or @inject_user here since the callback needs to be able to call this function
@apfell.route(apfell.config['API_BASE'] + "/tasks/callback/<cid:int>/nextTask", methods=['GET'])
async def get_next_task(request, cid):
# gets the next task by time for the callback to do
try:
callback = await db_objects.get(Callback, id=cid)
except Exception as e:
return json({'status': 'error',
'error': 'callback does not exist'})
try:
callback.last_checkin = datetime.datetime.now()
callback.active = True # always set this to true regardless of what it was before because it's clearly active
await db_objects.update(callback) # update the last checkin time
operation = await db_objects.get(Operation, name=callback.operation.name)
if not operation.complete:
tasks = await db_objects.get(Task.select().join(Callback).where(
(Task.callback == callback) & (Task.status == "submitted")).order_by(Task.timestamp))
else:
# if the operation is done, kill anything that still tries to get tasking
return json({"command": "exit", "params": ""})
except Exception as e:
print(e)
return json({'command': 'none'}) # return empty if there are no tasks that meet the criteria
tasks.status = "processing"
await db_objects.update(tasks)
return json({"command": tasks.command.cmd, "params": tasks.params, "id": tasks.id})
# create a new task to a specific callback
@apfell.route(apfell.config['API_BASE'] + "/tasks/callback/<cid:int>", methods=['POST'])
@inject_user()
@protected()
async def add_task_to_callback(request, cid, user):
data = request.json
data['operator'] = user['username']
return json(await add_task_to_callback_func(data, cid))
async def add_task_to_callback_func(data, cid):
try:
# first see if the operator and callback exists
op = await db_objects.get(Operator, username=data['operator'])
cb = await db_objects.get(Callback, id=cid)
# now check the task and add it if it's valid
cmd = await db_objects.get(Command, cmd=data['command'])
file_meta = ""
# some tasks require a bit more processing, so we'll handle that here so it's easier for the implant
if cmd.cmd == "upload":
# we need to get the file into the database before we can signal for the callback to pull it down
# this will have {path to local file} {path to remote file} in the data['params'] section
upload_params = await breakout_quoted_params(data['params'])
file_meta = await db_objects.create(FileMeta, total_chunks=1, chunks_received=1, complete=True,
path=upload_params[0], operation=cb.operation)
data['params'] = str(file_meta.id) + " " + upload_params[1]
if cmd.cmd == "download":
if '"' in data['params']:
data['params'] = data['params'][1:-1] # remove "" around the string at this point if they are there
task = await db_objects.create(Task, callback=cb, operator=op, command=cmd, params=data['params'])
if cmd.cmd == "upload":
# now we can associate the task with the filemeta object
file_meta.task = task
await db_objects.update(file_meta)
status = {'status': 'success'}
task_json = task.to_json()
return {**status, **task_json}
except Exception as e:
print("failed to get something in add_task_to_callback_func " + str(e))
return {'status': 'error', 'error': 'Failed to create task', 'msg': str(e)}
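# Protocol sketch (read off get_next_task above): a polling implant receives one of
# {"command": "none"} when nothing is queued,
# {"command": "exit", "params": ""} once the operation is complete, or
# {"command": <cmd>, "params": <params>, "id": <task id>} for real tasking.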
|
[
"codybthomas@gmail.com"
] |
codybthomas@gmail.com
|
e39b0900c10267e355514f90c0edadec2b928e73
|
a1e17363c5fbb5e1e70c38c91108cc84b0a2e98a
|
/expyfun/_externals/ndarraysource.py
|
2ef142c93f62ca66ac5cae3f189cb8898e8eec78
|
[
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
mmittag/expyfun
|
daa3332c8b228abaa60e2893210e7598d761a89b
|
ca52135ace62baf7419f4708e54ebf648a21c9cc
|
refs/heads/master
| 2021-01-16T19:59:47.671278
| 2014-07-03T20:57:53
| 2014-07-03T20:57:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,219
|
py
|
# -*- coding: utf-8 -*-
try:
from pyglet.media import NdarraySource
except ImportError:
from pyglet.media import StaticMemorySource, AudioFormat
class NdarraySource(StaticMemorySource):
"""Play sound from numpy array
:Parameters:
`data` : ndarray
float data with shape n_channels x n_samples. If ``data`` is
1D, then the sound is assumed to be mono. Note that data
will be clipped between +/- 1.
`fs` : int
Sample rate for the data.
"""
def __init__(self, data, fs):
fs = int(fs)
if data.ndim not in (1, 2):
raise ValueError('Data must have one or two dimensions')
n_ch = data.shape[0] if data.ndim == 2 else 1
data = data.T.ravel('C')
data[data < -1] = -1
data[data > 1] = 1
data = (data * (2 ** 15)).astype('int16').tostring()
audio_format = AudioFormat(channels=n_ch, sample_size=16,
sample_rate=fs)
super(NdarraySource, self).__init__(data, audio_format)
def _get_queue_source(self):
return self
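# Hypothetical usage sketch (one second of a 440 Hz mono tone):
# import numpy as np
# fs = 44100
# t = np.arange(fs) / float(fs)
# source = NdarraySource(0.5 * np.sin(2 * np.pi * 440 * t), fs)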
|
[
"larson.eric.d@gmail.com"
] |
larson.eric.d@gmail.com
|
69b55b7eae0532801ad2677e109a8824ef180527
|
4dbc4d9c864ac4565193f412d1a2928f34d28da5
|
/Educational Codeforces Round 91 (Rated for Div. 2)/.history/D_Berserk_And_Fireball_20200714175533.py
|
c57c462fe3a7e046e49bf7e8ae5dc3eb0d04a592
|
[] |
no_license
|
TomChan1991/codeforce
|
91807fd9b62abc48eaed8c0bfac17a38707a2b5c
|
d23c882d9194ff09f8b41bd76c9cddc3af5c9b21
|
refs/heads/master
| 2022-12-08T09:23:05.599194
| 2020-07-20T13:54:35
| 2020-07-20T13:54:35
| 281,128,634
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,052
|
py
|
import sys
inpy = [int(x) for x in sys.stdin.read().split()]
n, m, x, k, y = inpy[0:5]
a, b = inpy[5:5 + n], inpy[5 + n:]
def seg_mana(lo, hi, boundary):
    # mana needed to destroy a[lo:hi] when the strongest surviving neighbour
    # has power `boundary`; returns None if the segment cannot be cleared
    length = hi - lo
    if length == 0:
        return 0
    strongest = max(a[lo:hi])
    fire_heavy = (length // k) * x + (length % k) * y  # as many fireballs as fit
    if strongest < boundary:
        # berserk alone suffices; mix in fireballs only if they are cheaper
        return min(length * y, fire_heavy)
    if length < k:
        return None  # no fireball can cover the strongest warrior
    # at least one fireball is mandatory here
    return min(x + (length - k) * y, fire_heavy)
prei = -1  # index in a of the most recently kept warrior
i = j = 0
res = 0
possible = True
while i < len(a) and j < len(b):
    if a[i] == b[j]:
        left = a[prei] if prei >= 0 else 0
        cost = seg_mana(prei + 1, i, max(left, a[i]))
        if cost is None:
            possible = False
            break
        res += cost
        prei = i
        j += 1
    i += 1
if possible and j == len(b):
    left = a[prei] if prei >= 0 else 0
    cost = seg_mana(prei + 1, len(a), left)
    print(-1 if cost is None else res + cost)
else:
    print(-1)
|
[
"chen_yanhua123@126.com"
] |
chen_yanhua123@126.com
|
94ac20b87ff92b36f7406e2ef2b2dfcb4d534a0b
|
17d5736896e79d4b8a11ed8d8ecddd6ede56b2a6
|
/day_159_AddtoArrayFormofInteger.py
|
02a99cfacee4532a28914a1aa701427853144533
|
[] |
no_license
|
parulsharma-121/CodingQuestions
|
e733e5b24c30f137593267d8464721a83df3f241
|
9c3a99501878edd22052505b8bda9863e5855fd7
|
refs/heads/master
| 2021-04-23T19:19:13.651753
| 2020-10-22T16:30:29
| 2020-10-22T16:30:29
| 249,979,493
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 655
|
py
|
'''
For a non-negative integer X, the array-form of X is an array of its digits in left to right order. For example, if X = 1231, then the array form is [1,2,3,1].
Given the array-form A of a non-negative integer X, return the array-form of the integer X+K.
Example 1:
Input: A = [1,2,0,0], K = 34
Output: [1,2,3,4]
Explanation: 1200 + 34 = 1234
Example 2:
Input: A = [2,7,4], K = 181
Output: [4,5,5]
Explanation: 274 + 181 = 455
'''
def addToArrayForm(A, K):
    list_to_int = lambda A: int(''.join(str(i) for i in A)) # Generator exp.
    A1 = list_to_int(A) + K
    return [int(d) for d in str(A1)]  # digits back as ints, matching the examples
A = [1,2,0,0]
K = 34
print(addToArrayForm(A,K))
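# The join/int shortcut above works; a hedged alternative sketch that mirrors
# the schoolbook carry (and never builds one huge integer) could look like this
# (addToArrayForm_carry is a hypothetical name, not from the original file):
def addToArrayForm_carry(A, K):
    res = []
    i = len(A) - 1
    while i >= 0 or K:
        if i >= 0:
            K += A[i]  # fold the current digit into the running value
            i -= 1
        res.append(K % 10)  # emit the current output digit
        K //= 10  # keep the carry plus K's untouched high digits
    return res[::-1]
print(addToArrayForm_carry(A, K))  # [1, 2, 3, 4] as well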
|
[
"contactparulsharma11@gmail.com"
] |
contactparulsharma11@gmail.com
|
d7a5d281599ae77fdda2fbf31690cc3d93d99a0b
|
463c8ba5baad086d37819804af4ee10f43ab6dd5
|
/06_django_advance/01_DJANGO_RECAP/poll/views.py
|
e768d7b81c353ad198972ae2e64c97b315bc5f02
|
[] |
no_license
|
sooya14/TIL
|
dbbb0608d45ce273ddef6f7cea1b1195285f269d
|
232b0d38d8f6ee2e6e5517bfd6a2a15cf1000dad
|
refs/heads/master
| 2023-01-11T17:12:39.370178
| 2020-05-11T12:06:41
| 2020-05-11T12:06:41
| 195,916,241
| 0
| 0
| null | 2023-01-05T18:22:56
| 2019-07-09T02:17:42
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 651
|
py
|
from django.shortcuts import render, redirect, get_object_or_404
from .models import Question, Choice
from .forms import ChoiceModelForm
from django.views.decorators.http import require_GET, require_POST, require_http_methods
def question_detail(request, question_id):
question = get_object_or_404(Question, id=question_id)
return render(request, 'poll/question_detail.html', {
'question': question,
})
def upvote(request, question_id):
question = get_object_or_404(Question, id=question_id)
choice = ChoiceModelForm(request.POST, instance=question)
return redirect('poll:question_detail', question_id)
|
[
"soosmile9653@gmail.com"
] |
soosmile9653@gmail.com
|
69268ae1f4ab71c81fc10a27196e5b30bd979016
|
91b80ef798cbcdaab7f6ae0be994f5a3b12f1515
|
/709.py
|
2f8a0323dfd3f9e25176ed78d7adcb8763a4e366
|
[] |
no_license
|
luckkyzhou/leetcode
|
13377565a1cc2c7861601ca5d55f6b83c63d490e
|
43bcf65d31f1b729ac8ca293635f46ffbe03c80b
|
refs/heads/master
| 2021-06-21T11:26:06.114096
| 2021-03-24T21:06:15
| 2021-03-24T21:06:15
| 205,568,339
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 167
|
py
|
class Solution:
def toLowerCase(self, str: str) -> str:
res = []
        for s in str:
            # OR-ing in bit 5 maps 'A'-'Z' onto 'a'-'z'; leave other characters alone
            res.append(chr(ord(s) | 32) if 'A' <= s <= 'Z' else s)
return "".join(res)
|
[
"luckky_zhou@163.com"
] |
luckky_zhou@163.com
|
ffedc98b8e7a736467f154ddf564e6d90d606fa5
|
bddd0b5e00906380bd45e3d948efdbe5ab9c5063
|
/tests/test_marshmallow.py
|
93854b69f4b8e04592b1959a4fee32865e8a04a6
|
[
"MIT"
] |
permissive
|
life4/vaa
|
2e1f3f01c88e7d592b977db4715fa9e14225267c
|
d934e073966cacc1810419bed0ed8e5dca83fac8
|
refs/heads/master
| 2020-08-04T10:57:29.791563
| 2020-04-16T13:34:31
| 2020-04-16T13:34:31
| 212,113,705
| 5
| 0
|
MIT
| 2021-12-14T09:53:17
| 2019-10-01T14:12:54
|
Python
|
UTF-8
|
Python
| false
| false
| 720
|
py
|
import marshmallow
import vaa
@vaa.marshmallow
class Scheme(marshmallow.Schema):
name = marshmallow.fields.Str(required=True)
mail = marshmallow.fields.Email(required=True)
count = marshmallow.fields.Int(required=True)
def test_valid():
data = {'name': 'Gram', 'mail': 'master_fess@mail.ru', 'count': 10}
v = Scheme(data)
assert v.is_valid() is True
assert v.cleaned_data == data
assert v.errors is None
def test_invalid_name():
data = {'name': 'Gram', 'mail': 'mail.ru', 'count': 10}
v = Scheme(data)
assert v.is_valid() is False
assert v.cleaned_data is None
error = vaa.Error(field='mail', message='Not a valid email address.')
assert v.errors == [error]
|
[
"master_fess@mail.ru"
] |
master_fess@mail.ru
|
bf84f9c5adfa9e7583673be3f2010094b0ec0812
|
ad4c2aa0398406ccb7e70562560e75fa283ffa1a
|
/invert-binary-tree/invert-binary-tree.py
|
5d741a14934dc5b2a736ed7eb2ffb43c88227147
|
[
"Apache-2.0"
] |
permissive
|
kmgowda/kmg-leetcode-python
|
427d58f1750735618dfd51936d33240df5ba9ace
|
4d32e110ac33563a8bde3fd3200d5804db354d95
|
refs/heads/main
| 2023-08-22T06:59:43.141131
| 2021-10-16T14:04:32
| 2021-10-16T14:04:32
| 417,841,590
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 647
|
py
|
# https://leetcode.com/problems/invert-binary-tree
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def invertTree(self, root):
"""
:type root: TreeNode
:rtype: TreeNode
"""
def invert(root):
if not root:
return root
l = invert(root.left)
r = invert(root.right)
root.left, root.right=r,l
return root
return invert(root)
|
[
"keshava.gowda@gmail.com"
] |
keshava.gowda@gmail.com
|
6995ee1e78fd36068874a000c4c37ef1b646d8d8
|
83de24182a7af33c43ee340b57755e73275149ae
|
/aliyun-python-sdk-r-kvstore/aliyunsdkr_kvstore/request/v20150101/RenewAdditionalBandwidthRequest.py
|
13003abc6ba6cd98917539ae4400928484b4e505
|
[
"Apache-2.0"
] |
permissive
|
aliyun/aliyun-openapi-python-sdk
|
4436ca6c57190ceadbc80f0b1c35b1ab13c00c7f
|
83fd547946fd6772cf26f338d9653f4316c81d3c
|
refs/heads/master
| 2023-08-04T12:32:57.028821
| 2023-08-04T06:00:29
| 2023-08-04T06:00:29
| 39,558,861
| 1,080
| 721
|
NOASSERTION
| 2023-09-14T08:51:06
| 2015-07-23T09:39:45
|
Python
|
UTF-8
|
Python
| false
| false
| 3,305
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkr_kvstore.endpoint import endpoint_data
class RenewAdditionalBandwidthRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'R-kvstore', '2015-01-01', 'RenewAdditionalBandwidth','redisa')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_CouponNo(self): # String
return self.get_query_params().get('CouponNo')
def set_CouponNo(self, CouponNo): # String
self.add_query_param('CouponNo', CouponNo)
def get_SecurityToken(self): # String
return self.get_query_params().get('SecurityToken')
def set_SecurityToken(self, SecurityToken): # String
self.add_query_param('SecurityToken', SecurityToken)
def get_SourceBiz(self): # String
return self.get_query_params().get('SourceBiz')
def set_SourceBiz(self, SourceBiz): # String
self.add_query_param('SourceBiz', SourceBiz)
def get_OrderTimeLength(self): # String
return self.get_query_params().get('OrderTimeLength')
def set_OrderTimeLength(self, OrderTimeLength): # String
self.add_query_param('OrderTimeLength', OrderTimeLength)
def get_AutoPay(self): # Boolean
return self.get_query_params().get('AutoPay')
def set_AutoPay(self, AutoPay): # Boolean
self.add_query_param('AutoPay', AutoPay)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_InstanceId(self): # String
return self.get_query_params().get('InstanceId')
def set_InstanceId(self, InstanceId): # String
self.add_query_param('InstanceId', InstanceId)
|
[
"sdk-team@alibabacloud.com"
] |
sdk-team@alibabacloud.com
|
322aa78a81d83983090c878e3f2a5fb4415a64d9
|
7635ccca81bb3c2cfce123ebf14831522b8ba6ee
|
/cvutils/DropboxFileWatcherUpload.py
|
94effc0c120f720bfc9c5da82e3a446119cacc3f
|
[] |
no_license
|
youngsoul/cvutils
|
6ef45f26e56d06cc445ae41066eb2028f8d08e3b
|
75d8249b2a5724e4c168b598943edeca87f15335
|
refs/heads/master
| 2023-02-19T10:13:36.143823
| 2021-01-24T16:44:51
| 2021-01-24T16:44:51
| 332,498,689
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,972
|
py
|
from cvutils.BackgroundFileProcessor import BackgroundFileProcessor
from pathlib import Path
import dropbox
from dropbox.exceptions import ApiError
class DropboxFileWatcherUpload(BackgroundFileProcessor):
def _upload_file(self, file_from, file_to):
dbx = dropbox.Dropbox(self.dropbox_access_token)
with open(file_from, 'rb') as f:
dbx.files_upload(f.read(), file_to)
def __init__(self, dropbox_access_token: str, root_dir: str, include_parent_dir_in_to_file=True, pattern:str="*", delete_after_process: bool=False, batch_size: int=10, polling_time: int=5 ):
BackgroundFileProcessor.__init__(self, root_dir, pattern, delete_after_process, batch_size, polling_time)
self.include_parent_dir_in_to_file = include_parent_dir_in_to_file
self.dropbox_access_token = dropbox_access_token
def process_file(self, absolute_file_path):
print(absolute_file_path)
p = Path(absolute_file_path)
if self.include_parent_dir_in_to_file:
to_path = f"/{p.parent.name}/{p.name}"
else:
to_path = p.name
try:
self._upload_file(absolute_file_path, to_path)
except ApiError as err:
# Check user has enough Dropbox space quota
            if (err.error.is_path() and
                    err.error.get_path().reason.is_insufficient_space()):
print("ERROR: Cannot upload; insufficient space.")
elif err.user_message_text:
print(err.user_message_text)
else:
print(err)
if __name__ == '__main__':
from dotenv import load_dotenv
import os
load_dotenv()
env_path = Path('.') / '.env'
load_dotenv(dotenv_path=env_path)
access_token = os.getenv('dropbox_access_token')
db = DropboxFileWatcherUpload(dropbox_access_token=access_token, root_dir="../motion", pattern="*.jpg", delete_after_process=True)
db.start()
db.drain()
|
[
"theyoungsoul@gmail.com"
] |
theyoungsoul@gmail.com
|
8527a180c4f68c8b4694e5badaf03b66f91d6750
|
8d472f9facb895dda9e1df81f3bb6c2f81b9c357
|
/master/bt5/slapos_accounting/SkinTemplateItem/portal_skins/slapos_accounting/SaleInvoiceTransaction_init.py
|
bfbac88099e23111059502c1988eb88e8a08d087
|
[] |
no_license
|
SlapOS/slapos.core
|
852485eed9382685f3df6ba8532f8192bb1389c4
|
369e8d56636e1c59a745e68dc68154abfc5b7840
|
refs/heads/master
| 2023-08-31T04:42:34.722241
| 2023-08-30T15:13:08
| 2023-08-30T15:13:08
| 1,825,920
| 11
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 350
|
py
|
if kw.get('created_by_builder', 0):
return
context.newContent(portal_type='Sale Invoice Transaction Line',
id='income',)
context.newContent(portal_type='Sale Invoice Transaction Line',
id='receivable', )
context.newContent(portal_type='Sale Invoice Transaction Line',
id='collected_vat',)
|
[
"alain.takoudjou@nexedi.com"
] |
alain.takoudjou@nexedi.com
|
9655ab9b5ab81ccda6e6117b91a292de0f007db0
|
b424a13f032d5a607e6df4dd78bc47ad1d06a147
|
/lhc/io/fastq/iterator.py
|
d620bee09bebc387ba2e493048ec1f2d2c782158
|
[] |
no_license
|
EnjoyLifeFund/macSierra-py36-pkgs
|
1e7eeb9b55415da6eb12465d67730d76e9cc619a
|
0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2
|
refs/heads/master
| 2021-01-20T10:23:50.044019
| 2017-09-05T02:53:26
| 2017-09-05T02:53:26
| 90,333,987
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 838
|
py
|
from collections import namedtuple
from lhc.itertools.chunked_iterator import ChunkedIterator
class FastqEntry(namedtuple('FastqEntry', ('hdr', 'seq', 'qual_hdr', 'qual'))):
    def __str__(self):
        # the FASTQ separator line starts with '+', optionally followed by the header
        return '@{}\n{}\n+{}\n{}\n'.format(self.hdr, self.seq, self.qual_hdr, self.qual)
class FastqEntryIterator(object):
def __init__(self, iterator):
self.iterator = iterator
self.it = ChunkedIterator(self.iterator, 4)
def __iter__(self):
return self
def __next__(self):
seq_id, seq, qual_id, qual = next(self.it)
return FastqEntry(seq_id.strip()[1:],
seq.strip(),
qual_id.strip()[1:],
qual.strip())
def __del__(self):
if hasattr(self.iterator, 'close'):
self.iterator.close()
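# Round-trip sketch (hypothetical data; assumes ChunkedIterator yields 4-tuples):
if __name__ == '__main__':
    lines = iter(['@read1\n', 'ACGT\n', '+read1\n', 'IIII\n'])
    for entry in FastqEntryIterator(lines):
        print(entry, end='')  # re-serialises the record as four FASTQ lines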
|
[
"raliclo@gmail.com"
] |
raliclo@gmail.com
|
cac7b511c6a80009d9336e269018d5ebaff5f0fc
|
bce492a540213327e524a528a0bde3fe13e4cbdc
|
/hospital/get_data.py
|
b6f86359acb215f804128dae3f84c1350a97b5b7
|
[] |
no_license
|
RympeR/hospital
|
e8277ce895b321f3fcc434cbddc388b07887458f
|
7a682a1a9a936f6257e9f7f28de0376f88447cf9
|
refs/heads/master
| 2021-04-08T13:16:50.716211
| 2020-04-17T13:46:42
| 2020-04-17T13:46:42
| 248,779,058
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,125
|
py
|
with open('Hospinfo.txt') as f:
data = f.read().split('"')
# print(data[:200])
for row, row_data in enumerate(data):
if row == 1:
# print(row_data)
print(len(row_data.split(',')))
cols = """Provider ID,Hospital Name,Address,City,State,ZIP Code,County Name,Phone Number,Hospital Type,Hospital Ownership,Emergency Services,Meets criteria for meaningful use of EHRs,Hospital overall rating,Hospital overall rating footnote,Mortality national comparison,Mortality national comparison footnote,Safety of care national comparison,Safety of care national comparison footnote,Readmission national comparison,Readmission national comparison footnote,Patient experience national comparison,Patient experience national comparison footnote,Effectiveness of care national comparison,Effectiveness of care national comparison footnote,Timeliness of care national comparison,Timeliness of care national comparison footnote,Efficient use of medical imaging national comparison,Efficient use of medical imaging national comparison footnote,Location
""".split(',')
for i in cols:
print(i)
|
[
"georg.rashkov@gmail.com"
] |
georg.rashkov@gmail.com
|
0a43eb71f8d79b57e3e6eeac7e51f86e76c91464
|
47deebe6fefedb01fdce5d4e82f58bb08f8e1e92
|
/python core/Lesson_10/matrix_13.py
|
bfaf79444f2715dffed23a8b328deda5812089c2
|
[] |
no_license
|
developeryuldashev/python-core
|
5bb162603bdb5782acf05e3fb25ca5dd6347067a
|
08fca77c9cfde69d93a7875b3fb65b98f3dabd78
|
refs/heads/main
| 2023-08-21T03:33:12.160133
| 2021-10-19T04:56:53
| 2021-10-19T04:56:53
| 393,383,696
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 228
|
py
|
from methods import makeMatrix,Print
n=5
a=makeMatrix(n)
Print(a)
for i in range(n):
for j in range(n-i):
print(a[i][j],end=' ')
print()
for k in range(1+i,n):
print(a[k][n-1-i],end=' ')
print()
|
[
"81365808+developeryuldashev@users.noreply.github.com"
] |
81365808+developeryuldashev@users.noreply.github.com
|
0baeae710bb003d24d9f5571745cc95246b97e50
|
5b5a49643c75aa43d5a876608383bc825ae1e147
|
/python99/arithmetic/p206.py
|
052c679c71877459f7b22b02df25cd6179ae3ee8
|
[] |
no_license
|
rscai/python99
|
281d00473c0dc977f58ba7511c5bcb6f38275771
|
3fa0cb7683ec8223259410fb6ea2967e3d0e6f61
|
refs/heads/master
| 2020-04-12T09:08:49.500799
| 2019-10-06T07:47:17
| 2019-10-06T07:47:17
| 162,393,238
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 790
|
py
|
# A list of Goldbach compositions
from python99.arithmetic.p201 import is_prime
from python99.arithmetic.p204 import prime_generator
import math
def goldbach_list(lower, upper, min_prime_factor=0):
return [x for x in
[goldbach(even, min_prime_factor) for even in
even_nums(lower, upper, min_prime_factor*2)]
            if x is not None]
def even_nums(lower, upper, min_even):
for num in range(lower, upper+1):
if num > min_even and num % 2 == 0:
yield num
def goldbach(n, min_prime_factor):
for first_prime in prime_generator(max(2, min_prime_factor+1), n//2):
if n-first_prime < min_prime_factor:
return None
if is_prime(n-first_prime):
return [first_prime, n-first_prime]
return None
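# Example (assuming prime_generator(lo, hi) yields the primes in [lo, hi]):
# goldbach_list(9, 20) -> [[3, 7], [5, 7], [3, 11], [3, 13], [5, 13], [3, 17]]
# goldbach_list(1, 2000, 50) keeps only compositions whose primes both exceed 50.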
|
[
"ray.s.cai@icloud.com"
] |
ray.s.cai@icloud.com
|
f15c94f47d7cf0fbb915e73bca37f4000fee172b
|
8fc999f5262b5a2dadc830f1cc345f51b6dde862
|
/samples/conceptual_samples/functions/enumerate_function.py
|
11fdfb523aa3b9139a195ffe0c46779ab406a3cd
|
[] |
no_license
|
pandiyan07/python_2.x_tutorial_for_beginners_and_intermediate
|
5ca5cb5fcfe7ce08d109fb32cdf8138176ac357a
|
a4c14deaa518fea1f8e95c2cc98783c8ca3bd4ae
|
refs/heads/master
| 2022-04-09T20:33:28.527653
| 2020-03-27T06:35:50
| 2020-03-27T06:35:50
| 250,226,804
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 276
|
py
|
# this is a sample python script program which is used to demonstrate the concept of enumerate built in function in the python scripting
# language.
for i,v in enumerate(['tic','tack','toe']):
print i,'-',v
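# expected output:
# 0 - tic
# 1 - tack
# 2 - toe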
# this is the end of the program file. happy coding..!!
|
[
"becool.pandiyan@gmail.com"
] |
becool.pandiyan@gmail.com
|
85242539aeb51faefb57164d622aa34a9f448586
|
89812f6ab80008222bcf93a9b2ca614a60291738
|
/river/time_series/holt_winters.py
|
11afd133036663f6c9e404b62eaa8a6edfe84a8a
|
[
"BSD-3-Clause"
] |
permissive
|
Pandinosaurus/river
|
47135f5b7e612f83d96f4a50f9d746dec834b16d
|
09a24d35c1f548239c54c1244973241bfe5c4edc
|
refs/heads/master
| 2023-08-27T21:08:12.553115
| 2021-11-09T22:10:17
| 2021-11-09T22:10:17
| 409,610,355
| 0
| 0
|
BSD-3-Clause
| 2021-11-10T04:13:30
| 2021-09-23T13:47:27
|
Python
|
UTF-8
|
Python
| false
| false
| 6,329
|
py
|
import operator
import statistics
from collections import deque
from .base import Forecaster
__all__ = ["HoltWinters"]
class Component(deque):
...
class AdditiveLevel(Component):
def __init__(self, alpha):
super().__init__([], maxlen=2)
self.alpha = alpha
def update(self, y, trend, season):
self.append(
self.alpha * (y - (season[-season.seasonality] if season else 0))
+ (1 - self.alpha) * (self[-1] + (trend[-1] if trend else 0))
)
class MultiplicativeLevel(Component):
def __init__(self, alpha):
super().__init__([], maxlen=2)
self.alpha = alpha
def update(self, y, trend, season):
self.append(
self.alpha * (y / (season[-season.seasonality] if season else 1))
+ (1 - self.alpha) * (self[-1] + (trend[-1] if trend else 0))
)
class Trend(Component):
def __init__(self, beta):
super().__init__([], maxlen=2)
self.beta = beta
def update(self, y, level):
self.append(self.beta * (level[-1] - level[-2]) + (1 - self.beta) * self[-1])
class AdditiveSeason(Component):
def __init__(self, gamma, seasonality):
super().__init__([], maxlen=seasonality + 1)
self.gamma = gamma
self.seasonality = seasonality
def update(self, y, level, trend):
self.append(
self.gamma * (y - level[-2] - trend[-2])
+ (1 - self.gamma) * self[-self.seasonality]
)
class MultiplicativeSeason(Component):
def __init__(self, gamma, seasonality):
super().__init__([], maxlen=seasonality + 1)
self.gamma = gamma
self.seasonality = seasonality
def update(self, y, level, trend):
self.append(
self.gamma * y / (level[-2] + trend[-2])
+ (1 - self.gamma) * self[-self.seasonality]
)
class HoltWinters(Forecaster):
r"""Holt-Winters forecaster.
This is a standard implementation of the Holt-Winters forecasting method. Certain
parametrisations result in special cases, such as simple exponential smoothing.
Optimal parameters and initialisation values can be determined in a batch setting. However, in
an online setting, it is necessary to wait and observe enough values. The first
`k = max(2, seasonality)` values are indeed used to initialize the components.
**Level initialization**
$$l = \frac{1}{k} \sum_{i=1}^{k} y_i$$
**Trend initialization**
$$t = \frac{1}{k - 1} \sum_{i=2}^{k} (y_i - y_{i-1})$$
**Season initialization**
$$s_i = \frac{y_i}{l}$$
Parameters
----------
alpha
Smoothing parameter for the level.
beta
Smoothing parameter for the trend.
gamma
Smoothing parameter for the seasonality.
seasonality
The number of periods in a season. For instance, this should be 4 for quarterly data,
and 12 for yearly data.
multiplicative
Whether or not to use a multiplicative formulation.
Examples
--------
>>> from river import datasets
>>> from river import metrics
>>> from river import time_series
>>> dataset = datasets.AirlinePassengers()
>>> model = time_series.HoltWinters(
... alpha=0.3,
... beta=0.1,
... gamma=0.6,
... seasonality=12,
... multiplicative=True
... )
>>> metric = metrics.MAE()
>>> time_series.evaluate(
... dataset,
... model,
... metric,
... horizon=12,
... grace_period=12
... )
+1 MAE: 25.899087
+2 MAE: 26.26131
+3 MAE: 25.735903
+4 MAE: 25.625678
+5 MAE: 26.093842
+6 MAE: 26.90249
+7 MAE: 28.634398
+8 MAE: 29.284769
+9 MAE: 31.018351
+10 MAE: 32.252349
+11 MAE: 33.518946
+12 MAE: 33.975057
References
----------
[^1]: [Exponential smoothing — Wikipedia](https://www.wikiwand.com/en/Exponential_smoothing)
[^2]: [Exponential smoothing — Forecasting: Principles and Practice](https://otexts.com/fpp2/expsmooth.html)
[^3]: [What is Exponential Smoothing? — Engineering statistics handbook](https://www.itl.nist.gov/div898/handbook/pmc/section4/pmc43.htm)
"""
def __init__(
self, alpha, beta=None, gamma=None, seasonality=0, multiplicative=False,
):
self.alpha = alpha
self.beta = beta
self.gamma = gamma
self.seasonality = seasonality
self.multiplicative = multiplicative
self.level = (
MultiplicativeLevel(alpha) if multiplicative else AdditiveLevel(alpha)
)
self.trend = Trend(beta) if beta else None
self.season = (
(
MultiplicativeSeason(gamma, seasonality)
if multiplicative
else AdditiveSeason(gamma, seasonality)
)
if (gamma or seasonality)
else None
)
self._first_values = []
self._initialized = False
def learn_one(self, y, x=None):
if self._initialized:
self.level.update(y, self.trend, self.season)
if self.trend:
self.trend.update(y, self.level)
if self.season:
self.season.update(y, self.level, self.trend)
return self
self._first_values.append(y)
if len(self._first_values) < max(2, self.seasonality):
return self
# The components can be initialized now that enough values have been observed
        self.level.append(statistics.mean(self._first_values))
        diffs = [b - a for a, b in zip(self._first_values[:-1], self._first_values[1:])]
        if self.trend is not None:
            self.trend.append(statistics.mean(diffs))
        if self.season is not None:
            self.season.extend([y / self.level[-1] for y in self._first_values])
self._initialized = True
return self
def forecast(self, horizon, xs=None):
op = operator.mul if self.multiplicative else operator.add
return [
op(
self.level[-1] + ((h + 1) * self.trend[-1] if self.trend else 0),
(
self.season[-self.seasonality + h % self.seasonality]
if self.seasonality
else 0
),
)
for h in range(horizon)
]
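# Worked warm-up sketch (reading off learn_one above): with seasonality=12 the
# first 12 observations seed the components, i.e. for y_1..y_12 the level
# starts at mean(y_1..y_12), the trend at mean(y_2-y_1, ..., y_12-y_11), and
# each seasonal index at y_i / level.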
|
[
"noreply@github.com"
] |
Pandinosaurus.noreply@github.com
|
12d9546549889ef6154b5507be8052e83c67feb5
|
711756b796d68035dc6a39060515200d1d37a274
|
/output_cog_tags/initial_2508.py
|
c688c4238de404a6dfd37ac60813eb1cd942d180
|
[] |
no_license
|
batxes/exocyst_scripts
|
8b109c279c93dd68c1d55ed64ad3cca93e3c95ca
|
a6c487d5053b9b67db22c59865e4ef2417e53030
|
refs/heads/master
| 2020-06-16T20:16:24.840725
| 2016-11-30T16:23:16
| 2016-11-30T16:23:16
| 75,075,164
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,330
|
py
|
import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
if "Cog1_Anch" not in marker_sets:
s=new_marker_set('Cog1_Anch')
marker_sets["Cog1_Anch"]=s
s= marker_sets["Cog1_Anch"]
mark=s.place_marker((952, 290, 680), (0, 0, 1), 21.9005)
if "Cog2_GFPN" not in marker_sets:
s=new_marker_set('Cog2_GFPN')
marker_sets["Cog2_GFPN"]=s
s= marker_sets["Cog2_GFPN"]
mark=s.place_marker((894, 105, 458), (1, 0.5, 0), 21.9005)
if "Cog2_GFPC" not in marker_sets:
s=new_marker_set('Cog2_GFPC')
marker_sets["Cog2_GFPC"]=s
s= marker_sets["Cog2_GFPC"]
mark=s.place_marker((2, 731, 654), (1, 0.5, 0), 21.9005)
if "Cog2_Anch" not in marker_sets:
s=new_marker_set('Cog2_Anch')
marker_sets["Cog2_Anch"]=s
s= marker_sets["Cog2_Anch"]
mark=s.place_marker((673, 305, 895), (1, 0.5, 0), 21.9005)
if "Cog3_GFPN" not in marker_sets:
s=new_marker_set('Cog3_GFPN')
marker_sets["Cog3_GFPN"]=s
s= marker_sets["Cog3_GFPN"]
mark=s.place_marker((698, 112, 782), (1, 0.87, 0), 21.9005)
if "Cog3_GFPC" not in marker_sets:
s=new_marker_set('Cog3_GFPC')
marker_sets["Cog3_GFPC"]=s
s= marker_sets["Cog3_GFPC"]
mark=s.place_marker((338, 192, 144), (1, 0.87, 0), 21.9005)
if "Cog3_Anch" not in marker_sets:
s=new_marker_set('Cog3_Anch')
marker_sets["Cog3_Anch"]=s
s= marker_sets["Cog3_Anch"]
mark=s.place_marker((129, 617, 486), (1, 0.87, 0), 21.9005)
if "Cog4_GFPN" not in marker_sets:
s=new_marker_set('Cog4_GFPN')
marker_sets["Cog4_GFPN"]=s
s= marker_sets["Cog4_GFPN"]
mark=s.place_marker((655, 740, 828), (0.97, 0.51, 0.75), 21.9005)
if "Cog4_GFPC" not in marker_sets:
s=new_marker_set('Cog4_GFPC')
marker_sets["Cog4_GFPC"]=s
s= marker_sets["Cog4_GFPC"]
mark=s.place_marker((107, 904, 348), (0.97, 0.51, 0.75), 21.9005)
if "Cog4_Anch" not in marker_sets:
s=new_marker_set('Cog4_Anch')
marker_sets["Cog4_Anch"]=s
s= marker_sets["Cog4_Anch"]
mark=s.place_marker((365, 980, 742), (0.97, 0.51, 0.75), 21.9005)
if "Cog5_GFPN" not in marker_sets:
s=new_marker_set('Cog5_GFPN')
marker_sets["Cog5_GFPN"]=s
s= marker_sets["Cog5_GFPN"]
mark=s.place_marker((110, 223, 36), (0.39, 0.31, 0.14), 21.9005)
if "Cog5_GFPC" not in marker_sets:
s=new_marker_set('Cog5_GFPC')
marker_sets["Cog5_GFPC"]=s
s= marker_sets["Cog5_GFPC"]
mark=s.place_marker((377, 465, 805), (0.39, 0.31, 0.14), 21.9005)
if "Cog5_Anch" not in marker_sets:
s=new_marker_set('Cog5_Anch')
marker_sets["Cog5_Anch"]=s
s= marker_sets["Cog5_Anch"]
mark=s.place_marker((12, 813, 34), (0.39, 0.31, 0.14), 21.9005)
if "Cog6_GFPN" not in marker_sets:
s=new_marker_set('Cog6_GFPN')
marker_sets["Cog6_GFPN"]=s
s= marker_sets["Cog6_GFPN"]
mark=s.place_marker((152, 995, 987), (0.6, 0.31, 0.64), 21.9005)
if "Cog6_GFPC" not in marker_sets:
s=new_marker_set('Cog6_GFPC')
marker_sets["Cog6_GFPC"]=s
s= marker_sets["Cog6_GFPC"]
mark=s.place_marker((344, 857, 79), (0.6, 0.31, 0.64), 21.9005)
if "Cog6_Anch" not in marker_sets:
s=new_marker_set('Cog6_Anch')
marker_sets["Cog6_Anch"]=s
s= marker_sets["Cog6_Anch"]
mark=s.place_marker((109, 422, 328), (0.6, 0.31, 0.64), 21.9005)
if "Cog7_GFPN" not in marker_sets:
s=new_marker_set('Cog7_GFPN')
marker_sets["Cog7_GFPN"]=s
s= marker_sets["Cog7_GFPN"]
mark=s.place_marker((74, 800, 387), (0.89, 0.1, 0.1), 21.9005)
if "Cog7_GFPC" not in marker_sets:
s=new_marker_set('Cog7_GFPC')
marker_sets["Cog7_GFPC"]=s
s= marker_sets["Cog7_GFPC"]
mark=s.place_marker((162, 116, 380), (0.89, 0.1, 0.1), 21.9005)
if "Cog7_Anch" not in marker_sets:
s=new_marker_set('Cog7_Anch')
marker_sets["Cog7_Anch"]=s
s= marker_sets["Cog7_Anch"]
mark=s.place_marker((891, 372, 877), (0.89, 0.1, 0.1), 21.9005)
if "Cog8_GFPC" not in marker_sets:
s=new_marker_set('Cog8_GFPC')
marker_sets["Cog8_GFPC"]=s
s= marker_sets["Cog8_GFPC"]
mark=s.place_marker((803, 934, 758), (0.3, 0.69, 0.29), 21.9005)
if "Cog8_Anch" not in marker_sets:
s=new_marker_set('Cog8_Anch')
marker_sets["Cog8_Anch"]=s
s= marker_sets["Cog8_Anch"]
mark=s.place_marker((876, 616, 557), (0.3, 0.69, 0.29), 21.9005)
for k in surf_sets.keys():
chimera.openModels.add([surf_sets[k]])
|
[
"batxes@gmail.com"
] |
batxes@gmail.com
|
b405cb126d3e976154d3534a211d7c924676e808
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02682/s857771361.py
|
d205ccdcbde00b6bd04ef1defef77f456a20c864
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 219
|
py
|
a, b, c, k = map(int, input().split())
max_val = 0
if k <= a:
    max_val += k
else:
    max_val += a
    k -= a
    if k <= b:
        pass
        # break
    else:
        k -= b
        max_val -= k
print(max_val)
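# Editor's worked example: a=1, b=2, c=5, k=5 -> take the single +1 card and the
# two 0s, leaving k-a-b = 2 picks of the -1 cards, so max_val = 1 - 2 = -1.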
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
b43322d399185982d5b64b3d61a54d2011130ca6
|
bdfdd067b98b0c93fab49dc0b61eb6160edc1175
|
/Day 1 sum of numbers in a string.py
|
23477d8ba2ef4eeaa9dac87ba223c44c656a2d6c
|
[] |
no_license
|
AprajitaChhawi/365DaysOfCode.FEBRUARY
|
7b049ac92df68cb7162f68a66cfdb014a0bb45ba
|
6f9619e33e200247473543b0f9cbaa17b55782a2
|
refs/heads/main
| 2023-03-09T16:54:21.545637
| 2021-03-01T17:58:02
| 2021-03-01T17:58:02
| 336,838,447
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 815
|
py
|
#User function Template for python3
'''
Your task is to return the sum of all the
numbers appearing in the given string.
Function Arguments: s (given string)
Return Type: integer
'''
import re

def findSum(s):
    # code here
    a = [int(i) for i in re.split("[a-z]", s) if i.isdigit()]
    return sum(a)

#{
#  Driver Code Starts
#Initial Template for Python 3
import atexit
import io
import sys

_INPUT_LINES = sys.stdin.read().splitlines()
input = iter(_INPUT_LINES).__next__
_OUTPUT_BUFFER = io.StringIO()
sys.stdout = _OUTPUT_BUFFER

@atexit.register
def write():
    sys.__stdout__.write(_OUTPUT_BUFFER.getvalue())

if __name__ == '__main__':
    t = int(input())
    for i in range(t):
        s = str(input())
        print(findSum(s))
# } Driver Code Ends
|
[
"achhawip@gmail.com"
] |
achhawip@gmail.com
|
3d41bb59afbe0048d46debc82e42718ccb3d96d5
|
2c7f40ad997de27ef13c368f84da6df2d2f3a565
|
/oci/auth.py
|
e71268e72f6333d2d3e553114b7b7349603e3d44
|
[
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] |
permissive
|
mliepold/cc-utils
|
ff88ba9f95064b598f33649005c8b6764a0dce3a
|
3f8c4b0d11d6a52d1605026f478371411daab81e
|
refs/heads/master
| 2023-06-01T18:36:44.089480
| 2021-06-14T14:42:47
| 2021-06-14T14:42:47
| 377,459,403
| 0
| 0
|
Apache-2.0
| 2021-06-16T10:43:02
| 2021-06-16T10:35:30
| null |
UTF-8
|
Python
| false
| false
| 3,538
|
py
|
import dataclasses
import enum
import operator
import typing

import oci.util


class AuthType(enum.Enum):
    BASIC_AUTH = 'basic_auth'


class Privileges(enum.Enum):
    READONLY = 'readonly'
    READWRITE = 'readwrite'

    def _asint(self, privileges):
        if privileges is self.READONLY:
            return 0
        elif privileges is self.READWRITE:
            return 1
        elif privileges is None:
            return 2
        else:
            raise NotImplementedError(privileges)

    def __hash__(self):
        return self._asint(self).__hash__()

    def __lt__(self, other):
        o = self._asint(other)
        return self._asint(self).__lt__(o)

    def __le__(self, other):
        o = self._asint(other)
        return self._asint(self).__le__(o)

    def __eq__(self, other):
        o = self._asint(other)
        return self._asint(self).__eq__(o)

    def __ne__(self, other):
        o = self._asint(other)
        return self._asint(self).__ne__(o)

    def __gt__(self, other):
        o = self._asint(other)
        return self._asint(self).__gt__(o)

    def __ge__(self, other):
        o = self._asint(other)
        return self._asint(self).__ge__(o)


@dataclasses.dataclass(frozen=True)
class OciCredentials:
    pass


@dataclasses.dataclass(frozen=True)
class OciConfig:
    privileges: Privileges
    credentials: OciCredentials
    url_prefixes: typing.Sequence[str] = dataclasses.field(default_factory=tuple)

    def valid_for(self, image_reference: str, privileges: Privileges=Privileges.READONLY):
        if privileges and privileges > self.privileges:
            return False
        if not self.url_prefixes:
            return True

        unmodified_ref = image_reference.lower()
        image_reference = oci.util.normalise_image_reference(image_reference=image_reference).lower()
        for prefix in self.url_prefixes:
            prefix = prefix.lower()
            if image_reference.startswith(oci.util.normalise_image_reference(prefix)):
                return True
            if image_reference.startswith(prefix.lower()):
                return True
            if unmodified_ref.startswith(prefix):
                return True
        return False


@dataclasses.dataclass(frozen=True)
class OciBasicAuthCredentials(OciCredentials):
    username: str
    password: str


# typehint-alias
image_reference = str

credentials_lookup = typing.Callable[[image_reference, Privileges, bool], OciCredentials]


def mk_credentials_lookup(
    cfgs: typing.Union[OciCredentials, typing.Sequence[OciCredentials]],
) -> typing.Callable[[image_reference, Privileges, bool], OciConfig]:
    '''
    returns a callable that can be queried for matching OciCredentials for requested
    privileges and image-references
    '''
    if isinstance(cfgs, OciConfig):
        cfgs = (cfgs,)

    def lookup_credentials(
        image_reference: str,
        privileges: Privileges=Privileges.READONLY,
        absent_ok: bool=False,
    ):
        valid_cfgs = sorted(
            (
                c for c in cfgs
                if c.valid_for(image_reference=image_reference, privileges=privileges)
            ),
            key=operator.attrgetter('privileges'),
        )
        if not valid_cfgs and absent_ok:
            return None
        if not valid_cfgs:
            raise ValueError(f'no valid cfg found: {image_reference=}, {privileges=}')

        # first element contains cfg with least required privileges
        return valid_cfgs[0].credentials

    return lookup_credentials
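
# Editor's usage sketch (not part of the original module); the registry prefix
# and credentials below are made up for illustration.
if __name__ == '__main__':
    cfg = OciConfig(
        privileges=Privileges.READWRITE,
        credentials=OciBasicAuthCredentials(username='ci-bot', password='secret'),
        url_prefixes=('eu.gcr.io/example-project',),
    )
    lookup = mk_credentials_lookup(cfg)
    # a read-only request is satisfied by the read-write config (least privileges win)
    print(lookup('eu.gcr.io/example-project/app:1.0', privileges=Privileges.READONLY))
    # a reference outside the configured prefixes yields None when absent_ok is set
    print(lookup('docker.io/library/alpine', absent_ok=True))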
|
[
"christian.cwienk@sap.com"
] |
christian.cwienk@sap.com
|
01bde579ac5e8282b572898002630a3b05d69be0
|
3d19e1a316de4d6d96471c64332fff7acfaf1308
|
/Users/N/NinaC/obesity_scraper_3_us.py
|
d60baf370cc2cca6e73d62152bc797eb7f588e6f
|
[] |
no_license
|
BerilBBJ/scraperwiki-scraper-vault
|
4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc
|
65ea6a943cc348a9caf3782b900b36446f7e137d
|
refs/heads/master
| 2021-12-02T23:55:58.481210
| 2013-09-30T17:02:59
| 2013-09-30T17:02:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,316
|
py
|
###################################################################################
# Twitter API scraper - designed to be forked and used for more interesting things
###################################################################################

import scraperwiki
import simplejson
import urllib2

# Get results from the Twitter API! Change QUERY to your search term of choice.
# Examples: 'newsnight', '#newsnight', 'from:bbcnewsnight', 'to:bbcnewsnight'
QUERY = '"I am obese"'
RESULTS_PER_PAGE = '100'
LANGUAGE = 'en'
NUM_PAGES = 100

for page in range(1, NUM_PAGES+1):
    base_url = 'http://search.twitter.com/search.json?q=exclude:retweets+%s&rpp=%s&lang=%s&page=%s&country_code=US' \
        % (urllib2.quote(QUERY), RESULTS_PER_PAGE, LANGUAGE, page)
    try:
        results_json = simplejson.loads(scraperwiki.scrape(base_url))
        for result in results_json['results']:
            data = {}
            data['id'] = result['id']
            data['text'] = result['text']
            data['from_user'] = result['from_user']
            data['geo'] = result['geo']
            data['profile_image_url'] = result['profile_image_url']
            data['from_user_id_str'] = result['from_user_id_str']
            data['created_at'] = result['created_at']
            data['id_str'] = result['id_str']
            data['metadata'] = result['metadata']
            data['to_user_id'] = result['to_user_id']
            print data['from_user'], data['text'], data['geo']
            scraperwiki.sqlite.save(["id"], data)
    except:
        print 'Oh dear, failed to scrape %s' % base_url
|
[
"pallih@kaninka.net"
] |
pallih@kaninka.net
|
1b85cda45f6371da032a7c90d41f74ee9956e9d4
|
bfa100593b7fc67ae65593bdddb357fa3d9e27cf
|
/quotes/migrations/0001_initial.py
|
9b07bac95358e974f2fd108e3f0c319e3ee86360
|
[] |
no_license
|
wall-e-08/medigap-wagtail
|
e2342631004de047a4b3d09571dd88f2a6fc2286
|
1d7b77759f071eec89e29591e814523d4c433655
|
refs/heads/master
| 2020-05-20T17:27:43.966094
| 2019-04-30T06:25:58
| 2019-04-30T06:25:58
| 185,688,386
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,202
|
py
|
# Generated by Django 2.1.7 on 2019-04-07 09:58

from django.db import migrations, models


class Migration(migrations.Migration):

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Lead',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_name', models.CharField(blank=True, max_length=100, null=True)),
                ('middle_name', models.CharField(blank=True, max_length=100, null=True)),
                ('last_name', models.CharField(blank=True, max_length=100, null=True)),
                ('email', models.EmailField(blank=True, max_length=200, null=True)),
                ('phone', models.CharField(blank=True, max_length=100, null=True, verbose_name='Phone number')),
                ('phone2', models.CharField(blank=True, max_length=100, null=True, verbose_name='Alternate Phone number')),
                ('zip_code', models.CharField(blank=True, max_length=100, null=True)),
                ('created_time', models.DateTimeField(auto_now_add=True, verbose_name='Quote Time')),
            ],
        ),
    ]
|
[
"debashis.buet08@gmail.com"
] |
debashis.buet08@gmail.com
|
980b3f033059374e13f3f5fe9614b19c68e86ac1
|
51d0377511a5da902033fb9d80184db0e096fe2c
|
/18-linear-classifiers-in-python/2-loss-functions/03-comparing-the-logistic-and-hinge-losses.py
|
69c140b9a46811245c800484fcf7d7e5197fd13d
|
[] |
no_license
|
sashakrasnov/datacamp
|
c28c6bda178163337baed646220b2f7dcc36047d
|
759f4cec297883907e21118f24a3449d84c80761
|
refs/heads/master
| 2021-12-07T02:54:51.190672
| 2021-09-17T21:05:29
| 2021-09-17T21:05:29
| 157,093,632
| 6
| 5
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 874
|
py
|
'''
Comparing the logistic and hinge losses

In this exercise you'll create a plot of the logistic and hinge losses using their mathematical expressions, which are provided to you. The loss function diagram from the video is shown on the right.
'''

import numpy as np
import matplotlib.pyplot as plt

'''
INSTRUCTIONS

* Evaluate the log_loss and hinge_loss functions at the grid points, so that they are plotted.
'''

# Mathematical functions for logistic and hinge losses
# Feel free to ignore if you're not interested
def log_loss(raw_model_output):
    return np.log(1 + np.exp(-raw_model_output))

def hinge_loss(raw_model_output):
    return np.maximum(0, 1 - raw_model_output)

# Create a grid of values and plot
grid = np.linspace(-2, 2, 1000)
plt.plot(grid, log_loss(grid), label='logistic')
plt.plot(grid, hinge_loss(grid), label='hinge')
plt.legend()
plt.show()
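
# Editor's note: at raw_model_output = 0 the two losses differ -- log_loss(0) = ln(2) ~ 0.693
# while hinge_loss(0) = 1. For raw outputs >= 1 the hinge loss is exactly 0, whereas the
# logistic loss decays towards 0 but never reaches it.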
|
[
"a@skrasnov.com"
] |
a@skrasnov.com
|
f49f2e35c58478490b43d51f112079f4c750f693
|
2194b6c17f3153c5976d6ac4a9ab78211027adab
|
/otoroshi_admin_api_client/api/templates/otoroshicontrollersadminapi_templates_controllerinitiate_api_key_templates.py
|
fcb0e751d7ce49d5a2c84428e89ff712f4d09b14
|
[] |
no_license
|
krezreb/otoroshi-admin-api-client
|
7fab5e873c9c5950d77fffce6bcf80d3fdf4c319
|
9b3156c11eac227024cfe4a26c0129618deb2c4d
|
refs/heads/master
| 2023-05-08T08:32:00.982987
| 2021-05-27T09:55:00
| 2021-05-27T09:55:00
| 371,324,636
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,580
|
py
|
from typing import Any, Dict, Optional, Union

import httpx

from ...client import AuthenticatedClient
from ...models.error_response import ErrorResponse
from ...models.otoroshimodels_api_key import OtoroshimodelsApiKey
from ...types import Response


def _get_kwargs(
    *,
    client: AuthenticatedClient,
) -> Dict[str, Any]:
    url = "{}/api/new/apikey".format(client.base_url)

    headers: Dict[str, Any] = client.get_headers()
    cookies: Dict[str, Any] = client.get_cookies()

    return {
        "url": url,
        "headers": headers,
        "cookies": cookies,
        "timeout": client.get_timeout(),
    }


def _parse_response(*, response: httpx.Response) -> Optional[Union[ErrorResponse, OtoroshimodelsApiKey]]:
    if response.status_code == 401:
        response_401 = ErrorResponse.from_dict(response.json())
        return response_401
    if response.status_code == 400:
        response_400 = ErrorResponse.from_dict(response.json())
        return response_400
    if response.status_code == 404:
        response_404 = ErrorResponse.from_dict(response.json())
        return response_404
    if response.status_code == 200:
        response_200 = OtoroshimodelsApiKey.from_dict(response.json())
        return response_200
    return None


def _build_response(*, response: httpx.Response) -> Response[Union[ErrorResponse, OtoroshimodelsApiKey]]:
    return Response(
        status_code=response.status_code,
        content=response.content,
        headers=response.headers,
        parsed=_parse_response(response=response),
    )


def sync_detailed(
    *,
    client: AuthenticatedClient,
) -> Response[Union[ErrorResponse, OtoroshimodelsApiKey]]:
    kwargs = _get_kwargs(
        client=client,
    )

    response = httpx.get(
        **kwargs,
    )

    return _build_response(response=response)


def sync(
    *,
    client: AuthenticatedClient,
) -> Optional[Union[ErrorResponse, OtoroshimodelsApiKey]]:
    """ """

    return sync_detailed(
        client=client,
    ).parsed


async def asyncio_detailed(
    *,
    client: AuthenticatedClient,
) -> Response[Union[ErrorResponse, OtoroshimodelsApiKey]]:
    kwargs = _get_kwargs(
        client=client,
    )

    async with httpx.AsyncClient() as _client:
        response = await _client.get(**kwargs)

    return _build_response(response=response)


async def asyncio(
    *,
    client: AuthenticatedClient,
) -> Optional[Union[ErrorResponse, OtoroshimodelsApiKey]]:
    """ """

    return (
        await asyncio_detailed(
            client=client,
        )
    ).parsed
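
# Editor's usage sketch (not part of the generated module): the
# AuthenticatedClient constructor arguments are assumed -- check the generated
# client module for the real signature.
#
#   client = AuthenticatedClient(base_url="https://otoroshi.example", token="...")
#   api_key = sync(client=client)             # OtoroshimodelsApiKey on 200, ErrorResponse on 4xx
#   detailed = sync_detailed(client=client)   # full Response with status_code/headers/parsed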
|
[
"josephbeeson@gmail.com"
] |
josephbeeson@gmail.com
|
2dfa3b839998cf326bc38e466629f82853dae124
|
0a9cfe3d7c07e2a5997647ddbc04a73e7a0dc69d
|
/hivetools/lost_bee.py
|
23763356df196decad810b15952f20c16d07b8e0
|
[
"MIT"
] |
permissive
|
brianoflondon/hivetools
|
7120ec87bb608ea17daae395c42f637fc8e9fe44
|
dbf97370503d2891cc953e136d226b098defa5ee
|
refs/heads/master
| 2022-04-21T01:30:15.289781
| 2020-04-16T15:30:14
| 2020-04-16T15:30:14
| 255,656,686
| 0
| 0
| null | 2020-04-14T16:03:25
| 2020-04-14T16:03:24
| null |
UTF-8
|
Python
| false
| false
| 627
|
py
|
#!/usr/bin/env python3

from getpass import getpass

from beemgraphenebase.account import PasswordKey
from tabulate import tabulate

hive_id = "thecrazygm"
hive_id = input("Hive User ID: ")
brain_key = getpass(prompt='Master Password: ')

roles = ["owner", "active", "posting", "memo"]
data = []
for role in roles:
    keys = PasswordKey(hive_id, brain_key, role=role, prefix="STM")
    priv_key = keys.get_private()
    # priv = keys.get_private()
    pub_key = keys.get_public()
    # pub = keys.get_public()
    data += [[role, str(pub_key), str(priv_key)]]

print(tabulate(data, headers=["Role", "Public Key", "Private Key"]))
|
[
"thecrazygm@gmail.com"
] |
thecrazygm@gmail.com
|
c1810bdae8eb260c21b70432f9e0091da1d8ee3a
|
c61798997614f4430a6a56b16e8d17fe75fb2f9c
|
/Yurii_Khomych/l_6_files/csv_examples/csv_read_dictionary.py
|
521b595e29954d34abd71fca21273664147b2280
|
[] |
no_license
|
YuriiKhomych/ITEA_AC
|
ad944bbe74be88f306a45f38efa70765c5286162
|
f9eb147da1135a978929ae370d9c9fcd8dc59d21
|
refs/heads/master
| 2022-12-18T14:55:56.162451
| 2020-05-03T12:45:02
| 2020-05-03T12:45:02
| 234,373,863
| 0
| 9
| null | 2022-12-08T03:46:33
| 2020-01-16T17:26:50
|
Python
|
UTF-8
|
Python
| false
| false
| 480
|
py
|
import csv

with open("employee_birthday.csv", mode="r") as csv_file:
    csv_reader = csv.DictReader(csv_file)
    line_count = 0
    for row in csv_reader:
        if line_count == 0:
            print(f'Column names are {", ".join(row)}')
            line_count += 1
        print(
            f'\t{row["name"]} works in the {row["department"]} department, and was born in {row["birthday month"]}.'
        )
        line_count += 1
    print(f"Processed {line_count} lines.")
|
[
"yuriykhomich@gmail.com"
] |
yuriykhomich@gmail.com
|
ac382ddbbaef11f5a3db3f6a26cb1703eeac2af9
|
949908be7a522279bc5947ee0be436ef058767a9
|
/code/generate_mcts_games.py
|
c6a85e7062e56d3eeeb9474ee28d5d3435bb39d0
|
[] |
no_license
|
maxpumperla/deep_learning_and_the_game_of_go
|
3bd2bddce228b6696fb716eb0f18a2e9c82bb20c
|
c70cfe4a03dd2365dcb4295236755cca7a7178b7
|
refs/heads/master
| 2023-08-21T14:48:53.899001
| 2022-09-17T13:03:09
| 2022-09-17T13:03:09
| 108,328,408
| 955
| 402
| null | 2022-09-17T13:03:10
| 2017-10-25T21:32:32
|
Python
|
UTF-8
|
Python
| false
| false
| 3,187
|
py
|
# tag::generate_mcts_imports[]
import argparse

import numpy as np

from dlgo.encoders import get_encoder_by_name
from dlgo import goboard_fast as goboard
from dlgo import mcts
from dlgo.utils import print_board, print_move
# end::generate_mcts_imports[]


# tag::generate_mcts[]
def generate_game(board_size, rounds, max_moves, temperature):
    boards, moves = [], []  # <1>

    encoder = get_encoder_by_name('oneplane', board_size)  # <2>

    game = goboard.GameState.new_game(board_size)  # <3>

    bot = mcts.MCTSAgent(rounds, temperature)  # <4>

    num_moves = 0
    while not game.is_over():
        print_board(game.board)
        move = bot.select_move(game)  # <5>
        if move.is_play:
            boards.append(encoder.encode(game))  # <6>

            move_one_hot = np.zeros(encoder.num_points())
            move_one_hot[encoder.encode_point(move.point)] = 1
            moves.append(move_one_hot)  # <7>

        print_move(game.next_player, move)
        game = game.apply_move(move)  # <8>
        num_moves += 1
        if num_moves > max_moves:  # <9>
            break

    return np.array(boards), np.array(moves)  # <10>

# <1> In `boards` we store encoded board state, `moves` is for encoded moves.
# <2> We initialize a OnePlaneEncoder by name with given board size.
# <3> A new game of size `board_size` is instantiated.
# <4> A Monte Carlo tree search agent with specified number of rounds and temperature will serve as our bot.
# <5> The next move is selected by the bot.
# <6> The encoded board situation is appended to `boards`.
# <7> The one-hot-encoded next move is appended to `moves`.
# <8> Afterwards the bot move is applied to the board.
# <9> We continue with the next move, unless the maximum number of moves has been reached.
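# <10> Finally, the collected boards and their one-hot moves are returned as NumPy arrays.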
# end::generate_mcts[]
# tag::generate_mcts_main[]
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--board-size', '-b', type=int, default=9)
    parser.add_argument('--rounds', '-r', type=int, default=1000)
    parser.add_argument('--temperature', '-t', type=float, default=0.8)
    parser.add_argument('--max-moves', '-m', type=int, default=60,
                        help='Max moves per game.')
    parser.add_argument('--num-games', '-n', type=int, default=10)
    parser.add_argument('--board-out')
    parser.add_argument('--move-out')
    args = parser.parse_args()  # <1>

    xs = []
    ys = []

    for i in range(args.num_games):
        print('Generating game %d/%d...' % (i + 1, args.num_games))
        x, y = generate_game(args.board_size, args.rounds, args.max_moves, args.temperature)  # <2>
        xs.append(x)
        ys.append(y)

    x = np.concatenate(xs)  # <3>
    y = np.concatenate(ys)

    np.save(args.board_out, x)  # <4>
    np.save(args.move_out, y)


if __name__ == '__main__':
    main()

# <1> This application allows some customization via command line arguments.
# <2> For the specified number of games we generate game data.
# <3> After all games have been generated, we concatenate features and labels, respectively.
# <4> We store feature and label data to separate files, as specified by the command line options.
# end::generate_mcts_main[]
|
[
"max.pumperla@googlemail.com"
] |
max.pumperla@googlemail.com
|
d845487c0e8cfd54401601e8139b2f3acf4ad17a
|
3e63608e1cad90bc845c4580723e57ae7ca3f61d
|
/cartography/intel/oci/utils.py
|
e92ab4552703c97a1d2b485e2a3694912bb35905
|
[
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
lyft/cartography
|
06dcbf13907cbb9a31b75cd8b21f5721f7cc1b01
|
830b8944879a01f52b21ee12b6fddf245f9733cb
|
refs/heads/master
| 2023-08-31T12:27:59.752452
| 2023-08-28T20:42:12
| 2023-08-28T20:42:12
| 172,811,550
| 2,778
| 334
|
Apache-2.0
| 2023-09-13T04:59:46
| 2019-02-27T00:16:29
|
Python
|
UTF-8
|
Python
| false
| false
| 3,223
|
py
|
# Copyright (c) 2020, Oracle and/or its affiliates.
# OCI intel module - utility functions
import json
from typing import Any
from typing import Dict
from typing import List

import neo4j


# Generic way to turn a OCI python object into the json response that you would see from calling the REST API.
def oci_object_to_json(in_obj: Any) -> List[Dict[str, Any]]:
    out_list = []
    for dict in json.loads(str(in_obj)):
        out_list.append(replace_char_in_dict(dict))
    return out_list


# Have to replace _ with - in dictionary keys, since _ is substituted for - in OCI object variables.
def replace_char_in_dict(in_dict: Dict[str, Any]) -> Dict[str, Any]:
    out_dict = {}
    for dict_key, dict_val in in_dict.items():
        if isinstance(dict_val, dict):
            dict_val = replace_char_in_dict(dict_val)
        out_dict[dict_key.replace('_', '-')] = dict_val
    return out_dict


# Grab list of all compartments and sub-compartments in neo4j already populated by iam.
def get_compartments_in_tenancy(neo4j_session: neo4j.Session, tenancy_id: str) -> neo4j.Result:
    query = "MATCH (OCITenancy{ocid: $OCI_TENANCY_ID})-[*]->(compartment:OCICompartment) " \
            "return DISTINCT compartment.name as name, compartment.ocid as ocid, " \
            "compartment.compartmentid as compartmentid;"
    return neo4j_session.run(query, OCI_TENANCY_ID=tenancy_id)


# Grab list of all groups in neo4j already populated by iam.
def get_groups_in_tenancy(neo4j_session: neo4j.Session, tenancy_id: str) -> neo4j.Result:
    query = "MATCH (OCITenancy{ocid: $OCI_TENANCY_ID})-[*]->(group:OCIGroup)" \
            "return DISTINCT group.name as name, group.ocid as ocid;"
    return neo4j_session.run(query, OCI_TENANCY_ID=tenancy_id)


# Grab list of all policies in neo4j already populated by iam.
def get_policies_in_tenancy(neo4j_session: neo4j.Session, tenancy_id: str) -> neo4j.Result:
    query = "MATCH (OCITenancy{ocid: $OCI_TENANCY_ID})-[*]->(policy:OCIPolicy)" \
            "return DISTINCT policy.name as name, policy.ocid as ocid, policy.statements as statements, " \
            "policy.compartmentid as compartmentid;"
    return neo4j_session.run(query, OCI_TENANCY_ID=tenancy_id)


# Grab list of all regions in neo4j already populated by iam.
def get_regions_in_tenancy(neo4j_session: neo4j.Session, tenancy_id: str) -> neo4j.Result:
    query = "MATCH (OCITenancy{ocid: $OCI_TENANCY_ID})-->(region:OCIRegion)" \
            "return DISTINCT region.name as name, region.key as key;"
    return neo4j_session.run(query, OCI_TENANCY_ID=tenancy_id)


# Grab list of all security groups in neo4j already populated by network. Need to handle regions for this one.
def get_security_groups_in_tenancy(
    neo4j_session: neo4j.Session,
    tenancy_id: str, region: str,
) -> neo4j.Result:
    query = "MATCH (OCITenancy{ocid: $OCI_TENANCY_ID})-[*]->(security_group:OCINetworkSecurityGroup)-[OCI_REGION]->" \
            "(region:OCIRegion{name: $OCI_REGION})" \
            "return DISTINCT security_group.name as name, security_group.ocid as ocid, security_group.compartmentid " \
            "as compartmentid;"
    return neo4j_session.run(query, OCI_TENANCY_ID=tenancy_id, OCI_REGION=region)
|
[
"noreply@github.com"
] |
lyft.noreply@github.com
|
1aaca2801000e12f5206239db1426efe9c79af26
|
e3bdb7844f634efd89109079d22cade713c4899d
|
/test/test_tele_check_cbp_payment_method.py
|
bc5d46fb45107545f525634ef2eb3c58cb505e6f
|
[] |
no_license
|
pc-coholic/Python
|
5170c27da09b066c353e09539e404961f7ad50b7
|
b7251c31339b579f71fb7ee9db05be51e9e43361
|
refs/heads/master
| 2023-04-19T02:42:02.914726
| 2021-04-26T16:07:37
| 2021-04-26T16:07:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,182
|
py
|
# coding: utf-8

"""
    Payment Gateway API Specification.

    The documentation here is designed to provide all of the technical guidance required to consume and integrate with our APIs for payment processing. To learn more about our APIs please visit https://docs.firstdata.com/org/gateway.  # noqa: E501

    The version of the OpenAPI document: 21.2.0.20210406.001
    Generated by: https://openapi-generator.tech
"""

from __future__ import absolute_import

import unittest

import openapi_client
from openapi_client.models.tele_check_cbp_payment_method import TeleCheckCBPPaymentMethod  # noqa: E501
from openapi_client.rest import ApiException


class TestTeleCheckCBPPaymentMethod(unittest.TestCase):
    """TeleCheckCBPPaymentMethod unit test stubs"""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def testTeleCheckCBPPaymentMethod(self):
        """Test TeleCheckCBPPaymentMethod"""
        # FIXME: construct object with mandatory attributes with example values
        # model = openapi_client.models.tele_check_cbp_payment_method.TeleCheckCBPPaymentMethod()  # noqa: E501
        pass


if __name__ == '__main__':
    unittest.main()
|
[
"emargules@bluepay.com"
] |
emargules@bluepay.com
|
4f90609902c001000dd8541fb9265dbecca5a894
|
7eebbfaee45fdc57c4fc6ba32c87c35be1e62b14
|
/airbyte-integrations/connectors/source-shopify/main.py
|
583c32023bc0f39e94d299da40e63db51016e230
|
[
"MIT",
"Elastic-2.0"
] |
permissive
|
Velocity-Engineering/airbyte
|
b6e1fcead5b9fd7c74d50b9f27118654604dc8e0
|
802a8184cdd11c1eb905a54ed07c8732b0c0b807
|
refs/heads/master
| 2023-07-31T15:16:27.644737
| 2021-09-28T08:43:51
| 2021-09-28T08:43:51
| 370,730,633
| 0
| 1
|
MIT
| 2021-06-08T05:58:44
| 2021-05-25T14:55:43
|
Java
|
UTF-8
|
Python
| false
| false
| 248
|
py
|
#
# Copyright (c) 2021 Airbyte, Inc., all rights reserved.
#

import sys

from airbyte_cdk.entrypoint import launch
from source_shopify import SourceShopify

if __name__ == "__main__":
    source = SourceShopify()
    launch(source, sys.argv[1:])
|
[
"noreply@github.com"
] |
Velocity-Engineering.noreply@github.com
|
7f438642a7db1531b61af45a6b30465cec4404e4
|
8e24e8bba2dd476f9fe612226d24891ef81429b7
|
/geeksforgeeks/python/python_all/116_13.py
|
c7ed19f6baf638e4c0f6144bd59a54b0499a494f
|
[] |
no_license
|
qmnguyenw/python_py4e
|
fb56c6dc91c49149031a11ca52c9037dc80d5dcf
|
84f37412bd43a3b357a17df9ff8811eba16bba6e
|
refs/heads/master
| 2023-06-01T07:58:13.996965
| 2021-06-15T08:39:26
| 2021-06-15T08:39:26
| 349,059,725
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,694
|
py
|
Python | Retain K consecutive elements

Sometimes while working with data, we can have a problem in which we need to
select the elements that occur exactly K times consecutively. This problem can
occur in many domains. Let's discuss certain ways in which this problem can be
solved.

**Method #1 : Using groupby() + list comprehension**

This task can be performed using the above functionalities. In this, we group
the numbers that occur consecutively and keep a group's value only when the run
has length K. We iterate over the groups using a list comprehension.

# Python3 code to demonstrate working of
# Retain K consecutive elements
# using groupby() + list comprehension
from itertools import groupby

# initialize list
test_list = [1, 1, 4, 5, 5, 6, 7, 7, 8]

# printing original list
print("The original list : " + str(test_list))

# initialize K
K = 2

# Retain K consecutive elements
# using groupby() + list comprehension
res = [i for i, j in groupby(test_list) if len(list(j)) == K]

# printing result
print("The K consecutive elements are : " + str(res))

**Output :**

The original list : [1, 1, 4, 5, 5, 6, 7, 7, 8]
The K consecutive elements are : [1, 5, 7]
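
Note that groupby() forms a new group each time the value changes, so only
*adjacent* equal elements are grouped together. That is exactly what this
problem needs: each consecutive run is measured on its own, even if the same
value reappears later in the list. A quick illustration of that behaviour:

# groupby() yields one (key, group) pair per consecutive run, not per distinct value
from itertools import groupby
print([(key, len(list(group))) for key, group in groupby([1, 1, 4, 1])])
# prints [(1, 2), (4, 1), (1, 1)] -- the two separate runs of 1 stay separate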
**Method #2 : Using list comprehension + islice() + groupby()**

This task can also be performed using the above functions. Grouping is done in
a similar way as above, but here we extract the consecutive elements using
islice(). (Note that because islice(j, 0, K) stops after K items, this version
actually tests for runs of at least K elements, so runs longer than K also
pass, unlike Method #1.)

# Python3 code to demonstrate working of
# Retain K consecutive elements
# using groupby() + list comprehension + islice()
from itertools import groupby, islice

# initialize list
test_list = [1, 1, 4, 5, 5, 6, 7, 7, 8]

# printing original list
print("The original list : " + str(test_list))

# initialize K
K = 2

# Retain K consecutive elements
# using groupby() + list comprehension + islice()
res = [i for i, j in groupby(test_list) if len(list(islice(j, 0, K))) == K]

# printing result
print("The K consecutive elements are : " + str(res))

**Output :**

The original list : [1, 1, 4, 5, 5, 6, 7, 7, 8]
The K consecutive elements are : [1, 5, 7]
|
[
"qmnguyenw@gmail.com"
] |
qmnguyenw@gmail.com
|
b2b118c86d4de3a1e077197eb9735bf522e54fbc
|
5ddcaa63a665b91b4928517a8463db497d581e79
|
/run.py
|
efa32721e6a61deed9332f8194da40c53460c644
|
[] |
no_license
|
vgoklani/aiohttpvsgrequests
|
5c1144977a94dfad7fe1f5866004b37d69a232e0
|
ef260649ff16c886a8d0e7f0d1a85dee89af3e15
|
refs/heads/master
| 2021-01-19T20:47:00.745877
| 2016-12-08T08:03:05
| 2016-12-08T08:03:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,650
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
Compare aiohttp and grequests
'''
import logging
import hashlib
import asyncio
import time

import aiohttp
import grequests
from hdx.data.resource import Resource
from hdx.facades.simple import facade
from requests import HTTPError
from requests import Session
from requests.adapters import HTTPAdapter

logger = logging.getLogger(__name__)

NUMBER_OF_URLS_TO_PROCESS = 100


async def fetch(metadata, session):
    url, resource_id = metadata
    md5hash = hashlib.md5()
    try:
        with aiohttp.Timeout(300, loop=session.loop):
            async with session.get(url, timeout=10) as response:
                last_modified = response.headers.get('Last-Modified', None)
                if last_modified:
                    response.close()
                    return resource_id, url, 1, last_modified
                logger.info('Hashing %s' % url)
                async for chunk in response.content.iter_chunked(1024):
                    if chunk:
                        md5hash.update(chunk)
                return resource_id, url, 2, md5hash.hexdigest()
    except Exception as e:
        return resource_id, url, 0, str(e)


async def bound_fetch(sem, metadata, session):
    # Getter function with semaphore.
    async with sem:
        return await fetch(metadata, session)


async def aiohttp_check_resources_for_last_modified(last_modified_check, loop):
    tasks = list()
    # create instance of Semaphore
    sem = asyncio.Semaphore(100)
    conn = aiohttp.TCPConnector(keepalive_timeout=10, limit=100)
    async with aiohttp.ClientSession(connector=conn, loop=loop) as session:
        for metadata in last_modified_check:
            task = bound_fetch(sem, metadata, session)
            tasks.append(task)
        return await asyncio.gather(*tasks)


def set_metadata(metadata):
    def hook(resp, **kwargs):
        resp.metadata = metadata
        return resp
    return hook


def grequests_check_resources_for_last_modified(last_modified_check):
    results = list()
    reqs = list()

    def exception_handler(req, exc):
        url, res_id = req.metadata
        results.append((res_id, url, 0, str(exc)))

    with Session() as session:
        session.mount('http://', HTTPAdapter(pool_connections=100, pool_maxsize=100))
        session.mount('https://', HTTPAdapter(pool_connections=100, pool_maxsize=100))
        for metadata in last_modified_check:
            req = grequests.get(metadata[0], timeout=10, session=session, callback=set_metadata(metadata))
            req.metadata = metadata
            reqs.append(req)
        for response in grequests.imap(reqs, size=100, stream=True, exception_handler=exception_handler):
            url, resource_id = response.metadata
            try:
                response.raise_for_status()
            except HTTPError as e:
                results.append((resource_id, url, 0, response.status_code))
                response.close()
                continue
            last_modified = response.headers.get('Last-Modified', None)
            if last_modified:
                results.append((resource_id, url, 1, last_modified))
                response.close()
                continue
            logger.info('Hashing %s' % url)
            md5hash = hashlib.md5()
            try:
                for chunk in response.iter_content(chunk_size=1024):
                    if chunk:  # filter out keep-alive new chunks
                        md5hash.update(chunk)
                results.append((resource_id, url, 2, md5hash.hexdigest()))
            except Exception as e:
                results.append((resource_id, url, 0, str(e)))
            finally:
                response.close()
    return results
def print_results(results):
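    # Status codes produced above (editor's note): 0 = request failed (HTTP error or
    # exception), 1 = the server supplied a Last-Modified header, 2 = the body was
    # downloaded and MD5-hashed.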
    lastmodified_count = 0
    hash_count = 0
    failed_count = 0
    for resource_id, url, status, result in results:
        if status == 0:
            failed_count += 1
            logger.error(result)
        elif status == 1:
            lastmodified_count += 1
        elif status == 2:
            hash_count += 1
        else:
            raise ValueError('Invalid status returned!')
    str = 'Have Last-Modified: %d, Hashed: %d, ' % (lastmodified_count, hash_count)
    str += 'Number Failed: %d' % failed_count
    logger.info(str)


def run_aiohttp(last_modified_check):
    start_time = time.time()
    loop = asyncio.get_event_loop()
    future = asyncio.ensure_future(aiohttp_check_resources_for_last_modified(last_modified_check, loop))
    results = loop.run_until_complete(future)
    logger.info('Execution time: %s seconds' % (time.time() - start_time))
    print_results(results)


def run_grequests(last_modified_check):
    start_time = time.time()
    results = grequests_check_resources_for_last_modified(last_modified_check)
    logger.info('Execution time: %s seconds' % (time.time() - start_time))
    print_results(results)


def main(configuration):
    resources = Resource.search_in_hdx(configuration, 'name:')
    last_modified_check = list()
    for resource in resources:
        resource_id = resource['id']
        url = resource['url']
        if 'data.humdata.org' in url or 'manage.hdx.rwlabs.org' in url or 'proxy.hxlstandard.org' in url or \
                'scraperwiki.com' in url or 'ourairports.com' in url:
            continue
        last_modified_check.append((url, resource_id))
    last_modified_check = sorted(last_modified_check)[:NUMBER_OF_URLS_TO_PROCESS]
    # run_grequests(last_modified_check)
    run_aiohttp(last_modified_check)


if __name__ == '__main__':
    facade(main, hdx_site='prod', hdx_read_only=True)
|
[
"rans@email.com"
] |
rans@email.com
|
e7bd2854db85a4f829ca05755bd0a9ded7ee7c71
|
a79b734bec4bb0dacfee46f0fb8f33f2872581a9
|
/p_gen_data.py
|
dc876af9fb474224758838e2b4821abbead64689
|
[] |
no_license
|
ryosuke071111/cnn_seq2seq
|
530d27e0efa96fe9181c0708000897261ca489b6
|
fda5ffa68b37d3f537ccb8b5ec142c1904c455a8
|
refs/heads/master
| 2020-07-02T15:53:20.137133
| 2019-08-12T11:23:49
| 2019-08-12T11:23:49
| 201,579,242
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,564
|
py
|
import glob
from tensorflow.core.example import example_pb2
import struct
from nltk.tokenize import word_tokenize, sent_tokenize
from utils import *
from tqdm import tqdm

PATH = "/home/ryosuke/desktop/data_set/cnn_stories_tokenized/"

def sent_split(text):
    words = [sent for sent in sent_tokenize(text)]
    words = list(map(lambda x: x.split(), words))
    return [word for inner_list in words for word in inner_list]

def data_generate(vocab, num_of_data):
    file = open(PATH + "train.bin", "rb")
    # file = open(PATH + "val.bin", "rb")
    # try this out on the validation set
    articles = []
    abstracts = []
    articles_extend = []
    abstracts_extend = []
    oovs = []
    print('# of data', num_of_data)
    i = 0
    pbar = tqdm(total=num_of_data)
    while i < num_of_data:
        len_bytes = file.read(8)
        if not len_bytes:
            print('finished reading this file')
            break
        # split the continuous byte stream into individual records
        str_len = struct.unpack('q', len_bytes)[0]
        example_str = struct.unpack('%ds' % str_len, file.read(str_len))[0]
        # extract the article/abstract from each record
        data = example_pb2.Example.FromString(example_str)
        article = data.features.feature["article"].bytes_list.value[0]
        if len(article) <= 0:
            continue
        abstract = data.features.feature["abstract"].bytes_list.value[0]
        # decode the byte strings to text (for the abstract, <s></s> markers wrap each sentence)
        article = sent_split(article.decode())[:MAX_INPUT_LENGTH]
        abstract = " ".join(vocab.abstract2sents(abstract.decode())).split()[:MAX_OUTPUT_LENGTH]
        # convert words to IDs (extended-vocabulary version, with in-article OOVs)
        article_vocab_extend, oov = vocab.article2ids(article)
        abstract_vocab_extend = [START_DECODING_NO] + vocab.abstract2ids(abstract, oov) + [STOP_DECODING_NO]
        # convert words to IDs (base-vocabulary version)
        article = [vocab._word2id(word) for word in article]
        abstract = [START_DECODING_NO] + [vocab._word2id(word) for word in abstract] + [STOP_DECODING_NO]
        articles.append(article)
        abstracts.append(abstract)
        oovs.append(oov)
        articles_extend.append(article_vocab_extend)
        abstracts_extend.append(abstract_vocab_extend)
        i += 1
        pbar.update(1)
    print('data successfully constructed!')
    print()
    return articles, abstracts, oovs, articles_extend, abstracts_extend
|
[
"ryosuke0711993@gmail.com"
] |
ryosuke0711993@gmail.com
|
ea43039889c71780bfb652cd23a7ffd233c9b35a
|
81fe7f2faea91785ee13cb0297ef9228d832be93
|
/AdventOfCode/19/day09.py
|
2aa42d5b34b1bbbd2b3ff0b8293a3fedf050b62b
|
[] |
no_license
|
blegloannec/CodeProblems
|
92349c36e1a35cfc1c48206943d9c2686ea526f8
|
77fd0fa1f1a519d4d55265b9a7abf12f1bd7d19e
|
refs/heads/master
| 2022-05-16T20:20:40.578760
| 2021-12-30T11:10:25
| 2022-04-22T08:11:07
| 54,330,243
| 5
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,371
|
py
|
#!/usr/bin/env python3

import sys
from collections import deque

P = list(map(int, sys.stdin.readline().strip().split(',')))


class IntcodeComputer:
    def __init__(self, P, In=None):
        self.P = P[:] + [0]*10**3  # program copy & padding
        self.i = 0
        if In is None:
            self.In = deque()
        elif isinstance(In, list):
            self.In = deque(In)
        else:
            assert isinstance(In, deque)
            self.In = In
        self.Out = []  # could be deque too
        self.halt = False
        self.base = 0

    def run(self):
        assert not self.halt
        value = lambda k: self.P[self.i+k]
        mode = lambda k: (value(0)//10**(1+k)) % 10
        addr = lambda k: self.base+value(k) if mode(k)==2 else value(k)
        param = lambda k: value(k) if mode(k)==1 else self.P[addr(k)]
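        # Parameter modes (editor's note): 0 = position (the value is an address),
        # 1 = immediate (the value is the operand itself), 2 = relative (the address
        # is self.base + value); writes always resolve through addr(), never param().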
        while True:
            op = value(0) % 100
            if op==1:    # add
                self.P[addr(3)] = param(1) + param(2)
                self.i += 4
            elif op==2:  # mul
                self.P[addr(3)] = param(1) * param(2)
                self.i += 4
            elif op==3:  # input
                if self.In:
                    x = self.In.popleft()
                    #print('input %d' % x)
                    self.P[addr(1)] = x
                    self.i += 2
                else:
                    break
            elif op==4:  # output
                self.Out.append(param(1))
                print(self.Out[-1])
                self.i += 2
            elif op==5:  # jnz
                if param(1)!=0:
                    self.i = param(2)
                else:
                    self.i += 3
            elif op==6:  # jz
                if param(1)==0:
                    self.i = param(2)
                else:
                    self.i += 3
            elif op==7:  # lt
                self.P[addr(3)] = 1 if param(1)<param(2) else 0
                self.i += 4
            elif op==8:  # eq
                self.P[addr(3)] = 1 if param(1)==param(2) else 0
                self.i += 4
            elif op==9:  # incr base
                self.base += param(1)
                self.i += 2
            else:
                assert op==99
                self.halt = True
                break


# Part 1
IntcodeComputer(P, [1]).run()

# Part 2
IntcodeComputer(P, [2]).run()
|
[
"blg@gmx.com"
] |
blg@gmx.com
|
b15823ed5db74a6e8478495832ba2993301dad62
|
73e147e1d49656fafba5d4bf84df5ded2c4dca73
|
/team_9/cocos/test/test_tmx_autotest.py
|
bd9efdc51d8246157010fa676a115abee804a8b4
|
[
"LGPL-2.1-only",
"CC-BY-NC-4.0",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-proprietary-license",
"CC-BY-NC-SA-2.0",
"BSD-3-Clause"
] |
permissive
|
Donnyvdm/dojo19
|
2278747366c57bfc80eb9ee28ca617ec0a79bae3
|
3cf043a84e3ad6d3c4d59cd9c50b160e1ff03400
|
refs/heads/master
| 2020-07-26T12:22:15.882800
| 2019-09-15T20:34:36
| 2019-09-15T20:34:36
| 208,642,183
| 1
| 0
|
BSD-3-Clause
| 2019-09-15T18:57:53
| 2019-09-15T18:57:52
| null |
UTF-8
|
Python
| false
| false
| 1,628
|
py
|
from __future__ import division, print_function, unicode_literals

# This code is so you can run the samples without installing the package
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
#

testinfo = "t 0.1, s, t 1.1, s, t 2.1, s, t 3.1, s, t 4.1, s, t 5.1, s, t 6.1, s, q"
tags = "scrolling, ScrollingManager, TMX"

import pyglet
pyglet.resource.path.append(pyglet.resource.get_script_home())
pyglet.resource.reindex()

import cocos
from cocos import tiles, layer
from cocos.actions import CallFunc, ScaleTo, Delay
from cocos.director import director


class TestScene(cocos.scene.Scene):
    def __init__(self):
        super(TestScene, self).__init__()
        scroller = layer.ScrollingManager()
        scrollable = tiles.load('road-map.tmx')['map0']
        scroller.add(scrollable)
        self.add(scroller)
        template_action = (CallFunc(scroller.set_focus, 0, 0) + Delay(1) +
                           CallFunc(scroller.set_focus, 768, 0) + Delay(1) +
                           CallFunc(scroller.set_focus, 768, 768) + Delay(1) +
                           CallFunc(scroller.set_focus, 1500, 768) + Delay(1) +
                           ScaleTo(0.75, 1) + Delay(1) +
                           CallFunc(scrollable.set_debug, True) + Delay(1) +
                           CallFunc(director.window.set_size, 800, 600)
                           )
        scroller.do(template_action)


def main():
    director.init(width=600, height=300, autoscale=False, resizable=True)
    main_scene = TestScene()
    director.run(main_scene)

if __name__ == '__main__':
    main()
|
[
"a.campello@wellcome.ac.uk"
] |
a.campello@wellcome.ac.uk
|
f19cac3711c4c978703670b4f20e4a32000bc39d
|
573a66e4f4753cc0f145de8d60340b4dd6206607
|
/JS-CS-Detection-byExample/Dataset (ALERT 5 GB)/362764/shogun-2.0.0/shogun-2.0.0/examples/undocumented/python_modular/kernel_linear_byte_modular.py
|
b06ecaf888f155e9958d54c81554c292997b5de8
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
mkaouer/Code-Smells-Detection-in-JavaScript
|
3919ec0d445637a7f7c5f570c724082d42248e1b
|
7130351703e19347884f95ce6d6ab1fb4f5cfbff
|
refs/heads/master
| 2023-03-09T18:04:26.971934
| 2022-03-23T22:04:28
| 2022-03-23T22:04:28
| 73,915,037
| 8
| 3
| null | 2023-02-28T23:00:07
| 2016-11-16T11:47:44
| null |
UTF-8
|
Python
| false
| false
| 974
|
py
|
#!/usr/bin/env python
###########################################################################
# linear kernel on byte features
###########################################################################
from tools.load import LoadMatrix
from numpy import ubyte

lm = LoadMatrix()
traindat = ubyte(lm.load_numbers('../data/fm_train_byte.dat'))
testdat = ubyte(lm.load_numbers('../data/fm_test_byte.dat'))

parameter_list = [[traindat, testdat], [traindat, testdat]]

def kernel_linear_byte_modular(fm_train_byte=traindat, fm_test_byte=testdat):
    from shogun.Kernel import LinearKernel
    from shogun.Features import ByteFeatures

    feats_train = ByteFeatures(fm_train_byte)
    feats_test = ByteFeatures(fm_test_byte)

    kernel = LinearKernel(feats_train, feats_train)
    km_train = kernel.get_kernel_matrix()

    kernel.init(feats_train, feats_test)
    km_test = kernel.get_kernel_matrix()
    return kernel

if __name__ == '__main__':
    print('LinearByte')
    kernel_linear_byte_modular(*parameter_list[0])
|
[
"mmkaouer@umich.edu"
] |
mmkaouer@umich.edu
|
c172f69311e43071b174976da7a5783ee9d8d304
|
e7fcc1d64cd95805918ab1b5786bf81a92f973ef
|
/2016/day01/day01.py
|
7d9cd2be2ff4f76e40d4eb42d46a8370f43b0be6
|
[] |
no_license
|
trolen/advent-of-code
|
8145c1e36fea04e53d4b7a885efcc2da71fbfe57
|
0a4e022a6a810d86e044a15036a2f5778f0d38af
|
refs/heads/master
| 2023-02-26T13:11:58.341006
| 2023-02-20T23:22:27
| 2023-02-20T23:22:27
| 54,579,550
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,847
|
py
|
#! /usr/bin/env python3

DIRECTIONS = [(0, 1), (1, 0), (0, -1), (-1, 0)]  # N, E, S, W

class Position:
    def __init__(self):
        self.reset()

    def reset(self):
        self._x, self._y = (0, 0)
        self._direction = 0
        self._positions = []

    def get_distance(self):
        return abs(self._x) + abs(self._y)

    def apply_instruction(self, instruction, unique):
        turn = instruction[0].upper()
        if turn == 'R':
            self._direction += 1
        else:
            self._direction -= 1
        self._direction %= 4
        distance = int(instruction[1:])
        if not unique:
            self._x += DIRECTIONS[self._direction][0] * distance
            self._y += DIRECTIONS[self._direction][1] * distance
            return False
        if DIRECTIONS[self._direction][0] != 0:
            plist = [(self._x + i * DIRECTIONS[self._direction][0], self._y) for i in range(1, distance + 1)]
        if DIRECTIONS[self._direction][1] != 0:
            plist = [(self._x, self._y + i * DIRECTIONS[self._direction][1]) for i in range(1, distance + 1)]
        for p in plist:
            self._x, self._y = p
            if p in self._positions:
                return True
            self._positions.append(p)
        return False

    def apply_instructions(self, instructions, unique=False):
        for instruction in [x.strip() for x in instructions.split(',')]:
            if self.apply_instruction(instruction, unique):
                break

if __name__ == '__main__':
    instructions = ''
    with open('input.txt', 'rt') as file:
        instructions = file.read()
    p = Position()
    p.apply_instructions(instructions)
    print('Part One: {0}'.format(p.get_distance()))
    p.reset()
    p.apply_instructions(instructions, unique=True)
    print('Part Two: {0}'.format(p.get_distance()))
|
[
"timothy.rolen@gmail.com"
] |
timothy.rolen@gmail.com
|
be73b3b89032e500668e954d0d7cbf1e4e038763
|
ba0e07b34def26c37ee22b9dac1714867f001fa5
|
/azure-graphrbac/azure/graphrbac/models/password_credential_paged.py
|
f4d5ef494856e72642ca0a714abfe73012a38552
|
[
"MIT"
] |
permissive
|
CharaD7/azure-sdk-for-python
|
b11a08ac7d24a22a808a18203072b4c7bd264dfa
|
9fdf0aac0cec8a15a5bb2a0ea27dd331dbfa2f5c
|
refs/heads/master
| 2023-05-12T12:34:26.172873
| 2016-10-26T21:35:20
| 2016-10-26T21:35:20
| 72,448,760
| 1
| 0
|
MIT
| 2023-05-04T17:15:01
| 2016-10-31T15:14:09
|
Python
|
UTF-8
|
Python
| false
| false
| 914
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from msrest.paging import Paged


class PasswordCredentialPaged(Paged):
    """
    A paging container for iterating over a list of PasswordCredential object
    """

    _attribute_map = {
        'next_link': {'key': 'nextLink', 'type': 'str'},
        'current_page': {'key': 'value', 'type': '[PasswordCredential]'}
    }

    def __init__(self, *args, **kwargs):
        super(PasswordCredentialPaged, self).__init__(*args, **kwargs)
|
[
"autorestci@microsoft.com"
] |
autorestci@microsoft.com
|