blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f14308e3fd66781d5cbdd827da378221a727e027
|
bccbb5244947574c63992dc812b5ef44519ec161
|
/tests/test_command_runner.py
|
fcb536ca809e16f5103fd66573f5e2e7dd3eeea3
|
[] |
no_license
|
hal1932/pysvn
|
d4fab12dbb07838d947292146ca49e9a31119deb
|
a579744543765b574655377a2e1ada5be961e8d8
|
refs/heads/master
| 2020-03-14T06:35:46.835307
| 2018-05-01T16:17:10
| 2018-05-01T16:17:10
| 131,487,301
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 939
|
py
|
# coding: utf-8
from __future__ import print_function, unicode_literals
import unittest as ut
import xml.etree.ElementTree as et
from svn.command_runner import CommandRunner
class TestCommandRunner(ut.TestCase):
    """Smoke tests for CommandRunner against a local SVN working copy."""

    def setUp(self):
        # The runner is pointed at a developer-specific checkout; the test
        # below is skipped by default because this path only exists on one
        # machine.
        self.__runner = CommandRunner()
        self.__runner.current_directory = 'C:/Users/yuta/Desktop/subversion/trunk'

    def tearDown(self):
        pass

    @ut.skip
    def test_run(self):
        result, out, err = self.__runner.run('info', ['--xml'])
        self.assertEqual(result, 0)
        self.assertEqual(err, '')
        # `svn info --xml` emits an <info><entry>...</entry></info> document.
        info_root = et.fromstring(out)
        self.assertEqual(info_root.tag, 'info')
        entry = info_root.find('entry')
        self.assertEqual(entry.find('url').text, 'https://svn.apache.org/repos/asf/subversion/trunk')
        self.assertEqual(entry.find('wc-info/wcroot-abspath').text, 'C:/Users/yuta/Desktop/subversion/trunk')
|
[
"yu.arai.19@gmail.com"
] |
yu.arai.19@gmail.com
|
89bfad9927cab9ec96b3795aa8887564a390caf1
|
6234d711a6352c694bb69946ff673e4829ab6916
|
/feelings/groups/views/company.py
|
91f38b17afdc2b90f2c6a890c1149c612963a773
|
[
"MIT"
] |
permissive
|
treehouse/livestream-django-feelings
|
c816beb4557d52d5aafb5f11a40f5e6a0c0f6ba5
|
a246e456bb28f736cfb670486a1534e2d18efc78
|
refs/heads/master
| 2021-01-13T12:51:04.730505
| 2019-02-21T15:25:38
| 2019-02-21T15:25:38
| 78,469,589
| 32
| 24
| null | 2017-02-23T22:10:10
| 2017-01-09T21:14:02
|
Python
|
UTF-8
|
Python
| false
| false
| 3,596
|
py
|
from django.contrib.auth.mixins import LoginRequiredMixin
from django.shortcuts import get_object_or_404
from django.urls import reverse, reverse_lazy
from django.views import generic
from braces.views import SetHeadlineMixin
from .. import forms
from .. import models
class Create(LoginRequiredMixin, SetHeadlineMixin, generic.CreateView):
    """Create a company and enrol the creating user as its first member."""

    form_class = forms.CompanyForm
    headline = 'Create Company'
    success_url = reverse_lazy('users:dashboard')
    template_name = 'companies/form.html'

    def form_valid(self, form):
        # Stamp ownership before saving; the m2m membership can only be
        # written after the save gives the object a primary key.
        form.instance.created_by = self.request.user
        response = super().form_valid(form)
        self.object.members.add(self.request.user)
        return response
class Update(LoginRequiredMixin, SetHeadlineMixin, generic.UpdateView):
    """Edit a company; only companies the user belongs to are reachable."""

    form_class = forms.CompanyForm
    template_name = 'companies/form.html'

    def get_queryset(self):
        # Scope lookups to the requesting user's own companies.
        return self.request.user.companies.all()

    def get_headline(self):
        return f'Edit {self.object.name}'

    def get_success_url(self):
        url_kwargs = {'slug': self.object.slug}
        return reverse('groups:companies:detail', kwargs=url_kwargs)
class Detail(LoginRequiredMixin, generic.FormView):
    """Company detail page that doubles as an invite form."""

    form_class = forms.CompanyInviteForm
    template_name = 'companies/detail.html'

    def get_object(self):
        # Cache the company on the view; lookup is restricted to the
        # requesting user's companies, so outsiders raise DoesNotExist.
        self.object = self.request.user.companies.get(
            slug=self.kwargs.get('slug')
        )
        return self.object

    def get_queryset(self):
        return self.request.user.companies.all()

    def get_success_url(self):
        self.get_object()
        return reverse(
            'groups:companies:detail', kwargs={'slug': self.object.slug}
        )

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['object'] = self.get_object()
        return context

    def form_valid(self, form):
        response = super().form_valid(form)
        # Record the invite; `form.invitee` is resolved by the form itself.
        models.CompanyInvite.objects.create(
            from_user=self.request.user,
            to_user=form.invitee,
            company=self.get_object(),
        )
        return response
class Leave(LoginRequiredMixin, SetHeadlineMixin, generic.FormView):
    """Let a member leave a company they did not create.

    The company creator is excluded from the lookup, so a creator trying to
    leave their own company gets a 404 instead.
    """

    form_class = forms.LeaveForm
    template_name = 'companies/form.html'
    success_url = reverse_lazy('users:dashboard')

    def get_object(self):
        # Fix: Http404 was raised here but never imported anywhere in this
        # module, so a missing company produced a NameError instead of a
        # 404 response. A function-scope import keeps the fix local.
        from django.http import Http404
        try:
            self.object = self.request.user.companies.filter(
                slug=self.kwargs.get('slug'),
            ).exclude(created_by=self.request.user).get()
        except models.Company.DoesNotExist:
            raise Http404
        # Return the object for parity with Detail.get_object(); existing
        # callers only rely on the self.object side effect, so this is
        # backward-compatible.
        return self.object

    def get_headline(self):
        self.get_object()
        return f'Leave {self.object}?'

    def form_valid(self, form):
        self.get_object()
        self.object.members.remove(self.request.user)
        return super().form_valid(form)
class Invites(LoginRequiredMixin, generic.ListView):
    """List the requesting user's pending (status == 0) company invites."""

    model = models.CompanyInvite
    template_name = 'companies/invites.html'

    def get_queryset(self):
        pending = self.request.user.companyinvite_received.filter(status=0)
        return pending
class InviteResponse(LoginRequiredMixin, generic.RedirectView):
    """Accept or decline a pending invite, then bounce back to the list."""

    url = reverse_lazy('groups:companies:invites')

    def get(self, request, *args, **kwargs):
        # Only the invitee may respond, and only while still pending.
        invite = get_object_or_404(
            models.CompanyInvite,
            to_user=request.user,
            uuid=kwargs.get('code'),
            status=0,
        )
        # Status values as used here: 0 = pending, 1 = accepted, 2 = declined.
        accepted = kwargs.get('response') == 'accept'
        invite.status = 1 if accepted else 2
        invite.save()
        return super().get(request, *args, **kwargs)
|
[
"kenneth@gigantuan.net"
] |
kenneth@gigantuan.net
|
e95fae2b71d041eff7090fe472700f65339ffa56
|
3b7474148c07df7f4755106a3d0ada9b2de5efdc
|
/training/c31_pattern_design/e04_callback.py
|
b608b4cea7865efac231c64f8fa6e7dd59efcde1
|
[] |
no_license
|
juancsosap/pythontraining
|
7f67466846138f32d55361d64de81e74a946b484
|
1441d6fc9544042bc404d5c7efffd119fce33aa7
|
refs/heads/master
| 2021-08-26T05:37:15.851025
| 2021-08-11T22:35:23
| 2021-08-11T22:35:23
| 129,974,006
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 922
|
py
|
class Provider:
    """Reads a file and reports word/character statistics via a callback."""

    def get(self, path, cbf):
        # Analyze the file contents and hand the result to `cbf` rather
        # than returning it (callback-pattern demo).
        with open(path) as file:
            cbf(self.analyze(file.read()))

    def word_count(self, text):
        # Naive tokenization: lowercase, split on single spaces.
        tokens = text.lower().split(' ')
        return (len(set(tokens)), len(tokens))

    def char_count(self, text):
        # Distinct characters (including spaces) and total length.
        return (len(set(text)), len(text))

    def analyze(self, text):
        return (self.word_count(text), self.char_count(text))
class Requester:
    """Asks a Provider for file statistics and prints whatever comes back."""

    def make(self, path):
        provider = Provider()
        provider.get(path, self.done)

    def done(self, result):
        # Callback invoked by Provider.get with the analysis tuple.
        print(result)
if __name__ == "__main__":
    # Locate data.txt next to this script. os.path handles OS-native path
    # separators; the previous manual slice on '/' broke on Windows-style
    # '\\' paths.
    import os
    basedir = os.path.dirname(os.path.abspath(__file__))
    r = Requester()
    r.make(os.path.join(basedir, 'data.txt'))
|
[
"juan.c.sosa.p@gmail.com"
] |
juan.c.sosa.p@gmail.com
|
0457440b4e3f996aaa557313efda6c7f2d6e1a76
|
069ce71ee1ca85988ebf5bc179bcafbbd3d04f7f
|
/golib/views.py
|
70f41659f6d03852008364558b13e70346ea68e7
|
[] |
no_license
|
9gix/golib
|
21a1376b553a83b743c68f418f82a488c9964c1a
|
fbcfe0a9c5e0523c7b2e85f46cb0d18a4ac85db5
|
refs/heads/master
| 2021-03-12T19:57:59.971214
| 2012-11-04T17:22:59
| 2012-11-04T17:22:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 251
|
py
|
from django.shortcuts import redirect, render
from django.core.urlresolvers import reverse
def index(request):
    """Landing page: send authenticated users straight to the book list."""
    # is_authenticated is still a method in this (pre-2.0) Django version.
    if not request.user.is_authenticated():
        # Anonymous visitors see the plain index page.
        return render(request, 'index.html', {})
    return redirect(reverse('catalog:book_list'))
|
[
"yeo.eugene.oey@gmail.com"
] |
yeo.eugene.oey@gmail.com
|
1ee42e0fa0fd0e830473f4079c9058dd6869c849
|
7ab85ba79a6553659f0b324ecebb4bb39f8a8a1c
|
/shallow copy.py
|
827e08006334256c38c0ceb955c5a8fd2ff5b596
|
[] |
no_license
|
subinmun1997/my_python
|
b75db77a035fa8f531d9872bf33a1818a002206a
|
634acc948e7758f5d26084536c506e7da45cd53c
|
refs/heads/master
| 2022-12-28T21:11:40.173378
| 2020-10-16T08:02:18
| 2020-10-16T08:02:18
| 292,875,851
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 128
|
py
|
# Demonstrate that list(r1) makes a *shallow* copy: the outer list is a new
# object (False), while all three elements are shared with the original
# (True, True, True).
r1 = ['John', ('man', 'USA'), [175, 23]]
r2 = list(r1)
print(r1 is r2)
for original, copied in zip(r1, r2):
    print(original is copied)
|
[
"qzxy812@gmail.com"
] |
qzxy812@gmail.com
|
7f64617c1f9ffa09fcadfbe29ce329539eae983a
|
0f074e5adef64fa16e88dc2499e76f08b4c33c02
|
/matplotlib/ipython and pylab/03 改变线条颜色和粗细.py
|
5536a6d7b75055db76aeaa13f86025196647c11b
|
[] |
no_license
|
guozhenjiang/Python
|
0ac39adaf72df0bfee51795fabcfd959a69b1862
|
44b07bd767f3f2a947331111ab920200ac2412c6
|
refs/heads/master
| 2021-05-19T16:54:40.725132
| 2020-11-19T16:26:26
| 2020-11-19T16:27:11
| 252,035,380
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,154
|
py
|
# Import everything from matplotlib's pylab (numpy is available under np)
from pylab import *

# Create a figure at 80 dpi
# (original size was 8 x 6: figure(figsize=(8,6), dpi=80))
figure(figsize=(10,6), dpi=80)

# Create a 1 x 1 subplot grid and draw into its first (only) slot
subplot(1,1,1)

X = np.linspace(-np.pi, np.pi, 256,endpoint=True)
C,S = np.cos(X), np.sin(X)

# Plot the cosine curve with a blue, solid line
# (original width was 1: plot(X, C, color="blue", linewidth=1.0, linestyle="-"))
plot(X, C, color="blue", linewidth=2.5, linestyle="-")

# Plot the sine curve with a red, solid line (originally green, width 1)
# plot(X, S, color="green", linewidth=1.0, linestyle="-")
plot(X, S, color="red", linewidth=2.5, linestyle="-")

# Set the x-axis limits
xlim(-4.0,4.0)
# Set the x-axis tick marks
xticks(np.linspace(-4,4,9,endpoint=True))
# Set the y-axis limits
ylim(-1.0,1.0)
# Set the y-axis tick marks
yticks(np.linspace(-1,1,5,endpoint=True))

# Save the figure at 72 dpi
# savefig("exercice_2.png",dpi=72)

# Display on screen
show()
|
[
"guo_zhen_jiang@163.com"
] |
guo_zhen_jiang@163.com
|
6ff8cf46f9afbcf4558f4fc7c0f57921fcc8d9d4
|
68577bb693fe01cddce56da36a43702c6bdedc07
|
/Programming/python/threads/events.001.py
|
a7ef176267b372f3242e604881eb1b4acfb8801b
|
[] |
no_license
|
ceccopierangiolieugenio/scripts
|
480ab9b94c135d47c4d7c916e35df537cfabbed3
|
fe0eca7d76733e204c1c702e03b9ccc11ee421fd
|
refs/heads/master
| 2023-03-31T16:57:37.064553
| 2023-03-26T13:21:36
| 2023-03-26T13:21:36
| 99,695,368
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,212
|
py
|
# Example from:
# https://www.bogotobogo.com/python/Multithread/python_multithreading_Event_Objects_between_Threads.php
import threading
import time
import logging
logging.basicConfig(level=logging.DEBUG,
format='(%(threadName)-9s) %(message)s',)
def wait_for_event(e):
    """Block indefinitely until event `e` is set, then log the outcome."""
    logging.debug('wait_for_event starting')
    # wait() with no timeout only returns once the event is set.
    is_set = e.wait()
    logging.debug('event set: %s', is_set)
def wait_for_event_timeout(e, t):
    """Poll event `e` every `t` seconds until it is set.

    Each wait() call times out after `t` seconds so the thread can do
    "other things" between checks instead of blocking indefinitely.
    """
    # Fix: is_set() replaces the camelCase isSet() alias, which was
    # deprecated and removed in Python 3.12.
    while not e.is_set():
        logging.debug('wait_for_event_timeout starting')
        event_is_set = e.wait(t)
        logging.debug('event set: %s', event_is_set)
        if event_is_set:
            logging.debug('processing event')
        else:
            logging.debug('doing other things')
if __name__ == '__main__':
    # One event shared by both waiter threads; set() releases them both.
    e = threading.Event()
    # Blocks inside e.wait() with no timeout until the event is set.
    t1 = threading.Thread(name='blocking',
                          target=wait_for_event,
                          args=(e,))
    t1.start()
    # Polls the event every 2 seconds, doing other work between checks.
    t2 = threading.Thread(name='non-blocking',
                          target=wait_for_event_timeout,
                          args=(e, 2))
    t2.start()
    logging.debug('Waiting before calling Event.set()')
    time.sleep(3)
    e.set()
    logging.debug('Event is set')
|
[
"ceccopierangiolieugenio@googlemail.com"
] |
ceccopierangiolieugenio@googlemail.com
|
b4c0472ccadd94cd2d5b8635aa3af2ec2da7fb48
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/476/usersdata/321/110683/submittedfiles/Av2_Parte3.py
|
871c02d88b4454cc7f87fe1b0a0f024a5aa1caa1
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 339
|
py
|
# -*- coding: utf-8 -*-
# Student exercise: read two integer lists `a` and `b` from the user, then
# report a count related to equal elements (prompts are in Portuguese).
qa= int(input('Quantidade de elementos de a: '))
qb= int(input('Quantidade de elementos de b: '))
a= []
b= []
for i in range(qa):
    a.append(int(input('Digite o valor%d de a: ' % i)))
for i in range(qb):
    b.append(int(input('Digite o valor%d de b: ' % i)))
soma= 0
# NOTE(review): this loop looks buggy. `i` is the leftover index from the
# last for-loop (qb-1), so only the final elements are compared, and since
# nothing in the body changes a[i] or b[i], the loop never terminates when
# they are equal. The intent was probably to iterate over positions and
# count matches — confirm against the exercise statement before fixing.
while a[i] == b[i]:
    soma+=1
print(soma)
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
9d617e8e56b480d3f2c9796faf890e935037a64c
|
90419da201cd4948a27d3612f0b482c68026c96f
|
/sdk/python/pulumi_azure_nextgen/network/v20180701/get_network_watcher.py
|
a1c1900c872cb4134a105e35685ba9cbea6c876c
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
test-wiz-sec/pulumi-azure-nextgen
|
cd4bee5d70cb0d332c04f16bb54e17d016d2adaf
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
refs/heads/master
| 2023-06-08T02:35:52.639773
| 2020-11-06T22:39:06
| 2020-11-06T22:39:06
| 312,993,761
| 0
| 0
|
Apache-2.0
| 2023-06-02T06:47:28
| 2020-11-15T09:04:00
| null |
UTF-8
|
Python
| false
| false
| 4,324
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'GetNetworkWatcherResult',
'AwaitableGetNetworkWatcherResult',
'get_network_watcher',
]
@pulumi.output_type
class GetNetworkWatcherResult:
    """
    Network watcher in a resource group.
    """
    # Auto-generated output type: __init__ validates each field's type and
    # stores it via pulumi.set; the properties below read the same fields
    # back via pulumi.get.
    def __init__(__self__, etag=None, location=None, name=None, provisioning_state=None, tags=None, type=None):
        if etag and not isinstance(etag, str):
            raise TypeError("Expected argument 'etag' to be a str")
        pulumi.set(__self__, "etag", etag)
        if location and not isinstance(location, str):
            raise TypeError("Expected argument 'location' to be a str")
        pulumi.set(__self__, "location", location)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if provisioning_state and not isinstance(provisioning_state, str):
            raise TypeError("Expected argument 'provisioning_state' to be a str")
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        if tags and not isinstance(tags, dict):
            raise TypeError("Expected argument 'tags' to be a dict")
        pulumi.set(__self__, "tags", tags)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter
    def etag(self) -> Optional[str]:
        """
        A unique read-only string that changes whenever the resource is updated.
        """
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter
    def location(self) -> Optional[str]:
        """
        Resource location.
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Resource name.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        The provisioning state of the resource.
        """
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Resource type.
        """
        return pulumi.get(self, "type")
class AwaitableGetNetworkWatcherResult(GetNetworkWatcherResult):
    # Makes the result usable with `await`: the dead `if False: yield` turns
    # __await__ into a generator that immediately returns a plain copy of
    # the result — no real asynchronous work happens here.
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetNetworkWatcherResult(
            etag=self.etag,
            location=self.location,
            name=self.name,
            provisioning_state=self.provisioning_state,
            tags=self.tags,
            type=self.type)
def get_network_watcher(network_watcher_name: Optional[str] = None,
                        resource_group_name: Optional[str] = None,
                        opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetNetworkWatcherResult:
    """
    Use this data source to access information about an existing resource.

    :param str network_watcher_name: The name of the network watcher.
    :param str resource_group_name: The name of the resource group.
    """
    __args__ = dict()
    __args__['networkWatcherName'] = network_watcher_name
    __args__['resourceGroupName'] = resource_group_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Default to the SDK's own version when the caller did not pin one.
        opts.version = _utilities.get_version()
    # Synchronously invoke the provider function, then re-wrap the result in
    # the awaitable subclass so callers may also `await` it.
    __ret__ = pulumi.runtime.invoke('azure-nextgen:network/v20180701:getNetworkWatcher', __args__, opts=opts, typ=GetNetworkWatcherResult).value
    return AwaitableGetNetworkWatcherResult(
        etag=__ret__.etag,
        location=__ret__.location,
        name=__ret__.name,
        provisioning_state=__ret__.provisioning_state,
        tags=__ret__.tags,
        type=__ret__.type)
|
[
"public@paulstack.co.uk"
] |
public@paulstack.co.uk
|
306efc4d66d57b80b9a10c625c04f08557d7f834
|
8adcfe7485ea04bc1f83cac7d92bb51b97582f64
|
/ALGORITHM/210531/프로그래머스 타겟 넘버.py
|
c31258c8edf6e3e7fa3b78f33fb5b9e9aed6108b
|
[] |
no_license
|
NoJeong/TIL
|
fdceb6efc5d2d56f8dd2e27271ea0faacfe336ae
|
c79c34b84f025aa40cd3a8e28fd0898bcb40b608
|
refs/heads/master
| 2023-06-24T22:18:50.665917
| 2021-07-23T06:21:21
| 2021-07-23T06:21:21
| 280,307,738
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 543
|
py
|
import sys
# Redirect stdin so the interactive input() calls below read from a local
# test-data file instead of the keyboard.
sys.stdin = open('프로그래머스 타겟 넘버.txt')
def solution(numbers, target):
    """Count the sign assignments of `numbers` whose sum equals `target`.

    Each number may be added or subtracted, so all 2**len(numbers)
    assignments are explored by depth-first search.

    :param numbers: list of ints (left unmodified)
    :param target: desired total
    :return: number of assignments summing to `target`
    """
    answer = 0

    def dfs(index, running):
        # `running` carries the partial sum, so no per-leaf sum() pass is
        # needed and — unlike the original in-place `*= -1` approach — the
        # caller's list is never mutated.
        nonlocal answer
        if index == len(numbers):
            if running == target:
                answer += 1
            return
        dfs(index + 1, running + numbers[index])
        dfs(index + 1, running - numbers[index])

    dfs(0, 0)
    return answer
# Read the numbers and the target from (redirected) stdin and run the
# solver. NOTE(review): the return value is discarded — presumably the
# author inspected results elsewhere; add a print() if output is wanted.
a = list(map(int,input().split()))
b = int(input())
solution(a,b)
|
[
"op032@naver.com"
] |
op032@naver.com
|
8527a82984c2cd8a19d450dc69773a45da4c0b51
|
79bc9a420df5c706b2ae06f4b75bf2bd2ba9646e
|
/emission/net/ext_service/push/query/trip_metrics.py
|
bad51c6afb3acaf63183dd918f4250efd1da085d
|
[
"BSD-3-Clause"
] |
permissive
|
Andrew-Tan/e-mission-server
|
7022786a13b4be87be62cfc2cc6d82543d063e5d
|
91d59bee86e63d803e401f10f4b6a2502effedda
|
refs/heads/master
| 2021-01-16T18:25:17.860723
| 2017-11-21T19:24:40
| 2017-11-21T19:24:40
| 100,073,534
| 0
| 0
|
BSD-3-Clause
| 2018-05-05T18:26:36
| 2017-08-11T22:13:44
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 4,279
|
py
|
# Input spec sample at
# emission/net/ext_service/push/sample.specs/trip_metrics.query.sample sample
# finds all users who have at least one day in Feb 2017 with no more than 10
# walk sections and a walk distance of at least 1km during the evening commute
# hours
# Input: query spec
# Output: list of uuids
#
import logging
import numpy as np
import emission.core.wrapper.motionactivity as ecwm
import emission.net.api.metrics as enam
import emission.storage.decorations.local_date_queries as esdl
import emission.storage.decorations.location_queries as esdlq
import emission.storage.decorations.user_queries as esdu
import emission.storage.timeseries.geoquery as estg
import emission.storage.timeseries.timequery as estt
import emission.storage.timeseries.tcquery as esttc
import emission.storage.decorations.analysis_timeseries_queries as esda
def get_metric_list(checks):
    """Extract the `metric` field from each check spec in `checks`."""
    metric_list = [check["metric"] for check in checks]
    logging.debug("Returning %s" % metric_list)
    return metric_list
def compare_value(threshold, summed_value):
    """Apply a Mongo-style threshold ({'$gt': n} etc.) to a summed value.

    Returns False when no supported operator is present.
    """
    # Operators in the same precedence order as the original if-chain:
    # the first key found in `threshold` wins.
    operators = (
        ('$gt', lambda limit: summed_value > limit),
        ('$gte', lambda limit: summed_value >= limit),
        ('$lt', lambda limit: summed_value < limit),
        ('$lte', lambda limit: summed_value <= limit),
    )
    for key, check in operators:
        if key in threshold:
            return check(threshold[key])
    return False
def matches_check(check, msts):
    """Check one ModeStatTimeSummary entry against a single check spec.

    The metric is already guaranteed to match (that is how the metrics API
    pairs results to checks), so only the listed modes need to be summed
    and compared with the threshold.
    """
    # Example entry (count metric):
    # ModeStatTimeSummary({'fmt_time': '2017-01-20T00:00:00+00:00',
    #                      'nUsers': 1, 'UNKNOWN': 1, 'ts': 1484870400,
    #                      'AIR_OR_HSR': 2, 'local_dt': LocalDate(...)})
    total = sum(msts.get(mode, 0) for mode in check['modes'])
    return compare_value(check["threshold"], total)
def is_matched_user(user_id, spec):
    """Return True iff `user_id`'s metrics satisfy every check in `spec`.

    A check passes when it matches in at least one grouped time interval
    of the queried range.
    """
    # Fix: `time.time()` is used in the fallback branch below, but `time`
    # was never imported in this module, so that branch raised NameError.
    import time

    metric_list = get_metric_list(spec["checks"])
    time_type = spec['time_type']
    if 'from_local_date' in spec and 'to_local_date' in spec:
        freq_metrics = enam.summarize_by_local_date(user_id,
            spec["from_local_date"], spec["to_local_date"],
            spec["freq"], metric_list, include_aggregate=False)
    elif 'start_time' in spec and 'end_time' in spec:
        freq_metrics = enam.summarize_by_timestamp(user_id,
            spec["start_time"], spec["end_time"],
            spec["freq"], metric_list, include_aggregate=False)
    else:
        # If no start and end times are specified, we assume that this is a
        # timestamp query because we can come up with a reasonable start and
        # end time for timestamps but not for local_dates, which are basically
        # a filter. So if we run this on the first of a month, for example, we
        # won't find anything, which seems bogus and not what people would
        # expect.
        assert time_type == "timestamp", "time_type = %s, expected timestamp" % time_type
        freq_metrics = enam.summarize_by_timestamp(user_id,
            0, time.time(), spec["freq"], metric_list, include_aggregate=False)
    assert(freq_metrics is not None)
    assert('user_metrics' in freq_metrics)
    curr_user_metrics = freq_metrics['user_metrics']
    checks = spec['checks']
    check_results = np.zeros(len(checks))
    for i, check in enumerate(checks):
        curr_metric_result = curr_user_metrics[i]
        # curr_metric_result is a list of ModeStatTimeSummary objects, one for
        # each grouped time interval in the range
        # e.g. for daily: 2017-01-19, 2017-01-20, 2017-01-21, ...
        for msts in curr_metric_result:
            # We defined our check as being true if it is true for _any_
            # grouped time period in the range. So as long as we find a match
            # for that check, we are good!
            if matches_check(check, msts):
                check_results[i] = True
    logging.info("For user_id %s, check result array = %s, all? %s" % (user_id, check_results, np.all(check_results)))
    return np.all(check_results)
def query(spec):
    """Return the uuids of all users whose metrics match `spec`."""
    matched_uuid_list = [
        uuid for uuid in esdu.get_all_uuids() if is_matched_user(uuid, spec)
    ]
    logging.info("matched matched_uuid_list of length = %s = %s" %
        (len(matched_uuid_list), matched_uuid_list))
    return matched_uuid_list
|
[
"shankari@eecs.berkeley.edu"
] |
shankari@eecs.berkeley.edu
|
df7859b3968e2e07fe6d573c3c0175bb0d06485b
|
72dbf8366cf17b6a81ab37e72af667726e3f2661
|
/store/migrations/0016_auto_20201104_1719.py
|
31c0e9e8bf2783c9b201a665dd614b048aa7b44d
|
[] |
no_license
|
Rayhun/Django_E-Commerce_website
|
3aef732ffa0a41509be95ced3c33b845233903a7
|
1a5f7e31f942914256e49ba7da1f7367a799f097
|
refs/heads/main
| 2023-05-23T18:18:27.875328
| 2021-04-30T19:29:06
| 2021-04-30T19:29:06
| 306,414,778
| 3
| 1
| null | 2021-04-30T19:28:58
| 2020-10-22T17:41:57
|
CSS
|
UTF-8
|
Python
| false
| false
| 505
|
py
|
# Generated by Django 3.1.1 on 2020-11-04 11:19
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: redefines Customer.gender as a nullable,
    # blank-able choice field (MALE/FEMALE/OTHERS, default MALE).

    dependencies = [
        ('store', '0015_remove_product_product_name'),
    ]

    operations = [
        migrations.AlterField(
            model_name='customer',
            name='gender',
            field=models.CharField(blank=True, choices=[('MALE', 'Male'), ('FEMALE', 'Female'), ('OTHERS', 'Others')], default='MALE', max_length=6, null=True),
        ),
    ]
|
[
"rayhunkhan27@gmail.com"
] |
rayhunkhan27@gmail.com
|
936baa9a603ebaf11d6c5adc98fecc3cf562f6cc
|
952abfc855d0fca89200f1e428aac9a87f1d3295
|
/tf114/tf09_mv2.py
|
6e8e7a3f054efe4bf5163d9aaf09c665b95a2f75
|
[] |
no_license
|
TaeYeon-kim-ai/STUDY_1.py
|
7570b4510bf8d9791447efe3a97a9668a1cabe06
|
e14392c706b7e51e40f1ac68555e26558e25b38f
|
refs/heads/master
| 2023-06-03T09:04:13.498591
| 2021-06-21T17:10:47
| 2021-06-21T17:10:47
| 329,834,933
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,710
|
py
|
# Homework
# dimension | form | example
# scalar: 1
# vector: [1,2,3]
# matrix: [[1,2],[2,3]]
# tensor: [[[1,2],[1,2,3]]]

# x * W: the two shapes must be compatible
# x = 5, 3
# W = 3, 1 (a 2, 3 or 4 column count would also multiply; the result just
#           has to have a shape that can be added to b)
# (5, 3) x (3, 1) = (5, 1)  # matmul works when the first operand's columns
#                           # match the second operand's rows
# (3, 2) x (2, 3) = (3, 3)

# [Exercise] build it yourself
# verbose output is step, cost and hypothesis // epochs = 2001
# (comment said every 10 steps; the loop below actually prints every 20)
import tensorflow as tf
tf.set_random_seed(66)

x_data = [[73, 51, 65],
          [92, 98, 40],
          [89, 31, 33],
          [99, 33, 100],
          [17, 66, 79]]  # (5, 3) matrix
y_data = [[152],
          [185],
          [180],
          [205],
          [142]]  # (5, 1) matrix

x = tf.placeholder(tf.float32, shape = [None, 3])
y = tf.placeholder(tf.float32, shape = [None, 1])

# Rows follow the batch; the column count is chosen so that x*w has the
# same shape as y.
w = tf.Variable(tf.random_normal([3, 1]), name = 'weight')
b = tf.Variable(tf.random_normal([1]), name = 'bias')  # a single bias value

#hypothesis = x * w + b
hypothesis = tf.matmul(x, w) + b  # matmul = matrix multiplication

cost = tf.reduce_mean(tf.square(hypothesis - y))
optimizer = tf.train.GradientDescentOptimizer(learning_rate = 71e-6) # cost : 294.25824
#optimizer = tf.train.AdamOptimizer(learning_rate = 0.1) #cost : 176.789 [experiment]
train = optimizer.minimize(cost)

sess = tf.compat.v1.Session()
sess.run(tf.global_variables_initializer())

for step in range(2001) :
    cost_val, hy_val, _ = sess.run([cost, hypothesis, train],
                                   feed_dict = {x : x_data, y : y_data})
    if step % 20 == 0 :
        print("step : ", step, "\n", "cost : ", cost_val, "\n", hy_val)
sess.close()

'''
import matplotlib.pyplot as plt
w_history = []
cost_history = []
with tf.compat.v1.Session() as sess :
    #sess.run(tf.compat.v1.global_variables_initializer()) #안해줘도 돌아감
    #텐서플로의 실질적인 w에 tf.valiable설정은 안해줘서 파이썬의 veriable로 취급하는 듯
    for i in range(-30, 50) : #-30 ~ 50
        curr_w = i * 0.1 #i*0.1단위로 증가 -3,
        curr_cost = sess.run(cost, feed_dict={w : curr_w})
        w_history.append(curr_w)
        cost_history.append(curr_cost)
print("=========================================")
print("W : ", w_history)
print("=========================================")
print("cost : ", cost_history)
print("=========================================")
plt.plot(w_history, cost_history)
plt.show()
'''
|
[
"noreply@github.com"
] |
TaeYeon-kim-ai.noreply@github.com
|
75af37c7035fa42e49638ffc2f8b9d925f49ea7e
|
ee00ebe5e71c36b05fbff993b19e9723b963313f
|
/35_inserted_position.py
|
f5142c9a3ab2c24b65c81f2721f1dd7ad04a16e3
|
[] |
no_license
|
26XINXIN/leetcode
|
f365560d93604a28abf399707b333f3c11f924ec
|
78ed11f34fd03e9a188c9c6cb352e883016d05d9
|
refs/heads/master
| 2021-06-28T16:31:45.103879
| 2020-09-19T20:33:55
| 2020-09-19T20:33:55
| 144,975,903
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 661
|
py
|
class Solution:
    def searchInsert(self, nums, target):
        """
        :type nums: List[int]
        :type target: int
        :rtype: int

        Return the index of `target` in sorted `nums`, or the index at
        which it would be inserted to keep the list sorted.
        """
        # Fix: the recursive search below indexes nums[mid] and raised
        # IndexError on an empty list; the insertion point is trivially 0.
        if not nums:
            return 0
        ans = self.binary_search(nums, 0, len(nums)-1, target)
        # binary_search can only return an in-range index; when the target
        # exceeds every element it belongs one past the end.
        if ans == len(nums)-1 and target > nums[-1]:
            ans += 1
        return ans

    def binary_search(self, nums, l, r, target):
        # Invariant: the answer lies within nums[l..r]. The `else` branch
        # keeps mid (not mid - 1) because nums[mid] may still be the
        # insertion point when nums[mid] > target.
        if l == r:
            return l
        mid = (l + r) // 2
        if nums[mid] == target:
            return mid
        elif nums[mid] < target:
            return self.binary_search(nums, mid + 1, r, target)
        else:
            return self.binary_search(nums, l, mid, target)
|
[
"yangxin.nlp@bytedance.com"
] |
yangxin.nlp@bytedance.com
|
0961a55413c0854c2148a4c91bfb17bbb9891d86
|
3122ac39f1ce0a882b48293a77195476299c2a3b
|
/clients/python-flask/generated/openapi_server/models/pipeline_run_node.py
|
f205f70d61ad88141b09f4ad9bdc2cfd5a55b15f
|
[
"MIT"
] |
permissive
|
miao1007/swaggy-jenkins
|
4e6fe28470eda2428cbc584dcd365a21caa606ef
|
af79438c120dd47702b50d51c42548b4db7fd109
|
refs/heads/master
| 2020-08-30T16:50:27.474383
| 2019-04-10T13:47:17
| 2019-04-10T13:47:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,765
|
py
|
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from openapi_server.models.base_model_ import Model
from openapi_server.models.pipeline_run_nodeedges import PipelineRunNodeedges # noqa: F401,E501
from openapi_server import util
class PipelineRunNode(Model):
    """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).

    Do not edit the class manually.
    """

    def __init__(self, _class: str=None, display_name: str=None, duration_in_millis: int=None, edges: List[PipelineRunNodeedges]=None, id: str=None, result: str=None, start_time: str=None, state: str=None):  # noqa: E501
        """PipelineRunNode - a model defined in OpenAPI

        :param _class: The _class of this PipelineRunNode.  # noqa: E501
        :type _class: str
        :param display_name: The display_name of this PipelineRunNode.  # noqa: E501
        :type display_name: str
        :param duration_in_millis: The duration_in_millis of this PipelineRunNode.  # noqa: E501
        :type duration_in_millis: int
        :param edges: The edges of this PipelineRunNode.  # noqa: E501
        :type edges: List[PipelineRunNodeedges]
        :param id: The id of this PipelineRunNode.  # noqa: E501
        :type id: str
        :param result: The result of this PipelineRunNode.  # noqa: E501
        :type result: str
        :param start_time: The start_time of this PipelineRunNode.  # noqa: E501
        :type start_time: str
        :param state: The state of this PipelineRunNode.  # noqa: E501
        :type state: str
        """
        # Maps used by the base Model for (de)serialization: python-side
        # attribute types and their JSON field names.
        self.openapi_types = {
            '_class': str,
            'display_name': str,
            'duration_in_millis': int,
            'edges': List[PipelineRunNodeedges],
            'id': str,
            'result': str,
            'start_time': str,
            'state': str
        }
        self.attribute_map = {
            '_class': '_class',
            'display_name': 'displayName',
            'duration_in_millis': 'durationInMillis',
            'edges': 'edges',
            'id': 'id',
            'result': 'result',
            'start_time': 'startTime',
            'state': 'state'
        }
        # `self.__class` name-mangles to `_PipelineRunNode__class`; the
        # `_class` property below reads the same mangled attribute.
        self.__class = _class
        self._display_name = display_name
        self._duration_in_millis = duration_in_millis
        self._edges = edges
        self._id = id
        self._result = result
        self._start_time = start_time
        self._state = state

    @classmethod
    def from_dict(cls, dikt) -> 'PipelineRunNode':
        """Returns the dict as a model

        :param dikt: A dict.
        :type: dict
        :return: The PipelineRunNode of this PipelineRunNode.  # noqa: E501
        :rtype: PipelineRunNode
        """
        return util.deserialize_model(dikt, cls)

    @property
    def _class(self) -> str:
        """Gets the _class of this PipelineRunNode.

        :return: The _class of this PipelineRunNode.
        :rtype: str
        """
        return self.__class

    @_class.setter
    def _class(self, _class: str):
        """Sets the _class of this PipelineRunNode.

        :param _class: The _class of this PipelineRunNode.
        :type _class: str
        """
        self.__class = _class

    @property
    def display_name(self) -> str:
        """Gets the display_name of this PipelineRunNode.

        :return: The display_name of this PipelineRunNode.
        :rtype: str
        """
        return self._display_name

    @display_name.setter
    def display_name(self, display_name: str):
        """Sets the display_name of this PipelineRunNode.

        :param display_name: The display_name of this PipelineRunNode.
        :type display_name: str
        """
        self._display_name = display_name

    @property
    def duration_in_millis(self) -> int:
        """Gets the duration_in_millis of this PipelineRunNode.

        :return: The duration_in_millis of this PipelineRunNode.
        :rtype: int
        """
        return self._duration_in_millis

    @duration_in_millis.setter
    def duration_in_millis(self, duration_in_millis: int):
        """Sets the duration_in_millis of this PipelineRunNode.

        :param duration_in_millis: The duration_in_millis of this PipelineRunNode.
        :type duration_in_millis: int
        """
        self._duration_in_millis = duration_in_millis

    @property
    def edges(self) -> List[PipelineRunNodeedges]:
        """Gets the edges of this PipelineRunNode.

        :return: The edges of this PipelineRunNode.
        :rtype: List[PipelineRunNodeedges]
        """
        return self._edges

    @edges.setter
    def edges(self, edges: List[PipelineRunNodeedges]):
        """Sets the edges of this PipelineRunNode.

        :param edges: The edges of this PipelineRunNode.
        :type edges: List[PipelineRunNodeedges]
        """
        self._edges = edges

    @property
    def id(self) -> str:
        """Gets the id of this PipelineRunNode.

        :return: The id of this PipelineRunNode.
        :rtype: str
        """
        return self._id

    @id.setter
    def id(self, id: str):
        """Sets the id of this PipelineRunNode.

        :param id: The id of this PipelineRunNode.
        :type id: str
        """
        self._id = id

    @property
    def result(self) -> str:
        """Gets the result of this PipelineRunNode.

        :return: The result of this PipelineRunNode.
        :rtype: str
        """
        return self._result

    @result.setter
    def result(self, result: str):
        """Sets the result of this PipelineRunNode.

        :param result: The result of this PipelineRunNode.
        :type result: str
        """
        self._result = result

    @property
    def start_time(self) -> str:
        """Gets the start_time of this PipelineRunNode.

        :return: The start_time of this PipelineRunNode.
        :rtype: str
        """
        return self._start_time

    @start_time.setter
    def start_time(self, start_time: str):
        """Sets the start_time of this PipelineRunNode.

        :param start_time: The start_time of this PipelineRunNode.
        :type start_time: str
        """
        self._start_time = start_time

    @property
    def state(self) -> str:
        """Gets the state of this PipelineRunNode.

        :return: The state of this PipelineRunNode.
        :rtype: str
        """
        return self._state

    @state.setter
    def state(self, state: str):
        """Sets the state of this PipelineRunNode.

        :param state: The state of this PipelineRunNode.
        :type state: str
        """
        self._state = state
|
[
"cliffano@gmail.com"
] |
cliffano@gmail.com
|
4e4336b975c5ee46eb7645c1b114c235d4303c50
|
989f011a784015e1a33c41362ab4ec06e92b3339
|
/examples/07_functions/func_args_unpacking.py
|
1e837181f73877fce4b28831c45f757f1b3da290
|
[] |
no_license
|
yevgeniy-voloshin/pyneng-online-jun-jul-2017
|
b0be9df7d379e24b654172c1bc3f5cc0bdbbcd2f
|
050e43d7f582528189005c1b7c34970352e968f1
|
refs/heads/master
| 2021-01-21T16:22:27.347769
| 2017-05-19T17:35:16
| 2017-05-19T17:35:16
| 91,885,650
| 1
| 0
| null | 2017-05-20T11:46:28
| 2017-05-20T11:46:28
| null |
UTF-8
|
Python
| false
| false
| 3,132
|
py
|
# Unpacking positional arguments
def config_interface(intf_name, ip_address, cidr_mask):
    """Build IOS-style configuration lines for one interface.

    Returns ['interface <name>', 'no shutdown',
    'ip address <ip> <dotted-decimal mask>'], converting a CIDR prefix
    such as '/25' into dotted-decimal notation.
    """
    prefix_len = int(cidr_mask.split('/')[-1])
    # 32-bit mask with prefix_len leading ones, then rendered octet by octet.
    mask_value = (0xFFFFFFFF << (32 - prefix_len)) & 0xFFFFFFFF
    dotted = '.'.join(str((mask_value >> shift) & 0xFF)
                      for shift in (24, 16, 8, 0))
    return ['interface %s' % intf_name,
            'no shutdown',
            'ip address %s %s' % (ip_address, dotted)]
#print config_interface('Fa0/1', '10.0.1.1', '/25')
# Demo data: one [name, ip, cidr] triple per interface.
interfaces_info = [['Fa0/1', '10.0.1.1', '/24'],
                   ['Fa0/2', '10.0.2.1', '/24'],
                   ['Fa0/3', '10.0.3.1', '/24'],
                   ['Fa0/4', '10.0.4.1', '/24'],
                   ['Lo0', '10.0.0.1', '/32']]
# Unpack each triple into positional arguments (Python 2 print statement).
for i in interfaces_info:
    print config_interface(*i)
"""
Output:
['interface Fa0/1', 'no shutdown', 'ip address 10.0.1.1 255.255.255.0']
['interface Fa0/2', 'no shutdown', 'ip address 10.0.2.1 255.255.255.0']
['interface Fa0/3', 'no shutdown', 'ip address 10.0.3.1 255.255.255.0']
['interface Fa0/4', 'no shutdown', 'ip address 10.0.4.1 255.255.255.0']
['interface Lo0', 'no shutdown', 'ip address 10.0.0.1 255.255.255.255']
"""
# Unpacking keyword arguments
def config_to_list(cfg_file, delete_excl=True,
                   delete_empty=True, strip_end=True):
    """Read a config file into a list of lines, optionally filtering.

    delete_excl drops lines starting with '!', delete_empty drops blank
    lines, strip_end removes trailing whitespace (including the newline).
    """
    kept = []
    with open(cfg_file) as cfg:
        for raw in cfg:
            line = raw.rstrip() if strip_end else raw
            # Guard clauses replace the original pass/elif chain.
            if delete_empty and not line:
                continue
            if delete_excl and line.startswith('!'):
                continue
            kept.append(line)
    return kept
# Demo: one kwargs dict per router config file; each toggles one option off
# (expects r1.txt..r4.txt in the working directory; Python 2 print statement).
cfg = [dict(cfg_file='r1.txt', delete_excl=True, delete_empty=True, strip_end=True),
       dict(cfg_file='r2.txt', delete_excl=False, delete_empty=True, strip_end=True),
       dict(cfg_file='r3.txt', delete_excl=True, delete_empty=False, strip_end=True),
       dict(cfg_file='r4.txt', delete_excl=True, delete_empty=True, strip_end=False)]
for d in cfg:
    print config_to_list(**d)
"""
Output:
['service timestamps debug datetime msec localtime show-timezone year', 'service timestamps log datetime msec localtime show-timezone year', 'service password-encryption', 'service sequence-numbers', 'no ip domain lookup', 'ip ssh version 2']
['!', 'service timestamps debug datetime msec localtime show-timezone year', 'service timestamps log datetime msec localtime show-timezone year', 'service password-encryption', 'service sequence-numbers', '!', 'no ip domain lookup', '!', 'ip ssh version 2', '!']
['service timestamps debug datetime msec localtime show-timezone year', 'service timestamps log datetime msec localtime show-timezone year', 'service password-encryption', 'service sequence-numbers', '', '', '', 'ip ssh version 2', '']
['service timestamps debug datetime msec localtime show-timezone year\n', 'service timestamps log datetime msec localtime show-timezone year\n', 'service password-encryption\n', 'service sequence-numbers\n', 'no ip domain lookup\n', 'ip ssh version 2\n']
"""
```
|
[
"pyneng.course@gmail.com"
] |
pyneng.course@gmail.com
|
408cd967099fe900471e4103edf3f71c8f1f8cd8
|
12e04c219d6911d06a048c913f8d8d6c00dad857
|
/chendian/api/blog/views.py
|
5deab03ba38adced720c0e0764230d3bc891c9a2
|
[
"MIT"
] |
permissive
|
mozillazg/chendian-plus
|
928e98beb77f351e08b25a5ba9671ad648dac4b5
|
893c62b4b855879006d4cb378faeb9d1c6635923
|
refs/heads/master
| 2023-09-04T09:58:58.112022
| 2017-04-04T09:44:28
| 2017-04-04T09:44:28
| 31,481,576
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,155
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
from django.shortcuts import get_object_or_404
from rest_framework.views import APIView
from rest_framework.permissions import IsAdminUser
from rest_framework.response import Response
from rest_framework.viewsets import ModelViewSet
from api.permissions import IsAdminOrReadonly, IsAdminOrReadAndCreate
from blog.models import Article, Tag, Category
from .serializers import ArticleSerializer, TagSerializer, CategorySerializer
class CategoryViewSet(ModelViewSet):
    """CRUD API endpoint for blog categories.

    Read access is public; writes are restricted via IsAdminOrReadonly.
    """
    queryset = Category.objects.all()
    serializer_class = CategorySerializer
    search_fields = ('name',)   # ?search= matches the category name
    filter_fields = ('id',)
    permission_classes = (IsAdminOrReadonly,)
class TagViewSet(ModelViewSet):
    """CRUD API endpoint for blog tags.

    NOTE(review): unlike CategoryViewSet, no permission_classes are set
    here, so DRF's project-wide default applies -- confirm this is intended.
    """
    queryset = Tag.objects.all()
    serializer_class = TagSerializer
    search_fields = ('name',)
    filter_fields = ('id',)
class ArticleViewSet(ModelViewSet):
    """CRUD API endpoint for blog articles.

    Non-staff users only see approved articles; staff see everything.
    """
    queryset = Article.objects.all()
    serializer_class = ArticleSerializer
    filter_fields = (
        'id', 'author__nick_name',
        'categories__slug', 'tags__slug'
    )
    search_fields = ('title',)
    permission_classes = (IsAdminOrReadAndCreate,)

    def get_queryset(self):
        """Restrict the listing to approved articles for non-staff users."""
        queryset = super(ArticleViewSet, self).get_queryset()
        if not self.request.user.is_staff:
            queryset = queryset.filter(status=Article.STATUS_APPROVED)
        # select_related/prefetch_related avoid N+1 queries when serializing
        # author, tags and categories.
        return queryset.select_related('author'
                                       ).prefetch_related('tags',
                                                          'categories')
class ArticleApprove(APIView):
    """Admin-only endpoint toggling an article's moderation status.

    PUT approves the article, DELETE disapproves it; both return HTTP 204.
    """
    permission_classes = (IsAdminUser,)

    def put(self, request, pk, *args, **kwargs):
        # Approve; 404 when the article does not exist.
        article = get_object_or_404(Article.objects.all(), pk=pk)
        article.status = Article.STATUS_APPROVED
        article.save()
        return Response(status=204)

    def delete(self, request, pk, *args, **kwargs):
        # Disapprove (soft reject) rather than deleting the row.
        article = get_object_or_404(Article.objects.all(), pk=pk)
        article.status = Article.STATUS_DISAPPROVED
        article.save()
        return Response(status=204)
|
[
"opensource.mozillazg@gmail.com"
] |
opensource.mozillazg@gmail.com
|
ddf15062b858f78fb39fed56808c8b1e276647cd
|
f4b3be2a3955c26b4e05ab162fa4909cf9a14f11
|
/CRB/validators/subsystems/enforcements/enf088.py
|
c28e582eaead1d50d8b94f0b33352ef67a14a38f
|
[] |
no_license
|
njovujsh/crbdjango
|
fd1f61403c1fbdac01b1bda5145faeb4b9ef9608
|
fdf5cc6ca5920a596c5463187d29202719664144
|
refs/heads/master
| 2022-12-04T18:13:07.709963
| 2018-05-14T09:07:47
| 2018-05-14T09:07:47
| 133,333,767
| 0
| 0
| null | 2022-11-22T01:44:28
| 2018-05-14T09:04:17
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 599
|
py
|
from validators.subsystems.enforcements import enf001
class ENF088(enf001.ENF001):
    """Enforcement rule 088.

    When the applicant classification on *records* is "1", the validated
    field is mandatory; for any other classification the rule passes.
    """

    def __init__(self, mobject, field, priority, action):
        super(ENF088, self).__init__(mobject, field, priority, action)
        # NOTE(review): neither attribute is read or written in this class;
        # presumably maintained by the base class or callers -- confirm.
        self.status = None
        self.fcs = None

    def validate_field(self, field, records):
        """Return True if *field* satisfies the rule for *records*.

        :param field: value under validation; required (truthy) only when
            the applicant classification is "1".
        :param records: object exposing ``Applicant_Classification``.
        :return: bool validation result.
        """
        # The original wrapped this in ``try: ... except: raise`` -- a no-op
        # that only obscured intent -- and spelled the truthiness test as a
        # verbose if/else returning literal True/False.
        if records.Applicant_Classification == "1":
            return bool(field)
        return True
|
[
"njovujsh@gmail.com"
] |
njovujsh@gmail.com
|
878d117d208c8bddc445d9193bd60d9962bc2d04
|
ad553dd718a8df51dabc9ba636040da740db57cf
|
/.history/app_20181202205024.py
|
2016dd936c4c5606bd2c690a9091adbc44772a0d
|
[] |
no_license
|
NergisAktug/E-Commerce-PythonWithFlask-Sqlite3
|
8e67f12c28b11a7a30d13788f8dc991f80ac7696
|
69ff4433aa7ae52ef854d5e25472dbd67fd59106
|
refs/heads/main
| 2023-01-01T14:03:40.897592
| 2020-10-19T20:36:19
| 2020-10-19T20:36:19
| 300,379,376
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,828
|
py
|
"""Flask Login Example and instagram fallowing find"""
from flask import Flask, url_for, render_template, request, redirect, session, escape
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.secret_key = 'any random string'
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///kullanicilar.db'
db = SQLAlchemy(app)
class User(db.Model):
    """ Create user table"""
    # NOTE(review): passwords are stored and compared in plain text (see the
    # giris/kayit views); hash them (e.g. werkzeug.security) before
    # production use.
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(80), unique=True)
    password = db.Column(db.String(80))

    def __init__(self, username, password):
        self.username = username
        self.password = password
@app.route('/')
def index():
    """Render the home page.

    NOTE(review): every branch returns the same template, and the
    request.method == 'POST' check is dead code on this GET-only route;
    the body could be a single render_template call.
    """
    if not session.get('giris_yap'):
        return render_template('index.html')
    else:
        if request.method == 'POST':
            return render_template('index.html')
        return render_template('index.html')
@app.route('/üye')
def uye():
    """Render the membership ("üye") page."""
    return render_template('/üye.html')
@app.route('/giris', methods=['GET', 'POST'])
def giris():
    """Login view: GET shows the form, POST checks the credentials.

    On success sets session['giris_yap'] = True and redirects home;
    on failure falls back to the index template.
    NOTE(review): compares the plain-text password directly against the
    database -- switch to hashed password verification.
    """
    if request.method == 'GET':
        return render_template('kayit.html')
    else:
        name = request.form['username']
        passw = request.form['password']
        data = User.query.filter_by(username=name, password=passw).first()
        if data is not None:
            session['giris_yap'] = True
            return redirect(url_for('index'))
        else:
            return render_template('index.html')
@app.route('/kayit', methods=['GET', 'POST'])
def kayit():
    """Register Form"""
    # POST: create the user with the submitted (plain-text) credentials and
    # show the membership page; GET: show the registration form.
    if request.method == 'POST':
        new_user = User(username=request.form.get('username'), password=request.form.get('password'))
        db.session.add(new_user)
        db.session.commit()
        return render_template('üye.html')
    return render_template('kayit.html')
@app.route("/cıkıs")
def cıkıs():
    """Log the current user out and redirect to the home page.

    Bug fix: this view previously set ``session['logged_in'] = False``,
    but the rest of the app (``index`` and ``giris``) keys the login state
    on ``session['giris_yap']`` -- so logging out had no visible effect.
    """
    session['giris_yap'] = False
    return redirect(url_for('index'))
if __name__ == '__main__':
    # Create the tables on first run, then start the Flask dev server.
    db.create_all()
    app.run(debug=True)
|
[
"nergis.aktug2014@gmail.com"
] |
nergis.aktug2014@gmail.com
|
c266c889f792a3b3629b97cb48f01a1e98e7ab09
|
4ca0cb74402be70c63ad8e1c67b529cd7770ba38
|
/19_model-view_controller/mvc.py
|
f0b57f822676e41408bad59aeb0327aba2d02a44
|
[] |
no_license
|
alxfed/python-design-patterns
|
06af6f8e47925bcafe39a117943dd8287a6fe567
|
b1a1ffb02b6e81e44bc7f0491376f9121b325a09
|
refs/heads/master
| 2020-04-02T04:34:18.060976
| 2019-12-18T16:08:00
| 2019-12-18T16:08:00
| 154,022,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 680
|
py
|
"""
mvc.py
"""
import sys
class GenericController(object):
    """MVC controller: wires the model to the view.

    handle() fetches data from the model for a request and hands it to the
    view to render.
    """
    def __init__(self):
        self.model = GenericModel()
        self.view = GenericView()

    def handle(self, request):
        data = self.model.get_data(request)
        self.view.generate_response(data)
class GenericModel(object):
    """Trivial MVC model: echoes the request back wrapped in a dict."""

    def __init__(self):
        pass

    def get_data(self, request):
        """Return *request* under the 'request' key."""
        return dict(request=request)
class GenericView(object):
    """Trivial MVC view: renders data by printing it to stdout."""

    def __init__(self):
        pass

    def generate_response(self, data):
        """Emit *data* to standard output; returns None."""
        print(data)
def main(name):
    """Demo entry point: route *name* through the MVC triad."""
    request_handler = GenericController()
    request_handler.handle(name)

if __name__ == "__main__":
    # Usage: python mvc.py <name>
    main(sys.argv[1])
|
[
"alxfed@gmail.com"
] |
alxfed@gmail.com
|
66379b12d4d5befc395446e0bd7e8fd9610fbfe9
|
7626a8371c7a847f93bdae5e1d6e03ee9667c3ba
|
/func/print_area_kz/venv/bin/sqlformat
|
87ba197dbb67cb1b0c1fd9666d91f0b0353cc1f2
|
[] |
no_license
|
zzyzx4/sp
|
52c815fd115b4605942baa73687838f64cd41864
|
90c7a90b3de27af674422e2c8892bad5ba7891e8
|
refs/heads/master
| 2020-05-23T21:20:28.166932
| 2019-07-19T11:56:49
| 2019-07-19T11:56:49
| 186,950,380
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 260
|
#!/home/user/PycharmProjects/print_area_kz/venv/bin/python
# -*- coding: utf-8 -*-
# Auto-generated console-script shim for sqlparse's ``sqlformat`` command.
import re
import sys

from sqlparse.__main__ import main

if __name__ == '__main__':
    # Strip the setuptools wrapper suffix (-script.py / .exe) from argv[0]
    # so usage/help output shows the plain command name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
|
[
"dastik0101@gmail.com"
] |
dastik0101@gmail.com
|
|
1c59053d7c6f0cc642b0dbe1ecc9f46b90c2c6f1
|
34745a8d54fa7e3d9e4237415eb52e507508ad79
|
/Python_Advanced/05_Functions Advanced/Exercise/04_negative_vs_positive.py
|
db0c4a49d3f9878fa05166a33c9f802e169e1017
|
[] |
no_license
|
DilyanTsenkov/SoftUni-Software-Engineering
|
50476af0dc88b267d72c56fa87eeb88d841164b2
|
fe446e3a50a00bb2e48d71ab8f783e0a4a406094
|
refs/heads/main
| 2023-08-12T18:18:42.144210
| 2021-09-25T11:10:38
| 2021-09-25T11:10:38
| 317,235,419
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,072
|
py
|
def absolute(negative_sum):
    """Magnitude of *negative_sum* (equivalent to abs())."""
    return -negative_sum if negative_sum < 0 else negative_sum
def compare_negative_positive_sum(negative_sum, positive_sum):
    """Return True when the positive total is at least the negative total.

    :param negative_sum: magnitude of the negative total (the caller passes
        abs() of the sum of negatives).
    :param positive_sum: sum of the positive values.
    :return: bool -- positives win ties.
    """
    # Direct comparison replaces the verbose if/else returning True/False.
    return positive_sum >= negative_sum
def negative_separator(numbers):
    """filter() predicate: True for strictly negative values.

    Bug fix: the original returned True or an implicit None; this now
    always returns a proper bool (still falsy for non-negatives, so
    filter() behaviour is unchanged).
    """
    return numbers < 0
def positive_separator(number):
    """filter() predicate: True for zero or positive values.

    Bug fix: the original returned True or an implicit None; this now
    always returns a proper bool (filter() behaviour unchanged).
    """
    return number >= 0
def printer(true_ot_false, positive_sum, negative_sum):
    """Print both sums, then report which side is stronger."""
    print(negative_sum)
    print(positive_sum)
    verdict = ("The positives are stronger than the negatives"
               if true_ot_false
               else "The negatives are stronger than the positives")
    print(verdict)
def sum_calc(nums):
    """Total of *nums*; 0 for an empty sequence."""
    total = 0
    for value in nums:
        total += value
    return total
# Read whitespace-separated integers from stdin, split them into negatives
# and positives, and report which side outweighs the other.
numbers = [int(el) for el in input().split()]
negative = list(filter(negative_separator, numbers))
positive = list(filter(positive_separator, numbers))
negative_sum = sum_calc(negative)
positive_sum = sum_calc(positive)
# Compare magnitudes: abs(negatives) vs sum of positives.
negative_abs_sum = absolute(negative_sum)
printer(compare_negative_positive_sum(negative_abs_sum, positive_sum), positive_sum, negative_sum)
|
[
"noreply@github.com"
] |
DilyanTsenkov.noreply@github.com
|
a370c978a47bc4b67c07d327141825fd9ce68d99
|
b441503bcdb484d098885b19a989932b8d053a71
|
/neural_sp/evaluators/wordpiece.py
|
aae95de11e128601df6e62d74b585a82e86bef85
|
[
"Apache-2.0"
] |
permissive
|
entn-at/neural_sp
|
a266594b357b175b0fea18253433e32adc62810c
|
9dbbb4ab3985b825f8e9120a603a6caa141c8bdd
|
refs/heads/master
| 2020-08-28T05:48:28.928667
| 2020-06-22T19:17:53
| 2020-06-22T19:17:53
| 217,611,439
| 0
| 0
| null | 2019-10-25T20:40:18
| 2019-10-25T20:40:18
| null |
UTF-8
|
Python
| false
| false
| 7,250
|
py
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2018 Kyoto University (Hirofumi Inaguma)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Evaluate the wordpiece-level model by WER."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
from tqdm import tqdm
from neural_sp.evaluators.edit_distance import compute_wer
from neural_sp.utils import mkdir_join
logger = logging.getLogger(__name__)
def eval_wordpiece(models, dataset, recog_params, epoch,
                   recog_dir=None, streaming=False, progressbar=False,
                   fine_grained=False):
    """Evaluate the wordpiece-level model by WER.

    Args:
        models (list): models to evaluate
        dataset (Dataset): evaluation dataset
        recog_params (dict):
        epoch (int):
        recog_dir (str):
        streaming (bool): streaming decoding for the session-level evaluation
        progressbar (bool): visualize the progressbar
        fine_grained (bool): calculate fine-grained WER distributions based on input lengths

    Returns:
        wer (float): Word error rate
        cer (float): Character error rate

    """
    # Reset data counter
    dataset.reset(recog_params['recog_batch_size'])

    # Build an output directory name that encodes the decoding
    # hyperparameters when the caller did not supply one.
    if recog_dir is None:
        recog_dir = 'decode_' + dataset.set + '_ep' + str(epoch) + '_beam' + str(recog_params['recog_beam_width'])
        recog_dir += '_lp' + str(recog_params['recog_length_penalty'])
        recog_dir += '_cp' + str(recog_params['recog_coverage_penalty'])
        recog_dir += '_' + str(recog_params['recog_min_len_ratio']) + '_' + str(recog_params['recog_max_len_ratio'])
        recog_dir += '_lm' + str(recog_params['recog_lm_weight'])

        ref_trn_save_path = mkdir_join(models[0].save_path, recog_dir, 'ref.trn')
        hyp_trn_save_path = mkdir_join(models[0].save_path, recog_dir, 'hyp.trn')
    else:
        ref_trn_save_path = mkdir_join(recog_dir, 'ref.trn')
        hyp_trn_save_path = mkdir_join(recog_dir, 'hyp.trn')

    # Running error counts; normalized to rates after the loop.
    wer, cer = 0, 0
    n_sub_w, n_ins_w, n_del_w = 0, 0, 0
    n_sub_c, n_ins_c, n_del_c = 0, 0, 0
    n_word, n_char = 0, 0
    n_streamable, quantity_rate, n_utt = 0, 0, 0
    last_success_frame_ratio = 0

    if progressbar:
        pbar = tqdm(total=len(dataset))

    # calculate WER distribution based on input lengths
    wer_dist = {}

    with open(hyp_trn_save_path, 'w') as f_hyp, open(ref_trn_save_path, 'w') as f_ref:
        while True:
            batch, is_new_epoch = dataset.next(recog_params['recog_batch_size'])
            # Streaming / chunk-synchronous decoding vs. standard batch decoding.
            if streaming or recog_params['recog_chunk_sync']:
                best_hyps_id, _ = models[0].decode_streaming(
                    batch['xs'], recog_params, dataset.idx2token[0],
                    exclude_eos=True)
            else:
                best_hyps_id, _ = models[0].decode(
                    batch['xs'], recog_params,
                    idx2token=dataset.idx2token[0] if progressbar else None,
                    exclude_eos=True,
                    refs_id=batch['ys'],
                    utt_ids=batch['utt_ids'],
                    speakers=batch['sessions' if dataset.corpus == 'swbd' else 'speakers'],
                    ensemble_models=models[1:] if len(models) > 1 else [])

            for b in range(len(batch['xs'])):
                ref = batch['text'][b]
                # Drop a leading '<...>' token (e.g. a tag) from the reference.
                if ref[0] == '<':
                    ref = ref.split('>')[1]
                hyp = dataset.idx2token[0](best_hyps_id[b])

                # Write to trn
                speaker = str(batch['speakers'][b]).replace('-', '_')
                if streaming:
                    utt_id = str(batch['utt_ids'][b]) + '_0000000_0000001'
                else:
                    utt_id = str(batch['utt_ids'][b])
                f_ref.write(ref + ' (' + speaker + '-' + utt_id + ')\n')
                f_hyp.write(hyp + ' (' + speaker + '-' + utt_id + ')\n')
                logger.debug('utt-id: %s' % utt_id)
                logger.debug('Ref: %s' % ref)
                logger.debug('Hyp: %s' % hyp)
                logger.debug('-' * 150)

                if not streaming:
                    # Compute WER
                    wer_b, sub_b, ins_b, del_b = compute_wer(ref=ref.split(' '),
                                                            hyp=hyp.split(' '),
                                                            normalize=False)
                    wer += wer_b
                    n_sub_w += sub_b
                    n_ins_w += ins_b
                    n_del_w += del_b
                    n_word += len(ref.split(' '))
                    if fine_grained:
                        # Bucket WER by input length in 200-frame bins.
                        xlen_bin = (batch['xlens'][b] // 200 + 1) * 200
                        if xlen_bin in wer_dist.keys():
                            wer_dist[xlen_bin] += [wer_b / 100]
                        else:
                            wer_dist[xlen_bin] = [wer_b / 100]

                    # Compute CER
                    if dataset.corpus == 'csj':
                        # CSJ is scored without inter-word spaces.
                        ref = ref.replace(' ', '')
                        hyp = hyp.replace(' ', '')
                    cer_b, sub_b, ins_b, del_b = compute_wer(ref=list(ref),
                                                            hyp=list(hyp),
                                                            normalize=False)
                    cer += cer_b
                    n_sub_c += sub_b
                    n_ins_c += ins_b
                    n_del_c += del_b
                    n_char += len(ref)

                    # Streamability statistics from the decoder.
                    if models[0].streamable():
                        n_streamable += 1
                    else:
                        last_success_frame_ratio += models[0].last_success_frame_ratio()
                    quantity_rate += models[0].quantity_rate()

                n_utt += 1
                if progressbar:
                    pbar.update(1)

            if is_new_epoch:
                break

    if progressbar:
        pbar.close()

    # Reset data counters
    dataset.reset()

    if not streaming:
        # Normalize accumulated counts into rates.
        wer /= n_word
        n_sub_w /= n_word
        n_ins_w /= n_word
        n_del_w /= n_word
        cer /= n_char
        n_sub_c /= n_char
        n_ins_c /= n_char
        n_del_c /= n_char

        if n_utt - n_streamable > 0:
            last_success_frame_ratio /= (n_utt - n_streamable)
        n_streamable /= n_utt
        quantity_rate /= n_utt

        if fine_grained:
            for len_bin, wers in sorted(wer_dist.items(), key=lambda x: x[0]):
                logger.info(' WER (%s): %.2f %% (%d)' % (dataset.set, sum(wers) / len(wers), len_bin))

    logger.debug('WER (%s): %.2f %%' % (dataset.set, wer))
    logger.debug('SUB: %.2f / INS: %.2f / DEL: %.2f' % (n_sub_w, n_ins_w, n_del_w))
    logger.debug('CER (%s): %.2f %%' % (dataset.set, cer))
    logger.debug('SUB: %.2f / INS: %.2f / DEL: %.2f' % (n_sub_c, n_ins_c, n_del_c))
    logger.info('Streamablility (%s): %.2f %%' % (dataset.set, n_streamable * 100))
    logger.info('Quantity rate (%s): %.2f %%' % (dataset.set, quantity_rate * 100))
    logger.info('Last success frame ratio (%s): %.2f %%' % (dataset.set, last_success_frame_ratio))

    return wer, cer
|
[
"hiro.mhbc@gmail.com"
] |
hiro.mhbc@gmail.com
|
f98a1584a105d194c9e6e6a5e93adcc623f4cfab
|
cb61ba31b27b232ebc8c802d7ca40c72bcdfe152
|
/leetcode/931. Minimum Falling Path Sum/soln.py
|
1e4b6c29b5e019be51d4b3abc9a345a86c121f90
|
[
"Apache-2.0"
] |
permissive
|
saisankargochhayat/algo_quest
|
c7c48187c76b5cd7c2ec3f0557432606e9096241
|
a24f9a22c019ab31d56bd5a7ca5ba790d54ce5dc
|
refs/heads/master
| 2021-07-04T15:21:33.606174
| 2021-02-07T23:42:43
| 2021-02-07T23:42:43
| 67,831,927
| 5
| 1
|
Apache-2.0
| 2019-10-28T03:51:03
| 2016-09-09T20:51:29
|
Python
|
UTF-8
|
Python
| false
| false
| 1,022
|
py
|
# So we create a DP and start from the last low and start building up the the upper rows in the dp 2d array
# dp[i][j] represents the minimum path to reach that element with the given constraints.
class Solution:
    def minFallingPathSum(self, A: List[List[int]]) -> int:
        """Minimum sum of a falling path through the square array *A*.

        A falling path starts anywhere in the first row and moves down one
        row at a time, shifting at most one column left or right. Uses a
        rolling one-row DP instead of a full 2-D table: best[j] holds the
        cheapest path cost starting at (i, j) for the row being processed.
        """
        rows = len(A)
        cols = len(A[0])
        best = list(A[-1])  # last row: cost of starting there is the value itself
        for i in range(rows - 2, -1, -1):
            below = best
            best = []
            for j in range(cols):
                cheapest = below[j]
                if j > 0:
                    cheapest = min(cheapest, below[j - 1])
                if j < cols - 1:
                    cheapest = min(cheapest, below[j + 1])
                best.append(A[i][j] + cheapest)
        return min(best)
|
[
"saisankargochhayat@gmail.com"
] |
saisankargochhayat@gmail.com
|
b96f2c76b38323327b3fd2cd6fe341d4e3148b74
|
ec4ce2cc5e08e032f2bdb7d8e6ba616e80e6f5f7
|
/chapter11_test_code/test_cities.py
|
f669296324136f72db551ae3d88cecb53a02dda6
|
[] |
no_license
|
AiZhanghan/python-crash-course-a-hands-on-project-based-introduction-to-programming
|
8fc54ef69636c88985df00b546bc49c4a2378e79
|
9d8c9fde7d6ab9fe664fa718e1516d7442eafd00
|
refs/heads/master
| 2020-09-28T18:28:56.558413
| 2019-12-12T11:05:43
| 2019-12-12T11:05:43
| 226,835,456
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 772
|
py
|
import unittest
from city_functions import get_formated_city_name
class CityTestCase(unittest.TestCase):
    '''Tests for city_functions.get_formated_city_name.'''

    def test_city_country(self):
        '''Correctly handles a city/country pair like Santiago, Chile?'''
        formetted_city_name = get_formated_city_name('santiago', 'chile')
        self.assertEqual(formetted_city_name, 'Santiago, Chile')

    def test_city_country_population(self):
        '''
        Correctly handles input with a population,
        e.g. Santiago, Chile - population 5000000?
        '''
        formatted_city_name = get_formated_city_name('santiago',
                                                     'chile', 5000000)
        self.assertEqual(formatted_city_name,
                         'Santiago, Chile - population 5000000')
# Bug fix: the bare unittest.main() call ran (and sys.exit()-ed) the moment
# this module was imported; guard it so tests run only as a script.
if __name__ == '__main__':
    unittest.main()
|
[
"35103759+AiZhanghan@users.noreply.github.com"
] |
35103759+AiZhanghan@users.noreply.github.com
|
8d6d28f03e7dba2a24a1999e76fb628096a9fb19
|
486173e490129cec10b15c36903af3d13cfb0950
|
/FP-growth/fpGrowthTest.py
|
96ee73f6f17be8d5471447071182a3d3d5beda46
|
[] |
no_license
|
Hsingmin/MLinAction_on_Python2
|
ce3592297cbddf4e7a5c6525b6491b1b37b87ca5
|
ac5c5f8a167d3b4a5f7c7ee9e3409136db423ac0
|
refs/heads/master
| 2021-07-25T10:06:02.933608
| 2017-11-04T08:55:08
| 2017-11-04T08:55:08
| 103,387,222
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,117
|
py
|
# fpGrowthTest.py
# Exercise the FP-growth implementation: build an FP-tree from the toy
# dataset and mine its frequent itemsets, then repeat on the kosarak news
# click-stream data (expects 'kosarak.dat' in the working directory).
# NOTE: Python 2 syntax (print statements below).
import fpGrowth
from numpy import *
'''
# FP-Tree node create test
rootNode = fpGrowth.treeNode('pyramid', 9, None)
rootNode.children['eye'] = fpGrowth.treeNode('eye', 13, None)
rootNode.children['phoenix'] = fpGrowth.treeNode('phoenix', 3, None)
rootNode.disp()
'''
simData = fpGrowth.loadSimpleData()
print('simData : ' , simData)
initSet = fpGrowth.createInitSet(simData)
print('initSet : ', initSet)
# Minimum support of 3 for the toy data set.
simFPTree, simHeaderTable = fpGrowth.createTree(initSet, 3)
simFPTree.disp()
freqItems = []
fpGrowth.mineTree(simFPTree, simHeaderTable, 3, set([]), freqItems)
print '============ news clicks digging =========== '
parseData = [line.split() for line in open('kosarak.dat').readlines()]
initSet = fpGrowth.createInitSet(parseData)
# Minimum support of 100000 clicks for the kosarak data.
newFPTree, newFPHeaderTable = fpGrowth.createTree(initSet, 100000)
newFreqList = []
fpGrowth.mineTree(newFPTree, newFPHeaderTable, 100000, set([]), newFreqList)
print 'len(newFreqList = )', len(newFreqList)
print '--------- newFreqList --------'
print newFreqList
|
[
"alfred_bit@sina.cn"
] |
alfred_bit@sina.cn
|
f9198d9eb339474258efaac2ded39e65e899ec24
|
b8e249f2bf0aa175899090128f7a77fb34aa2c1b
|
/apps/users/migrations/0002_auto_20190523_2209.py
|
ad3a4736f1152245525812b35261e78189162d03
|
[] |
no_license
|
dojo-ninja-gold/ng-server
|
80d8568fa960e882df9e1a6fff7e020e93ff2990
|
fcd69744a2ebf99f0c24b3136ba7a2d8a4c683e1
|
refs/heads/master
| 2023-05-03T21:05:54.026847
| 2019-05-24T22:29:51
| 2019-05-24T22:29:51
| 187,918,381
| 0
| 0
| null | 2023-04-21T20:32:36
| 2019-05-21T21:49:40
|
Python
|
UTF-8
|
Python
| false
| false
| 830
|
py
|
# Generated by Django 2.2.1 on 2019-05-23 22:09
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add first_name, last_name and pw_hash columns to the user model.

    preserve_default=False means each default ('' / 'password') is only
    used to backfill existing rows during this migration, not kept on the
    model afterwards.
    """

    dependencies = [
        ('users', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='user',
            name='first_name',
            field=models.CharField(default='', max_length=255),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='user',
            name='last_name',
            field=models.CharField(default='', max_length=255),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='user',
            name='pw_hash',
            field=models.CharField(default='password', max_length=500),
            preserve_default=False,
        ),
    ]
|
[
"wes@tao.team"
] |
wes@tao.team
|
1d35b0c6b7a5c4252763588c948c81d9b77ad15b
|
b458b2cf3011a73def66605b296144049909cd48
|
/tests/my_trade.py
|
e749520a049723eff15fa850405a79187d1d6f1f
|
[
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
shihliu/python-binance
|
8c5607a78a4794f9b42fe90092a149f4050d4710
|
c44f8a315df32c8b5d54750c27703060ec9060aa
|
refs/heads/master
| 2021-08-22T02:47:10.423523
| 2017-11-29T04:34:30
| 2017-11-29T04:34:30
| 111,865,384
| 0
| 0
| null | 2017-11-24T01:57:33
| 2017-11-24T01:57:33
| null |
UTF-8
|
Python
| false
| false
| 1,044
|
py
|
from binance.client import Client
import json
client = Client('yq67cDjrCxGl6eeKMyTeiK1zkeArFpu8v4uB4b6TWDQdgjDlH0KjmXfHBZ1NjvJj', 'DxE7Wugo75EK8mLmybY76dbZW6tROpyNjBRd9NHsEOXqBaKq6Awgul4390xwRUdc')
my_trade = client.get_my_trades(symbol='QSPETH')
all_buy_price = all_buy_amount= 0.0
all_sell_price = all_sell_amount= 0.0
for i in my_trade:
if i["isBuyer"] is True:
all_buy_price = all_buy_price + float(i["price"]) * float(i["qty"])
all_buy_amount = all_buy_amount + float(i["qty"])
else:
all_sell_price = all_buy_price + float(i["price"]) * float(i["qty"])
all_sell_amount = all_buy_amount + float(i["qty"])
avg_buy_price = all_buy_price / all_buy_amount
print "my total buy price is %f" %all_buy_price
print "my total buy amount is %f" %all_buy_amount
print "average buy price is %f" %avg_buy_price
avg_sell_price = all_sell_price / all_sell_amount
print "my total sell price is %f" %all_sell_price
print "my total sell amount is %f" %all_sell_amount
print "average sell price is %f" %avg_sell_price
|
[
"root@dhcp-129-210.nay.redhat.com"
] |
root@dhcp-129-210.nay.redhat.com
|
4b9785d208ec7bfd695f67a1c0ae0ae14af5c025
|
d3e4b3e0d30dabe9714429109d2ff7b9141a6b22
|
/Visualization/LagrangeInterpolationVisualization.py
|
88ab36a87c9cd16c736d839ffcb9ba3d3157994f
|
[
"MIT"
] |
permissive
|
SymmetricChaos/NumberTheory
|
184e41bc7893f1891fa7fd074610b0c1520fa7dd
|
65258e06b7f04ce15223c1bc0c2384ef5e9cec1a
|
refs/heads/master
| 2021-06-11T17:37:34.576906
| 2021-04-19T15:39:05
| 2021-04-19T15:39:05
| 175,703,757
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 699
|
py
|
# Demo: fit a Lagrange interpolating polynomial to a few samples of sin(x)
# and plot the true curve, the interpolant, and the sample points.
from Polynomials import lagrange_interpolation
import matplotlib.pyplot as plt
import numpy as np

# Sample x-positions and the function being interpolated.
points = [1,3,5,7]
function = lambda x: np.sin(x)
print("""Lagrange interpolation takes a set of n points and finds the "best" polynomial that describes them. Given n points on a plane there is a polynomial of degree n-1 that passes through all of them.""")
print(f"In this example we use {len(points)} points taken from the sine function.")
fig = plt.figure()
# NOTE(review): ax is never used below; plt.* draws on the current axes.
ax=fig.add_axes([0,0,1,1])
lp = lagrange_interpolation(points,function)
print(lp)
# Dense grid over the sampled interval: true curve (y0) vs interpolant (y1).
x = np.linspace(min(points),max(points),50)
y0 = function(x)
y1 = lp.evaluate(x)
plt.plot(x,y0)
plt.plot(x,y1)
plt.scatter(points,function(points))
|
[
"ajfraebel@gmail.com"
] |
ajfraebel@gmail.com
|
0b1900e0a13d5588aa349822a427ad816264765e
|
287fcd6bc49381d5b116dd541a97c0ff37141214
|
/app/section/sections/hero_section.py
|
c5960e017024cdfa7d8610c48d487ea424d32899
|
[] |
no_license
|
elcolono/wagtail-cms
|
95812323768b90e3630c5f90e59a9f0074157ab5
|
b3acb2e5c8f985202da919aaa99ea9db2f6b4d51
|
refs/heads/master
| 2023-05-26T05:24:42.362695
| 2020-10-08T17:23:22
| 2020-10-08T17:23:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,839
|
py
|
from django.db import models
from wagtail.snippets.models import register_snippet
from wagtail.admin.edit_handlers import (
MultiFieldPanel, FieldPanel, StreamFieldPanel, FieldRowPanel)
from wagtail.admin.edit_handlers import ObjectList, TabbedInterface
from wagtail.images.edit_handlers import ImageChooserPanel
from section.blocks import ButtonAction, SectionTitleBlock
from . import SectionBase
from wagtail.core.fields import StreamField
from section.blocks import ActionButton, PrimaryButton
from wagtail.core.models import Page
from section.settings import cr_settings
@register_snippet
class HeroSection(SectionBase, SectionTitleBlock, ButtonAction, Page):
hero_layout = models.CharField(
blank=True,
max_length=100,
verbose_name='Layout',
choices=[
('simple_centered', 'Simple centered'),
('image_right', 'Image on right')
],
default='simple_centered',
)
hero_first_button_text = models.CharField(
blank=True,
max_length=100,
verbose_name='Hero button text',
default='Subscribe',
help_text="Leave field empty to hide.",
)
hero_second_button_text = models.CharField(
blank=True,
max_length=100,
verbose_name='Hero button text',
default='Subscribe',
help_text="Leave field empty to hide.",
)
hero_image = models.ForeignKey(
'wagtailimages.Image',
blank=True,
null=True,
on_delete=models.SET_NULL,
verbose_name='Image',
related_name='+',
)
hero_image_size = models.CharField(
max_length=50,
choices=cr_settings['HERO_IMAGE_SIZE_CHOICES'],
default=cr_settings['HERO_IMAGE_SIZE_CHOICES_DEFAULT'],
verbose_name=('Image size'),
)
hero_action_type_1 = models.CharField(
max_length=50,
choices=cr_settings['HERO_ACTION_TYPE_CHOICES'],
default=cr_settings['HERO_ACTION_TYPE_CHOICES_DEFAULT'],
verbose_name=('Action type (First)'),
)
hero_action_type_2 = models.CharField(
max_length=50,
choices=cr_settings['HERO_ACTION_TYPE_CHOICES'],
default=cr_settings['HERO_ACTION_TYPE_CHOICES_DEFAULT'],
verbose_name=('Action type (Second)'),
)
hero_buttons = StreamField(
[
('action_button', ActionButton()),
('primary_button', PrimaryButton())
],
null=True,
verbose_name="Buttons",
help_text="Please choose Buttons"
)
# basic tab panels
basic_panels = Page.content_panels + [
FieldPanel('hero_layout', heading='Layout', classname="title full"),
MultiFieldPanel(
[
FieldRowPanel([
FieldPanel('hero_layout', classname="col6"),
FieldPanel('hero_image_size', classname="col6"),
]),
FieldRowPanel([
FieldPanel('section_heading',
heading='Heading', classname="col6"),
FieldPanel('section_subheading',
heading='Subheading', classname="col6"),
]),
FieldRowPanel([
FieldPanel('section_description',
heading='Description', classname="col6"),
]),
FieldPanel('hero_first_button_text'),
FieldPanel('hero_second_button_text'),
ImageChooserPanel('hero_image'),
],
heading='Content',
),
SectionBase.section_layout_panels,
SectionBase.section_design_panels,
]
# advanced tab panels
advanced_panels = (
SectionTitleBlock.title_basic_panels,
) + ButtonAction.button_action_panels
# Register Tabs
edit_handler = TabbedInterface(
[
ObjectList(basic_panels, heading="Basic"),
ObjectList(advanced_panels, heading="Plus+"),
]
)
# Page settings
template = 'sections/hero_section_preview.html'
parent_page_types = ['home.HomePage']
subpage_types = []
# Overring methods
    def set_url_path(self, parent):
        """Populate ``url_path`` based on the given parent page and return it.

        A parent is passed in rather than retrieved via ``get_parent`` so
        that new unsaved pages can be given a meaningful URL when previewing
        them (at that point the page has no position in the treebeard tree).

        NOTE(review): unlike Wagtail's default, any parented page gets an
        EMPTY url_path here -- presumably because these section pages are
        rendered inline rather than routed; confirm against callers.
        """
        if parent:
            self.url_path = ''
        else:
            # a page without a parent is the tree root, which always has a url_path of '/'
            self.url_path = '/'
        return self.url_path
|
[
"andreas.siedler@gmail.com"
] |
andreas.siedler@gmail.com
|
b1ff28e00fcaf827759d3315508259d5c02fe49a
|
912cb61eaa768716d30844990ebbdd80ab2c2f4e
|
/ex070.py
|
aad48d4aa3286cd92534b1c397274d2ac7ddf5ea
|
[] |
no_license
|
luizaacampos/exerciciosCursoEmVideoPython
|
5fc9bed736300916e1c26d115eb2e703ba1dd4ca
|
398bfa5243adae00fb58056d1672cc20ff4a31d6
|
refs/heads/main
| 2023-01-06T21:48:17.068478
| 2020-11-11T12:29:10
| 2020-11-11T12:29:10
| 311,964,179
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 644
|
py
|
# Interactive checkout loop: records each product's name and price until the
# user answers 'N', then reports the total spend, how many items cost more
# than R$1000.00, and the cheapest product seen.
total = acima_de_mil = quantidade = menor_preco = 0
produto_mais_barato = None
print('--------------Loja Sallus-----------------')
continuar = True
while continuar:
    nome = input('Nome do produto: ')
    preco = float(input('Preço: R$'))
    quantidade += 1
    resposta = str(input('Quer continuar? [S/N] ')).upper().strip()[0]
    total += preco
    if preco > 1000.00:
        acima_de_mil += 1
    # The very first product is always the provisional cheapest.
    if quantidade == 1 or preco < menor_preco:
        menor_preco = preco
        produto_mais_barato = nome
    if resposta == 'N':
        continuar = False
print('---------FIM DO PROGRAMA-------------')
print(f'O total da compra foi R${total:.2f}')
print(f'Temos {acima_de_mil} produtos custando mais de R$1000.00')
print(f'O produto mais barato foi {produto_mais_barato} que custa R${menor_preco:.2f}')
|
[
"luiza.almcampos@gmail.com"
] |
luiza.almcampos@gmail.com
|
30197700259a9549341c49c7bd19ffeca986744d
|
fb0e99751068fa293312f60fedf8b6d0b9eae293
|
/slepé_cesty_vývoje/iskušitel/najdu_testovací_soubory.py
|
452504d722f35dd929333e4039ac4e9dc3d416ee
|
[] |
no_license
|
BGCX261/zora-na-pruzi-hg-to-git
|
d9628a07e3effa6eeb15b9b5ff6d75932a6deaff
|
34a331e17ba87c0de34e7f0c5b43642d5b175215
|
refs/heads/master
| 2021-01-19T16:52:06.478359
| 2013-08-07T19:58:42
| 2013-08-07T19:58:42
| 41,600,435
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,598
|
py
|
#!/usr/bin/env python3
# Copyright (c) 2012 Домоглед <domogled@domogled.eu>
# @author Петр Болф <petr.bolf@domogled.eu>
import os, fnmatch
MASKA_TESTOVACÍCH_SOUBORŮ = 'testuji_*.py'


def najdu_testovací_soubory(cesta):
    """Yield absolute paths of test files matching the module mask.

    ``cesta`` may be a directory (walked recursively) or a single file.
    Raises IOError when the path does not exist, when a single file does
    not match the mask, or when no matching file was found at all.
    """
    nalezeno = 0
    if os.path.isdir(cesta):
        for kořen, _podadresáře, soubory in os.walk(cesta):
            for jméno in soubory:
                if fnmatch.fnmatch(jméno, MASKA_TESTOVACÍCH_SOUBORŮ):
                    nalezeno += 1
                    yield os.path.join(kořen, jméno)
    elif os.path.isfile(cesta):
        if fnmatch.fnmatch(os.path.basename(cesta), MASKA_TESTOVACÍCH_SOUBORŮ):
            nalezeno += 1
            yield cesta
        else:
            raise IOError('Soubor testu "{}" neodpovídá masce {}'.format(cesta, MASKA_TESTOVACÍCH_SOUBORŮ))
    else:
        raise IOError('Soubor testu "{}" nejestvuje'.format(cesta))
    if nalezeno == 0:
        raise IOError('Nenašel jsem žádný testovací soubor v cestě "{}" za pomocí masky "{}"'.format(cesta, MASKA_TESTOVACÍCH_SOUBORŮ))
|
[
"petr.bolf@domogled.eu"
] |
petr.bolf@domogled.eu
|
bbca1de8f3365de6962acd80b69471036e33422e
|
68c4805ad01edd612fa714b1e0d210115e28bb7d
|
/venv/Lib/site-packages/numba/tests/test_config.py
|
de8371b8b2d4ac9757452a6d5a24a1954ff13f8d
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
Happy-Egg/redesigned-happiness
|
ac17a11aecc7459f4ebf0afd7d43de16fb37ae2c
|
08b705e3569f3daf31e44254ebd11dd8b4e6fbb3
|
refs/heads/master
| 2022-12-28T02:40:21.713456
| 2020-03-03T09:04:30
| 2020-03-03T09:04:30
| 204,904,444
| 2
| 1
|
Apache-2.0
| 2022-12-08T06:19:04
| 2019-08-28T10:18:05
|
Python
|
UTF-8
|
Python
| false
| false
| 3,444
|
py
|
import os
import tempfile
import unittest
from .support import TestCase, temp_directory, override_env_config
from numba import config
# The config-file tests need pyyaml to write mock configuration files;
# skip them cleanly when it is not installed.
try:
    import yaml
    _HAVE_YAML = True
except ImportError:
    _HAVE_YAML = False
_skip_msg = "pyyaml needed for configuration file tests"
needs_yaml = unittest.skipIf(not _HAVE_YAML, _skip_msg)
@needs_yaml
class TestConfig(TestCase):
    """Tests that a ``.numba_config.yaml`` file found in the launch
    directory overrides numba's config settings."""
    # Disable parallel testing due to envvars modification
    _numba_parallel_test_ = False

    def setUp(self):
        # use support.temp_directory, it can do the clean up
        self.tmppath = temp_directory('config_tmp')
        super(TestConfig, self).setUp()

    def mock_cfg_location(self):
        """
        Creates a mock launch location.
        Returns the location path.
        """
        return tempfile.mkdtemp(dir=self.tmppath)

    def inject_mock_cfg(self, location, cfg):
        """
        Injects a mock configuration at 'location'
        """
        tmpcfg = os.path.join(location, config._config_fname)
        with open(tmpcfg, 'wt') as f:
            yaml.dump(cfg, f, default_flow_style=False)

    def get_settings(self):
        """
        Gets the current numba config settings
        """
        store = dict()
        # config exposes its knobs as upper-case module attributes.
        for x in dir(config):
            if x.isupper():
                store[x] = getattr(config, x)
        return store

    def create_config_effect(self, cfg):
        """
        Returns a config "original" from a location with no config file
        and then the impact of applying the supplied cfg dictionary as
        a config file at a location in the returned "current".
        """
        # store original cwd
        original_cwd = os.getcwd()
        # create mock launch location
        launch_dir = self.mock_cfg_location()
        # switch cwd to the mock launch location, get and store settings
        os.chdir(launch_dir)
        # use override to ensure that the config is zero'd out with respect
        # to any existing settings
        with override_env_config('_', '_'):
            original = self.get_settings()
        # inject new config into a file in the mock launch location
        self.inject_mock_cfg(launch_dir, cfg)
        try:
            # override something but don't change the value, this is to refresh
            # the config and make sure the injected config file is read
            with override_env_config('_', '_'):
                current = self.get_settings()
        finally:
            # switch back to original dir with no new config
            os.chdir(original_cwd)
        return original, current

    def test_config(self):
        # ensure a non empty settings file does impact config and that the
        # case of the key makes no difference
        key = 'COLOR_SCHEME'
        for case in [str.upper, str.lower]:
            orig, curr = self.create_config_effect({case(key): 'light_bg'})
            self.assertTrue(orig != curr)
            self.assertTrue(orig[key] != curr[key])
            self.assertEqual(curr[key], 'light_bg')
            # check that just the color scheme is the cause of difference
            orig.pop(key)
            curr.pop(key)
            self.assertEqual(orig, curr)

    def test_empty_config(self):
        # ensure an empty settings file does not impact config
        orig, curr = self.create_config_effect({})
        self.assertEqual(orig, curr)
if __name__ == '__main__':
unittest.main()
|
[
"yangyang4910709@163.com"
] |
yangyang4910709@163.com
|
19377378073d0491068a8850c5ec1a202b416b4e
|
e514bbdf8e0abe5ef0b58b94fe5f7d2afb38ea6b
|
/test_suite/shared_data/frame_order/cam/rotor/perm_pseudo_ellipse_z_le_x_le_y_alt/pseudo-ellipse.py
|
b1dec4d76ae1ec4b73e2fd5cf18f201d538cd854
|
[] |
no_license
|
edward-dauvergne/relax
|
98ad63703e68a4535bfef3d6c0529e07cc84ff29
|
9710dc0f2dfe797f413756272d4bec83cf6ca1c9
|
refs/heads/master
| 2020-04-07T04:25:25.382027
| 2017-01-04T15:38:09
| 2017-01-04T15:38:09
| 46,500,334
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,967
|
py
|
# Optimise all 3 pseudo-ellipse permutations for the CaM rotor synthetic frame order data.
# These 3 solutions should mimic the rotor solution.
# Python module imports.
from numpy import array, cross, float64, transpose, zeros
from numpy.linalg import norm
import sys
# relax module imports.
from lib.geometry.coord_transform import spherical_to_cartesian
from lib.geometry.rotations import R_to_euler_zyz
from lib.text.sectioning import section
# The real rotor parameter values.
# NOTE(review): this is a relax user-script -- names such as `pi`, `state`,
# `value`, `pipe`, `frame_order`, `section` and `minimise` are injected into
# the script namespace by the relax interpreter, not imported here.
AVE_POS_X, AVE_POS_Y, AVE_POS_Z = [ -21.269217407269576,    -3.122610661328414,    -2.400652421655998]
AVE_POS_ALPHA, AVE_POS_BETA, AVE_POS_GAMMA = [5.623469076122531, 0.435439405668396, 5.081265529106499]
AXIS_THETA = 0.9600799785953431
AXIS_PHI = 4.0322755062196229
CONE_SIGMA_MAX = 30.0 / 360.0 * 2.0 * pi
# Reconstruct the rotation axis.
AXIS = zeros(3, float64)
spherical_to_cartesian([1, AXIS_THETA, AXIS_PHI], AXIS)
# Create a full normalised axis system (x, y chosen orthonormal to AXIS).
x = array([1, 0, 0], float64)
y = cross(AXIS, x)
y /= norm(y)
x = cross(y, AXIS)
x /= norm(x)
AXES = transpose(array([x, y, AXIS], float64))
# The Euler angles.
eigen_alpha, eigen_beta, eigen_gamma = R_to_euler_zyz(AXES)
# Printout (sanity checks that AXES is right-handed and orthonormal).
print("Torsion angle: %s" % CONE_SIGMA_MAX)
print("Rotation axis: %s" % AXIS)
print("Full axis system:\n%s" % AXES)
print("cross(x, y) = z:\n    %s = %s" % (cross(AXES[:, 0], AXES[:, 1]), AXES[:, 2]))
print("cross(x, z) = -y:\n   %s = %s" % (cross(AXES[:, 0], AXES[:, 2]), -AXES[:, 1]))
print("cross(y, z) = x:\n    %s = %s" % (cross(AXES[:, 1], AXES[:, 2]), AXES[:, 0]))
print("Euler angles (alpha, beta, gamma): (%.15f, %.15f, %.15f)" % (eigen_alpha, eigen_beta, eigen_gamma))
# Load the optimised rotor state for creating the pseudo-ellipse data pipes.
state.load(state='frame_order_true', dir='..')
# Set up the dynamic system.
value.set(param='ave_pos_x', val=AVE_POS_X)
value.set(param='ave_pos_y', val=AVE_POS_Y)
value.set(param='ave_pos_z', val=AVE_POS_Z)
value.set(param='ave_pos_alpha', val=AVE_POS_ALPHA)
value.set(param='ave_pos_beta', val=AVE_POS_BETA)
value.set(param='ave_pos_gamma', val=AVE_POS_GAMMA)
value.set(param='eigen_alpha', val=eigen_alpha)
value.set(param='eigen_beta', val=eigen_beta)
value.set(param='eigen_gamma', val=eigen_gamma)
# Set the torsion angle to the rotor opening half-angle.
value.set(param='cone_sigma_max', val=0.1)
# Set the cone opening angles.
value.set(param='cone_theta_x', val=0.3)
value.set(param='cone_theta_y', val=0.6)
# Fix the true pivot point.
frame_order.pivot([ 37.254, 0.5, 16.7465], fix=True)
# Change the model.
frame_order.select_model('pseudo-ellipse')
# Loop over the 3 permutations (None = original, plus the 'A'/'B' axis swaps).
pipe_name = 'pseudo-ellipse'
tag = ''
for perm in [None, 'A', 'B']:
    # The original permutation.
    if perm == None:
        # Title printout.
        section(file=sys.stdout, text="Pseudo-ellipse original permutation")
        # Create a new data base data pipe for the pseudo-ellipse.
        pipe.copy(pipe_from='frame order', pipe_to='pseudo-ellipse')
        pipe.switch(pipe_name='pseudo-ellipse')
    # Operations for the 'A' and 'B' permutations.
    else:
        # Title printout.
        section(file=sys.stdout, text="Pseudo-ellipse permutation %s" % perm)
        # The pipe name and tag.
        pipe_name = 'pseudo-ellipse perm %s' % perm
        tag = '_perm_%s' % perm
        # Create a new data pipe.
        pipe.copy(pipe_from='frame order', pipe_to=pipe_name)
        pipe.switch(pipe_name=pipe_name)
        # Permute the axes.
        frame_order.permute_axes(permutation=perm)
    # Create a pre-optimisation PDB representation.
    frame_order.pdb_model(ave_pos=None, rep='fo_orig'+tag, compress_type=2, force=True)
    # High precision optimisation.
    frame_order.num_int_pts(num=10000)
    minimise.execute('simplex', func_tol=1e-4)
    # Create the PDB representation.
    frame_order.pdb_model(ave_pos=None, rep='fo'+tag, compress_type=2, force=True)
    # Sanity check.
    pipe.display()
|
[
"bugman@b7916896-f9f9-0310-9fe5-b3996d8957d5"
] |
bugman@b7916896-f9f9-0310-9fe5-b3996d8957d5
|
c253644311d7fe2b49eac8dac03132f4f1cdd8ba
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/303/usersdata/302/66800/submittedfiles/testes.py
|
7c56144edfa042834f7911ced03a33c1d0ca5381
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 188
|
py
|
# -*- coding: utf-8 -*-
#COMECE AQUI ABAIXO
# Read an age and report whether the person is of legal age (18+).
idade = int(input('Digite sua idade'))
# Fixed: the original `idade => 18` is a SyntaxError; the intended
# greater-or-equal operator is `>=`.
if idade >= 18:
    print("maior de idade")
else:
    print('menor de idade')
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
72f1a1f05e457a9f11bdee1d6b7442f9a3fe8ee7
|
e436e729b0a78c7062311e0f48c55dd25d13faef
|
/tests/core/test_utils.py
|
b2bc8e17e86be01fde91a5b6c1f2ba12e3fdf488
|
[
"MIT"
] |
permissive
|
cad106uk/market-access-public-frontend
|
71ff602f4817666ed2837432b912f108010a30a1
|
092149105b5ddb1307c613123e94750b0b8b39ac
|
refs/heads/master
| 2023-02-03T18:48:45.838135
| 2020-12-24T09:38:56
| 2020-12-24T09:38:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 741
|
py
|
import datetime
from unittest import TestCase
from apps.core.utils import convert_to_snake_case, chain, get_future_date
class UtilsTestCase(TestCase):
    """Unit tests for the apps.core.utils helpers."""

    def test_get_future_date(self):
        now = datetime.datetime.now()
        future_date_str = get_future_date(60)
        extra_days = datetime.datetime.strptime(future_date_str, "%a, %d-%b-%Y %H:%M:%S GMT") - now
        # +- 1 day is acceptable here.
        # Fixed: range(59, 60) only contained 59, so a result of exactly
        # 60 days failed spuriously; range(59, 61) admits both 59 and 60.
        assert extra_days.days in range(59, 61)

    def test_convert_to_snake_case(self):
        test_string = "Some Test String"
        assert "some_test_string" == convert_to_snake_case(test_string)

    def test_chain(self):
        # chain() should lazily concatenate arbitrary iterables.
        l1 = (1, 2, 3)
        l2 = [4, 5, 6]
        assert [*l1, *l2] == list(chain(l1, l2))
|
[
"noreply@github.com"
] |
cad106uk.noreply@github.com
|
1808c14f89677eda21489c6ca86615cddc39f671
|
762db71e9bb66ab5821bd91eff7e0fa813f795a0
|
/code/python/echomesh/util/math/LargestInvertible.py
|
d29937b39ff5a64bf2a144c83e74a0f9632c2172
|
[
"MIT"
] |
permissive
|
huochaip/echomesh
|
0954d5bca14d58c0d762a5d3db4e6dcd246bf765
|
be668971a687b141660fd2e5635d2fd598992a01
|
refs/heads/master
| 2020-06-17T20:21:47.216434
| 2016-08-16T16:49:56
| 2016-08-16T16:49:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,014
|
py
|
from __future__ import absolute_import, division, print_function, unicode_literals
import fractions
import math
from six.moves import xrange
# http://stackoverflow.com/questions/4798654/modular-multiplicative-inverse-function-in-python
# from https://en.wikibooks.org/wiki/Algorithm_Implementation/Mathematics/Extended_Euclidean_algorithm
def egcd(a, b):
    """Extended Euclidean algorithm.

    Return ``(g, x, y)`` such that ``x*a + y*b == g == gcd(a, b)``.
    """
    # (x0, y0) are the Bezout coefficients tracked for b, (x1, y1) for a.
    x0, x1 = 0, 1
    y0, y1 = 1, 0
    while a:
        q, r = divmod(b, a)
        x0, x1 = x1, x0 - q * x1
        y0, y1 = y1, y0 - q * y1
        b, a = a, r
    return b, x0, y0
def modinv(a, m):
    """Return the multiplicative inverse of ``a`` modulo ``m``.

    Raises:
        ValueError: if ``gcd(a, m) != 1`` (no inverse exists).  ValueError
            is a subclass of Exception, so callers that caught the generic
            Exception the original raised keep working.
    """
    g, x, _y = egcd(a, m)
    if g == 1:
        return x % m
    raise ValueError('modular inverse does not exist')
def largest_invertible(x):
    """In the ring Mod(x), returns the invertible number nearest to x / 2, and
    its inverse.

    Scans candidates downward from x/2; falls back to (1, 1) for x < 5 or
    when no candidate is invertible.
    """
    if x >= 5:
        for i in xrange(int(x / 2), 1, -1):
            # Mirror candidates >= x/2 to their counterpart below x/2.
            ii = (i if i < (x / 2) else x - i)
            try:
                return ii, modinv(ii, x)
            except Exception:
                # Fixed: narrowed the bare `except:` (which also swallowed
                # KeyboardInterrupt/SystemExit) to Exception -- modinv's
                # failure is the only expected error here.
                pass
    return 1, 1
|
[
"tom@swirly.com"
] |
tom@swirly.com
|
3e788e30fcf2f685d56dbf028eb1b93f22e164be
|
6a07912090214567f77e9cd941fb92f1f3137ae6
|
/cs212/Problem Set 1/2.py
|
97b6cb647b9c05d52c0f4bd57cf754e82586bf20
|
[] |
no_license
|
rrampage/udacity-code
|
4ab042b591fa3e9adab0183d669a8df80265ed81
|
bbe968cd27da7cc453eada5b2aa29176b0121c13
|
refs/heads/master
| 2020-04-18T08:46:00.580903
| 2012-08-25T08:44:24
| 2012-08-25T08:44:24
| 5,352,942
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,333
|
py
|
# cs212 ; Problem Set 1 ; 2
# CS 212, hw1-2: Jokers Wild
#
# -----------------
# User Instructions
#
# Write a function best_wild_hand(hand) that takes as
# input a 7-card hand and returns the best 5 card hand.
# In this problem, it is possible for a hand to include
# jokers. Jokers will be treated as 'wild cards' which
# can take any rank or suit of the same color. The
# black joker, '?B', can be used as any spade or club
# and the red joker, '?R', can be used as any heart
# or diamond.
#
# The itertools library may be helpful. Feel free to
# define multiple functions if it helps you solve the
# problem.
#
# -----------------
# Grading Notes
#
# Muliple correct answers will be accepted in cases
# where the best hand is ambiguous (for example, if
# you have 4 kings and 3 queens, there are three best
# hands: 4 kings along with any of the three queens).
import itertools
_WILD_RANKS = '23456789TJQKA'
# Every concrete card a joker may stand for: the black joker is any spade
# or club, the red joker any heart or diamond.
_BLACK_CARDS = [r + s for r in _WILD_RANKS for s in 'SC']
_RED_CARDS = [r + s for r in _WILD_RANKS for s in 'HD']

def _replacements(card):
    "List every concrete card a (possibly wild) card can represent."
    if card == '?B':
        return _BLACK_CARDS
    if card == '?R':
        return _RED_CARDS
    return [card]

def best_wild_hand(hand):
    "Try all values for jokers in all 5-card selections."
    # Implemented (the original body was a stub): expand each joker into
    # all cards of its colour, then rank every legal 5-card selection of
    # every expansion and keep the best.
    candidates = set()
    for cards in itertools.product(*map(_replacements, hand)):
        for five in itertools.combinations(cards, 5):
            # A joker may not duplicate a card already in the hand.
            if len(set(five)) == 5:
                candidates.add(five)
    return max(candidates, key=hand_rank)
def test_best_wild_hand():
    # Regression cases from the problem statement: '?B' is the black
    # joker (spades/clubs), '?R' the red joker (hearts/diamonds).
    assert (sorted(best_wild_hand("6C 7C 8C 9C TC 5C ?B".split()))
            == ['7C', '8C', '9C', 'JC', 'TC'])
    assert (sorted(best_wild_hand("TD TC 5H 5C 7C ?R ?B".split()))
            == ['7C', 'TC', 'TD', 'TH', 'TS'])
    assert (sorted(best_wild_hand("JD TC TH 7C 7D 7S 7H".split()))
            == ['7C', '7D', '7H', '7S', 'JD'])
    return 'test_best_wild_hand passes'
# ------------------
# Provided Functions
#
# You may want to use some of the functions which
# you have already defined in the unit to write
# your best_hand function.
def hand_rank(hand):
    """Return a value indicating the ranking of a hand.

    Rank tuples compare lexicographically: poker category first (8 =
    straight flush down to 0 = high card), then category-specific
    tie-breakers.
    """
    ranks = card_ranks(hand)
    if straight(ranks) and flush(hand):
        return (8, max(ranks))                        # straight flush
    elif kind(4, ranks):
        return (7, kind(4, ranks), kind(1, ranks))    # four of a kind
    elif kind(3, ranks) and kind(2, ranks):
        return (6, kind(3, ranks), kind(2, ranks))    # full house
    elif flush(hand):
        return (5, ranks)                             # flush
    elif straight(ranks):
        return (4, max(ranks))                        # straight
    elif kind(3, ranks):
        return (3, kind(3, ranks), ranks)             # three of a kind
    elif two_pair(ranks):
        return (2, two_pair(ranks), ranks)            # two pair
    elif kind(2, ranks):
        return (1, kind(2, ranks), ranks)             # one pair
    else:
        return (0, ranks)                             # high card
def card_ranks(hand):
    """Return the hand's numeric ranks, sorted with higher first.

    The ace can also play low to complete the wheel (A-2-3-4-5).
    """
    order = '--23456789TJQKA'
    ranks = sorted((order.index(rank) for rank, _suit in hand), reverse=True)
    return [5, 4, 3, 2, 1] if ranks == [14, 5, 4, 3, 2] else ranks
def flush(hand):
    """Return True if all the cards share a single suit."""
    return len({suit for _rank, suit in hand}) == 1
def straight(ranks):
    """Return True if the ordered ranks form a 5-card straight."""
    # Five distinct ranks spanning exactly four is a straight.
    span_is_four = max(ranks) - min(ranks) == 4
    return span_is_four and len(set(ranks)) == 5
def kind(n, ranks):
    """Return the first rank occurring exactly ``n`` times, else None."""
    return next((rank for rank in ranks if ranks.count(rank) == n), None)
def two_pair(ranks):
    """Return (high, low) ranks of two distinct pairs, else None."""
    # Scanning the reversed ranks finds the pair of lowest rank; if it
    # differs from the highest pair, the hand holds two pair.
    high = kind(2, ranks)
    low = kind(2, list(reversed(ranks)))
    return (high, low) if high and low != high else None
|
[
"raunak1001@gmail.com"
] |
raunak1001@gmail.com
|
33791d780f140caa7af658d364f82aa0c8a86f28
|
aa1972e6978d5f983c48578bdf3b51e311cb4396
|
/nitro-python-1.0/nssrc/com/citrix/netscaler/nitro/resource/config/cluster/clusternodegroup_streamidentifier_binding.py
|
e684932798111ad39584954df57a8ca7c17454bc
|
[
"Python-2.0",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
MayankTahil/nitro-ide
|
3d7ddfd13ff6510d6709bdeaef37c187b9f22f38
|
50054929214a35a7bb19ed10c4905fffa37c3451
|
refs/heads/master
| 2020-12-03T02:27:03.672953
| 2017-07-05T18:09:09
| 2017-07-05T18:09:09
| 95,933,896
| 2
| 5
| null | 2017-07-05T16:51:29
| 2017-07-01T01:03:20
|
HTML
|
UTF-8
|
Python
| false
| false
| 6,873
|
py
|
#
# Copyright (c) 2008-2016 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class clusternodegroup_streamidentifier_binding(base_resource) :
	""" Binding class showing the streamidentifier that can be bound to clusternodegroup.

	Auto-generated NITRO SDK binding: wraps add/delete/get/count REST
	operations for the clusternodegroup <-> streamidentifier relation.
	"""
	def __init__(self) :
		self._identifiername = None
		self._name = None
		self.___count = 0

	@property
	def name(self) :
		r"""Name of the nodegroup to which you want to bind a cluster node or an entity.<br/>Minimum length = 1.
		"""
		try :
			return self._name
		except Exception as e:
			raise e

	@name.setter
	def name(self, name) :
		r"""Name of the nodegroup to which you want to bind a cluster node or an entity.<br/>Minimum length = 1
		"""
		try :
			self._name = name
		except Exception as e:
			raise e

	@property
	def identifiername(self) :
		r"""stream identifier and rate limit identifier that need to be bound to this nodegroup.
		"""
		try :
			return self._identifiername
		except Exception as e:
			raise e

	@identifiername.setter
	def identifiername(self, identifiername) :
		r"""stream identifier and rate limit identifier that need to be bound to this nodegroup.
		"""
		try :
			self._identifiername = identifiername
		except Exception as e:
			raise e

	def _get_nitro_response(self, service, response) :
		r""" converts nitro response into object and returns the object array in case of get request.
		"""
		try :
			result = service.payload_formatter.string_to_resource(clusternodegroup_streamidentifier_binding_response, response, self.__class__.__name__)
			if(result.errorcode != 0) :
				# errorcode 444 means the session expired -- drop it.
				if (result.errorcode == 444) :
					service.clear_session(self)
				if result.severity :
					if (result.severity == "ERROR") :
						raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
				else :
					raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
			return result.clusternodegroup_streamidentifier_binding
		except Exception as e :
			raise e

	def _get_object_name(self) :
		r""" Returns the value of object identifier argument
		"""
		try :
			if self.name is not None :
				return str(self.name)
			return None
		except Exception as e :
			raise e

	@classmethod
	def add(cls, client, resource) :
		r""" Use this API to add a streamidentifier binding (single resource or list).
		"""
		try :
			if resource and type(resource) is not list :
				updateresource = clusternodegroup_streamidentifier_binding()
				updateresource.name = resource.name
				updateresource.identifiername = resource.identifiername
				return updateresource.update_resource(client)
			else :
				if resource and len(resource) > 0 :
					updateresources = [clusternodegroup_streamidentifier_binding() for _ in range(len(resource))]
					for i in range(len(resource)) :
						updateresources[i].name = resource[i].name
						updateresources[i].identifiername = resource[i].identifiername
				return cls.update_bulk_request(client, updateresources)
		except Exception as e :
			raise e

	@classmethod
	def delete(cls, client, resource) :
		r""" Use this API to delete a streamidentifier binding (single resource or list).
		"""
		try :
			if resource and type(resource) is not list :
				deleteresource = clusternodegroup_streamidentifier_binding()
				deleteresource.name = resource.name
				deleteresource.identifiername = resource.identifiername
				return deleteresource.delete_resource(client)
			else :
				if resource and len(resource) > 0 :
					deleteresources = [clusternodegroup_streamidentifier_binding() for _ in range(len(resource))]
					for i in range(len(resource)) :
						deleteresources[i].name = resource[i].name
						deleteresources[i].identifiername = resource[i].identifiername
				return cls.delete_bulk_request(client, deleteresources)
		except Exception as e :
			raise e

	@classmethod
	def get(cls, service, name="", option_="") :
		r""" Use this API to fetch clusternodegroup_streamidentifier_binding resources.
		"""
		try :
			if not name :
				obj = clusternodegroup_streamidentifier_binding()
				response = obj.get_resources(service, option_)
			else :
				obj = clusternodegroup_streamidentifier_binding()
				obj.name = name
				response = obj.get_resources(service)
			return response
		except Exception as e:
			raise e

	@classmethod
	def get_filtered(cls, service, name, filter_) :
		r""" Use this API to fetch filtered set of clusternodegroup_streamidentifier_binding resources.
		Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
		"""
		try :
			obj = clusternodegroup_streamidentifier_binding()
			obj.name = name
			option_ = options()
			option_.filter = filter_
			response = obj.getfiltered(service, option_)
			return response
		except Exception as e:
			raise e

	@classmethod
	def count(cls, service, name) :
		r""" Use this API to count clusternodegroup_streamidentifier_binding resources configued on NetScaler.
		"""
		try :
			obj = clusternodegroup_streamidentifier_binding()
			obj.name = name
			option_ = options()
			option_.count = True
			response = obj.get_resources(service, option_)
			if response :
				return response[0].__dict__['___count']
			return 0
		except Exception as e:
			raise e

	@classmethod
	def count_filtered(cls, service, name, filter_) :
		r""" Use this API to count the filtered set of clusternodegroup_streamidentifier_binding resources.
		Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
		"""
		try :
			obj = clusternodegroup_streamidentifier_binding()
			obj.name = name
			option_ = options()
			option_.count = True
			option_.filter = filter_
			response = obj.getfiltered(service, option_)
			if response :
				return response[0].__dict__['___count']
			return 0
		except Exception as e:
			raise e
class clusternodegroup_streamidentifier_binding_response(base_response) :
	"""Response envelope for binding fetches: NITRO status fields plus a
	pre-sized list of binding objects for the payload formatter to fill."""
	def __init__(self, length=1) :
		self.clusternodegroup_streamidentifier_binding = []
		self.errorcode = 0
		self.message = ""
		self.severity = ""
		self.sessionid = ""
		self.clusternodegroup_streamidentifier_binding = [clusternodegroup_streamidentifier_binding() for _ in range(length)]
|
[
"Mayank@Mandelbrot.local"
] |
Mayank@Mandelbrot.local
|
125671ac083b8ab5d77142fb5411d4afa74e234c
|
7673df8dec063e83aa01187d5a02ca8b4ac3761d
|
/Basic/functions.py
|
8f1badb2bde7f5c4aa358988eb3330bc69a6532a
|
[] |
no_license
|
jedthompson99/Python_Course
|
cc905b42a26a2aaf008ce5cb8aaaa6b3b66df61e
|
618368390f8a7825459a20b4bc28e80c22da5dda
|
refs/heads/master
| 2023-07-01T08:39:11.309175
| 2021-08-09T17:28:32
| 2021-08-09T17:28:32
| 361,793,964
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 460
|
py
|
def full_name(first, last):
    """Print the two name parts separated by a single space."""
    print('{} {}'.format(first, last))


full_name('Kristine', 'Hudgens')
def auth(email, password):
    """Print whether the credentials match the hard-coded pair."""
    authorized = email == 'kristine@hudgens.com' and password == 'secret'
    message = 'You are authorized' if authorized else 'You are not authorized'
    print(message)


auth('kristine@hudgens.com', 'asdf')
def hundred():
    """Print the integers 1 through 100 inclusive, one per line."""
    value = 1
    while value <= 100:
        print(value)
        value += 1


hundred()
def counter(max_value):
    """Print integers from 1 up to (but not including) max_value."""
    for value in range(1, max_value):
        print(value)


counter(501)
|
[
"jedthompson@gmail.com"
] |
jedthompson@gmail.com
|
ecfe49b03baa1334ccc75a2b3bdbf0eb1e4e241a
|
4fca17a3dbc3e74ba7e46bd7869eb6d138e4c422
|
/_0163_Missing_Ranges.py
|
b5c8d5b3ad79b18657c10fbcb233bf4e9f0f2ccd
|
[] |
no_license
|
mingweihe/leetcode
|
a2cfee0e004627b817a3c0321bb9c74128f8c1a7
|
edff905f63ab95cdd40447b27a9c449c9cefec37
|
refs/heads/master
| 2021-06-19T07:46:46.897952
| 2021-05-02T05:13:17
| 2021-05-02T05:13:17
| 205,740,338
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 674
|
py
|
class Solution(object):
    """LeetCode 163 -- Missing Ranges."""

    def findMissingRanges(self, nums, lower, upper):
        """
        Return the gaps of [lower, upper] not covered by sorted nums,
        formatted as 'a' for single values and 'a->b' for spans.

        :type nums: List[int]
        :type lower: int
        :type upper: int
        :rtype: List[str]
        """
        def span(a, b):
            return str(a) if a == b else '%s->%s' % (a, b)

        missing = []
        nxt = lower          # smallest value not yet accounted for
        for value in nums:
            if value > nxt:
                missing.append(span(nxt, value - 1))
            if value >= nxt:
                nxt = value + 1
        if nxt <= upper:
            missing.append(span(nxt, upper))
        return missing
|
[
"10962421@qq.com"
] |
10962421@qq.com
|
a149aaf98e52f7341e3dcc68f0efb14590b43c19
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02700/s274282920.py
|
922c558418997e6ed17552a908b8b8cb32996882
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 168
|
py
|
# Monster battle: A/C are the hit points of Takahashi's and Aoki's monsters,
# B/D their attack strengths; Takahashi strikes first each round.
A, B, C, D = (int(token) for token in input().split())
while True:
    C -= B
    if C <= 0:
        print('Yes')
        break
    A -= D
    if A <= 0:
        print('No')
        break
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
6b854b39440765b0f5c80e3c3f73c5fdf6d4f8b8
|
4d10250b7ce80730414468e5e0060a207253a6d0
|
/jplephem/test.py
|
bc8ec152f0e375d2117b0930f489d0e20a305d78
|
[] |
no_license
|
NatalieP-J/python
|
c68fdb84a6c9c432b34e57ae4e376f652451578a
|
c74bcfabde4704939550875bc42fc3e8a5dbc5bf
|
refs/heads/master
| 2021-01-23T03:08:06.448979
| 2013-08-21T04:04:11
| 2013-08-21T04:04:11
| 10,916,520
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,108
|
py
|
"""Tests for ``jplephem``.
See the accompanying ``jpltest`` module for a more intense numerical
test suite that can verify that ``jplephem`` delivers, in a large number
of cases, the same results as when the ephemerides are run at JPL. This
smaller and more feature-oriented suite can be run with::
python -m unittest discover jplephem
"""
import numpy as np
from functools import partial
from jplephem import Ephemeris, DateError
from unittest import TestCase
class Tests(TestCase):
    """Feature tests for the Ephemeris wrapper against the DE421 kernel."""

    def check0(self, x, y, z, dx, dy, dz):
        # Reference position/velocity of the Earth-Moon barycenter at
        # JD 2414994.0 (values in km and km/day, tolerance 1 km).
        eq = partial(self.assertAlmostEqual, delta=1.0)
        eq(x, 39705023.28)
        eq(y, 131195345.65)
        eq(z, 56898495.41)
        eq(dx, -2524248.19)
        eq(dy, 619970.11)
        eq(dz, 268928.26)

    def check1(self, x, y, z, dx, dy, dz):
        # Reference values for the second test epoch, JD 2415112.5.
        eq = partial(self.assertAlmostEqual, delta=1.0)
        eq(x, -144692624.00)
        eq(y, -32707965.14)
        eq(z, -14207167.26)
        eq(dx, 587334.38)
        eq(dy, -2297419.36)
        eq(dz, -996628.74)

    def test_scalar_input(self):
        import de421
        e = Ephemeris(de421)
        self.check0(*e.compute('earthmoon', 2414994.0))
        self.check1(*e.compute('earthmoon', 2415112.5))

    def test_array_input(self):
        # compute() must accept a date array and return per-date columns.
        import de421
        e = Ephemeris(de421)
        v = e.compute('earthmoon', np.array([2414994.0, 2415112.5]))
        v = np.array(v)
        self.check0(*v[:,0])
        self.check1(*v[:,1])

    def test_ephemeris_end_date(self):
        # The very last covered date (jomega) must still be computable.
        import de421
        e = Ephemeris(de421)
        x, y, z = e.position('earthmoon', e.jomega)
        self.assertAlmostEqual(x, -2.81196460e+07, delta=1.0)
        self.assertAlmostEqual(y, 1.32000379e+08, delta=1.0)
        self.assertAlmostEqual(z, 5.72139011e+07, delta=1.0)

    def test_too_early_date(self):
        import de421
        e = Ephemeris(de421)
        self.assertRaises(DateError, e.compute, 'earthmoon', e.jalpha - 0.01)

    def test_too_late_date(self):
        # NOTE(review): the +16.01 offset presumably steps past the final
        # Chebyshev segment's grace window -- confirm against jplephem docs.
        import de421
        e = Ephemeris(de421)
        self.assertRaises(DateError, e.compute, 'earthmoon', e.jomega + 16.01)
|
[
"natalie.price.jones@mail.utoronto.ca"
] |
natalie.price.jones@mail.utoronto.ca
|
b416e000c05055c966ef50e7bead35df903c7b05
|
8b8a06abf18410e08f654fb8f2a9efda17dc4f8f
|
/app/request_session.py
|
f6a0cb38f59f5353b537a1d430baac107a5c80f0
|
[] |
no_license
|
corporacionrst/software_RST
|
d903dfadf87c97c692a821a9dd3b79b343d8d485
|
7a621c4f939b5c01fd222434deea920e2447c214
|
refs/heads/master
| 2021-04-26T23:23:27.241893
| 2018-10-05T23:21:34
| 2018-10-05T23:21:34
| 123,985,674
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,903
|
py
|
from sistema.usuarios.models import Perfil
def getPerfil(request):
    """Return the Perfil row linked to the request's authenticated user.

    Raises Perfil.DoesNotExist when the user has no profile.
    """
    return Perfil.objects.get(usuario=request.user)
# def getStore(request):
# return Perfil.objects.get(usuario=request.user).tienda
def OKadmin(request):
    """True when the logged-in user's puesto name contains "ADMIN"."""
    if request.user.is_authenticated():
        if "ADMIN" in Perfil.objects.get(usuario=request.user).puesto.nombre:
            return True
    return False
def OKbodega(request):
    """True when the logged-in user's puesto is BODEGA (ADMIN also passes)."""
    if not request.user.is_authenticated():
        return False
    puesto = Perfil.objects.get(usuario=request.user).puesto.nombre
    return "BODEGA" in puesto or "ADMIN" in puesto
def OKconta(request):
    """True when the logged-in user's puesto is CONTA (ADMIN also passes)."""
    if not request.user.is_authenticated():
        return False
    puesto = Perfil.objects.get(usuario=request.user).puesto.nombre
    return "CONTA" in puesto or "ADMIN" in puesto
def OKmultitienda(request):
    """Return the user's `multitienda` flag; False for anonymous users."""
    if request.user.is_authenticated():
        return Perfil.objects.get(usuario=request.user).multitienda
    return False
def OKcobros(request):
    """True when the logged-in user's puesto is COBROS or ADMIN."""
    if request.user.is_authenticated():
        ppl=Perfil.objects.get(usuario=request.user).puesto.nombre
        if "COBROS" in ppl or "ADMIN" in ppl:
            return True
    return False
def OKventas(request):
    """True when the logged-in user's puesto is VENTA (ADMIN also passes)."""
    if not request.user.is_authenticated():
        return False
    puesto = Perfil.objects.get(usuario=request.user).puesto.nombre
    return "VENTA" in puesto or "ADMIN" in puesto
def OKpeople(request):
    """True for any authenticated user, False otherwise."""
    return bool(request.user.is_authenticated())
def sumar_DATO(request, numero):
    """Increment the trailing counter of the user's documento4 field.

    documento4 is stored as "prefix~series~counter".  Only numero == "4"
    is handled; any other value returns None (as in the original).
    """
    perfil = Perfil.objects.get(usuario=request.user)
    if numero == "4":
        partes = perfil.documento4.split("~")
        nuevo = partes[0] + "~" + partes[1] + "~" + str(int(partes[2]) + 1)
        perfil.documento4 = nuevo
        perfil.save()
        return nuevo
def obtenerPlantilla(request):
    """Pick the base template matching the user's role (admin checked first)."""
    if OKadmin(request):
        return "admin.html"
    if OKconta(request):
        return "conta.html"
    if OKbodega(request):
        return "bodega.html"
    if OKcobros(request):
        return "cobros.html"
    return "ventas.html"
|
[
"admin@corporacionrst.com"
] |
admin@corporacionrst.com
|
28baac5a621d65ae8bfeae46ed657209afc3d95a
|
2af6a5c2d33e2046a1d25ae9dd66d349d3833940
|
/res/scripts/client/gui/shared/utils/requesters/tokenrequester.py
|
1ace65ad86b0304adeff25edcc9173651083c9f2
|
[] |
no_license
|
webiumsk/WOT-0.9.12-CT
|
e6c8b5bb106fad71b5c3056ada59fb1aebc5f2b2
|
2506e34bd6634ad500b6501f4ed4f04af3f43fa0
|
refs/heads/master
| 2021-01-10T01:38:38.080814
| 2015-11-11T00:08:04
| 2015-11-11T00:08:04
| 45,803,240
| 0
| 0
| null | null | null | null |
WINDOWS-1250
|
Python
| false
| false
| 3,878
|
py
|
# 2015.11.10 21:29:45 Střední Evropa (běžný čas)
# Embedded file name: scripts/client/gui/shared/utils/requesters/TokenRequester.py
import cPickle
from functools import partial
import BigWorld
from adisp import async
from constants import REQUEST_COOLDOWN, TOKEN_TYPE
from debug_utils import LOG_CURRENT_EXCEPTION
from TokenResponse import TokenResponse
from ids_generators import SequenceIDGenerator
def _getAccountRepository():
    """Return the global account repository (import deferred, presumably to avoid a cycle)."""
    import Account
    return Account.g_accountRepository
class TokenRequester(object):
    """Asynchronously requests a security token of one fixed type.

    Responses arrive through the account repository's onTokenReceived
    event and are wrapped by *wrapper* (TokenResponse by default).
    With cache=True a still-valid previous response is returned without
    hitting the server again.  (Python 2 / decompiled BigWorld code.)
    """
    # shared generator producing unique request ids across all instances
    __idsGen = SequenceIDGenerator()

    def __init__(self, tokenType, wrapper = TokenResponse, cache = True):
        super(TokenRequester, self).__init__()
        if callable(wrapper):
            self.__wrapper = wrapper
        else:
            raise ValueError, 'Wrapper is invalid: {0}'.format(wrapper)
        self.__tokenType = tokenType
        self.__callback = None
        self.__lastResponse = None
        self.__requestID = 0
        self.__cache = cache
        self.__timeoutCbID = None
        return

    def isInProcess(self):
        """True while a request is outstanding (its callback has not fired)."""
        return self.__callback is not None

    def clear(self):
        """Drop the pending request, cached response and timeout callback."""
        self.__callback = None
        repository = _getAccountRepository()
        if repository:
            repository.onTokenReceived -= self.__onTokenReceived
        self.__lastResponse = None
        self.__requestID = 0
        self.__clearTimeoutCb()
        return

    def getReqCoolDown(self):
        """Cooldown (seconds) configured for this token type; 10.0 fallback."""
        return getattr(REQUEST_COOLDOWN, TOKEN_TYPE.COOLDOWNS[self.__tokenType], 10.0)

    @async
    def request(self, timeout = None, callback = None):
        """Start an async token request; *callback* receives the response.

        Calls back immediately with None when the player entity cannot
        request tokens, or with the cached response when still valid.
        """
        requester = getattr(BigWorld.player(), 'requestToken', None)
        if not requester or not callable(requester):
            if callback:
                callback(None)
            return
        elif self.__cache and self.__lastResponse and self.__lastResponse.isValid():
            if callback:
                callback(self.__lastResponse)
            return
        else:
            self.__callback = callback
            self.__requestID = self.__idsGen.next()
            if timeout:
                self.__loadTimeout(self.__requestID, self.__tokenType, max(timeout, 0.0))
            repository = _getAccountRepository()
            if repository:
                repository.onTokenReceived += self.__onTokenReceived
            requester(self.__requestID, self.__tokenType)
            return

    def __onTokenReceived(self, requestID, tokenType, data):
        # ignore events that do not belong to the outstanding request
        if self.__requestID != requestID or tokenType != self.__tokenType:
            return
        else:
            repository = _getAccountRepository()
            if repository:
                repository.onTokenReceived -= self.__onTokenReceived
            try:
                # data is a pickled dict of keyword arguments for the wrapper
                self.__lastResponse = self.__wrapper(**cPickle.loads(data))
            except TypeError:
                LOG_CURRENT_EXCEPTION()
            self.__requestID = 0
            if self.__callback is not None:
                self.__callback(self.__lastResponse)
                self.__callback = None
            return

    def __clearTimeoutCb(self):
        """Cancel the armed BigWorld timeout callback, if any."""
        if self.__timeoutCbID is not None:
            BigWorld.cancelCallback(self.__timeoutCbID)
            self.__timeoutCbID = None
        return

    def __loadTimeout(self, requestID, tokenType, timeout):
        """(Re)arm a timer that injects a synthetic TIMEOUT response."""
        self.__clearTimeoutCb()
        self.__timeoutCbID = BigWorld.callback(timeout, partial(self.__onTimeout, requestID, tokenType))

    def __onTimeout(self, requestID, tokenType):
        # deliver a fake pickled {'error': 'TIMEOUT'} payload to the handler
        self.__clearTimeoutCb()
        self.__onTokenReceived(requestID, tokenType, cPickle.dumps({'error': 'TIMEOUT'}, -1))
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\gui\shared\utils\requesters\tokenrequester.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2015.11.10 21:29:46 Střední Evropa (běžný čas)
|
[
"info@webium.sk"
] |
info@webium.sk
|
0b85630a9123b498e5f50e15d65fb027b4057127
|
1c6b5d41cc84c103ddb2db3689f61f47eaa2c13b
|
/CV_ToolBox-master/VOC_2_COCO/xml_helper.py
|
c97bb05d81b946aa96ae1e1ee0c4209f0f9cc9a7
|
[] |
no_license
|
Asher-1/DataAugmentation
|
e543a93912239939ccf77c98d9156c8ed15e1090
|
c9c143e7cccf771341d2f18aa11daf8b9f817670
|
refs/heads/main
| 2023-07-01T22:49:10.908175
| 2021-08-13T10:01:56
| 2021-08-13T10:01:56
| 395,602,033
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,300
|
py
|
# -*- coding=utf-8 -*-
import os
import xml.etree.ElementTree as ET
import xml.dom.minidom as DOC
# 从xml文件中提取bounding box信息, 格式为[[x_min, y_min, x_max, y_max, name]]
def parse_xml(xml_path):
    """Extract bounding boxes from a VOC-style annotation XML file.

    Returns a list of [x_min, y_min, x_max, y_max, name] entries.  The
    four bndbox children are read positionally, matching the writer's
    xmin, ymin, xmax, ymax order.
    """
    root = ET.parse(xml_path).getroot()
    boxes = []
    for obj in root.findall('object'):
        label = obj.find('name').text
        bndbox = obj.find('bndbox')
        corners = [int(child.text) for child in bndbox[:4]]
        boxes.append(corners + [label])
    return boxes
# 将bounding box信息写入xml文件中, bouding box格式为[[x_min, y_min, x_max, y_max, name]]
def generate_xml(img_name, coords, img_size, out_root_path):
    """Write a VOC-style annotation XML for one image.

    Args:
        img_name: image file name, e.g. "a.jpg" (stem is reused for the xml).
        coords: list of [x_min, y_min, x_max, y_max, name] boxes.
        img_size: image size as [h, w, c].
        out_root_path: directory where "<stem>.xml" is written.
    """
    def add_text_node(parent, tag, text):
        # helper: append <tag>text</tag> under parent
        node = doc.createElement(tag)
        node.appendChild(doc.createTextNode(str(text)))
        parent.appendChild(node)
        return node

    doc = DOC.Document()
    annotation = doc.createElement('annotation')
    doc.appendChild(annotation)

    add_text_node(annotation, 'folder', 'Tianchi')
    add_text_node(annotation, 'filename', img_name)

    source = doc.createElement('source')
    annotation.appendChild(source)
    add_text_node(source, 'database', 'The Tianchi Database')
    add_text_node(source, 'annotation', 'Tianchi')

    size = doc.createElement('size')
    annotation.appendChild(size)
    # img_size is [h, w, c]: width comes from index 1, height from index 0
    add_text_node(size, 'width', img_size[1])
    add_text_node(size, 'height', img_size[0])
    add_text_node(size, 'depth', img_size[2])

    for coord in coords:
        obj = doc.createElement('object')
        annotation.appendChild(obj)
        add_text_node(obj, 'name', coord[4])
        add_text_node(obj, 'pose', 'Unspecified')
        add_text_node(obj, 'truncated', '1')
        add_text_node(obj, 'difficult', '0')
        bndbox = doc.createElement('bndbox')
        obj.appendChild(bndbox)
        add_text_node(bndbox, 'xmin', int(float(coord[0])))
        add_text_node(bndbox, 'ymin', int(float(coord[1])))
        add_text_node(bndbox, 'xmax', int(float(coord[2])))
        add_text_node(bndbox, 'ymax', int(float(coord[3])))

    # BUG FIX: the original called os.path.jpin (typo for os.path.join),
    # which raised AttributeError before the file could be written.
    out_path = os.path.join(out_root_path, img_name[:-4] + '.xml')
    with open(out_path, 'w') as f:
        f.write(doc.toprettyxml(indent=''))
|
[
"ludahai19@163.com"
] |
ludahai19@163.com
|
a504526e7afcb6817c2878fa279d32e1dfc65ac6
|
72f5adc4b6f79dd40e975c86abcdbd3d0ccada86
|
/venv/bin/pip3.7
|
3786525abb997c921a0c0979436550edefdc7960
|
[] |
no_license
|
katrek/flask_vacancy_parser
|
77101604ec5bfeb47c009b9d8329b42d9d30bf4a
|
bbea4ae860bb78f7264b05e92c6664f8e4c4b3cf
|
refs/heads/master
| 2023-01-11T11:58:09.275448
| 2019-08-29T06:36:53
| 2019-08-29T06:36:53
| 204,666,913
| 1
| 1
| null | 2023-01-03T12:19:03
| 2019-08-27T09:22:35
|
Python
|
UTF-8
|
Python
| false
| false
| 420
|
7
|
#!/Users/artemtkachev/PycharmProjects/flask_parser2/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3.7'
# Auto-generated setuptools wrapper: normalizes argv[0] (strips the
# -script.py / .exe suffix) and dispatches to pip's console entry point.
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point

if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==19.0.3', 'console_scripts', 'pip3.7')()
    )
|
[
"akatrek@gmail.com"
] |
akatrek@gmail.com
|
48dd32f18373f2b389e32630ded0044734fd4b19
|
4d44674625100e62be2bb5033339fb641bd454ac
|
/snippet/example/python/project/project/db/sqlalchemy/models.py
|
782d92b417a09747274a173923da7001f80a4da4
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
xgfone/snippet
|
8b9004a649d2575b493a376c4b4f3d4a7c56a4b0
|
b0b734dd35478b7ef3e6193623981f4f29b6748c
|
refs/heads/master
| 2022-03-18T12:41:09.033144
| 2022-02-20T15:26:35
| 2022-02-20T15:26:35
| 41,615,643
| 158
| 61
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,152
|
py
|
# coding: utf-8
from __future__ import absolute_import, print_function, unicode_literals, division
import logging
from sqlalchemy.ext.declarative import declarative_base
from oslo_db.sqlalchemy import models
from sqlalchemy import create_engine
from sqlalchemy import Column, String, Integer, DateTime
from sqlalchemy.sql import fun
LOG = logging.getLogger(__name__)
BASE = declarative_base()
class TestData(models.ModelBase, BASE):
    """Simple keyed text row used to exercise the DB layer."""
    __tablename__ = 'test_data'
    # auto-increment surrogate key
    id = Column(Integer, primary_key=True, autoincrement=True)
    data = Column(String(256), nullable=False)
    # server-side default: row creation timestamp
    create_time = Column(DateTime, server_default=func.now(), nullable=False)

    def __init__(self, *args, **kwargs):
        """Accept arbitrary column values as keyword arguments."""
        super(TestData, self).__init__()
        for k, v in kwargs.items():
            setattr(self, k, v)
def create_tables(engine=None):
    """Create all declarative tables on *engine*.

    *engine* may be an engine URL string.  When omitted, sys.argv[1] is
    used if present, falling back to an in-memory SQLite database.
    """
    if not engine:
        try:
            import sys
            engine = sys.argv[1]
        except IndexError:
            engine = "sqlite:///:memory:"
    engine = create_engine(engine, echo=True)
    BASE.metadata.create_all(engine)

if __name__ == '__main__':
    create_tables("sqlite:///:memory:")
|
[
"xgfone@126.com"
] |
xgfone@126.com
|
a1f9f2880c5805d0642099f67fac1e61760b9185
|
c342d39a064441d7c83b94e896dfbac1dc155666
|
/setup.py
|
cc22030282c6d003af194c2c298389e898f5d44d
|
[
"MIT"
] |
permissive
|
arsho/generator
|
a67d876bf9dded9bacdbd50a9ab3999f90c81731
|
5dc346850ec99a47ca7c074e3e5dec0b5fff30e2
|
refs/heads/master
| 2021-01-01T16:54:41.955771
| 2017-07-21T14:37:34
| 2017-07-21T14:37:34
| 97,951,569
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,154
|
py
|
# -*- coding: utf-8 -*-
from setuptools import setup
def readme():
    """Return the contents of README.rst (UTF-8) for the long description."""
    with open('README.rst', encoding='utf8') as readme_file:
        return readme_file.read()
# Distribution metadata for the "generator" password package.
# install_requires is intentionally empty: the package is stdlib-only.
setup(name='generator',
      version='0.0.1',
      description='Generator is a package for generating strong password and check strength of user defined password.',
      long_description=readme(),
      install_requires=[],
      classifiers=[
          'Operating System :: OS Independent',
          'Programming Language :: Python :: 2.7',
          'Programming Language :: Python :: 3.3',
          'Programming Language :: Python :: 3.4',
          'Programming Language :: Python :: 3.5',
          'Programming Language :: Python :: 3.6',
          'Intended Audience :: Developers',
          'License :: OSI Approved :: MIT License',
          'Development Status :: 5 - Production/Stable',
          'Topic :: Software Development :: Libraries :: Python Modules'
      ],
      keywords='password generator strength pass',
      url='http://github.com/arsho/generator',
      author='Ahmedur Rahman Shovon',
      author_email='shovon.sylhet@gmail.com',
      license='MIT',
      packages=['generator'],
      include_package_data=True,
      zip_safe=False
      )
|
[
"shovon.sylhet@gmail.com"
] |
shovon.sylhet@gmail.com
|
8f09ee1c175eaa67db58c061ed1f27c69414af94
|
20ade86a0c0f0ca6be3fae251488f985c2a26241
|
/exp/analyze_5.py
|
d038d5fa9c073324d036a898b7df5cf86f573c6a
|
[] |
no_license
|
adysonmaia/phd-sp-static
|
69344fdd4edb4c216e4b88b0193308b33a30e72c
|
79038d165c19f90e1f54597f7049553720f34c74
|
refs/heads/master
| 2023-04-14T15:59:07.414873
| 2019-10-24T07:56:37
| 2019-10-24T07:56:37
| 355,110,847
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,099
|
py
|
import csv
import numpy as np
import scipy.stats as st
import matplotlib
import matplotlib.pyplot as plt
# Figure output resolution (dots per inch).
DPI = 100
# Per-metric plot settings: y-axis label and fixed y-axis range.
Y_PARAM = {
    'max_dv': {
        'label': 'Deadline Violation - ms',
        'limit': [0.0, 10.0]
    },
    'dsr': {
        'label': 'Deadline Satisfaction - %',
        'limit': [40.0, 100.0]
    },
    'avg_rt': {
        'label': 'Response Time - ms',
        'limit': [0.0, 18.0]
    },
    'cost': {
        'label': 'Cost',
        'limit': [1000.0, 1500.0]
    },
    'max_unavail': {
        'label': 'Availability - %',
        'limit': [70.0, 100.0]
    },
    'avg_unavail': {
        'label': 'Unavailability - %',
        'limit': [0.0, 10.0]
    },
    'avg_avail': {
        'label': 'Availability - %',
        'limit': [0.0, 100.0]
    },
    'time': {
        'label': 'Execution Time - s',
        'limit': [0.0, 300.0]
    },
}
# Per-sweep-field plot settings: x-axis label and nominal x range.
X_PARAM = {
    'probability': {
        'label': 'Elite Probability',
        'limit': [10, 90],
    },
    'stop_threshold': {
        'label': 'Stop Threshold',
        'limit': [0, 1],
    }
}
def get_data_from_file(filename):
    """Load a results CSV as a list of dict rows.

    NOTE(review): the very first data row after the header is skipped,
    mirroring the original behaviour -- presumably a units/summary row;
    confirm against the CSV layout.
    """
    with open(filename) as csv_file:
        rows = list(csv.DictReader(csv_file))
    return rows[1:]
def filter_data(data, **kwargs):
    """Keep the rows of *data* whose values match every keyword filter.

    Each keyword maps a column name to one accepted value or a list of
    accepted values; comparison is done on the str() form of the values.
    Columns absent from a row are not constrained.
    """
    accepted = {}
    for key, values in kwargs.items():
        if not isinstance(values, list):
            values = [values]
        accepted[key] = [str(v) for v in values]

    def matches(row):
        return all(value in accepted[key]
                   for key, value in row.items()
                   if key in accepted)

    return [row for row in data if matches(row)]
def format_metric(value, metric):
    """Convert a raw CSV metric value into its plotted form (often a %)."""
    value = float(value)
    if metric == 'max_unavail':
        # stored as unavailability; plotted as availability
        return 100.0 * (1.0 - value)
    if metric in ('avg_unavail', 'avg_avail', 'dsr'):
        # stored as a fraction; plotted as a percentage
        return 100.0 * value
    return value
def format_field(value, field):
    """Convert a raw sweep-field value for display (thresholds rounded to 2dp)."""
    value = float(value)
    return round(value, 2) if field == 'stop_threshold' else value
def calc_stats(values):
    """Return (mean, half-width of the 95% Student-t confidence interval).

    The half-width is 0.0 when the standard error is zero (e.g. all
    values identical).
    """
    mean = np.mean(values)
    sem = st.sem(values)
    error = 0.0
    if sem > 0.0:
        # interval() returns (low, high); half-width = high - mean
        low_high = st.t.interval(0.95, len(values) - 1, loc=mean, scale=sem)
        error = low_high[1] - mean
    return mean, error
def gen_figure(data, metric, x, x_field, data_filter, filename=None):
    """Plot mean +/- 95% CI of *metric* against the values of *x_field*.

    data: list of dict rows; data_filter: extra row filters applied first;
    x: sweep values for x_field; filename: PNG save target -- the figure
    is shown interactively when filename is falsy.
    """
    plt.clf()
    matplotlib.rcParams.update({'font.size': 20})
    filtered = filter_data(data, **data_filter)
    y = []
    y_errors = []
    for x_value in x:
        x_filter = {x_field: x_value}
        x_data = filter_data(filtered, **x_filter)
        values = list(map(lambda r: format_metric(r[metric], metric), x_data))
        mean, error = calc_stats(values)
        y.append(mean)
        y_errors.append(error)
        print("{} x={:.1f}, y={:.1f}".format(metric, x_value, mean))
    # reuse x for the display-formatted sweep values
    x = [format_field(i, x_field) for i in x]
    plt.errorbar(x, y, yerr=y_errors, markersize=10, fmt='-o')
    plt.subplots_adjust(bottom=0.2, top=0.97, left=0.12, right=0.96)
    x_param = X_PARAM[x_field]
    y_param = Y_PARAM[metric]
    plt.xlabel(x_param['label'])
    plt.ylabel(y_param['label'])
    plt.ylim(*y_param['limit'])
    # plt.xlim(*x_param['limit'])
    plt.xticks(x)
    plt.grid(True)
    if not filename:
        plt.show()
    else:
        plt.savefig(filename, dpi=DPI, bbox_inches='tight', pad_inches=0.05)
def run():
    """Generate all exp_5 figures: one per (metric, solution, sweep param)."""
    data = get_data_from_file('exp/output/exp_5.csv')
    all_solutions = [
        ('moga', 'preferred'),
    ]
    metric_solutions = {
        'max_dv': all_solutions,
        # 'dsr': all_solutions,
        # 'avg_rt': all_solutions,
        'cost': all_solutions,
        'avg_unavail': all_solutions,
        'time': all_solutions
    }
    params = [
        {
            'title': 'st',
            'filter': {},
            'x_field': 'stop_threshold',
            'x_values': np.arange(0.0, 0.6, 0.1)
        },
    ]
    for param in params:
        for metric, solutions in metric_solutions.items():
            for solution, sol_version in solutions:
                fig_title = param['title']
                # NOTE(review): "filter" shadows the builtin and mutates
                # param['filter'] in place across iterations -- harmless
                # here because solution/version are overwritten each pass
                filter = param['filter']
                filter['solution'] = solution
                filter['version'] = sol_version
                x = param['x_values']
                x_field = param['x_field']
                filename = "exp/figs/exp_5/fig_{}_{}_{}_{}.png".format(
                    fig_title, metric, solution, sol_version
                )
                gen_figure(data, metric, x, x_field, filter, filename)

if __name__ == '__main__':
    print("Execute as 'python3 analyze.py exp_5'")
|
[
"adyson.maia@gmail.com"
] |
adyson.maia@gmail.com
|
17d235e0928561692d73711efe48d58fd5d371fa
|
06aa3ec3262f6dd6866ea194ed6385f8e53509bf
|
/manuscript_codes/AML211DiffALL/remove_nonexistent_fromAnnotatedcsv.py
|
409adfbaa7c37d20329ae26f43f38331d13472ce
|
[] |
no_license
|
KuehLabUW/UPSIDE
|
95ce078382792d1beb0574c3b19c04e467befa58
|
3c90de9677f24e258800cb95bce6cb528f4ad4ac
|
refs/heads/master
| 2023-07-13T15:58:07.963672
| 2021-08-30T21:14:48
| 2021-08-30T21:14:48
| 329,134,647
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 754
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 23 16:21:25 2019

This script drops rows of the annotated CSV whose referenced image file
(df.dirname) no longer exists on disk, and writes a trimmed copy.

@author: phnguyen
"""
import pandas as pd
import os

# hard-coded dataset location; the script runs from inside this directory
csvs_dirname = '/media/phnguyen/Data2/Imaging/CellMorph/data/AML211DiffALL/csvs/'
os.chdir(csvs_dirname)

filename = 'AML211DiffALL_LargeMask_Annotated.csv'
df = pd.read_csv(filename)
print(len(df))
# collect the positional indices of rows whose image file is missing
pos = [];
for i in range(len(df.index)):
    if os.path.isfile(df.dirname[i]) == False:
        pos.append(i)
        print(i)
df.drop(df.index[pos], inplace=True)
#save the combined dataframe
df.to_csv(csvs_dirname+'AML211DiffALL_LargeMask_Annotated_trimmed.csv', sep=',')
|
[
"kuehlab@uw.edu"
] |
kuehlab@uw.edu
|
b39f7d7bc5979960cc3a326e3a5e41d319fc3636
|
16c5a7c5f45a6faa5f66f71e043ce8999cb85d80
|
/app/honor/student/listen_everyday/object_page/history_page.py
|
014714a71b529b852af33e51e693c88f7b3b6757
|
[] |
no_license
|
vectorhuztt/test_android_copy
|
ca497301b27f49b2aa18870cfb0fd8b4640973e5
|
f70ab6b1bc2f69d40299760f91870b61e012992e
|
refs/heads/master
| 2021-04-03T19:26:48.009105
| 2020-06-05T01:29:51
| 2020-06-05T01:29:51
| 248,389,861
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,494
|
py
|
# coding: utf-8
# -------------------------------------------
# Author: Vector
# Date: 2018/12/17 16:11
# -------------------------------------------
from selenium.webdriver.common.by import By
from app.honor.student.login.object_page.home_page import HomePage
from conf.base_page import BasePage
from conf.decorator import teststep
from utils.wait_element import WaitElement
class HistoryPage(BasePage):
    """Page object for the student's listening-practice history screen."""
    wait = WaitElement()
    home = HomePage()

    @teststep
    def wait_check_history_page(self):
        """Check that the history page title text is visible."""
        locator = (By.XPATH, "//android.widget.TextView[@text='历史推荐']")
        return self.wait.wait_check_element(locator)

    @teststep
    def wait_check_clear_button_page(self):
        """Check for the clear button (sentence-matching game screen)."""
        locator = (By.ID, self.id_type() + 'clear')
        return self.wait.wait_check_element(locator, timeout=5)

    @teststep
    def wait_check_red_hint_page(self):
        """Check for the red hint text (listen-and-choose game screen)."""
        locator = (By.ID, self.id_type() + 'tv_hint')
        return self.wait.wait_check_element(locator, timeout=5)

    @teststep
    def wait_check_img_page(self):
        """Check for the picture element (listen-and-pick-image game screen)."""
        locator = (By.ID, self.id_type() + 'img')
        return self.wait.wait_check_element(locator, timeout=5)

    @teststep
    def wait_check_tips_page(self):
        """Check for the tips dialog content element."""
        locator = (By.ID, self.id_type() + 'md_content')
        return self.wait.wait_check_element(locator, timeout=5)

    @teststep
    def game_name(self):
        """Return all game-name elements listed on the history page."""
        locator = (By.ID, self.id_type() + 'game_name')
        return self.wait.wait_find_elements(locator)

    @teststep
    def right_rate(self, game_name):
        """Return the correct-rate element next to *game_name*."""
        locator = (By.XPATH, '//android.widget.TextView[contains(@text,"{0}")]/../following-sibling::android.widget.'
                             'TextView[contains(@resource-id, "{1}right_rate")]'.format(game_name, self.id_type()))
        return self.wait.wait_find_element(locator)

    @teststep
    def game_date(self, game_name):
        """Return the date element next to *game_name*."""
        locator = (By.XPATH, '//android.widget.TextView[contains(@text,"{0}")]/../following-sibling::'
                             'android.widget.TextView[contains(@resource-id,"time")]'.format(game_name))
        return self.wait.wait_find_element(locator)

    @teststep
    def tips_operate_commit(self):
        """If the tips dialog appears, read its content and press confirm."""
        if self.wait_check_tips_page():  # tips dialog page
            self.home.tips_content()
            self.home.commit_button()  # confirm button

    @teststep
    def history_page_operate(self):
        """Walk the history list: print each entry, open known game types, back out."""
        print('听力历史处理页面')
        game_names = self.game_name()
        # skip the last (possibly clipped) entry when 10+ are listed
        game_num = len(game_names) if len(game_names) < 10 else len(game_names) - 1
        print('游戏个数:', game_num)
        for i in range(game_num):
            if self.wait_check_history_page():
                name = game_names[i].text
                right_rate = self.right_rate(name).text
                game_date = self.game_date(name).text
                print(name)
                print(right_rate)
                print(game_date)
                # only spot-check a few list positions
                if i == 3 or i == 5 or i == 7:
                    if name == '听音连句':
                        game_names[i].click()
                        if not self.wait_check_clear_button_page():
                            self.base_assert.except_error('Error-- 未发现听音连句的清除按钮')
                        else:
                            print('进入听音连句游戏页面')
                            self.home.click_back_up_button()
                            self.tips_operate_commit()
                    if name == '听后选择':
                        game_names[i].click()
                        if not self.wait_check_red_hint_page():
                            self.base_assert.except_error('Error-- 未发现听后选择的红色提示')
                        else:
                            print('进入听后选择游戏页面')
                            self.home.click_back_up_button()
                            self.tips_operate_commit()
                    if name == '听音选图':
                        game_names[i].click()
                        if not self.wait_check_img_page():
                            self.base_assert.except_error('Error-- 未发现听音选图的图片')
                        else:
                            print('进入听音选图游戏页面')
                            self.home.click_back_up_button()
                            self.tips_operate_commit()
                print('-'*30, '\n')
        self.home.click_back_up_button()
|
[
"vectorztt@163.com"
] |
vectorztt@163.com
|
a9f60f3ed1fe3f516a90a7101d86cf5d08986545
|
3b80ec0a14124c4e9a53985d1fa0099f7fd8ad72
|
/realestate/urls.py
|
11e290ebf235d7ae4d3ce6986f61c81f4176ded0
|
[] |
no_license
|
aayushgupta97/RealEstate_Django_TTN
|
ec4dde7aa3a1bcfa4d88adb5ea7ebb20127e7489
|
9af7c26c85c46ac5b0e3b3fad4a7b1067df20c47
|
refs/heads/master
| 2020-05-04T08:09:03.917026
| 2019-04-18T08:30:05
| 2019-04-18T08:30:05
| 179,041,202
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 567
|
py
|
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
from pages import views as page_views
# Project URL routing table; static() appends media-file serving routes
# (effective in development when DEBUG is on).
urlpatterns = [
    path('properties/', include('properties.urls')),
    path('', include('pages.urls')),
    path('admin/', admin.site.urls),
    path('accounts/', include('accounts.urls')),
    path('contacts/', include('contacts.urls')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
# Custom "page not found" view (used when DEBUG is False).
handler404 = page_views.handler404
# handler500 = page_views.handler500
|
[
"aayushgupta2097@gmail.com"
] |
aayushgupta2097@gmail.com
|
fc2f5b4eaf1d9c7e2539b1ef43e5b12ba9fbe924
|
38fecea29fa82eb203fd964acd54ffacc7e4c388
|
/chapter03/page048_colored_grid.py
|
9a62621c8c535c213b8b8c6e2da4ef4c1286ade9
|
[] |
no_license
|
mjgpy3/mfp-python3-examples
|
3c74f09c6155e9fbf35bd8ec104bdfe4429b9f4b
|
09547141d25859fe93a6a0e70c828877ee93f736
|
refs/heads/master
| 2020-12-03T18:38:30.411800
| 2020-01-18T20:42:20
| 2020-01-18T20:42:20
| 231,431,066
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 544
|
py
|
#!/usr/bin/env python3
from page040_grid import Grid
class ColoredGrid(Grid):
    """Grid that shades each cell by its distance from a root cell."""

    def set_distances(self, distances):
        """Store a distances map and cache its maximum distance.

        (A plain setter method -- Python properties with extra logic
        would change the attribute protocol here.)
        """
        self.distances = distances
        # distances.max() yields a (farthest_cell, max_distance) pair;
        # only the distance is needed for shading
        _farthest_cell, self.maximum = distances.max()

    def background_color_for(self, cell):
        """Return an (r, g, b) green shade; white when distance is 0 or unset."""
        d = self.distances[cell]
        if not d:
            return (255, 255, 255)
        ratio = float(self.maximum - d) / self.maximum
        dark = round(255 * ratio)
        bright = 128 + round(127 * ratio)
        return (dark, bright, dark)
|
[
"mjg.py3@gmail.com"
] |
mjg.py3@gmail.com
|
de0beb1610545ee78ac1dcc707d7fc40e2c1a0fb
|
748bbab674d1a5ae6a59bfd4ac22efcb4355e82a
|
/Prog-II/Back_Front/back/modelo.py
|
500e048c4dda6a3d2bb759c389dc9ab5b947b11b
|
[] |
no_license
|
Lima001/Tecnico-IFC
|
8819114a35080eb914a2d836a0accbf79d3268d8
|
771fa39dd6046a9d92860fbde70c10dcecd975a3
|
refs/heads/master
| 2021-02-07T01:31:04.929420
| 2020-06-24T02:09:38
| 2020-06-24T02:09:38
| 243,967,689
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 439
|
py
|
from peewee import *
# SQLite database file shared by every model below.
arq = "dados.db"
db = SqliteDatabase(arq)
class BaseModel(Model):
    """Base peewee model binding all subclasses to the shared SQLite db."""
    class Meta:
        database = db
class Cachorro(BaseModel):
    """A dog record: name, age and breed."""
    nome = CharField()
    idade = IntegerField()
    raca = CharField()
if __name__ == "__main__":
    # Demo: create the table, insert one row and print it back.
    db.connect()
    db.create_tables([Cachorro])
    dog1 = Cachorro.create(nome="Rex",idade=1,raca="Pastor Alemao")
    print(dog1.nome + "|" + str(dog1.idade) + "|" + dog1.raca)
|
[
"limaedugabriel@gmail.com"
] |
limaedugabriel@gmail.com
|
0dabd218576ed96dbe4a021fce762f03727b90ae
|
b4948c322401435a02370dd96708399fda4a48fc
|
/demo/simple_code/test_pass.py
|
453fa756db68194cdd14c29692c9fa5fb24807be
|
[] |
no_license
|
fengbingchun/Python_Test
|
413e2c9bb844a5b3641e6e6daf37df277589006e
|
eaedcd55dbc156b685fa891538e1120ea68fa343
|
refs/heads/master
| 2023-06-21T02:28:07.310364
| 2023-06-11T04:46:29
| 2023-06-11T04:46:29
| 99,814,791
| 7
| 6
| null | 2022-09-30T00:38:06
| 2017-08-09T14:01:48
|
C++
|
UTF-8
|
Python
| false
| false
| 428
|
py
|
# Blog: https://blog.csdn.net/fengbingchun/article/details/125242357
# Demo of the "pass" placeholder statement in different contexts.
# 1. empty function
def func():
    """Placeholder function: body intentionally empty."""
    pass # remember to implement this
func()

# 2. empty class
class fbc:
    """Placeholder class with no members."""
    pass
fbc()

# 3. loop
num = 5
for i in range(num):
    pass

# 4. conditional statement
a = 5
b = 10
if (a < b):
    pass
else:
    print("b<=a")

for letter in "Python3":
    if letter == "h":
        pass
    else:
        print("", letter, end="")
print("\ntest finish")
|
[
"fengbingchun@163.com"
] |
fengbingchun@163.com
|
fa791cbb653d2472098d0a3b028680e2bc9b6414
|
61432a6d3b25e5b3142fe1f154acf5764bc2d596
|
/custom_report/controllers/controllers.py
|
0d654793e8486cc3dde196ee71832650723dcae7
|
[] |
no_license
|
halltech-ci/tfc_agro
|
8c2c7911901e8c7bcf548fb05ca8f7891ab4ef51
|
a737dfdccfca51136cb01894a00f21f5365a771a
|
refs/heads/master_1
| 2020-12-22T08:59:40.507801
| 2020-08-17T21:20:18
| 2020-08-17T21:20:18
| 236,734,216
| 0
| 3
| null | 2020-05-09T23:19:24
| 2020-01-28T12:50:00
|
Python
|
UTF-8
|
Python
| false
| false
| 788
|
py
|
# -*- coding: utf-8 -*-
from odoo import http
# class CustomReport(http.Controller):
# @http.route('/custom_report/custom_report/', auth='public')
# def index(self, **kw):
# return "Hello, world"
# @http.route('/custom_report/custom_report/objects/', auth='public')
# def list(self, **kw):
# return http.request.render('custom_report.listing', {
# 'root': '/custom_report/custom_report',
# 'objects': http.request.env['custom_report.custom_report'].search([]),
# })
# @http.route('/custom_report/custom_report/objects/<model("custom_report.custom_report"):obj>/', auth='public')
# def object(self, obj, **kw):
# return http.request.render('custom_report.object', {
# 'object': obj
# })
|
[
"maurice.atche@halltech-africa.com"
] |
maurice.atche@halltech-africa.com
|
a58b76fab4d8fa60abf11ac71cab242a7beccad6
|
c5a1c95e9d8ce937f71caf8340cf11fe98e64f56
|
/day9/problem5/[이재형] 하샤드 수.py
|
ff36e229a9d9bb46a7cac28263c8e782cc36fcf6
|
[] |
no_license
|
Boot-Camp-Coding-Test/Programmers
|
963e5ceeaa331d99fbc7465f7b129bd68e96eae3
|
83a4b62ba2268a47859a6ce88ae1819bc96dcd85
|
refs/heads/main
| 2023-05-23T08:21:57.398594
| 2021-06-12T16:39:21
| 2021-06-12T16:39:21
| 366,589,317
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 171
|
py
|
def solution(x):
    """Return True when *x* is a Harshad number.

    A Harshad number is divisible by the sum of its decimal digits.

    Args:
        x: a positive integer.
    """
    # idiomatic digit sum instead of building an intermediate list
    digit_sum = sum(int(digit) for digit in str(x))
    return x % digit_sum == 0
|
[
"noreply@github.com"
] |
Boot-Camp-Coding-Test.noreply@github.com
|
1e1a220013ea65a97547f55b52bf0e6e8ba7ee32
|
4b742f57981b3db902e7048fe05faf263ff52138
|
/base/migrations/0010_pgpkey_passphrase.py
|
174c1f9c2f96097e66f55808d6348a2d55d10933
|
[
"MIT"
] |
permissive
|
erichuang2015/Hiren-MailBox
|
eace0c90b5815f3e4a660dfda75910256704db96
|
ff4cad0998007e8c9a2a200af3a2e05a3d947d12
|
refs/heads/master
| 2020-04-02T01:31:55.680288
| 2018-09-13T15:21:46
| 2018-09-13T15:21:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 418
|
py
|
# Generated by Django 2.0.4 on 2018-05-22 04:58
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add PGPKey.passphrase as a non-null text column (existing rows backfilled with '')."""
    dependencies = [
        ('base', '0009_auto_20180504_0501'),
    ]
    operations = [
        migrations.AddField(
            model_name='pgpkey',
            name='passphrase',
            # default='' only fills existing rows; it is then dropped
            field=models.TextField(default=''),
            preserve_default=False,
        ),
    ]
|
[
"git.pyprism@gmail.com"
] |
git.pyprism@gmail.com
|
f8b32217c9daae58faab52a87b96758125de8793
|
4fe52c6f01afb05ac787a361a239466ceac69964
|
/pyjournal2/build_util.py
|
9acc2f6977346f32e542ec3806689de1074d6201
|
[
"BSD-3-Clause"
] |
permissive
|
cmsquared/pyjournal2
|
85beec6e3a0423d0ee873d189c3a879dd9a7db7c
|
cfa67529033c5fd7bcd5c60b87c8122ef8c22425
|
refs/heads/master
| 2020-04-03T18:30:15.119923
| 2018-10-31T00:41:07
| 2018-10-31T00:41:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,934
|
py
|
"""This module controls building the journal from the entry sources"""
import os
import webbrowser
import pyjournal2.shell_util as shell_util
def get_source_dir(defs):
    """Return the Sphinx source directory for this journal.

    *defs* must provide "working_path" and "nickname".
    """
    return "{working_path}/journal-{nickname}/source/".format(**defs)
def get_topics(defs):
    """Return the topics currently present in the journal source tree.

    Every non-underscore-prefixed directory under source/ is a topic.
    """
    source_dir = get_source_dir(defs)
    return [entry for entry in os.listdir(source_dir)
            if os.path.isdir(os.path.join(source_dir, entry))
            and not entry.startswith("_")]
def create_topic(topic, defs):
    """Create a new topic directory under the journal's source tree.

    Raises OSError if the directory cannot be created (e.g. it already
    exists or the source tree is missing).
    """
    source_dir = get_source_dir(defs)
    try:
        os.mkdir(os.path.join(source_dir, topic))
    except OSError as e:
        # BUG FIX: the original called sys.error(...) from a bare except --
        # sys was never imported in this module and has no error() function,
        # so any failure died with a NameError.  Re-raise with context.
        raise OSError("unable to create a new topic: {}".format(topic)) from e
def build(defs, show=0):
    """build the journal. This entails writing the TOC files that link to
    the individual entries and then running the Sphinx make command

    NOTE(review): changes the process working directory as a side effect
    (per-topic, then source_dir, then build_dir) and does not restore it.
    """
    source_dir = get_source_dir(defs)
    topics = get_topics(defs)

    # for each topic, we want to create a "topic.rst" that then has
    # things subdivided by year-month, and that a
    # "topic-year-month.rst" that includes the individual entries
    for topic in topics:
        tdir = os.path.join(source_dir, topic)
        os.chdir(tdir)

        # look over the directories here, they will be in the form YYYY-MM-DD
        years = []
        entries = []
        for d in os.listdir(tdir):
            if os.path.isdir(os.path.join(tdir, d)):
                y, _, _ = d.split("-")
                if y not in years:
                    years.append(y)
                entries.append(d)
        years.sort()
        entries.sort()

        # we need to create ReST files of the form YYYY.rst. These
        # will each then contain the links to the entries for that
        # year
        for y in years:
            y_entries = [q for q in entries if q.startswith(y)]
            with open("{}.rst".format(y), "w") as yf:
                yf.write("****\n")
                yf.write("{}\n".format(y))
                yf.write("****\n\n")
                yf.write(".. toctree::\n")
                yf.write(" :maxdepth: 2\n")
                yf.write(" :caption: Contents:\n\n")
                for entry in y_entries:
                    yf.write(" {}/{}.rst\n".format(entry, entry))

        # now write the topic.rst
        with open("{}.rst".format(topic), "w") as tf:
            tf.write(len(topic)*"*" + "\n")
            tf.write("{}\n".format(topic))
            tf.write(len(topic)*"*" + "\n")
            tf.write(".. toctree::\n")
            tf.write(" :maxdepth: 2\n")
            tf.write(" :caption: Contents:\n\n")
            for y in years:
                tf.write(" {}.rst\n".format(y))

    # now write the index.rst
    os.chdir(source_dir)
    with open("index.rst", "w") as mf:
        mf.write("Research Journal\n")
        mf.write("================\n\n")
        mf.write(".. toctree::\n")
        mf.write(" :maxdepth: 2\n")
        mf.write(" :caption: Contents:\n\n")
        for topic in sorted(topics):
            mf.write(" {}/{}\n".format(topic, topic))
        mf.write("\n")
        mf.write("Indices and tables\n")
        mf.write("==================\n\n")
        mf.write("* :ref:`genindex`\n")
        mf.write("* :ref:`modindex`\n")
        mf.write("* :ref:`search`\n")

    # now do the building
    build_dir = "{}/journal-{}/".format(defs["working_path"], defs["nickname"])
    os.chdir(build_dir)
    _, _, rc = shell_util.run("make html")
    if rc != 0:
        print("build may have been unsuccessful")

    index = os.path.join(build_dir, "build/html/index.html")

    # use webbrowser module
    if show == 1:
        webbrowser.open_new_tab(index)
|
[
"michael.zingale@stonybrook.edu"
] |
michael.zingale@stonybrook.edu
|
30a77a5b2a326c40c06e455066908091bac0870a
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_113/ch44_2020_09_30_10_47_17_987015.py
|
f078ba9a9524ee5b166c21af69a4c0e35a23748f
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 207
|
py
|
#x=True
lista = ['Janeiro','Fevereiro','Março','Abril','Maio','Junho','Julho','Agosto','Setembro','Outubro','Novembro','Dezembro']
#while x==True:
mes = input('Qual o mês? ')
print(lista[mes])
|
[
"you@example.com"
] |
you@example.com
|
224efd07081d700cef2f4bff2f9f658dcccc15e2
|
256efb0e9ff8b7420b412c260e6c05cd7c52c5ce
|
/B/resolve.py
|
5e0f2bb0fd1bde1c3ffc1f155dfc45171749a311
|
[
"MIT"
] |
permissive
|
staguchi0703/ABC176
|
37a85f6d83570967696712a98dd39e1f1a08b04b
|
16f2f188ef5c73f85d08b028f14cd963b33d55af
|
refs/heads/master
| 2022-12-07T18:15:02.659948
| 2020-08-24T15:00:29
| 2020-08-24T15:00:29
| 289,476,704
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 204
|
py
|
def resolve():
'''
code here
'''
N = input()
sum_num = 0
for item in N:
sum_num += int(item)
if sum_num % 9 == 0:
print('Yes')
else:
print('No')
|
[
"s.taguchi0703@gmail.com"
] |
s.taguchi0703@gmail.com
|
611a492f714cd96b2ba9c94b3644617e50c8c6ce
|
86294539ffa65b34a862b200c84ee068187dc743
|
/do2things/manage.py
|
78b2063220ba03afa6e0bd0a501b0280f45ed107
|
[
"MIT"
] |
permissive
|
tlake/do2things
|
6acb4f43990b0d0e4a9b80090e21246c1d39398a
|
4e83bea1fc579006200e9ca3a627c1bc04a6a53b
|
refs/heads/master
| 2021-01-21T04:24:57.108087
| 2016-08-22T08:56:11
| 2016-08-22T08:56:11
| 39,576,039
| 0
| 0
| null | 2015-08-27T01:28:15
| 2015-07-23T15:40:53
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 252
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "do2things.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
[
"tanner.lake@gmail.com"
] |
tanner.lake@gmail.com
|
3d2281ceea099e3636a2d5593f4e69d3ab66ddbf
|
c7846ee0828539c2a2019928c1cbf3abd35665bf
|
/1226.py
|
e40445bed21211b32f058a29fb64d1cef368c25a
|
[] |
no_license
|
whiteblue0/sw_problems
|
10476601c8d6d68d42e2f30af87fcde1e5dbbcc5
|
1cefc6236cccc20477bf4eadb458a0fd06b09126
|
refs/heads/master
| 2020-06-20T10:44:57.463275
| 2020-05-03T07:27:57
| 2020-05-03T07:27:57
| 197,098,448
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 881
|
py
|
import sys
sys.stdin = open('1226.txt')
def ispass(y,x):
if 0<=y<L and 0<=x<L and data[y][x] != 1 and visited[y][x] == 0:
return True
else:
return False
def DFS(sy,sx):
global end
visited[sy][sx] = 1
if data[sy][sx] == 3:
end = 1
for i in range(4):
ny = sy + dy[i]
nx = sx + dx[i]
if ispass(ny, nx):
visited[ny][nx] = 1
DFS(ny,nx)
# 우하좌상
dy = [0,1,0,-1]
dx = [1,0,-1,0]
T = 10
for tc in range(1,T+1):
N = int(input())
L = 16
data = [list(map(int, input())) for _ in range(L)]
visited = [[0]*L for _ in range(L)]
for i in range(L):
for j in range(L):
if data[i][j] == 2:
start = (i,j)
end = 0
DFS(start[0],start[1])
# for i in range(L):
# print(visited[i])
print('#{} {}'.format(tc,end))
|
[
"21port@naver.com"
] |
21port@naver.com
|
0296d247cff0d46ffe781196db159f2dc53ad9a7
|
0dc3e9b70da8ccd056e0a0fab2b1d8f850c3d470
|
/lantern/django/django_celery/src/apps/dealers/models.py
|
bc779127742bd20a2e9942ebdd4779103f3156e4
|
[] |
no_license
|
ArturYefriemov/green_lantern
|
28e7150af7b9d2281a107ad80026828ad77af62a
|
2841b647e1bfae4a7505e91e8a8695d03f35a3a2
|
refs/heads/master
| 2021-03-01T16:54:58.881835
| 2020-11-17T19:42:23
| 2020-11-17T19:42:23
| 245,799,969
| 0
| 0
| null | 2020-07-14T18:51:13
| 2020-03-08T11:13:32
|
Python
|
UTF-8
|
Python
| false
| false
| 735
|
py
|
from django.contrib.auth.models import AbstractUser
from django.db import models
# Create your models here.
class Country(models.Model):
name = models.CharField(max_length=32, unique=True)
class City(models.Model):
name = models.CharField(max_length=32, db_index=True)
country = models.ForeignKey(to='Country', on_delete=models.CASCADE, null=True)
class Address(models.Model):
address1 = models.CharField(max_length=128)
address2 = models.CharField(max_length=128, blank=True)
zip_code = models.PositiveSmallIntegerField()
city = models.ForeignKey(to='City', on_delete=models.CASCADE)
class Dealer(AbstractUser):
address = models.ForeignKey(to='Address', on_delete=models.CASCADE, null=True)
|
[
"odarchenko@ex.ua"
] |
odarchenko@ex.ua
|
0f12e75f326736ce1da7a7a6b1fb5297088bafd5
|
5bfbf31332a5c4750ab57d305f400aa5e20bf6bd
|
/contrib/great_expectations_zipcode_expectations/great_expectations_zipcode_expectations/expectations/expect_column_values_to_be_valid_utah_zip.py
|
5c11f0c756f7c389107ebd4a9b7e6f5e7f2270a7
|
[
"Apache-2.0"
] |
permissive
|
alexsherstinsky/great_expectations
|
9d4ae4c06546c5ab2ee0d04fb7840e3515c25677
|
2fc4bb36a5b3791c8ada97c5364531cd7510d4ed
|
refs/heads/develop
| 2023-08-04T13:13:38.978967
| 2023-07-24T18:29:46
| 2023-07-24T18:29:46
| 203,888,556
| 1
| 0
|
Apache-2.0
| 2020-07-27T09:12:21
| 2019-08-22T23:31:19
|
Python
|
UTF-8
|
Python
| false
| false
| 5,481
|
py
|
from typing import Optional
import zipcodes
from great_expectations.core.expectation_configuration import ExpectationConfiguration
from great_expectations.execution_engine import PandasExecutionEngine
from great_expectations.expectations.expectation import ColumnMapExpectation
from great_expectations.expectations.metrics import (
ColumnMapMetricProvider,
column_condition_partial,
)
def is_valid_utah_zip(zip: str):
list_of_dicts_of_utah_zips = zipcodes.filter_by(state="UT")
list_of_utah_zips = [d["zip_code"] for d in list_of_dicts_of_utah_zips]
if len(zip) > 10:
return False
elif type(zip) != str:
return False
elif zip in list_of_utah_zips:
return True
else:
return False
# This class defines a Metric to support your Expectation.
# For most ColumnMapExpectations, the main business logic for calculation will live in this class.
class ColumnValuesToBeValidUtahZip(ColumnMapMetricProvider):
# This is the id string that will be used to reference your metric.
condition_metric_name = "column_values.valid_utah_zip"
# This method implements the core logic for the PandasExecutionEngine
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column, **kwargs):
return column.apply(lambda x: is_valid_utah_zip(x))
# This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
# @column_condition_partial(engine=SqlAlchemyExecutionEngine)
# def _sqlalchemy(cls, column, _dialect, **kwargs):
# raise NotImplementedError
# This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
# @column_condition_partial(engine=SparkDFExecutionEngine)
# def _spark(cls, column, **kwargs):
# raise NotImplementedError
# This class defines the Expectation itself
class ExpectColumnValuesToBeValidUtahZip(ColumnMapExpectation):
"""Expect values in this column to be valid Utah zipcodes.
See https://pypi.org/project/zipcodes/ for more information.
"""
# These examples will be shown in the public gallery.
# They will also be executed as unit tests for your Expectation.
examples = [
{
"data": {
"valid_utah_zip": ["84001", "84320", "84713", "84791"],
"invalid_utah_zip": ["-10000", "1234", "99999", "25487"],
},
"tests": [
{
"title": "basic_positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "valid_utah_zip"},
"out": {"success": True},
},
{
"title": "basic_negative_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "invalid_utah_zip"},
"out": {"success": False},
},
],
}
]
# This is the id string of the Metric used by this Expectation.
# For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
map_metric = "column_values.valid_utah_zip"
# This is a list of parameter names that can affect whether the Expectation evaluates to True or False
success_keys = ("mostly",)
# This dictionary contains default values for any parameters that should have default values
default_kwarg_values = {}
def validate_configuration(
self, configuration: Optional[ExpectationConfiguration] = None
) -> None:
"""
Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that
necessary configuration arguments have been provided for the validation of the expectation.
Args:
configuration (OPTIONAL[ExpectationConfiguration]): \
An optional Expectation Configuration entry that will be used to configure the expectation
Returns:
None. Raises InvalidExpectationConfigurationError if the config is not validated successfully
"""
super().validate_configuration(configuration)
configuration = configuration or self.configuration
# # Check other things in configuration.kwargs and raise Exceptions if needed
# try:
# assert (
# ...
# ), "message"
# assert (
# ...
# ), "message"
# except AssertionError as e:
# raise InvalidExpectationConfigurationError(str(e))
# This object contains metadata for display in the public Gallery
library_metadata = {
"maturity": "experimental", # "experimental", "beta", or "production"
"tags": [
"hackathon",
"typed-entities",
], # Tags for this Expectation in the Gallery
"contributors": [ # Github handles for all contributors to this Expectation.
"@luismdiaz01",
"@derekma73", # Don't forget to add your github handle here!
],
"requirements": ["zipcodes"],
}
if __name__ == "__main__":
ExpectColumnValuesToBeValidUtahZip().print_diagnostic_checklist()
|
[
"noreply@github.com"
] |
alexsherstinsky.noreply@github.com
|
ec61f2c11c142888f2e43279e15779776f084d75
|
b75b3bb6a2c6dd8b4a5b89718eb83d6451000cd4
|
/hackbright.py
|
715553d9b927069928d6bfc85808ce5824d2e0b2
|
[] |
no_license
|
CodeHotPink/project-tracking-flask
|
22efebeaddf83d2746ba9137f1b478da8c34b1a9
|
bdd58b17034406f28d5ceaa0c834eb0d6ad06be3
|
refs/heads/master
| 2020-04-03T18:46:04.010020
| 2018-10-31T04:02:38
| 2018-10-31T04:02:38
| 155,496,735
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,034
|
py
|
"""Hackbright Project Tracker.
A front-end for a database that allows users to work with students, class
projects, and the grades students receive in class projects.
"""
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
db = SQLAlchemy()
def connect_to_db(app):
"""Connect the database to our Flask app."""
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql:///hackbright'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db.app = app
db.init_app(app)
def get_student_by_github(github):
"""Given a GitHub account name, print info about the matching student."""
QUERY = """
SELECT first_name, last_name, github
FROM students
WHERE github = :github
"""
db_cursor = db.session.execute(QUERY, {'github': github})
row = db_cursor.fetchone()
print(row)
print("Student: {} {}\nGitHub account: {}".format(row[0], row[1], row[2]))
return row
def make_new_student(first_name, last_name, github):
"""Add a new student and print confirmation.
Given a first name, last name, and GitHub account, add student to the
database and print a confirmation message.
"""
QUERY = """
INSERT INTO students (first_name, last_name, github)
VALUES (:first_name, :last_name, :github)
"""
db.session.execute(QUERY, {'first_name': first_name,
'last_name': last_name,
'github': github,})
db.session.commit()
print(f"Successully added student: {first_name} {last_name}")
def get_project_by_title(title):
"""Given a project title, print information about the project."""
QUERY = """
SELECT title, description, max_grade
FROM projects
WHERE title = :title
"""
db_cursor = db.session.execute(QUERY, {'title': title})
row = db_cursor.fetchone()
print(f"Title: {row[0]}\nDescription: {row[1]}\nMaximum Grade: {row[2]}")
def get_grade_by_github_title(github, title):
"""Print grade student received for a project."""
QUERY = """
SELECT student_github, project_title, grade
FROM grades
WHERE student_github = :github AND project_title = :title
"""
db_cursor = db.session.execute(QUERY, {'github': github,
'title': title})
row = db_cursor.fetchone()
print(f"Github: {row[0]}\nProject Title: {row[1]}\nGrade: {row[2]}")
def assign_grade(github, title, grade):
"""Assign a student a grade on an assignment and print a confirmation."""
QUERY = """
INSERT INTO grades (student_github, project_title, grade)
VALUES (:github, :title, :grade)
"""
db.session.execute(QUERY,{'github': github,
'title': title,
'grade': grade})
db.session.commit()
print(f"Successfully added {github}'s grade for {title}")
def add_project(title, description, max_grade):
"""Creates new project in projects table in Hackbright database. Will print confirmation."""
QUERY = """
INSERT INTO projects (title, description, max_grade)
VALUES (:title,:description, :max_grade)
"""
db.session.execute(QUERY,{'title': title,
'description': description,
'max_grade': max_grade})
print(f"Successfully added {title}.")
def handle_input():
"""Main loop.
Repeatedly prompt for commands, performing them, until 'quit' is received
as a command.
"""
command = None
while command != "quit":
input_string = input("HBA Database> ")
tokens = input_string.split()
command = tokens[0]
args = tokens[1:]
print(args)
if command == "student":
github = args[0]
get_student_by_github(github)
elif command == "new_student":
first_name, last_name, github = args # unpack!
make_new_student(first_name, last_name, github)
elif command == "project":
title = args[0]
get_project_by_title(title)
elif command == "github_grade":
github, title = args # unpack!
get_grade_by_github_title(github, title)
elif command == "assign_grade":
github, title, grade = args # unpack!
assign_grade(github, title, grade)
elif command == "add_project":
title = args[0]
project_desc = args[1:-1]
print(type(project_desc))
grade_max = args[-1]
add_project(title, project_desc, grade_max)
else:
if command != "quit":
print("Invalid Entry. Try again.")
if __name__ == "__main__":
connect_to_db(app)
handle_input()
# To be tidy, we close our database connection -- though,
# since this is where our program ends, we'd quit anyway.
db.session.close()
|
[
"no-reply@hackbrightacademy.com"
] |
no-reply@hackbrightacademy.com
|
23c2de5fd645c39cbadd4ecdb4a8572487884ba8
|
069c2295076c482afadfe6351da5ae02be8e18e6
|
/tests/urlpatterns/path_same_name_urls.py
|
d7ea5431b1e2e70e97338b78591e99ba67df435e
|
[
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause",
"GPL-1.0-or-later",
"Python-2.0.1",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-other-permissive",
"Python-2.0"
] |
permissive
|
django/django
|
5eb557f57053631cd4f566f451e43197309dbeeb
|
c74a6fad5475495756a5bdb18b2cab2b68d429bc
|
refs/heads/main
| 2023-09-01T03:43:44.033530
| 2023-08-31T08:27:32
| 2023-08-31T08:27:32
| 4,164,482
| 73,530
| 38,187
|
BSD-3-Clause
| 2023-09-14T20:03:48
| 2012-04-28T02:47:18
|
Python
|
UTF-8
|
Python
| false
| false
| 1,483
|
py
|
from django.urls import path, re_path, register_converter
from . import converters, views
register_converter(converters.DynamicConverter, "to_url_value_error")
urlpatterns = [
# Different number of arguments.
path("number_of_args/0/", views.empty_view, name="number_of_args"),
path("number_of_args/1/<value>/", views.empty_view, name="number_of_args"),
# Different names of the keyword arguments.
path("kwargs_names/a/<a>/", views.empty_view, name="kwargs_names"),
path("kwargs_names/b/<b>/", views.empty_view, name="kwargs_names"),
# Different path converters.
path("converter/path/<path:value>/", views.empty_view, name="converter"),
path("converter/str/<str:value>/", views.empty_view, name="converter"),
path("converter/slug/<slug:value>/", views.empty_view, name="converter"),
path("converter/int/<int:value>/", views.empty_view, name="converter"),
path("converter/uuid/<uuid:value>/", views.empty_view, name="converter"),
# Different regular expressions.
re_path(r"^regex/uppercase/([A-Z]+)/", views.empty_view, name="regex"),
re_path(r"^regex/lowercase/([a-z]+)/", views.empty_view, name="regex"),
# converter.to_url() raises ValueError (no match).
path(
"converter_to_url/int/<value>/",
views.empty_view,
name="converter_to_url",
),
path(
"converter_to_url/tiny_int/<to_url_value_error:value>/",
views.empty_view,
name="converter_to_url",
),
]
|
[
"felisiak.mariusz@gmail.com"
] |
felisiak.mariusz@gmail.com
|
cb4b16dd237ab801af0b21ca00cf08970de29bf8
|
e8c82271070e33bb6b181616a0a518d8f8fc6158
|
/fce/numpy/distutils/tests/f2py_ext/tests/PaxHeader/test_fib2.py
|
a56021af7be6b185d62870db000c6c9d53082297
|
[] |
no_license
|
DataRozhlas/profil-volice-share
|
aafa0a93b26de0773fa6bf2b7d513a5ec856ce38
|
b4424527fe36e0cd613f7bde8033feeecb7e2e94
|
refs/heads/master
| 2020-03-18T01:44:26.136999
| 2018-05-20T12:19:24
| 2018-05-20T12:19:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 136
|
py
|
17 gid=713727123
15 uid=3629613
20 ctime=1458667064
20 atime=1458667064
23 SCHILY.dev=16777220
23 SCHILY.ino=31296593
18 SCHILY.nlink=1
|
[
"honza@datastory.cz"
] |
honza@datastory.cz
|
00b42fcbfbde767ac076c1bdd0d7fb34c5b3382c
|
67b0379a12a60e9f26232b81047de3470c4a9ff9
|
/comments/models.py
|
27fec916d93089637204f146a37c7c27c5e70df4
|
[] |
no_license
|
vintkor/whitemandarin
|
8ea9022b889fac718e0858873a07c586cf8da729
|
5afcfc5eef1bb1cc2febf519b04a4819a7b9648f
|
refs/heads/master
| 2021-05-06T03:35:09.367375
| 2017-12-20T15:43:08
| 2017-12-20T15:43:08
| 114,904,110
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,969
|
py
|
# -*- coding: utf-8 -*-
from django.db import models
from mptt.models import MPTTModel, TreeForeignKey
from tinymce import models as tinymce_model
import datetime
class Comments(MPTTModel):
prod_name = models.CharField(max_length=250, blank=True, db_index=True, verbose_name="Название")
paket = models.CharField(max_length=250, db_index=True, verbose_name="Пакет")
item_model = models.CharField(max_length=250, db_index=True, verbose_name="Модель")
item_id = models.IntegerField(db_index=True, null=True, verbose_name="id")
published_in_category = models.BooleanField(default=False, verbose_name='Показывать в категории')
parent = TreeForeignKey('self', null=True, blank=True, related_name='children', verbose_name=u"Родитель")
name = models.CharField(max_length=250, verbose_name="Название")
text = tinymce_model.HTMLField(blank=True, verbose_name="Полное описание")
published = models.BooleanField(verbose_name="Опубликован")
date_add = models.DateTimeField(default=datetime.datetime.today ,verbose_name="Дата публикации")
vote = models.DecimalField(max_digits=2, decimal_places=1,db_index=True, null=True, verbose_name="Оценка")
positive = models.IntegerField(null=True, blank=True, default=0, verbose_name="Позитивных")
negative = models.IntegerField(null=True, blank=True, default=0, verbose_name="Негативных")
def save(self):
super(Comments, self).save()
try:
paket = self.paket
item_model = self.item_model
id = self.item_id
count_comments = Comments.objects.filter(paket=paket, item_model=item_model, item_id=int(id), published = True).count()
# assert False, count_comments
exec "from %s.models import %s" % (paket, item_model)
p = eval("%s.objects.get(pk=%d)" % (item_model, int(id)))
p.comments_count = count_comments
min_vote = 5
max_vote = 0
all_reit = 0.0
prod_votes = Comments.objects.filter(paket=paket, item_model=item_model, item_id=int(id), published = True).values('vote')
for item in prod_votes:
if min_vote > item['vote']:
min_vote = item['vote']
if max_vote < item['vote']:
max_vote = item['vote']
all_reit = all_reit + float(item['vote'])
# assert False, min_vote
p.min_reit = min_vote
p.max_reit = max_vote
p.reit = all_reit / count_comments
p.save()
self.prod_name = p.name
except:
pass
super(Comments, self).save()
if not self.date_add:
self.date_add = datetime.datetime.today()
super(Comments, self).save()
def get_name(self):
paket = self.paket
item_model = self.item_model
id = self.item_id
# count_comments = Comments.objects.filter(paket=paket, item_model=item_model, item_id=int(id), published = True).count()
# assert False, count_comments
exec "from %s.models import %s" % (paket, item_model)
p = eval("%s.objects.get(pk=%d)" % (item_model, int(id)))
return p.name
def __unicode__(self):
return self.name
class Meta:
verbose_name_plural = "Коментарии "
verbose_name = "Коментарий"
ordering = ['-id']
class MPTTMeta:
order_insertion_by = ['name']
class Utility(models.Model):
comment = models.ForeignKey(Comments, blank=True, null=True, verbose_name="Коммент")
positive = models.BooleanField(verbose_name="Позитивная оценка")
def __unicode__(self):
return self.comment.name
class Meta:
verbose_name_plural = "Оценки"
verbose_name = "Оценка"
|
[
"alkv84@yandex.ru"
] |
alkv84@yandex.ru
|
372ffb8f05abddeea2704b81e3dfd8ba8d5fa88e
|
236332a967f8f02291b58cab7addfeabdfe7b9a2
|
/experiments/tests/testing_2_3.py
|
207477ec55fb3dd07e0cb74d22864d4061c012cc
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
ConsumerAffairs/django-experiments
|
2dbf04b7f0e7ebdff6d5e7879afeb26f7fdb5150
|
4f1591c9b40390f7302f3777df231ffe3629f00d
|
refs/heads/master
| 2021-01-20T11:10:30.199586
| 2018-04-20T21:26:18
| 2018-04-20T21:26:18
| 101,666,220
| 0
| 10
|
MIT
| 2018-04-20T21:26:19
| 2017-08-28T16:56:16
|
Python
|
UTF-8
|
Python
| false
| false
| 218
|
py
|
# coding=utf-8
try:
from unittest import mock, skip
except ImportError:
import mock
class DummyLockTests(object):
@classmethod
def new(cls):
test_class = cls
return skip(test_class)
|
[
"fran.hrzenjak@gmail.com"
] |
fran.hrzenjak@gmail.com
|
f6bb5a74f05f10651cae3ee6b1e226e5f896c8de
|
65e0c11d690b32c832b943fb43a4206739ddf733
|
/bsdradius/tags/release20060404_v_0_4_0/bsdradius/Typecast.py
|
436a5e6ae0899419c20edf50298dd09eb597dcaf
|
[
"BSD-3-Clause"
] |
permissive
|
Cloudxtreme/bsdradius
|
b5100062ed75c3201d179e190fd89770d8934aee
|
69dba67e27215dce49875e94a7eedbbdf77bc784
|
refs/heads/master
| 2021-05-28T16:50:14.711056
| 2015-04-30T11:54:17
| 2015-04-30T11:54:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,761
|
py
|
## BSDRadius is released under BSD license.
## Copyright (c) 2006, DATA TECH LABS
## All rights reserved.
##
## Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
## * Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright notice,
## this list of conditions and the following disclaimer in the documentation
## and/or other materials provided with the distribution.
## * Neither the name of the DATA TECH LABS nor the names of its contributors
## may be used to endorse or promote products derived from this software without
## specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
## ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
## WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
## DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
## ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
## (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
## LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
## ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
## SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Functions and methods for casting types from string to
other types.
Contains functions and class for inheritance in other classes.
Supported types: 'str', 'string', 'int', 'hex', 'oct', 'dec', 'bool', 'Template'
"""
# HeadURL $HeadURL: file:///Z:/backup/svn/bsdradius/tags/release20060404_v_0_4_0/bsdradius/Typecast.py $
# Author: $Author: valts $
# File version: $Revision: 201 $
# Last changes: $Date: 2006-04-04 17:22:11 +0300 (Ot, 04 Apr 2006) $
# for typecasting to Template
from string import Template
### functions ###
def getstr (input):
return str(input)
def getstring (input):
return getstr(input)
def getint (input):
return int(str(input))
def gethex (input):
return int(str(input), 16)
def getoct (input):
return int(str(input), 8)
def getdec (input):
return int(str(input), 10)
_booleanStates = {'1': True, 'yes': True, 'y': True, 'true': True, 'on': True,
'0': False, 'no': False, 'n': False, 'false': False, 'off': False}
def getbool (input):
inp = str(input)
if inp.lower() not in _booleanStates:
raise ValueError, 'Not a boolean: %s' % inp
return _booleanStates[inp.lower()]
def getTemplate (input):
return Template(str(input))
class Typecast:
"""Use this class as base class in your classes to
add typecasting functionality. This class defines
methods which are wrappers to functions in module
namespace.
You can override attribute "data" in derived classes.
Since self.data is dictionary (with multiple levels) you can
pass any number of keys to typecasting methods. They all use method
_getItem() which searches recursively in self.data for rightmost key value.
"""
_booleanStates = _booleanStates
def __init__(self):
self.data = {}
def _getItem(self, keys):
"""Search recursively for item by given keys
Input: (str) keys Example: t._getItem('key1', 'key2', 'key3')
Output: (mixed) value
"""
if not keys:
raise KeyError, 'No key specified'
tmp = None
for key in keys:
if tmp is None:
tmp = self.data[key]
else:
tmp = tmp[key]
return tmp
def getstr (self, *keys):
return getstr(self._getItem(keys))
def getstring (self, *keys):
return getstring(self._getItem(keys))
def getint (self, *keys):
return getint(self._getItem(keys))
def gethex (self, *keys):
return gethex(self._getItem(keys))
def getoct (self, *keys):
return getoct(self._getItem(keys))
def getdec (self, *keys):
return getdec(self._getItem(keys))
def getbool (self, *keys):
return getbool(self._getItem(keys))
def getTemplate (self, *keys):
return getTemplate(self._getItem(keys))
# holds references to all supported typecast methods
typecastMethods = {
'str' : getstr,
'string' : getstring,
'int' : getint,
'hex' : gethex,
'oct' : getoct,
'dec' : getdec,
'bool' : getbool,
'Template' : getTemplate,
}
# holds references to all supported typecast methods
Typecast.typecastMethods = {
'str' : Typecast.getstr,
'string' : Typecast.getstring,
'int' : Typecast.getint,
'hex' : Typecast.gethex,
'oct' : Typecast.getoct,
'dec' : Typecast.getdec,
'bool' : Typecast.getbool,
'Template' : Typecast.getTemplate,
}
|
[
"valdiic@72071c86-a5be-11dd-a5cd-697bfd0a0cef"
] |
valdiic@72071c86-a5be-11dd-a5cd-697bfd0a0cef
|
77fdcf1dbfc3a529545552210737968c88bf404b
|
ffaeaf54e891c3dcca735347f27f1980f66b7a41
|
/python/1.POP/1.base/01.helloworld.py
|
015e87763fe1f07c2320ce7fe71f056ea13d317c
|
[
"Apache-2.0"
] |
permissive
|
dunitian/BaseCode
|
9804e3d8ff1cb6d4d8cca96978b20d168072e8bf
|
4855ef4c6dd7c95d7239d2048832d8acfe26e084
|
refs/heads/master
| 2020-04-13T09:51:02.465773
| 2018-12-24T13:26:32
| 2018-12-24T13:26:32
| 137,184,193
| 0
| 0
|
Apache-2.0
| 2018-06-13T08:13:38
| 2018-06-13T08:13:38
| null |
UTF-8
|
Python
| false
| false
| 970
|
py
|
'''三个单引号多行注释:
print("Hello World!")
print("Hello World!")
print("Hello World!")'''
"""三个双引号多行注释:
print("Hello World!")
print("Hello World!")
print("Hello World!")"""
# 单行注释 输出
print("Hello World!")
# 定义一个变量并输出
name = "小明"
print(name)
print("x" * 10)
print("dnt.dkill.net/now", end='')
print("带你走进中医经络")
print("dnt.dkill.net/now", end="")
print("带你走进中医经络")
# 如果字符串内部既包含'又包含"怎么办?可以用转义字符\来标识
print("I\'m \"OK\"!")
# 如果字符串里面有很多字符都需要转义,就需要加很多\,为了简化,Python还允许用r''表示''内部的字符串默认不转义
print(r'\\\t\\')
# 如果字符串内部有很多换行,用\n写在一行里不好阅读,为了简化,Python允许用'''...'''的格式表示多行内容
print('''我请你吃饭吧~
晚上吃啥?
去厕所,你说呢?''')
|
[
"39723758+lotapp@users.noreply.github.com"
] |
39723758+lotapp@users.noreply.github.com
|
88ea9f503fbe4878090275c2480106a7648b48f2
|
f94e4955f9d16b61b7c9bff130b9d9ee43436bea
|
/labs/lab06/lab06.py
|
1c1dac20000f742c21337b538d3d8ac3b9563bc4
|
[] |
no_license
|
j2chu/dsc80-sp19
|
bd1dade66c19b920a54b0f8551fd999185449f86
|
dd48210a7cbadfb6470104b275f34085437e4766
|
refs/heads/master
| 2020-06-01T23:22:32.727488
| 2019-06-07T01:46:11
| 2019-06-07T01:46:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,275
|
py
|
import os
import pandas as pd
import numpy as np
import requests
import bs4
import json
# ---------------------------------------------------------------------
# Question # 1
# ---------------------------------------------------------------------
def question1():
"""
NOTE: You do NOT need to do anything with this function.
The function for this question makes sure you
have a correctly named HTML file in the right
place. Note: This does NOT check if the supplementary files
needed for your page are there!
>>> os.path.exists('lab06_1.html')
True
"""
# Don't change this function body!
# No python required; create the HTML file.
return
# ---------------------------------------------------------------------
# Question # 2
# ---------------------------------------------------------------------
def extract_book_links(text):
"""
:Example:
>>> fp = os.path.join('data', 'products.html')
>>> out = extract_book_links(open(fp, encoding='utf-8').read())
>>> url = 'scarlet-the-lunar-chronicles-2_218/index.html'
>>> out[0] == url
True
"""
return ...
def get_product_info(text):
"""
:Example:
>>> fp = os.path.join('data', 'Frankenstein.html')
>>> out = get_product_info(open(fp, encoding='utf-8').read())
>>> isinstance(out, dict)
True
>>> 'UPC' in out.keys()
True
>>> out['Rating']
'Two'
"""
return ...
def scrape_books(k):
"""
:param k: number of book-listing pages to scrape.
:returns: a dataframe of information on (certain) books
on the k pages (as described in the question).
:Example:
>>> out = scrape_books(1)
>>> out.shape
(1, 10)
>>> out['Rating'][0] == 'Five'
True
>>> out['UPC'][0] == 'ce6396b0f23f6ecc'
True
"""
return ...
# ---------------------------------------------------------------------
# Question 3
# ---------------------------------------------------------------------
def send_requests(apiKey, *args):
"""
:param apiKey: apiKey from newsapi website
:param args: number of languages as strings
:return: a list of dictionaries, where keys correspond to languages
and values correspond to Response objects
>>> responses = send_requests(os.environ['API_KEY'], "ru", "fr")
>>> isinstance(responses[0], dict)
True
>>> isinstance(responses[1], dict)
True
"""
return ...
def gather_info(resp):
"""
Finds some basic information from the obtained responses
:param resp: a list of dictionaries
:return: a list with the following items:
language that has the most number of news
most common base url for every language
>>> responses = send_requests(os.environ['API_KEY'], "ru", "fr")
>>> result = gather_info(responses)
>>> isinstance(result[0], str)
True
>>> len(result) == len(responses) + 1
True
"""
return ...
# ---------------------------------------------------------------------
# Question # 4
# ---------------------------------------------------------------------
def depth(comments):
"""
:Example:
>>> fp = os.path.join('data', 'comments.csv')
>>> comments = pd.read_csv(fp, sep='|')
>>> depth(comments).max() == 5
True
"""
return ...
# ---------------------------------------------------------------------
# DO NOT TOUCH BELOW THIS LINE
# IT'S FOR YOUR OWN BENEFIT!
# ---------------------------------------------------------------------
# Graded functions names! DO NOT CHANGE!
# This dictionary provides your doctests with
# a check that all of the questions being graded
# exist in your code!
GRADED_FUNCTIONS = {
'q01': ['question1'],
'q02': ['extract_book_links', 'get_product_info', 'scrape_books'],
'q03': ['send_requests', 'gather_info'],
'q04': ['depth']
}
def check_for_graded_elements():
"""
>>> check_for_graded_elements()
True
"""
for q, elts in GRADED_FUNCTIONS.items():
for elt in elts:
if elt not in globals():
stmt = "YOU CHANGED A QUESTION THAT SHOULDN'T CHANGE! \
In %s, part %s is missing" % (q, elt)
raise Exception(stmt)
return True
|
[
"aaron.fraenkel@gmail.com"
] |
aaron.fraenkel@gmail.com
|
e9eef0ae487bb90ae983c14902b33bc6d26c7a4f
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/429/usersdata/313/105928/submittedfiles/jogoDaVelha.py
|
6702e638a0b195b5ad2f3fe0c84a675088f19cc5
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,102
|
py
|
print (" BEM-VINDO AO JOGO DA VELHA ")
print("O primeiro jogador será o (X) e o segundo o (O) ")
posicao = """ posicoes do jogo
1 | 2 | 3
-----------
4 | 5 | 6
-----------
7 | 8 | 9
"""
print (posicao)
posicoes = [
(5,7),
(5,5),
(5,3),
(9,7),
(9,5),
(9,3),
(7,7),
(7,5),
(7,3),
]
ganhador = [
[1, 2, 3],
[4, 5, 6],
[7, 8, 9],
[1, 4, 7],
[2, 5, 8],
[3, 6, 9],
[1, 5, 9],
[3, 5, 7],
]
jogo = []
for vertical in posicao.splitlines():
jogo.append(list(vertical))
jogador = "X"
jogando = True
jogadas = 0
while True:
if jogadas == 9:
print (" DEU VELHA!")
break
jogada = int(input(" digite a posicao de 1 à 9 (jogador %s): " % jogador ))
if jogada<1 or jogada>9:
print ("posicao fora das posicoes do jogo")
continue
if jogo[posicoes[jogada][0]][posicoes[jogada][1]] != " ":
print ("Essa posicao já foi ocupada")
continue
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
bc95461ad6fdc0ef95c4d671c174b643f840fd99
|
3354e6bdd4aeb2ddec84e6a8036c90cd24b6577a
|
/(구)자료구조와 알고리즘/(구)Quizes/backjoon/back_1002.py
|
b90ab7cd42413d530cb002dd703cd4c028e3c9d1
|
[] |
no_license
|
hchayan/Data-Structure-and-Algorithms
|
1125d7073b099d8c6aae4b14fbdb5e557dcb9412
|
be060447e42235e94f93a0b2f94f84d2fd560ffe
|
refs/heads/master
| 2023-01-05T10:15:02.862700
| 2020-11-04T08:16:56
| 2020-11-04T08:16:56
| 209,513,516
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 466
|
py
|
import sys
import math
n = int(sys.stdin.readline().rstrip())
def getAns():
x1, y1, r1, x2, y2, r2 = map(int, sys.stdin.readline().rstrip().split())
leng = math.sqrt((x2-x1)**2+(y2-y1)**2)
if r1 == r2 and x1 == x2 and y1 == y2:
return -1
if r1 > r2:
r1, r2 = r2, r1
if leng > r1+r2:
return 0
elif leng == r1+r2:
return 1
else:
if leng+r1 == r2:
return 1
elif leng+r1 < r2:
return 0
return 2
for nn in range(n):
print(getAns())
|
[
"k852012@naver.com"
] |
k852012@naver.com
|
4acf925d2f474e88d0b195933e8e7df31a2aa765
|
9446feb2a94486ac16c585f712dbcbea7d112a9d
|
/src/taskmaster/cli/master.py
|
b78926059cf4a36ee7d184b223ba2326de9179e4
|
[
"Apache-2.0"
] |
permissive
|
jdunck/taskmaster
|
c16c879a546dd2ac383f804788e2d8ae2606abd1
|
04a03bf0853facf318ce98192db6389cdaaefe3c
|
refs/heads/master
| 2023-08-23T19:29:22.605052
| 2012-05-16T00:52:24
| 2012-05-16T00:52:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 958
|
py
|
"""
taskmaster.cli.master
~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010 DISQUS.
:license: Apache License 2.0, see LICENSE for more details.
"""
def run(target, reset=False, size=10000, address='tcp://0.0.0.0:3050'):
from taskmaster.server import Server, Controller
server = Server(address, size=size)
controller = Controller(server, target)
if reset:
controller.reset()
controller.start()
def main():
import optparse
import sys
parser = optparse.OptionParser()
parser.add_option("--address", dest="address", default='tcp://127.0.0.1:3050')
parser.add_option("--size", dest="size", default='10000', type=int)
parser.add_option("--reset", dest="reset", default=False, action='store_true')
(options, args) = parser.parse_args()
if len(args) != 1:
print 'Usage: tm-master <callback>'
sys.exit(1)
sys.exit(run(args[0], **options.__dict__))
if __name__ == '__main__':
main()
|
[
"dcramer@gmail.com"
] |
dcramer@gmail.com
|
92467aabe2d3e0851ea17a982715577fa57c6fde
|
4aa6b7c3a5ae3817007e09ad1289c1e9f7a355c0
|
/dynamic_programming/best-time-to-buy-and-sell-stock-iv.py
|
57dc30b64f80d1c90423152d8d4b8f9a47789989
|
[] |
no_license
|
liuhuipy/Algorithm-python
|
8f5143e06cf5fa2de2c178e3ba9e5fd12b9bcdf7
|
4e92a0b874f956d1df84d1493f870a5d1f06cde2
|
refs/heads/master
| 2021-06-03T04:19:01.946149
| 2021-01-08T07:44:40
| 2021-01-08T07:44:40
| 99,838,105
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,937
|
py
|
"""
买卖股票的最佳时机IV:
给定一个数组,它的第 i 个元素是一支给定的股票在第 i 天的价格。
设计一个算法来计算你所能获取的最大利润。你最多可以完成 k 笔交易。
注意: 你不能同时参与多笔交易(你必须在再次购买前出售掉之前的股票)。
示例 1:
输入: [2,4,1], k = 2
输出: 2
解释: 在第 1 天 (股票价格 = 2) 的时候买入,在第 2 天 (股票价格 = 4) 的时候卖出,这笔交易所能获得利润 = 4-2 = 2 。
示例 2:
输入: [3,2,6,5,0,3], k = 2
输出: 7
解释: 在第 2 天 (股票价格 = 2) 的时候买入,在第 3 天 (股票价格 = 6) 的时候卖出, 这笔交易所能获得利润 = 6-2 = 4 。
随后,在第 5 天 (股票价格 = 0) 的时候买入,在第 6 天 (股票价格 = 3) 的时候卖出, 这笔交易所能获得利润 = 3-0 = 3 。
"""
from typing import List
class Solution:
def maxProfit(self, k: int, prices: List[int]) -> int:
if not prices:
return 0
len_prices = len(prices)
if k >= len_prices / 2:
res = 0
for i in range(1, len_prices):
if prices[i] > prices[i - 1]:
res += prices[i] - prices[i - 1]
return res
dp = [[[0 for _ in range(k + 1)], [0 for _ in range(k + 1)]] for _ in range(len_prices)]
for i in range(k + 1):
dp[0][0][i] = -prices[0]
for i in range(1, len_prices):
dp[i][0][0] = max(-prices[i], dp[i - 1][0][0])
for j in range(1, k + 1):
dp[i][0][j] = max(dp[i - 1][1][j] - prices[i], dp[i - 1][0][j])
dp[i][1][j] = max(dp[i - 1][0][j - 1] + prices[i], dp[i - 1][1][j])
print(dp)
return max(dp[len_prices - 1][1])
if __name__ == '__main__':
print(Solution().maxProfit(2, [2,1,4,5,2,9,7]))
|
[
"liuhui_py@163.com"
] |
liuhui_py@163.com
|
001acef57576b87eb38040f53889537d452e2f72
|
552865ae5daa143bc6a7dec46f7febe49f0a7226
|
/src/mr/cabot/kml.py
|
96d3de4531e1a03cd61c963cb5568f2f5a0be081
|
[] |
no_license
|
collective/mr.cabot
|
231a4a96c38e793356c4d06438d236d447e97bc8
|
3e905d80ed5eac52a258b74d19abf5ab182d49e2
|
refs/heads/master
| 2023-03-22T15:30:19.171188
| 2013-01-27T17:54:22
| 2013-01-27T18:32:03
| 6,816,996
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,045
|
py
|
import datetime
import urllib
import os
import simplekml
from mr.cabot.interfaces import IListing, IGeolocation
import sebastian
colors = {"commit": "ff00ff00", "mailing-list": "ffff0000", "answer": "ff00ffff"}
def join(objs):
kml = simplekml.Kml()
unique_locations = set()
for obj in objs:
loc = IGeolocation(obj).coords
if loc not in unique_locations:
unique_locations.add(loc)
add_point(kml, obj)
return kml.kml()
def add_point(kml, obj):
loc = IGeolocation(obj).coords
if not loc:
return ''
else:
lat, lon = loc
listing = IListing(obj)
listing_type = listing.__name__
summary = listing.summary
if isinstance(summary, str):
summary = listing.summary.decode("utf-8", "ignore")
summary = summary.encode("ascii","xmlcharrefreplace")
point = kml.newpoint(name=listing.__name__, description=summary, coords=[(lon, lat)])
point.style.iconstyle.color = colors[listing_type]
point.style.iconstyle.scale = 1
|
[
"git@matthewwilkes.name"
] |
git@matthewwilkes.name
|
9c1c1496d9e87ef0b64186d9951572487e4eec52
|
2d5648035b8bd32b4a6ded311e48975e5ea100d4
|
/runs/bro/100KB/src2-tgt1/ssl-par-ssl-iter00100.cfg.py
|
0df43d2c6562ed9dcdd7e02e3967e1cde40ca70a
|
[
"MIT"
] |
permissive
|
Largio/broeval
|
3975e54a1eaead6686c53e5e99250a00becbe1e0
|
89e831d07f066100afdd1a5b220f9f08f1c10b3d
|
refs/heads/master
| 2021-05-08T08:54:06.498264
| 2017-11-10T17:09:02
| 2017-11-10T17:09:02
| 92,508,227
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 665
|
py
|
# Write results to this file
OUTFILE = 'runs/bro/100KB/src2-tgt1/ssl-par-ssl-iter00100.result.csv'
# Source computers for the request
SOURCE = ['10.0.0.1', '10.0.0.3']
# Target machines for the requests (aka server)
TARGET = ['10.0.0.2']
# IDS Mode. (ATM: noids, min, max, http, ssl, ftp, icmp, mysql)
IDSMODE = 'ssl'
# Connection mode (par = parallel, seq = sequential)
MODE = 'par'
# Number of evaluation repititions to run
EPOCHS = 100
# Number of iterations to be run in each evaluation repitition
ITER = 100
# Size of the file to be downloaded from target (in Bytes * 10^SIZE)
SIZE = 5
# Protocol to be used e.g. HTTP, SSL, FTP, MYSQL
PROTOCOL = 'ssl'
|
[
"larswiete@googlemail.com"
] |
larswiete@googlemail.com
|
46f9074e93f7bef5beaa27844351f2b1ba6935da
|
3307766701d680af6d12a726a2d98df2cb1830e5
|
/jams/gcj/2013/1C/C/C.py
|
0660c807359fca4cfb396ebfa66c729d1b5b2f9e
|
[] |
no_license
|
dpaneda/code
|
c1a54037a275fa7044eb5c2d6079f052dd968615
|
7da1ede33a6a7cd19cbd0db517d91e7cccfbbfff
|
refs/heads/master
| 2023-01-07T18:41:00.816363
| 2022-12-30T09:24:22
| 2022-12-30T09:24:22
| 1,583,913
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,495
|
py
|
#!/usr/bin/python2
import sys
import bisect
def calculate_atacks(tribes):
# We calculate attacks day by day, until no tribe have any attacks left
attacks = {}
for tribe in tribes:
for i in xrange(0, tribe[1]):
d = tribe[0]
if d not in attacks:
attacks[d] = []
attacks[d].append((tribe[2], tribe[3], tribe[4]))
# Change tribe status
tribe[0] += tribe[5]
tribe[2] += tribe[6]
tribe[3] += tribe[6]
tribe[4] += tribe[7]
return attacks
def raise_wall(wall, wallh, w, e, s):
# print wall, wallh
# print w, e, s
a = bisect.bisect_right(wall, w)
if a > 0:
a -= 1
b = bisect.bisect_right(wall, e)
print a, b
insert = False
if wall[a] < w and wallh[a] < s:
wall.insert(a + 1, w)
wallh.insert(a + 1, s)
b += 1
insert = True
elif wall[a] == w and wallh[a] < s:
wallh[a] = s
insert = True
if insert:
if b >= len(wall):
wall.insert(a + 2, e)
wallh.insert(a + 2, 0)
elif wall[b] > e:
wall.insert(a + 2, e)
wallh.insert(a + 2, wall[b])
for i in xrange(a + 2, b):
if wallh[i] < s:
del(wall[i])
del(wallh[i])
# print wall, wallh
def wall_minimum_height(wall, wallh, w, e):
a = bisect.bisect_right(wall, w) - 1
if a < 0:
a = 0
b = bisect.bisect_right(wall, e)
if a == b:
return 0
return min(wallh[a:b])
def succeed(wall, wallh, w, e, s):
#print w, e, s
m = wall_minimum_height(wall, wallh, w, e)
return m < s
def simulate_attacks(attacks):
wall = [0]
wallh = [0]
s = 0
days = sorted(attacks.iterkeys())
for day in days:
for attack in attacks[day]:
if succeed(wall, wallh, attack[0], attack[1], attack[2]):
s += 1
for attack in attacks[day]:
raise_wall(wall, wallh, attack[0], attack[1], attack[2])
return s
def Solve():
ntribes = int(sys.stdin.readline().strip())
tribes = []
for i in xrange(0, ntribes):
d, n, w, e, s, di, pi, si = map(int, sys.stdin.readline().strip().split())
tribes.append([d, n, w, e, s, di, pi, si])
attacks = calculate_atacks(tribes)
return simulate_attacks(attacks)
num = int(sys.stdin.readline())
for case in range(1, num + 1):
print "Case #%d: %s " % (case, Solve())
|
[
"dpaneda@gmail.com"
] |
dpaneda@gmail.com
|
7cb4c2732a9e0437ad2c3c1be8df7a72b03dab80
|
b8062e01860960131b37e27298b6b755b4191f5f
|
/python/level1_single_api/9_amct/amct_pytorch/resnet-101/src/resnet-101_calibration.py
|
1fb64a80ea43a7e08efa9490757866a88b3a89a4
|
[
"Apache-2.0"
] |
permissive
|
RomanGaraev/samples
|
4071fcbe6bf95cf274576665eb72588568d8bcf2
|
757aac75a0f3921c6d1b4d98599bd7d4ffda936b
|
refs/heads/master
| 2023-07-16T02:17:36.640036
| 2021-08-30T15:14:05
| 2021-08-30T15:14:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,051
|
py
|
"""
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
import os
import argparse
import torch # pylint: disable=E0401
from PIL import Image # pylint: disable=E0401
from torchvision import transforms # pylint: disable=E0401
import onnxruntime as ort # pylint: disable=E0401
import amct_pytorch as amct # pylint: disable=E0401
from resnet import resnet101 # pylint: disable=E0401, C0415
PATH = os.path.realpath('./')
IMG_DIR = os.path.join(PATH, 'data/images')
LABEL_FILE = os.path.join(IMG_DIR, 'image_label.txt')
PARSER = argparse.ArgumentParser(description='whether use nuq')
PARSER.add_argument('--nuq', dest='nuq', action='store_true', help='whether use nuq')
ARGS = PARSER.parse_args()
if ARGS.nuq:
OUTPUTS = os.path.join(PATH, 'outputs/nuq')
else:
OUTPUTS = os.path.join(PATH, 'outputs/calibration')
TMP = os.path.join(OUTPUTS, 'tmp')
def get_labels_from_txt(label_file):
"""Read all images' name and label from label_file"""
images = []
labels = []
with open(label_file, 'r') as f:
lines = f.readlines()
for line in lines:
images.append(line.split(' ')[0])
labels.append(int(line.split(' ')[1]))
return images, labels
def prepare_image_input(images):
"""Read all images"""
input_tensor = torch.zeros(len(images), 3, 224, 224) # pylint: disable=E1101
preprocess = transforms.Compose(
[transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
for index, image in enumerate(images):
input_image = Image.open(image).convert('RGB')
input_tensor[index, ...] = preprocess(input_image)
return input_tensor
def img_postprocess(probs, labels):
"""Do image post-process"""
# calculate top1 and top5 accuracy
top1_get = 0
top5_get = 0
prob_size = probs.shape[1]
for index, label in enumerate(labels):
top5_record = (probs[index, :].argsort())[prob_size - 5: prob_size]
if label == top5_record[-1]:
top1_get += 1
top5_get += 1
elif label in top5_record:
top5_get += 1
return float(top1_get) / len(labels), float(top5_get) / len(labels)
def model_forward(model, batch_size, iterations):
"""Do pytorch model forward"""
images, labels = get_labels_from_txt(LABEL_FILE)
images = [os.path.join(IMG_DIR, image) for image in images]
top1_total = 0
top5_total = 0
for i in range(iterations):
input_batch = prepare_image_input(images[i * batch_size: (i + 1) * batch_size])
# move the input and model to GPU for speed if available
if torch.cuda.is_available():
input_batch = input_batch.to('cuda')
model.to('cuda')
with torch.no_grad():
output = model(input_batch)
top1, top5 = img_postprocess(output, labels[i * batch_size: (i + 1) * batch_size])
top1_total += top1
top5_total += top5
print('****************iteration:{}*****************'.format(i))
print('top1_acc:{}'.format(top1))
print('top5_acc:{}'.format(top5))
print('******final top1:{}'.format(top1_total / iterations))
print('******final top5:{}'.format(top5_total / iterations))
return top1_total / iterations, top5_total / iterations
def onnx_forward(onnx_model, batch_size, iterations):
"""Do onnx model forward"""
ort_session = ort.InferenceSession(onnx_model)
images, labels = get_labels_from_txt(LABEL_FILE)
images = [os.path.join(IMG_DIR, image) for image in images]
top1_total = 0
top5_total = 0
for i in range(iterations):
input_batch = prepare_image_input(images[i * batch_size: (i + 1) * batch_size])
output = ort_session.run(None, {'input': input_batch.numpy()})
top1, top5 = img_postprocess(output[0], labels[i * batch_size: (i + 1) * batch_size])
top1_total += top1
top5_total += top5
print('****************iteration:{}*****************'.format(i))
print('top1_acc:{}'.format(top1))
print('top5_acc:{}'.format(top5))
print('******final top1:{}'.format(top1_total / iterations))
print('******final top5:{}'.format(top5_total / iterations))
return top1_total / iterations, top5_total / iterations
def main():
"""Sample main function"""
model = resnet101(pretrained=True)
model.eval()
ori_top1, ori_top5 = model_forward(model, batch_size=32, iterations=5)
# Quantize configurations
args_shape = [(1, 3, 224, 224)]
input_data = tuple([torch.randn(arg_shape) for arg_shape in args_shape]) # pylint: disable=E1101
if torch.cuda.is_available():
input_data = tuple([data.to('cuda') for data in input_data])
model.to('cuda')
config_json_file = os.path.join(TMP, 'config.json')
skip_layers = []
batch_num = 2
if ARGS.nuq:
config_defination = os.path.join(PATH, 'src/nuq_conf/nuq_quant.cfg')
amct.create_quant_config(
config_json_file, model, input_data, skip_layers, batch_num, config_defination=config_defination)
else:
amct.create_quant_config(config_json_file, model, input_data, skip_layers, batch_num)
# Phase1: do conv+bn fusion, weights calibration and generate
# calibration model
record_file = os.path.join(TMP, 'record.txt')
modified_model = os.path.join(TMP, 'modified_model.onnx')
calibration_model = amct.quantize_model(
config_json_file, modified_model, record_file, model, input_data, input_names=['input'],
output_names=['output'], dynamic_axes={'input': {0: 'batch_size'}, 'output': {0: 'batch_size'}})
# Phase2: do calibration
model_forward(calibration_model, batch_size=32, iterations=batch_num)
if torch.cuda.is_available():
torch.cuda.empty_cache()
# Phase3: save final model, one for onnx do fake quant test, one
# deploy model for ATC
result_path = os.path.join(OUTPUTS, 'resnet-101')
amct.save_model(modified_model, record_file, result_path)
# Phase4: run fake_quant model test
quant_top1, quant_top5 = onnx_forward(
'%s_%s' % (result_path, 'fake_quant_model.onnx'), batch_size=32, iterations=5)
print('[INFO] ResNet101 before quantize top1:{:>10} top5:{:>10}'.format(ori_top1, ori_top5))
print('[INFO] ResNet101 after quantize top1:{:>10} top5:{:>10}'.format(quant_top1, quant_top5))
if __name__ == '__main__':
main()
|
[
"derek.qian.wang@huawei.com"
] |
derek.qian.wang@huawei.com
|
4af4f611f29d8399e7635e13af155fc04e99e0b9
|
9e1dcb4f71b7eda84bbf0855d574eb38719d21a9
|
/nested_loops_prime_number.py
|
09ead76ff7a45ba184bcf3f6b8ff47bf66b017c6
|
[] |
no_license
|
ayoubabounakif/edX-Python
|
689c2730458513151fc3b7a69f6a3e8b25462028
|
2449616fd6d9d8d8d74819cff24f3a54bff9dd4b
|
refs/heads/master
| 2020-12-30T03:46:10.271688
| 2020-02-07T05:28:09
| 2020-02-07T05:28:09
| 238,849,304
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,763
|
py
|
#ALGORITHM
'''
1. Select a number
2. Select a divisor and set it equal to 2.
3. Assume number is prime
4. If divisor is less then the number go to step 5 else go to step 8
5. If remainder of (number/divisor) is 0 then number is not prime(exit/stop)
6. Add one to the divisor
7. Go to step 4
8. Number is prime
'''
# A program that prints the prime numbers
#between x (start_number) and y (end_number)
#CODE (using while loop)
ask_user = int(input("Enter a value for x: "))
ask_user_2 = int(input("Enter a value for y: "))
x = ask_user
y = ask_user_2
current_number = x
while current_number <= y:
current_divisor = 2
current_number_prime = True
while (current_divisor < current_number):
if current_number % current_divisor == 0:
current_number_prime = False
break
current_divisor = current_divisor + 1
if current_number_prime:
print (current_number, "is prime")
current_number = current_number + 1
print ("DONE! These are all the prime numbers between your values!")
#CODE (using for loop)
ask_user = int(input("Enter a value for x: "))
ask_user_2 = int(input("Enter a value for y: "))
x = ask_user
y = ask_user_2
current_number = x
for current_number in range(x, y+1):
current_number_prime = True
for current_divisor in range (2, current_number):
if current_number % current_divisor == 0:
current_number_prime = False
break
if current_number_prime:
print (current_number, "is prime")
print ("DONE! These are all the prime numbers between your values!")
|
[
"noreply@github.com"
] |
ayoubabounakif.noreply@github.com
|
556a064c6aaa406e6208c1055530438590c6f151
|
9b2f4810b093639209b65bbcb5fa07125e17266f
|
/src/radical/pilot/umgr/staging_input/__init__.py
|
66be18b437eb382c61a394d1bf9f1abbbf8f82d4
|
[
"MIT"
] |
permissive
|
karahbit/radical.pilot
|
887d25d370d08e3455f19cd240677b62278ef67f
|
c611e1df781749deef899dcf5815728e1d8a962e
|
refs/heads/devel
| 2020-12-21T09:54:10.622036
| 2020-08-20T18:18:12
| 2020-08-20T18:18:12
| 254,967,331
| 0
| 0
|
NOASSERTION
| 2020-05-01T00:47:51
| 2020-04-11T22:37:20
| null |
UTF-8
|
Python
| false
| false
| 141
|
py
|
__copyright__ = "Copyright 2016, http://radical.rutgers.edu"
__license__ = "MIT"
from .base import UMGRStagingInputComponent as Input
|
[
"andre@merzky.net"
] |
andre@merzky.net
|
4285a06223ef406e7b6a8cfcba809f60b3d98731
|
57eb2354f8fba9d46c8edcfac60c13fc0468d950
|
/Lekhaka/deformer_noiser.py
|
af37dc110bc7fa9c610374b8ecf483f63c73effc
|
[] |
no_license
|
rakeshvar/Lekhaka
|
597e91e60c30c566e6f792af2d1378205f698087
|
1d2d31035fe8a29f002adb5a70d762669102a0f3
|
refs/heads/main
| 2023-06-16T11:18:30.121653
| 2021-07-09T08:35:56
| 2021-07-09T08:35:56
| 370,766,062
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,731
|
py
|
import numpy as np
from scipy import ndimage as nd
from scipy.special import cosdg, sindg
def _summary(mat, name):
print(f"{name}\tshape:{mat.shape}\tmax:{mat.max():.2f} min:{mat.min():.2f}")
pass
class Deformer:
def __init__(self, translation=0, zoom=0, elastic_magnitude=0, sigma=1, angle=0, nearest=False, debug=False):
self.translation = translation
self.zoom = zoom
self.elastic_magnitude = elastic_magnitude
self.sigma = sigma
self.angle = angle
self.nearest = nearest
# Build a gaussian filter for elastic distortion
if elastic_magnitude:
self.nrounds = 2
nsds = 2
sigma //= self.nrounds
filt = np.exp(-.5 * np.linspace(-nsds, nsds, int(2*nsds*sigma+1)) ** 2)
filt /= filt.sum()
if debug:
print(f"Gaussian Filter Range: {filt.max():.4f}-{filt.min():.4f} "
f"Ratio:{filt.max()/filt.min():.2f} Sum:{filt.sum()}")
self.filt = filt
self.summary = _summary if debug else lambda _, __: None
def __str__(self):
print('Elastic Translation:{:} Zoom:{} Mag:{:d} Sig:{:d} Angle:{} Interpolation:{}'.format(
self.translation, self.zoom, self.elastic_magnitude, self.sigma, self.angle,
'Nearest' if self.nearest else 'Linear'))
def __call__(self, inpt):
# Degenerate Case
if not (self.elastic_magnitude or self.translation or self.angle or self.zoom):
return inpt
b, h, w = inpt.shape
_hwidx = np.indices((h, w)).astype('float')
target = np.stack([_hwidx for _ in range(b)])
self.summary(target, "initial traget")
if self.elastic_magnitude:
# Elastic
elast = self.elastic_magnitude * np.random.normal(size=(b, 2, h, w))
for _ in range(self.nrounds):
for ax in (-1, -2):
nd.correlate1d(elast, self.filt, axis=ax, output=elast)
target += elast
self.summary(elast, "elastic")
# Zoom and Rotate
if self.zoom or self.angle:
# Center at 'about' half way
origin = np.random.uniform(.4, .6, size=(b, 2, 1, 1)) * np.array((h, w)).reshape((1, 2, 1, 1))
target -= origin
self.summary(origin, "origin")
# Zoom
if self.zoom:
zoomer = np.exp(self.zoom * np.random.uniform(-1, size=(b, 2, 1, 1)))
target *= zoomer
self.summary(zoomer, "zoom")
# Rotate
if self.angle:
theta = self.angle * np.random.uniform(-1, size=b)
c, s = cosdg(theta), sindg(theta)
rotate = np.array([[c, -s], [s, c]])
rotate = np.moveaxis(rotate, -1, 0) # b x 2 x 2
for i in range(b):
target[i] = np.tensordot(rotate[i], target[i], axes=(0, 0))
self.summary(rotate, "rotate")
# Uncenter
target += origin
# Make sure you do not go below zero along the width (vertical axis because of Transpose)
least_vert_disp = target[:, 0, 0].min(axis=-1)
self.summary(least_vert_disp[:, None, None], "least_vert_disp")
target[:, 0] -= least_vert_disp[:, None, None]
if self.translation:
transln = self.translation * np.random.uniform(-1, size=(b, 2, 1, 1))
transln[:, 0] = -2 * np.abs(transln[:, 0]) # Along slab width translation is (0, 2translation)
target += transln
self.summary(transln, "translation")
for i in range(b):
self.summary(target[i, 0], f"{i} final traget y")
self.summary(target[i, 1], f"{i} final traget x")
transy = np.clip(target[:, 0], 0, h - 1 - .001)
transx = np.clip(target[:, 1], 0, w - 1 - .001)
output = np.empty_like(inpt)
if self.nearest:
vert = np.rint(transy).astype(int)
horz = np.rint(transx).astype(int)
for i in range(b):
output[i] = inpt[i, vert[i], horz[i]]
else:
topp = np.floor(transy)
left = np.floor(transx)
fraction_y = transy - topp
fraction_x = transx - left
topp = topp.astype('int32')
left = left.astype('int32')
for i in range(b):
output[i] = inpt[i, topp, left] * (1 - fraction_y) * (1 - fraction_x) + \
inpt[i, topp, left + 1] * (1 - fraction_y) * fraction_x + \
inpt[i, topp + 1, left] * fraction_y * (1 - fraction_x) + \
inpt[i, topp + 1, left + 1] * fraction_y * fraction_x
self.summary(inpt, "input")
self.summary(output, "output")
return output
class Noiser:
def __init__(self, num_blots=0, erase_fraction=.5, minsize=0, maxsize=0):
self.num_blots = num_blots
self.erase_fraction = erase_fraction
self.minsize = minsize
self.maxsize = maxsize
def __call__(self, inpt):
batch_sz, h, w = inpt.shape
size = batch_sz, self.num_blots
colors = np.random.binomial(n=1, p=1-self.erase_fraction, size=size)
xs = np.random.randint(h, size=size)
dxs = np.random.randint(self.minsize, self.maxsize, size=size)
ys = np.random.randint(w, size=size)
dys = np.random.randint(self.minsize, self.maxsize, size=size)
for i in range(batch_sz):
for x, dx, y, dy, c in zip(xs[i], dxs[i], ys[i], dys[i], colors[i]):
inpt[i, x:(x+dx), y:(y+dy)] = c
return inpt
|
[
"rakeshvar@gmail.com"
] |
rakeshvar@gmail.com
|
c6984060bdb66e9297a30262564f0ec5543acd5e
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03146/s790644084.py
|
7e0cb3ce3d0317c1b444b17f7e0a4ff736bda753
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 173
|
py
|
s = int(input())
a = s
prev = set()
for i in range(1, 1500000):
if a in prev:
print(i)
exit()
prev.add(a)
if a % 2 == 0:
a //= 2
else:
a = 3 * a + 1
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
295338183b59fe88a08317b8e639fd6a5734f638
|
1ee4c8d3208d1b51a72d30e4732a9b2082da605c
|
/sao_portal/asgi.py
|
42ad8861fc2ad5d0afd93f540fdc60c77c34b824
|
[] |
no_license
|
abhiram-g/SAO_service_dashboard
|
8336f52a9968019102884e24edc735e8e4f38bc6
|
4d2cde4cefe6c10bc644223981b67755cf6c1145
|
refs/heads/master
| 2022-10-15T10:23:30.537956
| 2020-06-08T12:43:51
| 2020-06-08T12:43:51
| 270,624,725
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 397
|
py
|
"""
ASGI config for sao_portal project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'sao_portal.settings')
application = get_asgi_application()
|
[
"abc@gmail.com"
] |
abc@gmail.com
|
2adf1b16dc0fe58417825d349d3e29ccf10e3135
|
a247e3a40bca426f604ee057319ae3f7fce5c22f
|
/django1/venv/bin/django-admin
|
8279c65ae89eb5715eb0a7f394b21f42bacec363
|
[
"MIT"
] |
permissive
|
stephenndele/django-1
|
fcb5cd2a8598b5d68855814fb588a231e06efc09
|
11be0289bc3b6b3234b1e34979f282bd06cbce2e
|
refs/heads/main
| 2023-03-26T14:55:11.769279
| 2021-03-25T12:12:55
| 2021-03-25T12:12:55
| 349,335,362
| 0
| 2
| null | 2021-03-25T12:12:56
| 2021-03-19T07:22:05
|
Python
|
UTF-8
|
Python
| false
| false
| 345
|
#!/home/moringa/Desktop/moringa-school-projects/core/Django/django-playlist/django1/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from django.core.management import execute_from_command_line
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(execute_from_command_line())
|
[
"stephenndele09346@gmail.com"
] |
stephenndele09346@gmail.com
|
|
d7833d20c9de724dea0ff27dce90bb80523ae797
|
a22cc323b29f50da397d8363ac2521e3542a0fd7
|
/tests/dpaycli/test_witness.py
|
a4b82ed9882df3b715a284b0fdf967a5516a4db1
|
[
"MIT"
] |
permissive
|
dpays/dpay-cli
|
1a58c7dae45218e3b05b7e17ff5ce03e918d27b9
|
dfa80898e1faea2cee92ebec6fe04873381bd40f
|
refs/heads/master
| 2020-04-01T09:26:43.200933
| 2018-10-15T08:03:06
| 2018-10-15T08:03:06
| 153,075,154
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,801
|
py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import super
import unittest
from parameterized import parameterized
from pprint import pprint
from dpaycli import DPay
from dpaycli.witness import Witness, Witnesses, WitnessesVotedByAccount, WitnessesRankedByVote
from dpaycli.instance import set_shared_dpay_instance
from dpaycli.nodelist import NodeList
wif = "5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3"
class Testcases(unittest.TestCase):
@classmethod
def setUpClass(cls):
nodelist = NodeList()
nodelist.update_nodes(dpay_instance=DPay(node=nodelist.get_nodes(normal=True, appbase=True), num_retries=10))
cls.bts = DPay(
node=nodelist.get_nodes(),
nobroadcast=True,
unsigned=True,
keys={"active": wif},
num_retries=10
)
cls.testnet = DPay(
# node="https://testnet.timcliff.com",
node=nodelist.get_nodes(),
nobroadcast=True,
unsigned=True,
keys={"active": wif},
num_retries=10
)
# from getpass import getpass
# self.bts.wallet.unlock(getpass())
set_shared_dpay_instance(cls.bts)
cls.bts.set_default_account("test")
@parameterized.expand([
("normal"),
("testnet"),
])
def test_feed_publish(self, node_param):
if node_param == "normal":
bts = self.bts
else:
bts = self.testnet
bts.txbuffer.clear()
w = Witness("gtg", dpay_instance=bts)
tx = w.feed_publish("4 BBD", "1 BEX")
self.assertEqual(
(tx["operations"][0][0]),
"feed_publish"
)
op = tx["operations"][0][1]
self.assertIn(
"gtg",
op["publisher"])
@parameterized.expand([
("normal"),
("testnet"),
])
def test_update(self, node_param):
if node_param == "normal":
bts = self.bts
else:
bts = self.testnet
bts.txbuffer.clear()
w = Witness("gtg", dpay_instance=bts)
props = {"account_creation_fee": "0.1 BEX",
"maximum_block_size": 32000,
"bbd_interest_rate": 0}
tx = w.update(wif, "", props)
self.assertEqual((tx["operations"][0][0]), "witness_update")
op = tx["operations"][0][1]
self.assertIn(
"gtg",
op["owner"])
@parameterized.expand([
("normal"),
("testnet"),
])
def test_witnesses(self, node_param):
if node_param == "normal":
bts = self.bts
else:
bts = self.testnet
w = Witnesses(dpay_instance=bts)
w.printAsTable()
self.assertTrue(len(w) > 0)
self.assertTrue(isinstance(w[0], Witness))
@parameterized.expand([
("normal"),
("testnet"),
])
def test_WitnessesVotedByAccount(self, node_param):
if node_param == "normal":
bts = self.bts
else:
bts = self.testnet
w = WitnessesVotedByAccount("gtg", dpay_instance=bts)
w.printAsTable()
self.assertTrue(len(w) > 0)
self.assertTrue(isinstance(w[0], Witness))
@parameterized.expand([
("normal"),
("testnet"),
])
def test_WitnessesRankedByVote(self, node_param):
if node_param == "normal":
bts = self.bts
else:
bts = self.testnet
w = WitnessesRankedByVote(dpay_instance=bts)
w.printAsTable()
self.assertTrue(len(w) > 0)
self.assertTrue(isinstance(w[0], Witness))
@parameterized.expand([
    ("normal"),
    ("testnet"),
])
def test_export(self, node_param):
    # Compare the raw RPC witness object against Witness.json() field by field.
    if node_param == "normal":
        bts = self.bts
    else:
        bts = self.testnet
    owner = "gtg"
    # Appbase nodes expose find_witnesses; legacy nodes use get_witness_by_account.
    if bts.rpc.get_use_appbase():
        witness = bts.rpc.find_witnesses({'owners': [owner]}, api="database")['witnesses']
        if len(witness) > 0:
            witness = witness[0]
    else:
        witness = bts.rpc.get_witness_by_account(owner)
    w = Witness(owner, dpay_instance=bts)
    keys = list(witness.keys())
    json_witness = w.json()
    # These fields drift between the two calls, so they are not compared.
    exclude_list = ['votes', 'virtual_last_update', 'virtual_scheduled_time']
    for k in keys:
        if k not in exclude_list:
            # Witness.json() apparently flattens dict-shaped fields to value lists.
            if isinstance(witness[k], dict) and isinstance(json_witness[k], list):
                self.assertEqual(list(witness[k].values()), json_witness[k])
            else:
                self.assertEqual(witness[k], json_witness[k])
|
[
"jaredricelegal@gmail.com"
] |
jaredricelegal@gmail.com
|
baaf7396d7d64ca02b696064862bf5652b225a14
|
568ed7fdc9ccbd7967dd2950669c68002b454869
|
/yotta/test/cli/test.py
|
ccec43116468a2790ebad484c3f8dcd52ce643de
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
headlessme/yotta
|
ade06c41108dca045e295bd2e0fdb2b7baef8c89
|
947ab074b629c8f18ca91ab84ebaa29096b011c6
|
refs/heads/master
| 2021-01-17T11:10:07.569198
| 2015-12-08T11:45:12
| 2015-12-08T11:45:12
| 27,595,705
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,592
|
py
|
#!/usr/bin/env python
# Copyright 2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0
# See LICENSE file for details.
# standard library modules, , ,
import unittest
import copy
# internal modules:
from yotta.lib.detect import systemDefaultTarget
from . import cli
from . import util
Test_Tests = {
'module.json':'''{
"name": "test-tests",
"version": "0.0.0",
"description": "Test yotta's compilation of tests.",
"author": "James Crosby <james.crosby@arm.com>",
"licenses": [
{
"url": "https://spdx.org/licenses/Apache-2.0",
"type": "Apache-2.0"
}
]
}''',
'source/foo.c':'''#include "stdio.h"
int foo(){
printf("foo!\\n");
return 7;
}''',
'test-tests/foo.h':'int foo();',
'test/a/bar.c':'#include "test-tests/foo.h"\nint main(){ foo(); return 0; }',
'test/b/a/bar.c':'#include "test-tests/foo.h"\nint bar(); int main(){ foo(); bar(); return 0; }',
'test/b/b/bar.c':'#include "stdio.h"\nint bar(){ printf("bar!\\n"); return 7; }',
'test/c/a/a/bar.c':'#include "test-tests/foo.h"\nint bar(); int main(){ foo(); bar(); return 0; }',
'test/c/b/a/bar.c':'#include "stdio.h"\nint bar(){ printf("bar!\\n"); return 7; }',
'test/d/a/a/bar.c':'#include "test-tests/foo.h"\nint bar(); int main(){ foo(); bar(); return 0; }',
'test/d/a/b/bar.c':'#include "stdio.h"\nint bar(){ printf("bar!\\n"); return 7; }',
'test/e/a/a/a/bar.c':'#include "test-tests/foo.h"\nint bar(); int main(){ foo(); bar(); return 0; }',
'test/e/b/a/a/bar.c':'#include "stdio.h"\nint bar(){ printf("bar!\\n"); return 7; }',
'test/f/a/a/a/bar.c':'#include "test-tests/foo.h"\nint bar(); int main(){ foo(); bar(); return 0; }',
'test/f/a/b/a/bar.c':'#include "stdio.h"\nint bar(){ printf("bar!\\n"); return 7; }',
'test/g/a/a/a/bar.c':'#include "test-tests/foo.h"\nint bar(); int main(){ foo(); bar(); return 0; }',
'test/g/a/a/b/bar.c':'#include "stdio.h"\nint bar(){ printf("bar!\\n"); return 7; }'
}
Test_Fitler_Pass = copy.copy(Test_Tests)
Test_Fitler_Pass['module.json'] = '''{
"name": "test-tests",
"version": "0.0.0",
"licenses": [
{
"url": "https://spdx.org/licenses/Apache-2.0",
"type": "Apache-2.0"
}
],
"scripts": {
"testReporter": [
"grep",
"!"
]
}
}'''
Test_Fitler_Fail = copy.copy(Test_Tests)
Test_Fitler_Fail['module.json'] = '''{
"name": "test-tests",
"version": "0.0.0",
"licenses": [
{
"url": "https://spdx.org/licenses/Apache-2.0",
"type": "Apache-2.0"
}
],
"scripts": {
"testReporter": [
"grep",
"string that isnt in the output"
]
}
}'''
Test_Fitler_NotFound = copy.copy(Test_Tests)
Test_Fitler_NotFound['module.json'] = '''{
"name": "test-tests",
"version": "0.0.0",
"licenses": [
{
"url": "https://spdx.org/licenses/Apache-2.0",
"type": "Apache-2.0"
}
],
"scripts": {
"testReporter": [
"commandthatshouldntexist"
]
}
}'''
class TestCLITest(unittest.TestCase):
    """End-to-end tests of `yotta test` against the fixture module trees."""

    @unittest.skipIf(not util.canBuildNatively(), "can't build natively on windows yet")
    def test_tests(self):
        # Build then run all tests; every discovered test binary must pass.
        test_dir = util.writeTestFiles(Test_Tests, True)
        output = self.runCheckCommand(['--target', systemDefaultTarget(), 'build'], test_dir)
        output = self.runCheckCommand(['--target', systemDefaultTarget(), 'test'], test_dir)
        self.assertIn('test-a passed', output)
        self.assertIn('test-c passed', output)
        self.assertIn('test-d passed', output)
        self.assertIn('test-e passed', output)
        self.assertIn('test-f passed', output)
        self.assertIn('test-g passed', output)
        util.rmRf(test_dir)

    @unittest.skipIf(not util.canBuildNatively(), "can't build natively on windows yet")
    def test_testOutputFilterPassing(self):
        # A reporter that matches the output must not make the run fail.
        # NOTE(review): `stdout` is captured but unasserted; the check is
        # only that runCheckCommand does not raise.
        test_dir = util.writeTestFiles(Test_Fitler_Pass, True)
        stdout = self.runCheckCommand(['--target', systemDefaultTarget(), 'test'], test_dir)
        util.rmRf(test_dir)

    @unittest.skipIf(not util.canBuildNatively(), "can't build natively on windows yet")
    def test_testOutputFilterFailing(self):
        # A reporter that never matches must fail every test and exit non-zero.
        test_dir = util.writeTestFiles(Test_Fitler_Fail, True)
        stdout, stderr, statuscode = cli.run(['--target', systemDefaultTarget(), 'test'], cwd=test_dir)
        if statuscode == 0:
            print(stdout)
            print(stderr)
        self.assertIn('test-a failed', '%s %s' % (stdout, stderr))
        self.assertIn('test-c failed', '%s %s' % (stdout, stderr))
        self.assertIn('test-d failed', '%s %s' % (stdout, stderr))
        self.assertIn('test-e failed', '%s %s' % (stdout, stderr))
        self.assertIn('test-f failed', '%s %s' % (stdout, stderr))
        self.assertIn('test-g failed', '%s %s' % (stdout, stderr))
        self.assertNotEqual(statuscode, 0)
        util.rmRf(test_dir)

    @unittest.skipIf(not util.canBuildNatively(), "can't build natively on windows yet")
    def test_testOutputFilterNotFound(self):
        # A missing reporter command must surface as a non-zero exit status.
        test_dir = util.writeTestFiles(Test_Fitler_NotFound, True)
        stdout, stderr, statuscode = cli.run(['--target', systemDefaultTarget(), 'test'], cwd=test_dir)
        if statuscode == 0:
            print(stdout)
            print(stderr)
        self.assertNotEqual(statuscode, 0)
        util.rmRf(test_dir)

    def runCheckCommand(self, args, test_dir):
        # Run a yotta CLI command, assert success, return combined output.
        stdout, stderr, statuscode = cli.run(args, cwd=test_dir)
        if statuscode != 0:
            print('command failed with status %s' % statuscode)
            print(stdout)
            print(stderr)
        self.assertEqual(statuscode, 0)
        return '%s %s' % (stdout, stderr)
|
[
"James.Crosby@arm.com"
] |
James.Crosby@arm.com
|
f1516933ea445803defec8a1fa0c6335c45eb5e6
|
491d3ad04c852d2efe3e49842ccfcd20e40eab96
|
/mysite/blog/admin.py
|
6f0dd47e26f5ddf14bfd772d3edc6b2cfbd7becd
|
[] |
no_license
|
marianwitkowski/python-24082019
|
746c9824c15c2072caceeac8a9b610d79c63f0f6
|
df849d09aa7d9b7a08d8276a9c2b557d9f9d7ce7
|
refs/heads/master
| 2020-07-07T03:55:55.398961
| 2019-09-29T16:12:40
| 2019-09-29T16:12:40
| 203,239,578
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 337
|
py
|
from django.contrib import admin
from .models import Post
# Register your models here.
class PostAdmin(admin.ModelAdmin):
    """Admin configuration for blog posts: list columns, status filtering,
    full-text search over title/content, and slug auto-filled from title."""
    list_display = ('title', 'slug', 'status', 'created_on')
    list_filter = ("status",)
    search_fields = ['title', 'content']
    prepopulated_fields = {'slug': ('title',)}


# Bug fix: Post was registered without its ModelAdmin, so PostAdmin was
# defined but never used; registering the pair makes the options take effect.
admin.site.register(Post, PostAdmin)
|
[
"marian.witkowski@gmail.com"
] |
marian.witkowski@gmail.com
|
9f70e24acb6247d89104f02908ac2638143ee173
|
ca4910e944cca453050299cb6c8e856c06a76fb0
|
/blog/settings.py
|
1175ab8bc4cd442f1245d312eacf024ca32835cc
|
[] |
no_license
|
SonerArslan2019/djangoRESTAPI
|
f8e33cd8570f86f14810ef8fabea918503e0fc90
|
5398b578524fc5c6eb3b7ed51db68bc4f3687221
|
refs/heads/master
| 2023-04-18T01:45:45.093105
| 2021-04-24T09:50:22
| 2021-04-24T09:50:22
| 360,634,992
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,603
|
py
|
from pathlib import Path
import os
# Django settings for the blog REST API project.
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = 'django-insecure-$eiq=w_$+n^n#iy6c45zc0hsni!wjycxipc!4yrx+zq+!(js43'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Disabled DRF configuration: JWT auth plus scoped request throttling.
# REST_FRAMEWORK = {
#     'DEFAULT_AUTHENTICATION_CLASSES': [
#         'rest_framework_simplejwt.authentication.JWTAuthentication',
#         'rest_framework.authentication.SessionAuthentication'
#     ],
#     'DEFAULT_THROTTLE_CLASSES': (
#         'rest_framework.throttling.ScopedRateThrottle',
#     ),
#     'DEFAULT_THROTTLE_RATES': {
#         'registerthrottle': '15/hour',
#         # 'hasan' : '5/hour'
#     }
# }
# SIMPLE_JWT = {
#     'ACCESS_TOKEN_LIFETIME': timedelta(minutes=15)
# }
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'rest_framework',
    'post',
    'comment',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'blog.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [BASE_DIR / 'templates'],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'blog.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
# NOTE(review): all password validators are disabled; re-enable for production.
# AUTH_PASSWORD_VALIDATORS = [
#     {
#         'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
#     },
#     {
#         'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
#     },
#     {
#         'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
#     },
#     {
#         'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
#     },
# ]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
# User-uploaded media is served from /media/ and stored under BASE_DIR/media.
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
|
[
"soner@arslanyapi.com.tr"
] |
soner@arslanyapi.com.tr
|
f6a760119a4c4b2c583957abb4a7066cbb64a2eb
|
dc67e70a303f265ee6cb4c1a2d61fe811053fb3d
|
/beginner/066/A.py
|
cabb38041ad8e0ea035492830c9cef953fb894b2
|
[] |
no_license
|
cry999/AtCoder
|
d39ce22d49dfce805cb7bab9d1ff0dd21825823a
|
879d0e43e3fac0aadc4d772dc57374ae72571fe6
|
refs/heads/master
| 2020-04-23T13:55:00.018156
| 2019-12-11T05:23:03
| 2019-12-11T05:23:03
| 171,214,066
| 0
| 0
| null | 2019-05-13T15:17:02
| 2019-02-18T04:24:01
|
Python
|
UTF-8
|
Python
| false
| false
| 193
|
py
|
def ringring(a: int, b: int, c: int) -> int:
    """Return the sum of the two smallest of the three values."""
    # Equivalent to sorting and summing the first two: drop the maximum.
    return a + b + c - max(a, b, c)
if __name__ == "__main__":
    # Read exactly three whitespace-separated integers from stdin and
    # print the sum of the two smallest.
    a, b, c = (int(token) for token in input().split())
    print(ringring(a, b, c))
|
[
"when.the.cry999@gmail.com"
] |
when.the.cry999@gmail.com
|
72d7de871b2fb085d76442aa9a24ad3405cd961b
|
444a9480bce2035565332d4d4654244c0b5cd47b
|
/research/cv/centerface/preprocess.py
|
a985c6b64428994c27265c5fcd6ff413bee6b92a
|
[
"MIT",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
mindspore-ai/models
|
7ede9c6454e77e995e674628204e1c6e76bd7b27
|
eab643f51336dbf7d711f02d27e6516e5affee59
|
refs/heads/master
| 2023-07-20T01:49:34.614616
| 2023-07-17T11:43:18
| 2023-07-17T11:43:18
| 417,393,380
| 301
| 92
|
Apache-2.0
| 2023-05-17T11:22:28
| 2021-10-15T06:38:37
|
Python
|
UTF-8
|
Python
| false
| false
| 2,585
|
py
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""pre process for 310 inference"""
import os
import shutil
import cv2
import numpy as np
from src.model_utils.config import config
from dependency.centernet.src.lib.detectors.base_detector import CenterFaceDetector
def preprocess(dataset_path, preprocess_path):
    """Prepare WIDER-FACE-style images for 310 inference.

    Walks every event directory under ``dataset_path``, copies each image
    into ``preprocess_path``/input, and writes the detector's pre-process
    metadata for each image to ``preprocess_path``/meta/meta/<name>.txt,
    plus aggregate name/meta numpy arrays.
    """
    event_list = os.listdir(dataset_path)
    input_path = os.path.join(preprocess_path, "input")
    meta_path = os.path.join(preprocess_path, "meta/meta")
    if not os.path.exists(input_path):
        os.makedirs(os.path.join(preprocess_path, "input"))
    if not os.path.exists(meta_path):
        os.makedirs(os.path.join(preprocess_path, "meta/meta"))
    detector = CenterFaceDetector(config, None)
    name_list = []   # image basenames, saved as name_list.npy
    meta_list = []   # per-image pre-process metadata, saved as meta_list.npy
    i = 0
    for _, event in enumerate(event_list):
        file_list_item = os.listdir(os.path.join(dataset_path, event))
        im_dir = event
        for _, file in enumerate(file_list_item):
            im_name = file.split('.')[0]
            zip_name = '%s/%s' % (im_dir, file)
            img_path = os.path.join(dataset_path, zip_name)
            image = cv2.imread(img_path)
            # NOTE(review): with multiple test_scales only the metadata of
            # the LAST scale survives this loop — confirm config.test_scales
            # is a single-element list for this pipeline.
            for scale in config.test_scales:
                _, meta = detector.pre_process(image, scale)
            img_file_path = os.path.join(input_path, file)
            shutil.copyfile(img_path, img_file_path)
            meta_file_path = os.path.join(preprocess_path + "/meta/meta", im_name + ".txt")
            with open(meta_file_path, 'w+') as f:
                f.write(str(meta))
            name_list.append(im_name)
            meta_list.append(meta)
            i += 1
            print(f"preprocess: no.[{i}], img_name:{im_name}")
    np.save(os.path.join(preprocess_path + "/meta", "name_list.npy"), np.array(name_list))
    np.save(os.path.join(preprocess_path + "/meta", "meta_list.npy"), np.array(meta_list))
if __name__ == '__main__':
    # Entry point: dataset/output locations come from the shared model config.
    preprocess(config.dataset_path, config.preprocess_path)
|
[
"chenhaozhe1@huawei.com"
] |
chenhaozhe1@huawei.com
|
95869793a95931568444941801533d4d5e6cb5eb
|
d6be2453d1c4428a4b9d9f78ea80e7e1a39f0f5b
|
/src/utils.py
|
20225ec0e46d35e08388cbfdfc634ce8c9a2e343
|
[] |
no_license
|
bcrestel/sls
|
8f6a6356264747285fb193b2ebfa1c2914aa0fe3
|
f0392135e5c4072e3341998651091c8455a882fb
|
refs/heads/master
| 2020-12-15T16:51:03.663284
| 2020-10-06T14:22:58
| 2020-10-06T14:22:58
| 235,185,248
| 0
| 0
| null | 2020-01-20T19:47:07
| 2020-01-20T19:47:06
| null |
UTF-8
|
Python
| false
| false
| 1,478
|
py
|
import hashlib
import pickle
import json
import os
import itertools
import torch
import numpy as np
def hash_dict(dictionary):
    """Return a deterministic md5 hex digest for *dictionary*.

    Keys are visited in sorted order and nested dicts are hashed
    recursively, so two dicts with equal contents hash identically
    regardless of insertion order.
    """
    parts = []
    for key in sorted(dictionary.keys()):
        value = dictionary[key]
        if isinstance(value, dict):
            value = hash_dict(value)
        parts.append("%s_%s_" % (str(key), str(value)))
    return hashlib.md5("".join(parts).encode()).hexdigest()
def save_pkl(fname, data):
    """Save *data* as a pickle, atomically: write to a sibling temp file
    first, then rename it over *fname* so readers never see a partial file."""
    tmp_name = fname + "_tmp.pkl"
    with open(tmp_name, "wb") as handle:
        pickle.dump(data, handle)
    os.rename(tmp_name, fname)
def load_pkl(fname):
    """Load and return the object pickled in *fname*."""
    with open(fname, "rb") as handle:
        return pickle.load(handle)
def load_json(fname, decode=None):
    """Return the object parsed from the JSON file *fname*.

    ``decode`` is accepted for backward compatibility but is unused.
    """
    with open(fname, "r") as handle:
        return json.load(handle)
def save_json(fname, data):
    """Write *data* to *fname* as pretty-printed JSON with sorted keys."""
    with open(fname, "w") as handle:
        json.dump(data, handle, indent=4, sort_keys=True)
def torch_save(fname, obj):
    """Save *obj* with torch.save, atomically via temp file + rename."""
    tmp_name = fname + ".tmp"
    torch.save(obj, tmp_name)
    os.rename(tmp_name, fname)
def read_text(fname):
    """Return the lines of a UTF-8 text file, newline characters included."""
    with open(fname, "r", encoding="utf-8") as handle:
        return handle.readlines()
|
[
"issam.laradji@gmail.com"
] |
issam.laradji@gmail.com
|
91489aef1cfcb6675882a5ed78249f485727af5a
|
975b2d421d3661e6770b601929d5f11d981d8985
|
/msgraph/generated/models/access_package_request_state.py
|
8d2207eae29267f561c8c719c8dc968d0f903cb0
|
[
"MIT"
] |
permissive
|
microsoftgraph/msgraph-sdk-python
|
a7c551b85daadeebf76ec4ae12668664ea639b42
|
27de7ccbe688d7614b2f6bde0fdbcda4bc5cc949
|
refs/heads/main
| 2023-09-03T21:45:27.989672
| 2023-08-31T06:22:18
| 2023-08-31T06:22:18
| 534,665,999
| 135
| 18
|
MIT
| 2023-09-14T11:04:11
| 2022-09-09T14:00:17
|
Python
|
UTF-8
|
Python
| false
| false
| 410
|
py
|
from enum import Enum
class AccessPackageRequestState(str, Enum):
    """Lifecycle states of a Microsoft Graph access-package request.

    Values are the camelCase wire strings. Fix: the previous version had a
    trailing comma after every member, making each raw value a 1-tuple that
    only worked because Enum passes tuple values to the ``str`` mixin
    constructor; the bare strings below are equivalent without relying on
    that quirk.
    """
    Submitted = "submitted"
    PendingApproval = "pendingApproval"
    Delivering = "delivering"
    Delivered = "delivered"
    DeliveryFailed = "deliveryFailed"
    Denied = "denied"
    Scheduled = "scheduled"
    Canceled = "canceled"
    PartiallyDelivered = "partiallyDelivered"
    UnknownFutureValue = "unknownFutureValue"
|
[
"GraphTooling@service.microsoft.com"
] |
GraphTooling@service.microsoft.com
|
a31faa28ea7fa887dcbc8ad53795258aa189f931
|
498e792e16ab1a74ac034c53177c4cccbeef2749
|
/classification/resnet/train.py
|
662ceca52750777835c1b05e25f7eaacf8d247aa
|
[] |
no_license
|
ydwisroad/imageprocessingpytorch
|
f97bec4469c087f6bbbca5d42da180c95be8b13f
|
bd8d1af228619c9c6c9c1a2b880422f7d5048dd5
|
refs/heads/master
| 2023-07-29T05:05:11.145832
| 2022-02-21T23:32:03
| 2022-02-21T23:32:03
| 284,976,501
| 7
| 3
| null | 2023-07-24T01:08:22
| 2020-08-04T12:43:24
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 4,052
|
py
|
import torch
import torch.nn as nn
from torchvision import transforms, datasets
import json
import matplotlib.pyplot as plt
import os
import torch.optim as optim
from model import resnet34, resnet101
# Select GPU when available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)
# Standard ImageNet augmentation/normalization for train; resize+center-crop
# for validation.
data_transform = {
    "train": transforms.Compose([transforms.RandomResizedCrop(224),
                                 transforms.RandomHorizontalFlip(),
                                 transforms.ToTensor(),
                                 transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]),
    "val": transforms.Compose([transforms.Resize(256),
                               transforms.CenterCrop(224),
                               transforms.ToTensor(),
                               transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])}
data_root = os.path.abspath(os.path.join(os.getcwd(), "../../data"))  # get data root path
image_path = data_root + "/flower_photos_simple/"  # flower data set path
train_dataset = datasets.ImageFolder(root=image_path+"train",
                                     transform=data_transform["train"])
train_num = len(train_dataset)
# {'daisy':0, 'dandelion':1, 'roses':2, 'sunflower':3, 'tulips':4}
flower_list = train_dataset.class_to_idx
# Invert class->index so predictions can be mapped back to class names.
cla_dict = dict((val, key) for key, val in flower_list.items())
# write dict into json file
json_str = json.dumps(cla_dict, indent=4)
with open('class_indices.json', 'w') as json_file:
    json_file.write(json_str)
batch_size = 4
train_loader = torch.utils.data.DataLoader(train_dataset,
                                           batch_size=batch_size, shuffle=True,
                                           num_workers=0)
validate_dataset = datasets.ImageFolder(root=image_path + "val",
                                        transform=data_transform["val"])
val_num = len(validate_dataset)
validate_loader = torch.utils.data.DataLoader(validate_dataset,
                                              batch_size=batch_size, shuffle=False,
                                              num_workers=0)
net = resnet34()
# load pretrain weights (disabled: training from scratch)
#model_weight_path = "./resnet34-pre.pth"
#missing_keys, unexpected_keys = net.load_state_dict(torch.load(model_weight_path), strict=False)
# for param in net.parameters():
#     param.requires_grad = False
# change fc layer structure: replace the final layer for 5 flower classes.
inchannel = net.fc.in_features
net.fc = nn.Linear(inchannel, 5)
net.to(device)
loss_function = nn.CrossEntropyLoss()
optimizer = optim.Adam(net.parameters(), lr=0.0001)
for epoch in range(10):
# train
net.train()
running_loss = 0.0
for step, data in enumerate(train_loader, start=0):
images, labels = data
optimizer.zero_grad()
logits = net(images.to(device))
loss = loss_function(logits, labels.to(device))
loss.backward()
optimizer.step()
# print statistics
running_loss += loss.item()
# print train process
rate = (step+1)/len(train_loader)
a = "*" * int(rate * 50)
b = "." * int((1 - rate) * 50)
print("\rtrain loss: {:^3.0f}%[{}->{}]{:.4f}".format(int(rate*100), a, b, loss), end="")
print()
# validate
net.eval()
acc = 0.0 # accumulate accurate number / epoch
with torch.no_grad():
for val_data in validate_loader:
val_images, val_labels = val_data
outputs = net(val_images.to(device)) # eval model only have last output layer
# loss = loss_function(outputs, test_labels)
predict_y = torch.max(outputs, dim=1)[1]
acc += (predict_y == val_labels.to(device)).sum().item()
val_accurate = acc / val_num
if val_accurate > best_acc:
best_acc = val_accurate
torch.save(net.state_dict(), save_path)
print('[epoch %d] train_loss: %.3f test_accuracy: %.3f' %
(epoch + 1, running_loss / step, val_accurate))
print('Finished Training')
|
[
"wandf12345@163.com"
] |
wandf12345@163.com
|
5bf7470e827eea42e7c8955e6c2fb564dbc45de9
|
f453f183834e3bf587a120023615ed2ddd38c157
|
/tsa/lib/encoders.py
|
969cdf1f6c1712d900097659bf0862df709f2d35
|
[
"MIT"
] |
permissive
|
chbrown/topic-sentiment-authorship
|
72c21638eb72888c370cd3b1b5f06504df09ce2e
|
e8cacf11b06583d9ed85ff790e1d5322e59f2fd6
|
refs/heads/master
| 2022-07-05T22:58:24.456139
| 2020-03-29T16:12:21
| 2020-03-29T16:12:21
| 13,025,589
| 0
| 0
|
MIT
| 2020-03-29T16:13:35
| 2013-09-23T02:53:40
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 492
|
py
|
import json
from datetime import datetime
class JSONEncoder(json.JSONEncoder):
    """json.JSONEncoder that serializes ``__json__``-aware objects and
    datetimes (ISO-8601, second resolution, no timezone).

    Fix: the previous fallback ``return obj`` handed the unserializable
    object straight back to the encoder, which called ``default`` again and
    crashed with RecursionError; deferring to the base class now raises the
    conventional TypeError instead.
    """
    def default(self, obj):
        # Objects opt in to serialization by exposing __json__().
        if hasattr(obj, '__json__'):
            return obj.__json__()
        if isinstance(obj, datetime):
            return obj.strftime('%Y-%m-%dT%H:%M:%S')
        return super(JSONEncoder, self).default(obj)
# encoder = JSONEncoder()
# def json(obj):
# return encoder.encode(obj)
# c'mon, just DIY
def csv(obj):
    """Join the items of *obj* with commas, str()-converting each one."""
    return ",".join(str(item) for item in obj)
|
[
"io@henrian.com"
] |
io@henrian.com
|
977cd1f34ed3ff2b174cb7a5bb2ad1829606c277
|
fbff973537eae45b724b23e9b6fc8692da959b21
|
/app/core/config.py
|
979658548ef83b1914a5730ab318dedd6ab5b824
|
[
"MIT"
] |
permissive
|
lsetiawan/cava-metadata
|
d4a8878480cd9da4bfa163b9d9c42d705a0fb263
|
e45c469a4b5cbdebfba74ab0031fb94eb59fd724
|
refs/heads/main
| 2023-04-08T02:28:24.402853
| 2021-01-27T20:02:23
| 2021-01-27T20:02:23
| 358,033,596
| 0
| 0
|
MIT
| 2021-04-14T20:26:35
| 2021-04-14T20:26:35
| null |
UTF-8
|
Python
| false
| false
| 1,418
|
py
|
import os
import fsspec
# Configuration constants for the Interactive Oceans metadata service.
# API SETTINGS
SERVICE_NAME = "Metadata Service"
SERVICE_ID = "metadata"
OPENAPI_URL = f"/{SERVICE_ID}/openapi.json"
DOCS_URL = f"/{SERVICE_ID}/"
SERVICE_DESCRIPTION = """Metadata service for Interactive Oceans."""
# Origins allowed for CORS: local development ports plus the deployed apps.
CORS_ORIGINS = [
    "http://localhost",
    "http://localhost:8000",
    "http://localhost:5000",
    "http://localhost:4000",
    "https://appdev.ooica.net",
    "https://app-dev.ooica.net",
    "https://app.interactiveoceans.washington.edu",
    "https://api-dev.ooica.net",
    "https://api.interactiveoceans.washington.edu",
]
# Repository root (two levels above this file).
BASE_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# API VERSION
CURRENT_API_VERSION = 2.0
# Redis configurations (overridable via environment)
REDIS_HOST = os.environ.get("REDIS_HOST", "localhost")
REDIS_PORT = os.environ.get("REDIS_PORT", 6379)
# OOI Configurations: machine-to-machine API credentials from the environment.
BASE_URL = "https://ooinet.oceanobservatories.org"
M2M_URL = "api/m2m"
USERNAME = os.environ.get("OOI_USERNAME", "")
TOKEN = os.environ.get("OOI_TOKEN", "")
# File Systems Configurations: fsspec handles for the local MinIO endpoint
# and for AWS S3 (uncached, large connection pool).
FILE_SYSTEMS = {
    "minio_s3": fsspec.filesystem(
        "s3", client_kwargs={"endpoint_url": "http://minio:9000"}
    ),
    "aws_s3": fsspec.filesystem(
        "s3",
        skip_instance_cache=True,
        use_listings_cache=False,
        config_kwargs={"max_pool_connections": 1000},
    ),
}
GOOGLE_SERVICE_JSON = os.environ.get("GOOGLE_SERVICE_JSON", "",)
DATA_BUCKET = 'ooi-data'
|
[
"landungs@uw.edu"
] |
landungs@uw.edu
|
f9b589aa7e5cb26eda1a3b56bc67249768ee6093
|
4b819b9c7aee9d60689f487557e437445101188d
|
/lanuch/accounts/views.py
|
e04d7ebbd2e15bedabf699d153c0170baa54e03b
|
[] |
no_license
|
Damidara16/dev
|
c2fe90fb70d4644bdee964ce9b7b85bf9f71c99a
|
f3c8666bc32b19ffb623b83019fdbf404433ece8
|
refs/heads/master
| 2020-03-10T20:14:11.173397
| 2018-04-15T00:56:56
| 2018-04-15T00:56:56
| 129,565,993
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,485
|
py
|
from django.contrib.auth import update_session_auth_hash
from django.contrib.auth.forms import PasswordChangeForm
from django.contrib.auth.models import User
from django.shortcuts import render, redirect

from blog.models import waste
from .forms import RegistrationForm, EditProfileForm, AddInfo
from .forms import AddInfo as AddInfoForm
from .models import Author
def ViewProfile(request, author_pk):
    """Render the profile page for the user identified by ``author_pk``.

    Fixes over the previous version:
    * ``is_authenicated()`` was a typo (AttributeError at runtime); Django
      exposes ``request.user.is_authenticated`` (a property since 1.10).
    * ``User.objects.get(user=request.user)`` queried a non-existent
      ``user`` field on ``User``; the request user is already the object.
    * Viewing one's own profile while anonymous fell through without any
      return (implicit ``None``); now every path returns a response.
    """
    if author_pk == request.user.id and request.user.is_authenticated:
        # Own profile: no database round-trip needed.
        user = request.user
    else:
        user = User.objects.get(pk=author_pk)
        # NOTE(review): view counting was sketched here and commented out
        # in the original; intentionally not resurrected.
    return render(request, 'accounts/profile.html', {'user': user})
def register(request):
    """Handle the signup form: save a valid registration and go home,
    bounce an invalid POST back to the registration URL, or show a blank
    form on GET."""
    if request.method != 'POST':
        form = RegistrationForm()
    else:
        form = RegistrationForm(request.POST, request.FILES)
        if not form.is_valid():
            return redirect('/accounts/register')
        form.save()
        return redirect('/home')
    context = {
        'form': form,
        # NOTE(review): this title looks copy-pasted from the password page.
        'title': 'Change Your Password',
        'btnName': 'Register',
    }
    return render(request, 'accounts/edit.html', context)
'''
def jregister(request):
if request.method =='POST':
form = RegistrationForm(request.POST)
if form.is_valid():
form.save()
return redirect(reverse('accounts:home'))
else:
form = RegistrationForm()
args = {'form': form}
return render(request, 'accounts/reg_form.html', args)
'''
def EditProfile(request):
    """Let the logged-in user edit their profile.

    Fixes: ``request.Method`` / ``request.Post`` / ``request.User`` were
    wrong attribute names (Django uses lowercase ``method`` / ``POST`` /
    ``user``), and the truncated ``return re`` raised NameError after a
    successful save — it now redirects to the profile page.
    """
    if request.method == 'POST':
        form = EditProfileForm(request.POST, instance=request.user)
        if form.is_valid():
            form.save()
            return redirect('/accounts/profile')
        # invalid POST: fall through and redisplay the bound form with errors
    else:
        form = EditProfileForm(instance=request.user)
    title = 'Edit Your Profile'
    btnName = 'Done editing'
    context = {'form': form, 'title': title, 'btnName': btnName}
    return render(request, 'accounts/edit.html', context)
def AddInfo(request):
    """Collect extra profile info from an authenticated user.

    Fixes: lowercase ``request.method`` / ``request.POST``; the
    ``is_authenicated()`` typo; and — critically — ``AddInfo(request.POST)``
    resolved to *this view* (the function shadows the imported form of the
    same name), recursing forever. The form is reached through the
    ``AddInfoForm`` import alias instead. The repeated
    ``instance = form.cleaned_data[...]`` lines clobbered the unsaved model
    instance to no effect and were dropped, and the GET branch now shows
    the AddInfo form (not the RegistrationForm class object).
    """
    if request.method == 'POST' and request.user.is_authenticated:
        form = AddInfoForm(request.POST)
        if form.is_valid():
            form.save()
            return redirect('/home/')
        return redirect('/accounts/add')
    form = AddInfoForm()
    title = 'Tell Us More'
    btnName = 'Finish'
    context = {'form': form, 'title': title, 'btnName': btnName}
    return render(request, 'accounts/edit.html', context)
def Changepassword(request):
    """Change the current user's password, keeping them logged in.

    Fixes: lowercase ``request.method`` / ``request.POST`` /
    ``request.user``; and ``PasswordChangeForm`` is not a ModelForm, so it
    takes the user via the ``user`` argument, not ``instance`` (the
    original raised TypeError on both the POST and GET paths).
    """
    if request.method == 'POST':
        form = PasswordChangeForm(data=request.POST, user=request.user)
        if form.is_valid():
            form.save()
            # Keep the session valid after the password hash changes.
            update_session_auth_hash(request, form.user)
            return redirect('/accounts/profile')
        return redirect('/accounts/Changepassword')
    form = PasswordChangeForm(user=request.user)
    title = 'Change Your Password'
    btnName = 'Change Password'
    context = {'form': form, 'title': title, 'btnName': btnName}
    return render(request, 'accounts/edit.html', context)
|
[
"sajala8624@gmail.com"
] |
sajala8624@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.