blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9c58d676834976a2ca45ac492bc666c494703314 | 96dcea595e7c16cec07b3f649afd65f3660a0bad | /homeassistant/components/cloud/subscription.py | 9a62f2d115c1f99c497f55c6ad79d419a387606b | [
"Apache-2.0"
] | permissive | home-assistant/core | 3455eac2e9d925c92d30178643b1aaccf3a6484f | 80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743 | refs/heads/dev | 2023-08-31T15:41:06.299469 | 2023-08-31T14:50:53 | 2023-08-31T14:50:53 | 12,888,993 | 35,501 | 20,617 | Apache-2.0 | 2023-09-14T21:50:15 | 2013-09-17T07:29:48 | Python | UTF-8 | Python | false | false | 1,520 | py | """Subscription information."""
from __future__ import annotations
import asyncio
import logging
from typing import Any
from aiohttp.client_exceptions import ClientError
from hass_nabucasa import Cloud, cloud_api
from .client import CloudClient
from .const import REQUEST_TIMEOUT
_LOGGER = logging.getLogger(__name__)
async def async_subscription_info(cloud: Cloud[CloudClient]) -> dict[str, Any] | None:
    """Fetch the subscription info.

    Returns the subscription payload from the cloud API, or None when the
    request times out or fails with a client error (both cases are logged).
    """
    info: dict[str, Any] | None = None
    try:
        async with asyncio.timeout(REQUEST_TIMEOUT):
            info = await cloud_api.async_subscription_info(cloud)
    except asyncio.TimeoutError:
        _LOGGER.error(
            "A timeout of %s was reached while trying to fetch subscription information",
            REQUEST_TIMEOUT,
        )
    except ClientError:
        _LOGGER.error("Failed to fetch subscription information")
    return info
async def async_migrate_paypal_agreement(
    cloud: Cloud[CloudClient],
) -> dict[str, Any] | None:
    """Migrate a paypal agreement from legacy.

    Returns the migration payload from the cloud API, or None when the
    request times out or fails with a client error (both cases are logged).
    """
    result: dict[str, Any] | None = None
    try:
        async with asyncio.timeout(REQUEST_TIMEOUT):
            result = await cloud_api.async_migrate_paypal_agreement(cloud)
    except asyncio.TimeoutError:
        _LOGGER.error(
            "A timeout of %s was reached while trying to start agreement migration",
            REQUEST_TIMEOUT,
        )
    except ClientError as exception:
        _LOGGER.error("Failed to start agreement migration - %s", exception)
    return result
| [
"noreply@github.com"
] | home-assistant.noreply@github.com |
c9945ea2257dffedc8c00d4d6dec9d3d44ac2bc3 | bb4a4504a7051484173c8e9933b06fdf1384c2f7 | /src/simulator/controllers/keyboard_controller.py | c27da2bf63efeafabf08709717b1f577bf21efb9 | [] | no_license | uncobruce/PathBench2D | dd4794a5b19ee95ad7555d512d0e36063e2c6330 | 57c6397fe990de3088aa99da1602f9872b90d0b8 | refs/heads/master | 2023-01-09T17:16:35.200009 | 2020-08-05T20:45:28 | 2020-08-05T20:45:28 | 274,780,888 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,817 | py | import pygame
from simulator.controllers.controller import Controller
from simulator.models.model import Model
from simulator.services.debug import DebugLevel
from simulator.services.event_manager.events.keyboard_event import KeyboardEvent
from simulator.services.event_manager.events.mouse_event import MouseEvent
from simulator.services.event_manager.events.quit_event import QuitEvent
from simulator.services.services import Services
class KeyboardController(Controller):
    """
    Translates raw pygame input (keyboard and mouse) into simulator events.
    """

    def __init__(self, services: Services, model: Model) -> None:
        super().__init__(services, model)
        # Receive a tick() callback every game tick.
        self._services.ev_manager.register_tick_listener(self)

    def tick(self) -> None:
        """Poll pygame for pending input and re-post it on the event manager."""
        # Nothing to poll while the display is not initialised.
        if self._services.render_engine.is_display_init() == 0:
            return

        ev_manager = self._services.ev_manager
        for event in self._services.render_engine.get_events():
            # Window manager asked us to close.
            if event.type == pygame.QUIT:
                ev_manager.post(QuitEvent())
            # Key presses: escape quits, anything else is broadcast.
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_ESCAPE:
                    ev_manager.post(QuitEvent())
                else:
                    key_event = KeyboardEvent(event.key)
                    ev_manager.post(key_event)
                    self._services.debug.write(key_event, DebugLevel.MEDIUM)
            # Mouse activity is forwarded wholesale.
            if event.type in (pygame.MOUSEMOTION,
                              pygame.MOUSEBUTTONDOWN,
                              pygame.MOUSEBUTTONUP):
                ev_manager.post(MouseEvent(event))
| [
"daniel.lenton11@imperial.ac.uk"
] | daniel.lenton11@imperial.ac.uk |
72cc761d18a77df7d2cf09c6fa7002298b049768 | 534570bbb873293bd2646a1567b63d162fbba13c | /Python/236.lowest-common-ancestor-of-a-binary-tree.py | 250e65cbb6bfc472fa756c8203cf2e14635ad403 | [] | no_license | XinheLIU/Coding-Interview | fa3df0f7167fb1bc6c8831748249ebaa6f164552 | d6034c567cef252cfafca697aa316c7ad4e7d128 | refs/heads/master | 2022-09-17T14:30:54.371370 | 2022-08-19T15:53:35 | 2022-08-19T15:53:35 | 146,382,499 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 694 | py | #
# @lc app=leetcode id=236 lang=python3
#
# [236] Lowest Common Ancestor of a Binary Tree
#
# @lc code=start
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def lowestCommonAncestor(self, root: 'TreeNode', p: 'TreeNode', q: 'TreeNode') -> 'TreeNode':
        """Return the lowest common ancestor of nodes p and q.

        Post-order recursion: a node is the LCA when p and q were found in
        different subtrees; otherwise whichever side found something wins.
        """
        if not root or root is p or root is q:
            return root
        left = self.lowestCommonAncestor(root.left, p, q)
        right = self.lowestCommonAncestor(root.right, p, q)
        if left and right:
            return root
        return left if left else right
# @lc code=end
| [
"LIUXinhe@outlook.com"
] | LIUXinhe@outlook.com |
01d21fb423c5586a3bd3e9bcb8073f54c29bc389 | 59c55725576bbf0e2f6617507ba2f1db639abb3f | /project_time_sequence/project_time_sequence.py | c5f8461cb428c8e5e03210f485c31d8270f2ee7f | [] | no_license | bmya/eficent-odoo-addons | e3426ebaf1f59e52726253fc1dd36a09d9363059 | 5d8ddfa384ab4417f42bda103b71d926848035f6 | refs/heads/7.0 | 2021-01-21T16:48:55.312452 | 2015-11-04T14:11:19 | 2015-11-04T14:11:19 | 45,649,141 | 1 | 3 | null | 2015-11-06T00:35:17 | 2015-11-06T00:35:17 | null | UTF-8 | Python | false | false | 4,604 | py | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2011 Eficent (<http://www.eficent.com/>)
# <contact@eficent.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from lxml import etree
import time
from datetime import datetime, date, timedelta
import decimal_precision as dp
from openerp.tools.translate import _
from openerp.osv import fields, osv
import netsvc
import tools
class task(osv.osv):
    """project.task extension: many2many predecessor/successor links plus
    display helpers (OpenERP 7 osv API, Python 2)."""
    _inherit = 'project.task'

    def get_related_tasks(self, cr, uid, ids, context=None):
        """Return {task_id: True} for the given tasks and all of their
        direct successors."""
        result = {}
        # Fix: the original ignored the caller's context (browse(..., context=None)).
        for t in self.browse(cr, uid, ids, context=context):
            result[t.id] = True
            for child in t.successor_ids:
                result[child.id] = True
        return result

    def _predecessor_strings(self, cr, uid, ids, formatter):
        """Shared implementation of the two function fields below: map each
        task id to a comma-separated, sorted string built by applying
        ``formatter`` to every predecessor task.

        NOTE: the sort is lexicographic on strings (so '10' < '2'); kept
        from the original implementation since the value is display-only.
        """
        res = {}
        for t in self.browse(cr, uid, ids, context=None):
            data = sorted(formatter(parent) for parent in t.predecessor_ids) or ['']
            res[t.id] = ', '.join(data)
        return res

    def _predecessor_ids_calc(self, cr, uid, ids, prop, unknow_none, unknow_dict):
        """Function field: predecessor database ids as a display string."""
        if not ids:
            return []
        return self._predecessor_strings(cr, uid, ids, lambda parent: str(parent.id))

    def _predecessor_names_calc(self, cr, uid, ids, prop, unknow_none, unknow_dict):
        """Function field: predecessor names as a display string."""
        if not ids:
            return []
        return self._predecessor_strings(cr, uid, ids, lambda parent: tools.ustr(parent.name))

    _columns = {
        'predecessor_ids': fields.many2many('project.task', 'project_task_predecessor_rel', 'task_id', 'parent_id', 'Predecessor Tasks'),
        'successor_ids': fields.many2many('project.task', 'project_task_predecessor_rel', 'parent_id', 'task_id', 'Successor Tasks'),
        'predecessor_ids_str': fields.function(_predecessor_ids_calc, method=True, type='char', string='Predecessor tasks', size=20, help='Predecessor tasks ids'),
        'predecessor_names_str': fields.function(_predecessor_names_calc, method=True, type='char', string='Predecessor tasks', size=512, help='Predecessor tasks ids'),
    }

    def do_link_predecessors(self, cr, uid, task_id, link_predecessors_data, context=None):
        """Replace the predecessor list of ``task_id`` with the ids supplied
        in ``link_predecessors_data['predecessor_ids']``.

        (6, 0, ids) is the ORM command for "replace the whole relation".
        """
        task_br = self.browse(cr, uid, task_id, context=context)
        self.write(cr, uid, [task_br.id], {
            'predecessor_ids': [(6, 0, link_predecessors_data['predecessor_ids'])],
        })
        return True


task()
| [
"jordi.ballester@eficent.com"
] | jordi.ballester@eficent.com |
127a2f8601a9feda61e6ceb4404d4bf7bc2cd78d | 05c95c0fad58a65a8a73595d2f2fd13c5f78e2fe | /gym/gym/settings.py | 59ba6daedd854fc36183ce6344fca5f935207798 | [] | no_license | Kanchan528/gym-website | 572469445a23eda626aaea5c0629112468ee80d0 | 9e3a470115c6c44a8318af451f4ee0bc24c24330 | refs/heads/master | 2022-05-27T07:59:03.002294 | 2020-04-10T08:55:00 | 2020-04-10T08:55:00 | 254,588,968 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,232 | py | """
Django settings for gym project.
Generated by 'django-admin startproject' using Django 2.2.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# Absolute path of the repository root; all other paths derive from it.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))


# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the key is hard-coded and committed to the repo; load it
# from an environment variable before any real deployment.
SECRET_KEY = '-r)%7-q63j^)ylo9vktenq8qz1a-j=*@pi(zs0c9q9jjm-(s)f'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# Empty while DEBUG is True; must list served hostnames in production.
ALLOWED_HOSTS = []


# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'website'  # project app with the gym site itself
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'gym.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # Project-level template directory, in addition to per-app dirs.
        'DIRS': ['templates'],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'gym.wsgi.application'


# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
# Development default: a local SQLite file next to manage.py.

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}


# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]


# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/

STATIC_URL = '/static/'

# Extra (non-app) locations collected by the staticfiles finder.
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, "static"),
)

# User-uploaded content.
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media') | [
"kanchankharel528@gmail.com"
] | kanchankharel528@gmail.com |
e1fc9196ca7dcab3aa96d0b7c34cb560ea74883c | dc6a37efdacf41babc0049d8ed8fb724fde4ca4b | /webcrawl/naverkospi.py | 850837dbd9dffc89d48278b8871bc5ec0d0552dd | [] | no_license | imjwpak/tensor_Test | 345ea238daa520acd62a7bc1af561c0d5ea286fa | a359ba4700251cfab3b031b3ade36cc5fc643207 | refs/heads/master | 2020-07-02T14:36:55.514042 | 2019-08-10T09:12:53 | 2019-08-10T09:12:53 | 201,559,214 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 344 | py | from bs4 import BeautifulSoup
import urllib.request as url
class NaverKospiCrawler:
    """Scrapes the current KOSPI index value from a Naver finance page."""

    def __init__(self, param):
        # URL of the page to scrape.
        self.param = param

    def scrap(self):
        """Fetch the page, locate the KOSPI element, and print its value."""
        page = url.urlopen(self.param).read()
        parsed = BeautifulSoup(page, 'html.parser')
        txt = parsed.find(id='KOSPI_now').text
        print('코스피 : ' + txt)
| [
"imjwpak83@naver.com"
] | imjwpak83@naver.com |
c2a0142532c21e7d40a2b1033968ff79402e01eb | b266de2df8a6050173b2f97db8d7167e92258837 | /Blender/src/babylon_js/materials/nodes/principled.py | 7965b3bfefbb6fcd4200b27ad9e482e044d5b573 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | cloud173/Exporters | 6298dbdd0a0de25b261d3db43981d960e0078721 | ced3cbdb3e94f0e1aee3af65349264f5252e0cea | refs/heads/master | 2020-06-02T10:23:15.061684 | 2019-06-07T17:23:58 | 2019-06-07T17:23:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,372 | py | from .abstract import *
from mathutils import Color
#===============================================================================
class PrincipledBJSNode(AbstractBJSNode):
    """Adapter for Blender's 'Principled BSDF' shader node.

    Reads the node's input sockets (base color, metallic, specular,
    roughness, IOR, normal), records any attached textures via
    findTexture(), and copies socket default values onto material
    properties.  When a socket is driven by another node graph (the
    input resolves to an AbstractBJSNode), the matching mustBake* flag
    is set so the channel can be baked on export.
    """
    bpyType = 'ShaderNodeBsdfPrincipled'

    def __init__(self, bpyNode, socketName):
        super().__init__(bpyNode, socketName)

        # Renamed local from 'input' to 'in_socket': 'input' shadowed the builtin.
        # Base color: RGBA default; RGB -> diffuseColor, A -> diffuseAlpha.
        in_socket = self.findInput('Base Color')
        defaultDiffuse = self.findTexture(in_socket, DIFFUSE_TEX)
        if defaultDiffuse is not None:
            self.diffuseColor = Color((defaultDiffuse[0], defaultDiffuse[1], defaultDiffuse[2]))
            self.diffuseAlpha = defaultDiffuse[3]
        self.mustBakeDiffuse = in_socket.mustBake if isinstance(in_socket, AbstractBJSNode) else False

        # Metallic: scalar default.
        in_socket = self.findInput('Metallic')
        defaultMetallic = self.findTexture(in_socket, METAL_TEX)
        if defaultMetallic is not None:
            self.metallic = defaultMetallic
        self.mustBakeMetal = in_socket.mustBake if isinstance(in_socket, AbstractBJSNode) else False

        # Specular: scalar default expanded to a grey color.
        in_socket = self.findInput('Specular')
        defaultSpecular = self.findTexture(in_socket, SPECULAR_TEX)
        if defaultSpecular is not None:
            self.specularColor = Color((defaultSpecular, defaultSpecular, defaultSpecular))
        self.mustBakeSpecular = in_socket.mustBake if isinstance(in_socket, AbstractBJSNode) else False

        # Roughness: scalar default; no bake flag for this channel
        # (kept from the original behaviour).
        in_socket = self.findInput('Roughness')
        defaultRoughness = self.findTexture(in_socket, ROUGHNESS_TEX)
        if defaultRoughness is not None:
            self.roughness = defaultRoughness

        # Index of refraction.
        in_socket = self.findInput('IOR')
        defaultIOR = self.findTexture(in_socket, REFRACTION_TEX)
        if defaultIOR is not None:
            self.indexOfRefraction = defaultIOR
        self.mustBakeRefraction = in_socket.mustBake if isinstance(in_socket, AbstractBJSNode) else False

        # Normal map: texture only, no default value to copy.
        in_socket = self.findInput('Normal')
        self.findTexture(in_socket, BUMP_TEX)
        self.mustBakeNormal = in_socket.mustBake if isinstance(in_socket, AbstractBJSNode) else False
| [
"jeffrey.c.palmer@gmail.com"
] | jeffrey.c.palmer@gmail.com |
548c1d70cafd9449e416a68b4facefb8c5b1fd68 | be67e8736f8437d8ded442326da2b899c97cfad5 | /spider/__init__.py | 086be4c911b49658fa9b9bd4bbd7672eb168ce8e | [
"BSD-2-Clause"
] | permissive | BeiFenKu/PSpider | e4b76fc19fbd3fc23dddc10bfb84d57f3887c545 | 44d8ed3e006e0812621f45c35c1bb59557c84e2e | refs/heads/master | 2020-06-18T05:11:04.933335 | 2019-07-05T02:56:06 | 2019-07-05T06:31:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 254 | py | # _*_ coding: utf-8 _*_
"""
define WebSpider, and also define utilities and instances for web_spider
"""
__version__ = "2.3.0"
from .utilities import *
from .concurrent import TPEnum, WebSpider
from .instances import Fetcher, Parser, Saver, Proxieser
| [
"qixianhu@qq.com"
] | qixianhu@qq.com |
303bd68ad340b22c922e6ab0e2584613fe64719f | 195592971a36c6593372a77c9b593c9482195c38 | /rbac/context.py | 7e30600aaab1a4189a2fdfaa61618ee476a1ed88 | [
"MIT"
] | permissive | hxz2015/simple-rbac | 2a2c9b771296cbdc5d458c97ee85c40e1a717c21 | 5d975c5cd3faaaa5ba6bbe5e72b215fa66b718eb | refs/heads/master | 2020-12-25T08:59:36.881324 | 2012-05-22T06:13:06 | 2012-05-22T06:13:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,544 | py | #!/usr/bin/env python
#-*- coding:utf-8 -*-
import functools
__all__ = ["IdentityContext", "PermissionDenied"]
class PermissionContext(object):
    """A context of decorator to check the permission.

    Usable three ways: as a decorator, as a ``with`` context manager,
    and in a truth test (truthy when the wrapped check passes).
    """

    def __init__(self, checker):
        # ``checker`` is a callable raising PermissionDenied on refusal.
        self.check = checker
        self.in_context = False

    def __call__(self, wrapped):
        def wrapper(*args, **kwargs):
            with self:
                return wrapped(*args, **kwargs)
        return functools.update_wrapper(wrapper, wrapped)

    def __enter__(self):
        self.in_context = True
        self.check()  # raises on denial, aborting the with-block
        return self

    def __exit__(self, exception_type, exception, traceback):
        self.in_context = False

    def __nonzero__(self):
        try:
            self.check()
        except PermissionDenied:
            return False
        else:
            return True

    # Fix: Python 3 looks up __bool__, not __nonzero__ -- without this
    # alias the truth test always returned True on Python 3.
    __bool__ = __nonzero__
class IdentityContext(object):
    """Identity environment tying an ACL to the current user's roles."""

    def __init__(self, acl, roles_loader=None):
        self.acl = acl
        self.set_roles_loader(roles_loader)

    def set_roles_loader(self, role_loader):
        """Register a callable returning an iterable of the current
        context user's roles.

        Example:
            >>> @context.set_roles_loader
            ... def load_roles():
            ...     user = request.context.current_user
            ...     for role in user.roles:
            ...         yield role
        """
        self.load_roles = role_loader

    def check_permission(self, operation, resource, **exception_kwargs):
        """Build a decorator/context guarding ``operation`` on ``resource``.

        The keyword arguments are stored on the raised exception's
        ``kwargs`` attribute when access is denied.
        """
        checker = functools.partial(self._docheck, operation=operation,
                                    resource=resource, **exception_kwargs)
        return PermissionContext(checker)

    def _docheck(self, operation, resource, **exception_kwargs):
        granted_roles = self.load_roles()
        if self.acl.is_any_allowed(granted_roles, operation, resource):
            return True
        # Denied: raise the configured exception type (PermissionDenied
        # by default), forwarding the remaining keyword arguments.
        exception_class = exception_kwargs.pop("exception", PermissionDenied)
        raise exception_class(**exception_kwargs)
class PermissionDenied(Exception):
    """Raised when an access request is refused.

    All keyword arguments (plus the message itself, under the key
    'message') are kept on the ``kwargs`` attribute for inspection.
    """

    def __init__(self, message="", **kwargs):
        super(PermissionDenied, self).__init__(message)
        kwargs['message'] = message
        self.kwargs = kwargs
| [
"tonyseek@gmail.com"
] | tonyseek@gmail.com |
475fea2c52e68d17ae70c60ec9c7696f1541de5d | 51a2fb45db6a074c7bd5af32c8ee8471251436f4 | /第六章-pytest框架/pycharm执行pytest脚本04.py | aa07b70cda12e66c1cc6219137e7c0b1cd9a4b43 | [] | no_license | JiangHuYiXiao/Web-Autotest-Python | c5e2cf61a5a62d132df048d3218dfb973be8784e | 65b30360337b56b6ca4eba21f729c922f1665489 | refs/heads/master | 2021-08-26T07:46:42.957744 | 2021-08-12T02:24:11 | 2021-08-12T02:24:11 | 253,945,694 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 654 | py | # -*- coding:utf-8 -*-
# @Author : 江湖一笑
# @Time : 2020/5/1 10:19
# @Software : Web-Autotest-Python
# @Python_verison : 3.7
# 之前执行python文件时候,我们在文件中直接右键,然后run
# 执行unittest脚本时候,我们是run unittest
# 但是执行pytest脚本时直接执行是不生效的,这个时候,需要我们设置一下编辑器pycharm的关于该文件的默认runner
# 配置路径:Settings ---> Tools ---> Python Integrated Tools ---> Testing ---> Default test runner
# 配置完成后需要在该文件的父目录下右键进行执行,在该文件上执行还是不行的
| [
"1163270704@qq.com"
] | 1163270704@qq.com |
fe4318402e9a3ece4700c76f73180ff008a72990 | b9efe70d12c2cbd55065d02e974f5725534583ee | /src/visualize.py | 8f0d478c6b67f04feb8db8746118f6f3d3f78fe5 | [] | no_license | diegoami/bankdomain_PY | 5089581ea7b7db6233243dff305488ff27dc8e90 | 83816e1beb96d3e9e0f746bec7f9db9521f32ee7 | refs/heads/master | 2022-12-17T05:05:13.557911 | 2020-06-03T22:19:44 | 2020-06-03T22:19:44 | 131,530,574 | 0 | 0 | null | 2022-12-08T01:30:27 | 2018-04-29T21:12:25 | HTML | UTF-8 | Python | false | false | 719 | py |
import yaml
import logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
from query import QueryExecutor
if __name__ == '__main__':
    # Load connection settings and build the query service.
    config = yaml.safe_load(open('config.yml'))
    models_dir = config['models_dir']
    mongo_connection = config['mongo_connection']
    query_executor = QueryExecutor(mongo_connection, models_dir)

    # NOTE(review): `doc2vec_similar` is neither defined nor imported in
    # this file -- this call raises NameError as written; confirm the
    # intended import.
    doc2vec_similar(query_executor)
    # NOTE(review): `model`, `PCA` and `pyplot` are also undefined here;
    # presumably a gensim model plus sklearn.decomposition.PCA and
    # matplotlib.pyplot -- verify the missing imports.
    # Project the vocabulary vectors onto 2 PCA components and
    # scatter-plot them with their word labels.
    X = model[model.wv.vocab]
    pca = PCA(n_components=2)
    result = pca.fit_transform(X)
    # create a scatter plot of the projection
    pyplot.scatter(result[:, 0], result[:, 1])
    words = list(model.wv.vocab)
    for i, word in enumerate(words):
        pyplot.annotate(word, xy=(result[i, 0], result[i, 1]))
pyplot.show() | [
"diego.amicabile@gmail.com"
] | diego.amicabile@gmail.com |
30dadc0842eb8cfffe8c22e53818c815aa56b7cd | 3ea104409b5ab5f1d1928af7d31b4a58b11d220a | /venv/Lib/site-packages/networkx/algorithms/approximation/steinertree.py | f999694d811236225ab2b782b57a53c9e3e2f377 | [
"Apache-2.0"
] | permissive | farhananwari07/flask-image-processing | 0103ab0600995a760e27ffc644ffb313de4eaade | a4a4ad717ffd074afbe31cbf8803060764034375 | refs/heads/main | 2023-09-02T01:21:27.328049 | 2021-11-10T07:58:17 | 2021-11-10T07:58:17 | 425,517,466 | 0 | 0 | Apache-2.0 | 2021-11-07T13:55:56 | 2021-11-07T13:55:56 | null | UTF-8 | Python | false | false | 3,376 | py | from itertools import chain
from networkx.utils import pairwise, not_implemented_for
import networkx as nx
__all__ = ["metric_closure", "steiner_tree"]
@not_implemented_for("directed")
def metric_closure(G, weight="weight"):
    """Return the metric closure of a graph.

    The metric closure of a graph *G* is the complete graph in which each
    edge is weighted by the shortest path distance between the nodes in *G*.

    Parameters
    ----------
    G : NetworkX graph

    Returns
    -------
    NetworkX graph
        Metric closure of the graph `G`.
    """
    closure = nx.Graph()
    pending = set(G)

    paths_iter = nx.all_pairs_dijkstra(G, weight=weight)

    # Handle the first source specially: its reachable set tells us
    # whether G is connected at all.
    source, (distance, path) = next(paths_iter)
    if pending - set(distance):
        msg = "G is not a connected graph. metric_closure is not defined."
        raise nx.NetworkXError(msg)
    pending.remove(source)
    for target in pending:
        closure.add_edge(source, target, distance=distance[target], path=path[target])

    # Remaining sources: connect each to every node not yet paired with it.
    for source, (distance, path) in paths_iter:
        pending.remove(source)
        for target in pending:
            closure.add_edge(source, target, distance=distance[target], path=path[target])

    return closure
@not_implemented_for("directed")
def steiner_tree(G, terminal_nodes, weight="weight"):
    """Return an approximation to the minimum Steiner tree of a graph.

    The minimum Steiner tree of `G` w.r.t a set of `terminal_nodes` is a
    tree within `G` that spans those nodes and has minimum size (sum of
    edge weights) among all such trees.

    The approximation computes the minimum spanning tree of the subgraph
    of the metric closure of *G* induced by the terminal nodes, then
    expands each MST edge back into its shortest path in *G*.  The
    resulting tree's weight is within a (2 - (2 / t)) factor of the
    optimal Steiner tree, where *t* is the number of terminal nodes.

    Parameters
    ----------
    G : NetworkX graph

    terminal_nodes : list
        A list of terminal nodes for which minimum steiner tree is
        to be found.

    Returns
    -------
    NetworkX graph
        Approximation to the minimum steiner tree of `G` induced by
        `terminal_nodes`.

    Notes
    -----
    For multigraphs, the edge between two nodes with minimum weight is
    the edge put into the Steiner tree.

    References
    ----------
    .. [1] Steiner_tree_problem on Wikipedia.
       https://en.wikipedia.org/wiki/Steiner_tree_problem
    """
    # Subgraph of the metric closure induced by the terminals.
    closure = metric_closure(G, weight=weight)
    induced = closure.subgraph(terminal_nodes)
    # MST over the shortest-path distances stored on the closure's edges.
    mst_edges = nx.minimum_spanning_edges(induced, weight="distance", data=True)
    # Expand each MST edge into the consecutive pairs of its stored path;
    # repeated pairs are harmless to edge_subgraph.
    edges = chain.from_iterable(pairwise(d["path"]) for u, v, d in mst_edges)
    if G.is_multigraph():
        # Keep only the minimal-weight parallel edge for each node pair.
        edges = (
            (u, v, min(G[u][v], key=lambda k: G[u][v][k][weight])) for u, v in edges
        )
    return G.edge_subgraph(edges)
| [
"agoes.minarno@gmail.com"
] | agoes.minarno@gmail.com |
d00e03a5d6e2f5c023b3cfd468bcf23e8c80a838 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02409/s275514572.py | 76183adbf22944843da70b0b0f778f92ff20fd49 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 322 | py | bilding = [[[0 for x in range(10)] for x in range(3)]for x in range(4)]
# AOJ "Official House": apply n tenant changes to the 4-building x
# 3-floor x 10-room occupancy grid `bilding` (initialised above), then
# print every floor's counts.
n = int(input())
for k in range(n):
    # b: building, f: floor, r: room (all 1-based); v: tenant delta
    # (may be negative when tenants leave).
    # Fix: was raw_input(), which is Python 2 only and raises NameError
    # on Python 3 while the rest of the script uses Python 3 print().
    b, f, r, v = map(int, input().split())
    bilding[b-1][f-1][r-1] += v
for b in range(4):
    for f in range(3):
        # Expected output format: a leading space before the joined counts.
        print(" "+" ".join(map(str, bilding[b][f])))
if b < 3:
print("#"*20) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
4806bb126dbfe932a85b60a9b3914c0aea218210 | 38558ac2e78837e7f975364f03a1f55fb02103af | /BASIC TOOL PROGRAM/fibo1.py | 80c92885b451da1d16be6a515d0476e3e78a53d9 | [] | no_license | SOURADEEP-DONNY/WORKING-WITH-PYTHON | a0bc2ff5ddab1b25563927c8f361c6512683d6ff | 5198d14f0711a3ba7f2fe8bac61d6404c20ea40c | refs/heads/master | 2023-07-14T04:49:08.399519 | 2021-08-29T15:22:33 | 2021-08-29T15:22:33 | 270,723,307 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 288 | py | def Fibonacci(n):
if n < 0:
print("Incorrect input")
elif n == 0:
return 0
elif n == 1 or n == 2:
return 1
else:
return Fibonacci(n-1) + Fibonacci(n-2)
# Read the highest index from stdin, then print the Fibonacci sequence
# F(0) .. F(number), one value per line.
number=int(input())
for i in range(0,number+1,1):
    print(Fibonacci(i))
| [
"noreply@github.com"
] | SOURADEEP-DONNY.noreply@github.com |
6a31fc78953cf06d9acabfb6b9b2db3def13b768 | 34b94033b5bbb43c5ffd1c7e9672e46ce735ebf7 | /.circleci/checklhe/lhefile.py | ff69dcc10b63de537987cd49e8d241f81ae7e819 | [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] | permissive | JHUGen/JHUGen | c85329874bf3778954c4b8061a3098eea1a926ef | 2854cbfc3d82122fbfce22dcea2e83ca4312f7f3 | refs/heads/master | 2023-09-01T03:39:35.212476 | 2023-08-28T20:50:59 | 2023-08-28T20:50:59 | 38,982,074 | 4 | 18 | Apache-2.0 | 2023-08-29T14:21:26 | 2015-07-12T23:29:19 | Fortran | UTF-8 | Python | false | false | 6,111 | py | import collections
import ROOT
import config
import globalvariables
import event
import particle
class LHEFile:
def __init__(self, filename):
globalvariables.init()
if filename[-4:] != ".lhe":
raise ValueError(filename + " does not end in .lhe")
self.filename = filename
self.f = open(filename)
self.nevents = 0
self.n4e = 0
self.n4mu = 0
self.n2e2mu = 0
self.linenumber = 0
self.incomment = False
self.sawinitblock = False
self.processidlist = []
self.VegasNc2 = None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
print " ", self.nevents, "events"
print " ", self.n4e, "4e events"
print " ", self.n4mu, "4mu events"
print " ", self.n2e2mu, "2e2mu events"
self.f.close()
if self.VegasNc2 is not None and self.nevents != self.VegasNc2:
self.raiseerror("VegasNc2={}, but {} events!".format(self.VegasNc2, self.nevents))
def raiseerror(self, msg):
if config.raiseerror:
raise IOError(msg)
else:
print msg
def readevent(self):
while "<event" not in self.nextline():
if not self.line: #eof
return None
if "</event>" in self.line:
self.raiseerror("Extra </event>! " + str(self.linenumber))
if "<init>" in self.line:
if self.sawinitblock:
self.raiseerror("Extra init block! " + str(self.linenumber))
self.sawinitblock = True
data = self.nextline().split()
try:
[int(data[i]) for i in (0, 1, 4, 5, 6, 7, 8, 9)]
[float(data[i]) for i in (2, 3)]
except (ValueError, IndexError):
self.raiseerror("Bad init line 1!")
nprocesses = int(data[9])
for p in range(nprocesses):
data = self.nextline().split()
if "</init>" in self.line:
self.raiseerror("Not enough lines in init block!")
break
try:
[float(data[i]) for i in (0, 1, 2)]
if float(data[0]) < 0:
self.raiseerror("Pythia doesn't like negative cross sections!")
int(data[3])
for i in range(3, len(data)):
self.processidlist.append(int(data[i]))
except (ValueError, IndexError):
self.raiseerror("Bad init line %i!" % (2+p))
while "</init>" not in self.nextline() and "<event>" not in self.line:
if self.line.split():
self.raiseerror("Extra line in init block!")
if "<event>" in self.line:
self.raiseerror("No </init>!")
break
if "<!--" in self.line:
if not self.line.strip().startswith("<!--"):
self.raiseerror("Warning: comment begins in the middle of a line\n"
"(ok in itself, but other problems may not be detected)! " + str(self.linenumber))
if self.incomment:
self.raiseerror("<!-- inside a comment! " + str(self.linenumber))
self.line = self.line.replace("<!--", "", 1)
if "<!--" in self.line:
self.raiseerror("Warning: multiple <!-- in one line\n"
"(ok in itself, but other problems may not be detected!" + str(self.linenumber))
self.incomment = True
if "-->" in self.line:
if not self.line.strip().endswith("-->"):
self.raiseerror("Warning: comment ends in the middle of a line\n"
"(ok in itself, but problems may not be detected)! " + str(self.linenumber))
if not self.incomment:
self.raiseerror("--> not preceded by <!--! " + str(self.linenumber))
self.line = self.line.replace("-->", "", 1)
if "-->" in self.line:
self.raiseerror("Warning: multiple --> in one line\n"
"(ok in itself, but other problems may not be detected!" + str(self.linenumber))
self.incomment = False
if "--" in self.line and self.incomment:
self.raiseerror("-- in a comment! " + str(self.linenumber))
if self.incomment and "VegasNc2=" in self.line and ("VBFoffsh_run=*" in self.line or not any("Process={}".format(_) in self.line for _ in (66,67,68,69))):
for argument in self.line.split():
if argument.startswith("VegasNc2="):
self.VegasNc2 = int(argument.split("=")[-1])
if not self.sawinitblock:
self.raiseerror("No <init>!")
ev = event.Event(self.linenumber, self.processidlist)
ev.setfirstline(self.nextline())
while "</event>" not in self.nextline():
if not self.line:
self.raiseerror("File ends in the middle of an event!")
return None
if "<event" in self.line:
self.raiseerror("Extra <event>! " + str(self.linenumber))
try:
ev.addparticle(self.line)
except particle.BadParticleLineError:
continue
ev.finished()
self.nevents += 1
if ev.is4e(): self.n4e += 1
if ev.is4mu(): self.n4mu += 1
if ev.is2e2mu(): self.n2e2mu += 1
return ev
    def nextline(self):
        """Advance to the next line of the input file.

        Bumps the 1-based line counter, caches the raw line (with its
        trailing newline) on ``self.line`` and returns it; an empty string
        signals end-of-file, mirroring ``file.readline``.
        """
        self.linenumber += 1
        self.line = self.f.readline()
        return self.line
    def __iter__(self):
        """The reader is its own iterator (see ``next``)."""
        return self
def next(self):
ev = self.readevent()
if ev is not None:
return ev
raise StopIteration
| [
"jroskes1@jhu.edu"
] | jroskes1@jhu.edu |
338fab704d4d753f954d7a50cb2cd98a24a2f00e | f40cc44ebfc337326577c91cd88d0c1dd845b098 | /LuminarPythonPrograms/LoopingProgram/printEven.py | 681b99cde15812529009265a2004a2f511ba6e06 | [] | no_license | Aswin2289/LuminarPython | 6e07d6f9bf6c8727b59f38f97f5779a33b2fab0d | ba633a276dd79bbf214cfceac2413c894eaa1875 | refs/heads/master | 2023-01-01T07:52:41.598110 | 2020-10-13T04:34:49 | 2020-10-13T04:34:49 | 290,109,547 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 165 | py | #program to print up to rabge
i=int(input("Enter ur Lower Limit"))
num=int(input("Enter ur range"))
# Print every even number from the lower limit up to and including num.
while(i<=num):
    if (i % 2 == 0):
        print(i)
i+=1 | [
"aswinabraham4@gmail.com"
] | aswinabraham4@gmail.com |
e12f42185cab421d33cb53913f513b98dac13e7f | 3f13885fdb0649374d866d24a43f86ccc6b4c782 | /apps/workflow/api/app.py | 9d58254a7068da6ba7bf6ec51644b3c0e2badd4c | [] | no_license | linkexf/oneops | 426b271c00c5b4b4c55d1d91bf42030dab29623a | 64a9c7fd949b6220234a276614ab6555dc8cc17c | refs/heads/master | 2020-12-10T04:45:55.681731 | 2019-11-28T09:02:30 | 2019-11-28T09:02:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,417 | py | # -*- coding: utf-8 -*-
import uuid
import requests
import simplejson as json
from pprint import pprint
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.generic import View
from common.mixins import JSONResponseMixin
from common.utils.zabbix_api import get_access_token, get_host_ids, get_monitor_item_ids, update_monitor_item
from cmdb.models.asset import Server
from cmdb.models.business import App, BizMgtDept
from cmdb.views.ip import get_ips_by_server_id
from workflow.models import CommonFlow, CommonFlowArg
from ssh.models.host_user import HostUserAsset
from job.tasks.ansible_api import AnsibleAPI
class AnsibleHostsGroupInitAPIView(JSONResponseMixin, View):
    """Render an Ansible inventory snippet (pre/beta/prod host groups) for
    the app referenced by a common-flow's ``app_id`` argument."""
    def get(self, request, *args, **kwargs):
        try:
            flow_id = kwargs.get('flow_id')
            cf = CommonFlow.objects.get(id=flow_id)
            app_id = CommonFlowArg.objects.get(cf=cf, arg='app_id').value
            app = App.objects.get(id=app_id)
            dept = app.biz_mgt_dept
            # Climb the department tree until parent_id == 2 (presumably the
            # first-level department — TODO confirm) to obtain the dept_code
            # prefix used in the inventory group names.
            while True:
                if dept.parent_id == 2:
                    dept_code = dept.dept_code
                    break
                else:
                    dept = BizMgtDept.objects.get(id=dept.parent_id)
            # Hostnames of the app's servers, one list per environment.
            pre_host = [s.hostname for s in Server.objects.filter(pre_app=app, app_env='pre')]
            beta_host = [s.hostname for s in Server.objects.filter(pre_app=app, app_env='beta')]
            prod_host = [s.hostname for s in Server.objects.filter(pre_app=app, app_env='prod')]
            # INI-style inventory text: one [dept-app-env] section per environment.
            result = '''[{0}-{1}-pre]\n{2}\n[{0}-{1}-beta]\n{3}\n[{0}-{1}-prod]\n{4}\n'''.format(
                dept_code, app.app_code, '\n'.join(pre_host), '\n'.join(beta_host), '\n'.join(prod_host))
            res = {'code': 0, 'result': result}
        except Exception as e:
            # Any failure (missing flow/app, broken dept chain, ...) is
            # reported as code 1 with the exception text.
            res = {'code': 1, 'errmsg': '执行出错:%s' % str(e)}
        return self.render_json_response(res)
class OpsProjectCreateAPIView(JSONResponseMixin, View):
    """Prepare (GET) and submit (POST) a deploy-project definition for the
    ops backend's ``project.create2`` JSON-RPC method."""
    def get(self, request, *args, **kwargs):
        # Build the default project fields for the app referenced by the flow.
        try:
            flow_id = kwargs.get('flow_id')
            cf = CommonFlow.objects.get(id=flow_id)
            app_id = CommonFlowArg.objects.get(cf=cf, arg='app_id').value
            app = App.objects.get(id=app_id)
            dept = app.biz_mgt_dept
            # Climb to the first-level department (parent_id == 2) for the
            # group-name prefix, same convention as AnsibleHostsGroupInitAPIView.
            while True:
                if dept.parent_id == 2:
                    dept_code = dept.dept_code
                    break
                else:
                    dept = BizMgtDept.objects.get(id=dept.parent_id)
            data = {
                "app_code": "prod_" + app.app_code,
                "app_type": app.app_type.upper(),
                "comment": app.comment,
                # Deploy script depends on the artifact type (war vs jar).
                "p_script": "/jenkins/data/deploy_war.sh" if app.app_type == 'war' else "/jenkins/data/deploy_jar.sh",
                "p_tomcat": '/data/{}-{}'.format(app.tomcat_port, app.app_code),
                "p_war": app.app_code,
                "p_prehost": '{0}-{1}-pre'.format(dept_code, app.app_code),
                "p_host1": '{0}-{1}-beta'.format(dept_code, app.app_code),
                "p_host2": '{0}-{1}-prod'.format(dept_code, app.app_code)
            }
            res = {'code': 0, 'result': data}
        except Exception as e:
            res = {'code': 1, 'errmsg': '执行出错:%s' % str(e)}
        return self.render_json_response(res)
    def post(self, request, *args, **kwargs):
        # Forward the submitted form to the ops JSON-RPC endpoint.
        try:
            post_data = request.POST.copy().dict()
            p_group = ','.join(request.POST.getlist('p_group', []))
            print(p_group, post_data)  # NOTE(review): debug output left in place
            post_data['p_group'] = p_group
            post_data['principal'] = "1"
            post_data['p_user'] = "1"
            headers = {"Content-Type": "application/json"}
            data = {
                "jsonrpc": "2.0",
                "id": 1,
                "method": "project.create2",
                "params": post_data
            }
            pprint(data)  # NOTE(review): debug output left in place
            ret = requests.post("http://opsapi.yadoom.com/api", headers=headers, json=data)
            # The RPC reply wraps a JSON string inside its 'result' field,
            # hence the double decode.
            res = json.loads(json.loads(ret.text)['result'])
            pprint(res)  # NOTE(review): debug output left in place
        except Exception as e:
            res = {'code': 1, 'errmsg': '执行出错:%s' % str(e)}
        return self.render_json_response(res)
class OpsRoleListAPIView(JSONResponseMixin, View):
    """Proxy view: fetch the role list from the ops JSON-RPC backend."""

    def get(self, request, *args, **kwargs):
        """Forward a ``role.getlist2`` JSON-RPC call and relay its payload."""
        payload = {
            "jsonrpc": "2.0",
            "id": 1,
            "method": "role.getlist2",
            "params": {},
        }
        try:
            reply = requests.post(
                "http://opsapi.yadoom.com/api",
                headers={"Content-Type": "application/json"},
                json=payload,
            )
            res = json.loads(reply.text)
        except Exception as e:
            res = {'code': 1, 'errmsg': '执行出错:%s' % str(e)}
        return self.render_json_response(res)
class AppOfflineCodeBackupAPIView(LoginRequiredMixin, JSONResponseMixin, View):
    """Run the app-offline Ansible playbook (code backup) on every server of
    the given app, using each host's root HostUser credential."""
    def post(self, request, *args, **kwargs):
        try:
            app_id = kwargs.get('app_id')
            app = App.objects.get(id=app_id)
            for host in app.app_server.all():
                ips = get_ips_by_server_id(host.id)
                if not ips:
                    # Host has no bound IP address — skip it.
                    print(host.hostname, host.login_address, " 没有关联ip地址!")
                    continue
                hua = HostUserAsset.objects.filter(asset=host, host_user__username='root')
                if hua:
                    hu = hua[0].host_user
                    # NOTE(review): this rebinds the view's **kwargs parameter;
                    # harmless here but a confusing name to reuse.
                    kwargs = {
                        'resource': list(),
                        'hosts_file': ["/data/ansible/inventory/public/hosts_all"],
                        'host_user': hu.id
                    }
                    playbook = ["/data/ansible/playbook/admin/app_offline.yml"]
                    extra_vars = {"apphost": host.login_address.split(":")[0], "app_code": app.app_code,
                                  "tomcat_port": app.tomcat_port, "app_type": app.app_type}
                    ansible_api = AnsibleAPI(0, str(uuid.uuid4()), **kwargs)
                    print(ansible_api.run_playbook(playbook, extra_vars))
                else:
                    # No root HostUser bound to this host — skip it.
                    print(host.hostname, host.login_address, " 未绑定用户为root的HostUser!")
            res = {'code': 0, 'result': '任务已提交,请再手动确认是否执行成功!'}
        except Exception as e:
            res = {'code': 1, 'result': str(e)}
        return self.render_json_response(res)
class AppOfflineDisableMonitorAPIView(LoginRequiredMixin, JSONResponseMixin, View):
    """Disable the Zabbix port-status monitor item for every server of an app."""
    def post(self, request, *args, **kwargs):
        try:
            app_code = kwargs.get('app_code')
            app = App.objects.get(app_code=app_code)
            # Collect the first bound IP of each of the app's servers.
            ip_list = []
            for host in app.app_server.all():
                ips = get_ips_by_server_id(host.id)
                if ips:
                    ip_list.append(ips[0])
            tk = get_access_token()
            host_ids = get_host_ids(tk, ip_list)
            item_ids = get_monitor_item_ids(tk, host_ids, 'status[%d]' % app.tomcat_port)
            print(tk, ip_list, host_ids, item_ids)  # NOTE(review): debug output
            # Third argument 0 is the helper's enable/disable flag — confirm
            # its meaning in common.utils.zabbix_api (Zabbix's item.update
            # uses status 0=enabled, 1=disabled).
            update_monitor_item(tk, item_ids, 0)
            res = {'code': 0, 'result': '已经禁用!'}
        except Exception as e:
            res = {'code': 1, 'result': str(e)}
        return self.render_json_response(res)
| [
"andykaiyu@163.com"
] | andykaiyu@163.com |
d1cf5a4253fa7bcd261e49182c6b3867f11c3dca | 5bb1ae9b9e6592def632b8a95def32b3a2d742d5 | /movie_wish/test.py | 2fe8adc8befd22f8eebf640259ea2602a1fec457 | [] | no_license | fiso0/my_python | af1132637a4ad92036ea0a949fa93df6f904b190 | 391def01ecdb97b8e3008235910a596bb5a9b52c | refs/heads/master | 2021-01-17T15:52:36.745999 | 2016-10-29T08:37:51 | 2016-10-29T08:37:51 | 58,641,769 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 133 | py | import csv
# Print the first column of every row in test1.csv.
with open('test1.csv', newline='') as csv_file:
    for row in csv.reader(csv_file):
        print(row[0])
| [
"fiso0@126.com"
] | fiso0@126.com |
0dbf5d407aa3f004da79893bc38e9e510244c139 | 1c25798a9ae17ca228383fcd04a1e801415a78e7 | /Chapter 5 Loops/Ex_26_27_Sum of series.py | 760239321ae5adb7938fc99cbc0a019b58c4dfe3 | [] | no_license | padamcs36/Introduction_to_Python_By_Daniel | 688c56fff598617e979a5f71e9a48e50844ad7ea | 8b8f00c9d93428c58df9c90e5edd8f75a1662647 | refs/heads/main | 2023-02-22T08:11:01.450054 | 2021-01-28T09:45:01 | 2021-01-28T09:45:01 | 333,700,411 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 748 | py | import math
import time
# Series 1: sum of (2n-1)/(2n+1), i.e. 1/3 + 3/5 + ... + 99/101, to 2 dp.
# The accumulator deliberately shadows builtin `sum`: the final "Sum is"
# print below this section reuses the same name.
sum = 0
for odd in range(1, 100, 2):
    sum += odd / (odd + 2)
print(format(sum, ".2f"))
# Series 2: Leibniz series PI/4 = 1 - 1/3 + 1/5 - ...; report 4*sum every
# 10000 terms and time the whole loop.
sum = 0
startTime = time.time()
for i in range(1, 100001):
    # Term sign alternates: +1 for odd i, -1 for even i.
    sum += (1.0 if i % 2 else -1.0) / (2 * i - 1)
    if i % 10000 == 0:
        print("PI is: ", format(4 * sum, ".3f"), "for i =", i)
endTime = time.time()
totalTime = endTime - startTime
print(format(totalTime, ".4f"), "seconds")
print("Sum is: ",format(4 * sum, ".3f")) | [
"noreply@github.com"
] | padamcs36.noreply@github.com |
884d30f7edf976714898409cef9a4c40addf737a | a2d36e471988e0fae32e9a9d559204ebb065ab7f | /huaweicloud-sdk-projectman/huaweicloudsdkprojectman/v4/model/custom_field.py | f8a5aeca1f2ddc517de2d87bccbe37aa652592cb | [
"Apache-2.0"
] | permissive | zhouxy666/huaweicloud-sdk-python-v3 | 4d878a90b8e003875fc803a61414788e5e4c2c34 | cc6f10a53205be4cb111d3ecfef8135ea804fa15 | refs/heads/master | 2023-09-02T07:41:12.605394 | 2021-11-12T03:20:11 | 2021-11-12T03:20:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,492 | py | # coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class CustomField:
    """Name/value pair describing one custom attribute.

    SDK-style model: ``openapi_types`` and ``attribute_map`` describe the
    attribute types and their JSON field names; ``sensitive_list`` names
    attributes that must be masked when serialized.
    """

    sensitive_list = []

    openapi_types = {
        'name': 'str',
        'value': 'str'
    }

    attribute_map = {
        'name': 'name',
        'value': 'value'
    }

    def __init__(self, name=None, value=None):
        """CustomField - a model defined in huaweicloud sdk"""
        self._name = None
        self._value = None
        self.discriminator = None
        if name is not None:
            self.name = name
        if value is not None:
            self.value = value

    @property
    def name(self):
        """str: 自定义属性名 (the custom attribute's name)."""
        return self._name

    @name.setter
    def name(self, name):
        self._name = name

    @property
    def value(self):
        """str: 自定义属性对应的值 (the custom attribute's value)."""
        return self._value

    @value.setter
    def value(self, value):
        self._value = value

    def to_dict(self):
        """Return the model's properties as a plain dict, masking sensitive ones."""
        def serialize(item, attr):
            # Nested models expose to_dict(); lists/dicts are converted
            # element-wise; plain sensitive values are masked.
            if isinstance(item, list):
                return [entry.to_dict() if hasattr(entry, "to_dict") else entry
                        for entry in item]
            if hasattr(item, "to_dict"):
                return item.to_dict()
            if isinstance(item, dict):
                return {key: (val.to_dict() if hasattr(val, "to_dict") else val)
                        for key, val in item.items()}
            return "****" if attr in self.sensitive_list else item

        return {attr: serialize(getattr(self, attr), attr)
                for attr, _ in six.iteritems(self.openapi_types)}

    def to_str(self):
        """Return the JSON string representation of the model."""
        import simplejson as json
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """For `print`"""
        return self.to_str()

    def __eq__(self, other):
        """True when `other` is a CustomField with identical state."""
        return isinstance(other, CustomField) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of ``__eq__``."""
        return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
19f2ccbf1a31225761a59aaa8e647b1c518ebff7 | 11aac6edab131293027add959b697127bf3042a4 | /findTheDistanceValue.py | bf05470d870073c26a25a66b2d735dd8220abcf5 | [] | no_license | jdanray/leetcode | a76b3436002b31865967b757b73c85992636383b | fd736af3e79899b86dac89d4d925d5bd985944ad | refs/heads/master | 2023-08-15T01:20:05.110565 | 2023-08-14T00:25:58 | 2023-08-14T00:25:58 | 148,686,493 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 256 | py | # https://leetcode.com/problems/find-the-distance-value-between-two-arrays/
class Solution(object):
    def findTheDistanceValue(self, arr1, arr2, d):
        """Count values in arr1 whose distance to every element of arr2 exceeds d."""
        return sum(
            1 for x in arr1
            if all(abs(x - y) > d for y in arr2)
        )
| [
"jdanray@users.noreply.github.com"
] | jdanray@users.noreply.github.com |
cee25608874f37bd66b4725cf44924e0d439e5e8 | 0f7cf365e00e3e116deca345ceb53588a0aee152 | /src/collective/jazzport/interfaces.py | dc33b605b2b04ec04c62ef85cb2436223579b31b | [] | no_license | datakurre/collective.jazzport | d66c59556a1055d46843c672babb4d8764732e19 | cf9d46dd50e40ea0437f6059ed9bc7ee57bb24f2 | refs/heads/master | 2023-06-11T03:42:09.980698 | 2023-05-26T06:37:17 | 2023-05-26T06:37:17 | 23,119,617 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 612 | py | # -*- coding: utf-8 -*-
from zope import schema
from zope.i18nmessageid import MessageFactory
from zope.interface import Interface
_ = MessageFactory('collective.jazzport')
class IJazzportLayer(Interface):
    """Marker interface that defines a Zope 3 browser layer.

    Views and resources registered for this layer are only active on sites
    where the collective.jazzport add-on is installed.
    """
class IJazzportSettings(Interface):
    """Settings schema for the add-on.

    NOTE(review): the storage mechanism (e.g. plone.registry) is not
    visible here — confirm how this schema is registered.
    """

    # Which content types are downloadable; the vocabulary limits choices
    # to the site's user-friendly portal types.
    portal_types = schema.Set(
        title=_(u'Portal types'),
        description=_(u'Select downloadable portal types'),
        value_type=schema.Choice(
            title=_(u'Type'),
            vocabulary='plone.app.vocabularies.ReallyUserFriendlyTypes'
        ),
        required=False
    )
| [
"asko.soukka@iki.fi"
] | asko.soukka@iki.fi |
0f41d9dff29f888d64ce2cea72375459185714ae | fbe05017b477a8b6c3603be3f2003c4a80854868 | /src/Ner_tag.py | c985de37f08fe224ed29a7391ead1068256d594b | [] | no_license | enningxie/user-level | 7cb8e9a30090adabea085bde046049c52c86cf84 | a3ef1c2b51b39eceef0f95c3f251a810e2bae801 | refs/heads/master | 2020-08-08T08:44:50.514365 | 2019-10-31T06:52:31 | 2019-10-31T06:52:31 | 213,796,188 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,740 | py | """
################################Ner_tag.py################################
程序名称: Ner_tag.py
功能描述: 实体标注
创建人名: wuxinhui
创建日期: 2019-07-12
版本说明: v1.0
################################Ner_tag.py################################
"""
import numpy as np
import re
from random import shuffle
import copy
import jieba
import cn2an
import json
import random
def utils_func(src):
    """Normalize text: full-width chars to half-width, then strip + lowercase."""
    def to_halfwidth(text):
        out = []
        for ch in text:
            code = ord(ch)
            if code == 12288:  # ideographic space -> ASCII space
                code = 32
            elif 65281 <= code <= 65374:  # full-width ASCII block
                code -= 65248
            out.append(chr(code))
        return "".join(out)
    return to_halfwidth(src).strip().lower()
# functions
# find the ids of str for tar
def find_str(src, tar, idl):
    """All start offsets of (possibly overlapping) occurrences of tar in src.

    With idl != 0, a hit is kept only when reg_parse confirms it is not
    embedded in a longer [a-z0-9-] token.
    """
    width = len(tar)
    hits = []
    for start in range(len(src) - width + 1):
        if src[start:start + width] != tar:
            continue
        if idl == 0 or reg_parse(src, start, start + width - 1):
            hits.append(start)
    return hits
# find the parse for src
def reg_parse(src, i, j):
    """Boundary check for the span src[i..j].

    The span passes when neither edge sits inside a run of [a-z0-9-]
    characters (case-insensitive), i.e. the match is not a fragment of a
    longer alphanumeric token.
    """
    pair = "[a-z0-9\\-]{2}"
    left_free = i == 0 or re.match(pair, src[i - 1:i + 1], re.I) is None
    right_free = j == len(src) - 1 or re.match(pair, src[j:j + 2], re.I) is None
    return left_free and right_free
# the tag functions
def tag_func(sen, S, tag, label, Allow, idl):
    """Tag every occurrence of the strings in S inside sen with `label`.

    Longer matches are applied first; a span is written only when all of
    its current tags start with a character in `Allow` and its end-points
    are still taggable (O / *_BEG / *_END).  Multi-char spans become
    label_BEG / label_MID / label_END; single chars get the bare label.
    `idl` is forwarded to find_str (non-zero enables the boundary check).
    Returns the mutated tag list.
    """
    S = set(S)
    S = [s for s in S if s in sen]
    # Build the candidate index spans for every matched string (提取索引列表).
    idList = []
    for i in S:
        ids = find_str(sen, i, idl)
        ids = [list(range(w, w + len(i))) for w in ids]
        idList.extend(ids)
    # Longest spans first, so longer entities win over their substrings.
    idList.sort(key=len)
    idList.reverse()
    """
    # 去重索引列表
    idSet = []
    idList.sort(key=len)
    while(len(idList) != 0):
        temp = idList.pop()
        lab = 0
        for i in idSet:
            if len(set(temp).intersection(set(i))) > 0:
                lab = 1
                break
        if lab == 0:
            idSet.append(temp)
    """
    # Apply the tags span by span (标注索引列表).
    for ids in idList:
        table = [tag[i][0] for i in ids]
        flag = [tag[ids[0]], tag[ids[-1]]]
        if not (set(table).issubset(set(Allow))):
            continue
        if re.search("O$|BEG$", flag[0], re.I) == None or re.search("O$|END$", flag[1], re.I) == None:
            continue
        if len(ids) > 1:
            tag[ids[0]] = label + "_BEG"
            tag[ids[-1]] = label + "_END"
            for i in ids[1:-1]:
                tag[i] = label + "_MID"
        else:
            tag[ids[0]] = label
    return tag
# extract the tag from sen
def tag_extract(sen, tag, label):
    """Pull out of `sen` every substring whose tags spell a `label` span.

    Multi-char spans are delimited by label_BEG ... label_END (with
    label_MID in between); a bare `label` tag marks a single-char hit.
    """
    found = []
    for pos in range(len(sen)):
        cur = tag[pos]
        if cur == label + "_BEG":
            chunk = sen[pos]
        elif cur == label + "_MID":
            chunk += sen[pos]
        elif cur == label + "_END":
            chunk += sen[pos]
            found.append(chunk)
        elif cur == label:
            found.append(sen[pos])
        else:
            chunk = ""
    return found
# classes
# main Ner_tag spi class
class Ner_tag(object):
    """Dictionary/regexp driven char-level NER tagger.

    The knowledge graph (`kg`) is a JSON mapping label -> {"value": [...],
    "regexp": [...], "map": {...}}.  Labels "B", "S" and "M" get special,
    multi-pass treatment (they appear to denote brand/series/model — TODO
    confirm against kg.json).
    """
    def __init__(self, file):
        super(Ner_tag, self).__init__()
        # NOTE(review): the file handle is opened (binary mode) and never
        # closed explicitly.
        self.__kg = json.load(open(file, "rb"))
    def set_ner_kg(self, kg):
        # Replace the loaded knowledge graph (useful for tests).
        self.__kg = kg
        return
    def get_ner_kg(self):
        return self.__kg
    def ner_tag_api(self, sen):
        """
        Finish the char-level tagging of the normalized sentence and
        return the per-character tag list.
        """
        sen = utils_func(sen)
        tag = ["O"] * len(sen)
        # First pass: every non-B/S/M label, via its regexps when present.
        labels = [l for l in self.__kg.keys() if l not in ["B", "S", "M"]]
        for l in labels:
            try:
                regexp = [re.compile(r) for r in self.__kg[l]["regexp"]]
                S = sum([r.findall(sen) for r in regexp], [])
            except:
                # Bare except: fall back to the literal value list when the
                # label has no usable "regexp" entry.
                value = self.__kg[l]["value"]
                S = [v for v in value if v in sen]
            tag = tag_func(sen, S, tag, l, ["O"], 0)
        # B uses the boundary check (idl=1); M may overwrite S tags.
        tag = tag_func(sen, self.__kg["B"]["value"], tag, "B", ["O"], 1)
        tag = tag_func(sen, self.__kg["S"]["value"], tag, "S", ["O"], 0)
        tag = tag_func(sen, self.__kg["M"]["value"], tag, "M", ["S", "O"], 0)
        # Second pass: re-tag S/M candidates mapped from the found B labels.
        Blabel = tag_extract(sen, tag, "B")
        Stalk = sum([self.__kg["S"]["map"][b] for b in Blabel], [])
        tag = tag_func(sen, Stalk, tag, "S", ["O"], 1)
        Mtalk = sum([self.__kg["M"]["map"][b] for b in Blabel], [])
        tag = tag_func(sen, Mtalk, tag, "M", ["O"], 1)
        return tag
    def ner_log_api(self, sen):
        """Tag `sen` and aggregate the hits into a car-info dict
        (keys: "serie", "color", "model")."""
        tag = self.ner_tag_api(sen)
        B = tag_extract(sen, tag, "B")
        S = tag_extract(sen, tag, "S")
        car_info = {}
        car_info["serie"] = []
        car_info["color"] = []
        car_info["model"] = []
        # extract the car serie: prefix each S with its B when kg's B-map
        # links them, otherwise keep the bare S.
        for s in S:
            label = 0
            for b in B:
                if s.lower() in self.__kg["B"]["map"][b]:
                    car_info["serie"].append(b + s)
                    label = 1
                    break
            if label == 0:
                car_info["serie"].append(s)
        # B labels not already represented in a serie entry are added alone.
        for b in B:
            label = 0
            for i in car_info["serie"]:
                if b in i:
                    label = 1
                    break
            if label == 0:
                car_info["serie"].append(b)
        # extract the car model: union of the Y/N/E/G/D/Q/I/M label hits.
        Y = tag_extract(sen, tag, "Y")
        N = tag_extract(sen, tag, "N")
        E = tag_extract(sen, tag, "E")
        G = tag_extract(sen, tag, "G")
        D = tag_extract(sen, tag, "D")
        Q = tag_extract(sen, tag, "Q")
        I = tag_extract(sen, tag, "I")
        M = tag_extract(sen, tag, "M")
        car_info["model"].extend(Y)
        car_info["model"].extend(N)
        car_info["model"].extend(E)
        car_info["model"].extend(G)
        car_info["model"].extend(D)
        car_info["model"].extend(Q)
        car_info["model"].extend(I)
        car_info["model"].extend(M)
        # extract the car color
        C = tag_extract(sen, tag, "C")
        car_info["color"].extend(C)
        return car_info
# main function
if __name__ == "__main__":
    # Smoke test: tag a sample sentence against the local knowledge graph
    # and dump the extracted car info.
    kg_file = "../data/kg.json"
    Ner = Ner_tag(kg_file)
    sen = "宝马x3红色豪华版2.0t4座;奥迪a6豪华版"
    car_info = Ner.ner_log_api(sen)
    print(car_info)
    print(type(car_info))
| [
"enningxie@163.com"
] | enningxie@163.com |
f5159f6728ae1c9e4c5fe4a4bb1dd66b6e175470 | b3eb6f6144017e84e727bb65ba945916b6f5363c | /tests/integration_tests/explore/permalink/api_tests.py | a44bc70a7b49a7a0dd213871dc1f46d264ed564e | [
"Apache-2.0",
"OFL-1.1"
] | permissive | mistercrunch/superset | f838bd80144c48ea4dc27ae29db1df2521ef1bd5 | f144de4ee2bf213bb7e17f903bd3975d504c4136 | refs/heads/master | 2023-06-07T13:16:36.674565 | 2022-05-06T10:11:41 | 2022-05-06T10:11:41 | 56,703,070 | 17 | 6 | Apache-2.0 | 2023-03-04T00:13:28 | 2016-04-20T16:28:49 | TypeScript | UTF-8 | Python | false | false | 4,726 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import pickle
from typing import Any, Dict, Iterator
from uuid import uuid3
import pytest
from sqlalchemy.orm import Session
from superset import db
from superset.key_value.models import KeyValueEntry
from superset.key_value.types import KeyValueResource
from superset.key_value.utils import decode_permalink_id, encode_permalink_key
from superset.models.slice import Slice
from tests.integration_tests.base_tests import login
from tests.integration_tests.fixtures.client import client
from tests.integration_tests.fixtures.world_bank_dashboard import (
load_world_bank_dashboard_with_slices,
load_world_bank_data,
)
from tests.integration_tests.test_app import app
@pytest.fixture
def chart(load_world_bank_dashboard_with_slices) -> Slice:
    """Fixture: the "World's Population" slice from the World Bank dashboard."""
    with app.app_context() as ctx:
        session: Session = ctx.app.appbuilder.get_session
        chart = session.query(Slice).filter_by(slice_name="World's Population").one()
    return chart
@pytest.fixture
def form_data(chart) -> Dict[str, Any]:
    """Fixture: minimal permalink form data referencing the chart's datasource."""
    datasource = f"{chart.datasource.id}__{chart.datasource.type}"
    return {
        "chart_id": chart.id,
        "datasource": datasource,
    }
@pytest.fixture
def permalink_salt() -> Iterator[str]:
    """Yield the explore-permalink salt; remove its backing row on teardown."""
    from superset.key_value.shared_entries import get_permalink_salt, get_uuid_namespace
    from superset.key_value.types import SharedKey

    key = SharedKey.EXPLORE_PERMALINK_SALT
    salt = get_permalink_salt(key)
    yield salt
    # Teardown: delete the shared-entry row created for the salt.
    namespace = get_uuid_namespace(salt)
    db.session.query(KeyValueEntry).filter_by(
        resource=KeyValueResource.APP,
        uuid=uuid3(namespace, key),
    ).delete()  # BUG FIX: the query was previously built but never executed
    db.session.commit()
def test_post(client, form_data: Dict[str, Any], permalink_salt: str):
    """Creating a permalink returns 201 with a key embedded in the URL."""
    login(client, "admin")
    resp = client.post(f"api/v1/explore/permalink", json={"formData": form_data})
    assert resp.status_code == 201
    data = json.loads(resp.data.decode("utf-8"))
    key = data["key"]
    url = data["url"]
    assert key in url
    # Clean up the key-value row created by the endpoint.
    id_ = decode_permalink_id(key, permalink_salt)
    db.session.query(KeyValueEntry).filter_by(id=id_).delete()
    db.session.commit()
def test_post_access_denied(client, form_data):
    """A restricted (gamma) user gets 404 when creating a permalink."""
    login(client, "gamma")
    resp = client.post(f"api/v1/explore/permalink", json={"formData": form_data})
    assert resp.status_code == 404
def test_get_missing_chart(client, chart, permalink_salt: str) -> None:
    """A permalink whose stored chart id does not exist yields 404."""
    from superset.key_value.models import KeyValueEntry

    # chart_id 1234 intentionally references a non-existent chart.
    chart_id = 1234
    entry = KeyValueEntry(
        resource=KeyValueResource.EXPLORE_PERMALINK,
        value=pickle.dumps(
            {
                "chartId": chart_id,
                "datasetId": chart.datasource.id,
                "formData": {
                    "slice_id": chart_id,
                    "datasource": f"{chart.datasource.id}__{chart.datasource.type}",
                },
            }
        ),
    )
    db.session.add(entry)
    db.session.commit()
    key = encode_permalink_key(entry.id, permalink_salt)
    login(client, "admin")
    resp = client.get(f"api/v1/explore/permalink/{key}")
    assert resp.status_code == 404
    # Clean up the manually inserted entry.
    db.session.delete(entry)
    db.session.commit()
def test_post_invalid_schema(client) -> None:
    """A payload that does not match the schema (no formData) is rejected with 400."""
    login(client, "admin")
    resp = client.post(f"api/v1/explore/permalink", json={"abc": 123})
    assert resp.status_code == 400
def test_get(client, form_data: Dict[str, Any], permalink_salt: str) -> None:
    """Round-trip: the stored formData comes back under state.formData."""
    login(client, "admin")
    resp = client.post(f"api/v1/explore/permalink", json={"formData": form_data})
    data = json.loads(resp.data.decode("utf-8"))
    key = data["key"]
    resp = client.get(f"api/v1/explore/permalink/{key}")
    assert resp.status_code == 200
    result = json.loads(resp.data.decode("utf-8"))
    assert result["state"]["formData"] == form_data
    # Clean up the key-value row created by the endpoint.
    id_ = decode_permalink_id(key, permalink_salt)
    db.session.query(KeyValueEntry).filter_by(id=id_).delete()
    db.session.commit()
| [
"noreply@github.com"
] | mistercrunch.noreply@github.com |
d28962804b2abad424e448abfdee8dd9f2b68f69 | 06cc07502c88cfda7335a7605eef7e8ec0043c01 | /app.py | dfa30cd22cd10f084ddb6cfb501479e4bd6336f8 | [] | no_license | sumayah20-meet/Y2-Individual-Project-yl1920 | 16f2b767a9f485b585b2a49e65fc54c74bbae543 | 2e9d06774feeb80b2c625972d1db700de87428fd | refs/heads/master | 2020-12-23T05:31:36.609884 | 2020-01-30T14:52:15 | 2020-01-30T14:52:15 | 237,051,980 | 0 | 0 | null | 2020-01-29T18:20:09 | 2020-01-29T18:20:08 | null | UTF-8 | Python | false | false | 1,359 | py | from flask import Flask, request, redirect, url_for, render_template
from flask import session as login_session
from databases import *
app=Flask(__name__)
app.secret_key="MY_SUPER_SECRET_KEY"
@app.route('/', methods =["GET","POST"])
def signIN():
    """Sign-in page: GET renders the form, POST checks the credentials."""
    if request.method == "GET":
        return render_template('signin.html')
    else:
        username = request.form["uname"]
        password = request.form["psw"]
        # signin() comes from databases (star import); presumably returns a
        # truthy value for valid credentials — TODO confirm.
        s=signin(username,password)
        if s:
            return render_template("index.html")
        else:
            print("try again")
            return render_template('signin.html')
@app.route('/signup',methods=["POST","GET"])
def signUp():
    """Sign-up page: GET renders the form, POST stores the new account."""
    if request.method == "GET":
        return render_template('signup.html')
    else:
        # save() comes from databases (star import) — persists the credentials.
        save(request.form['email'],request.form['psw'])
        return redirect(url_for('signIN'))
@app.route('/index')
def index():
    """Landing page."""
    return render_template('index.html')
@app.route('/about-us')
def hello():
    """About-us page (handler name kept for the existing route)."""
    return render_template ('about-us.html')
@app.route('/contact', methods=["GET","POST"])
def contact():
    """Contact page; POST stores the submitted name/password pair via save()."""
    if request.method == "GET":
        return render_template ('contact.html')
    else:
        username = request.form['uname']
        password = request.form["psw"]
        save(username,password)
        # Render the landing page with the submitted values exposed as u/p.
        return render_template('index.html',
            u = username,
            p = password
            )
app.run(debug = True) | [
"myname21@meet.mit.edu"
] | myname21@meet.mit.edu |
d99790c7a7ab62bf4ceef5930255e68034969f27 | 49f81640f961e74668116b2600fe3c77646cc94d | /notebooks/howtofit/chapter_phase_api/src/plot/dataset_plots.py | c4d3eae50801dbc3904c5646a13ff71e9d385c1e | [] | no_license | knut0815/autofit_workspace | 5b97b37f5cf28f6b723c7cca73fa6b9a95e8ffc2 | 4a4fdacf62614150f500716cc8eca1613ae2e4af | refs/heads/master | 2023-03-01T23:32:06.681456 | 2021-02-08T19:48:18 | 2021-02-08T19:48:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,225 | py | import matplotlib.pyplot as plt
from os import path
from src.dataset import dataset as ds
from src.plot import line_plots
"""
These functions are simple matplotlib calls that plot components of our Line class, specifically its data and
noise-map. We additional include a function that plots the dataset on a single subplot.
Storing simple functions like this for plotting components of our `Dataset` will prove beneficial when using the
`Aggregator`, when it comes to inspecting the results of a model-fit after they have been completed.
"""
def subplot_dataset(
    dataset: ds.Dataset,
    output_path: str = None,
    output_filename: str = None,
    output_format: str = "show",
):
    """
    Plot the `Dataset` using a subplot containing both its data and noise-map.

    Parameters
    -----------
    dataset : Dataset
        The observed `Dataset` which is plotted.
    output_path : str
        The path where the image of the data is output, if saved as a `.png`.
    output_filename : str
        The name of the file the image of the data is output too, if saved as a `.png`.
    output_format : str
        Whether the data is output as a `.png` file ("png") or displayed on the screen ("show").
    """

    plt.figure(figsize=(18, 8))
    # Left panel: the data (bypass_show keeps both panels on this figure).
    plt.subplot(1, 2, 1)
    data(
        dataset=dataset,
        output_path=output_path,
        output_filename=output_filename,
        output_format=output_format,
        bypass_show=True,
    )
    # Right panel: the noise-map.
    plt.subplot(1, 2, 2)
    noise_map(
        dataset=dataset,
        output_path=output_path,
        output_filename=output_filename,
        output_format=output_format,
        bypass_show=True,
    )
    # Output the composed figure once, here, rather than per-panel.
    if "show" in output_format:
        plt.show()
    elif "png" in output_format:
        plt.savefig(path.join(output_path, f"{output_filename}.png"))
    plt.clf()
def data(
    dataset: ds.Dataset,
    output_path: str = None,
    output_filename: str = None,
    output_format: str = "show",
    bypass_show: bool = False,
):
    """Plot the data values of a `Dataset` (with noise-map error bars).

    Parameters
    ----------
    dataset
        The observed `Dataset` whose data is plotted.
    output_path
        Directory the `.png` is written to when ``output_format="png"``.
    output_filename
        File name of the output `.png`.
    output_format
        ``"png"`` to save the figure, ``"show"`` to display it on screen.
    bypass_show
        When `True`, skip show/savefig (used when composing subplots).
    """
    figure_kwargs = dict(
        xvalues=dataset.xvalues,
        line=dataset.data,
        errors=dataset.noise_map,
        title="Data",
        ylabel="Data Values",
        color="k",
        output_path=output_path,
        output_filename=output_filename,
        output_format=output_format,
        bypass_show=bypass_show,
    )
    line_plots.figure(**figure_kwargs)
def noise_map(
    dataset: ds.Dataset,
    output_path: str = None,
    output_filename: str = None,
    output_format: str = "show",
    bypass_show: bool = False,
):
    """Plot the noise-map of a `Dataset`.

    Parameters
    ----------
    dataset
        The observed `Dataset` whose noise-map is plotted.
    output_path
        Directory the `.png` is written to when ``output_format="png"``.
    output_filename
        File name of the output `.png`.
    output_format
        ``"png"`` to save the figure, ``"show"`` to display it on screen.
    bypass_show
        When `True`, skip show/savefig (used when composing subplots).
    """
    figure_kwargs = dict(
        xvalues=dataset.xvalues,
        line=dataset.noise_map,
        title="Noise-Map",
        ylabel="Noise-Map",
        color="k",
        output_path=output_path,
        output_filename=output_filename,
        output_format=output_format,
        bypass_show=bypass_show,
    )
    line_plots.figure(**figure_kwargs)
| [
"james.w.nightingale@durham.ac.uk"
] | james.w.nightingale@durham.ac.uk |
5345f42313ef7de64d8be3c515006672cfbe3f6a | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02623/s047885182.py | 44b5644e8a45249917468d9e82d48b09397b8cd8 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 370 | py | n, m, k =map(int, input().split())
# Two-pointer / prefix-sum sweep: read as many books as possible within k
# minutes, taking some prefix of desk A's books and some prefix of desk B's.
a=list(map(int, input().split()))
b=list(map(int, input().split()))
ta=sum(a)  # time for A's first (n - i) books; shrinks as i grows below
a.append(0)  # sentinel so the first iteration (i = 0) subtracts nothing
tb=0  # time spent on B's first j books
ans=0
j=0
for i in range(n+1):
    # Drop A's last book of the current prefix: now reading (n - i) from A.
    ta -= a[n-i]
    if ta>k:
        continue
    # Greedily extend j; j never resets, so the sweep is O(n + m) overall.
    while tb + ta<=k:
        if j ==m:
            ans=max(ans,n-i+j)
            break
        ans=max(ans,n-i+j)
        tb += b[j]
        j +=1
print(ans)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
adca2c6960f7bbac282ee2b716b6491ccc961149 | ce0a3a73c7825f7327b8319fb2593b6b01659bb0 | /django2/django2/urls.py | 595e965be38913aca0dabcf5b90b553d7fa2b72b | [] | no_license | soccergame/deeplearning | 28b0a6ed85df12e362b3a451050fab5a2a994be7 | cbc65d3eba453992a279cfd96a9d3640d8fe6b9f | refs/heads/master | 2020-03-28T22:38:26.085464 | 2018-08-31T11:22:39 | 2018-08-31T11:22:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 369 | py | from django.conf.urls import url
from django.contrib import admin
from . import view, testdb, search, search2
# URL routing table: maps request paths to their view callables.
urlpatterns = [
    url(r'^admin/', admin.site.urls),  # Django admin site
    url(r'^hello$', view.hello),
    url(r'^testdb$', testdb.testdb),
    url(r'^search-form$', search.search_form),
    url(r'^search$', search.search),
    url(r'^search-post$', search2.search_post),
]
| [
"18811442380@163.com"
] | 18811442380@163.com |
f0e1b3caf276b73936a22e0d07640e6442fe1083 | 43e5441f74359d620be6f7f80c99622769ea9774 | /apps/userprofile/views.py | 7e593193b5d5447e4364e51d60e04116950e793b | [] | no_license | 33Da/deeplearn_eassy | 96f1bd09fe3df907c650378215eb686e4ab2801e | 82d60c5ec3aec60822d68d13f11ef1320d0bba2e | refs/heads/master | 2023-02-07T15:02:00.202693 | 2021-01-05T05:03:22 | 2021-01-05T05:03:22 | 326,892,905 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,884 | py | import re
from rest_framework import mixins
from rest_framework import viewsets
from rest_framework.exceptions import ValidationError
from rest_framework.permissions import IsAuthenticated
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
from rest_framework import authentication
from .serializers import *
from apps.utils.util import send_email,create_vaildcode
from django.contrib.auth.backends import ModelBackend
from django.db.models import Q
User = get_user_model()
class CumstomBackend(ModelBackend):
def authenticate(self, request, username=None,email=None, password=None, **kwargs):
try:
user = User.objects.get(username=username)
print(1)
if user.check_password(password):
return user
except Exception as e:
return None
"""用户"""
class RegisterViewSet(APIView):
"""注册用户"""
def post(self,request,*args,**kwargs):
# 校验参数
serializer = UserLoginSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
# 保存
serializer.save()
return Response({"status_code": status.HTTP_200_OK,
"message": "ok",
"results": [],
}, status=status.HTTP_200_OK)
# 用户修改
class UserViewset(mixins.UpdateModelMixin, mixins.CreateModelMixin,mixins.RetrieveModelMixin,viewsets.GenericViewSet, mixins.ListModelMixin):
"""
retrieve:查看信息
update:更新用户,用户修改信息
"""
authentication_classes = (JSONWebTokenAuthentication, authentication.SessionAuthentication)
serializer_class = UserDetailSerializer
permission_classes = (IsAuthenticated,)
def update(self, request, *args, **kwargs):
# 获取用户
user = request.user
email = request.data.get('email',None)
username = request.data.get('username',None)
if not all([email,username]):
raise ValidationError('参数不全')
emailcount = UserProfile.objects.filter(email=email).exclude(id=request.user.id).count()
usernamecount = UserProfile.objects.filter(username=username).exclude(id=request.user.id).count()
if emailcount > 0:
raise ValidationError('邮箱存在')
if usernamecount > 0:
raise ValidationError('用户名存在')
user.email = email
user.username = username
user.save()
return Response({"status_code": status.HTTP_200_OK,
"message": "ok",
"results": "修改成功",
}, status=status.HTTP_200_OK)
def retrieve(self, request, *args, **kwargs):
user_id = request.user.id
try:
user = UserProfile.objects.filter(id=int(user_id)).get()
except Exception as e:
print(e)
raise ValidationError("参数错误")
ret = self.get_serializer(user)
ret = ret.data
# 文案数
ret["document_count"] = len(ret["document"])
return Response({"status_code": status.HTTP_200_OK,
"message": "ok",
"results": [ret],
}, status=status.HTTP_200_OK)
def perform_create(self, serializer):
return serializer.save()
class PasswordViewset(mixins.UpdateModelMixin,viewsets.GenericViewSet):
"""
update:更新密码
"""
authentication_classes = (JSONWebTokenAuthentication, authentication.SessionAuthentication)
permission_classes = (IsAuthenticated,)
def update(self, request, *args, **kwargs):
# 获取用户
user = request.user
serializer = PasswordSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
# 校验验证码
try:
ture_vaildcode = int(cache.get(request.user.email, None))
except Exception as e:
print(e)
raise ValidationError({'error': ['验证码错误']})
if ture_vaildcode != int(serializer.validated_data["vaildcode"]):
raise ValidationError({'error': ['验证码错误']})
# 把缓存删除
cache.set(request.user.email, '555', 1)
user.set_password(serializer.validated_data["password"])
user.save()
return Response({"status_code": status.HTTP_200_OK,
"message": "ok",
"results": "修改成功",
}, status=status.HTTP_200_OK)
class VaildcodeViewSet(APIView):
"""
生成验证码
"""
def post(self,request,*args,**kwargs):
# 获取email
email = request.data.get("email","11")
# 校验email
result = re.match(r"^[a-zA-Z0-9_-]+@[a-zA-Z0-9_-]+(\.[a-zA-Z0-9_-]+)+$", email)
if result is None:
raise ValidationError("邮箱为空或格式错误")
# 生成验证码
code = create_vaildcode(email)
# 发送验证码
send_status = send_email(valid_code=code,email=email)
# send_status = 1
if send_status == 1:
return Response({"status_code": status.HTTP_200_OK,
"message": "ok",
"results": "",
}, status=status.HTTP_200_OK)
else:
return Response({"status_code": '400',
"message": "error",
"results":"发送失败",
}, status=status.HTTP_200_OK)
class HeadPicViewSet(APIView):
"""
头像
"""
authentication_classes = (JSONWebTokenAuthentication, authentication.SessionAuthentication)
permission_classes = (IsAuthenticated,)
def get(self,request,*args,**kwargs):
user = request.user
try:
pic_url = user.head_pic.url
except Exception as e:
print(e)
pic_url = None
print(pic_url)
return Response({"status_code": status.HTTP_200_OK,
"message": "ok",
"results": [{"pic":pic_url}],
}, status=status.HTTP_200_OK)
def post(self,request,*args,**kwargs):
user = request.user
pic = request.FILES.get('file')
if pic is None:
raise ValidationError("未上传文件")
user.head_pic = pic
user.save()
return Response({"status_code": status.HTTP_200_OK,
"message": "ok",
"results": [],
}, status=status.HTTP_200_OK)
| [
"764720843@qq.com"
] | 764720843@qq.com |
55cecae7878e380607863d0d4a5958f4b2b29c5c | 3b8fd5b73337b3cd70b283644c266d4ec962ad54 | /2020-2021/DEV1/Chapter 4/BA6.py | ad6e0f0204c3089a9327ecfeed2c4cd9e16c8dd4 | [] | no_license | Andy00097/hrinf-development | 20f4604ca5637c710d9d25e7e218a2ae1233498b | 464ca039537d6b8ca04bf95ba070b8f1f7b81188 | refs/heads/main | 2023-01-09T03:52:54.214472 | 2020-11-11T10:11:57 | 2020-11-11T10:11:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 250 | py | x = 7
y = 8
z = 5
print()
if x < y and x < z:
minimum = x
minStr = 'min is x : ' + str(x)
if y < x and y < z:
minimum = y
minStr = 'min is y : ' + str(y)
if z < x and z < y:
minimum = z
minStr = 'min is z : ' + str(z)
print() | [
"stijn@kolkies.dev"
] | stijn@kolkies.dev |
ce9d21a2b5baf0bcf9b3667360ecdff5b94a9ce4 | acd41dc7e684eb2e58b6bef2b3e86950b8064945 | /res/packages/scripts/scripts/client/notification/BaseNotificationView.py | bd3e9ecdba53e2b53b7572bb7e917edc3733fb4d | [] | no_license | webiumsk/WoT-0.9.18.0 | e07acd08b33bfe7c73c910f5cb2a054a58a9beea | 89979c1ad547f1a1bbb2189f5ee3b10685e9a216 | refs/heads/master | 2021-01-20T09:37:10.323406 | 2017-05-04T13:51:43 | 2017-05-04T13:51:43 | 90,268,530 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 1,429 | py | # 2017.05.04 15:27:34 Střední Evropa (letní čas)
# Embedded file name: scripts/client/notification/BaseNotificationView.py
from debug_utils import LOG_ERROR
class BaseNotificationView(object):
def __init__(self, model = None):
super(BaseNotificationView, self).__init__()
self._model = None
self.__flashIDCounter = 0
self.__flashIdToEntityIdMap = {}
self.__entityIdToFlashIdMap = {}
self.setModel(model)
return
def setModel(self, value):
self._model = value
def cleanUp(self):
self._model = None
return
def _getFlashID(self, notId):
if notId in self.__entityIdToFlashIdMap:
return self.__entityIdToFlashIdMap[notId]
else:
self.__flashIDCounter += 1
self.__flashIdToEntityIdMap[self.__flashIDCounter] = notId
self.__entityIdToFlashIdMap[notId] = self.__flashIDCounter
return self.__flashIDCounter
def _getNotificationID(self, flashId):
if flashId in self.__flashIdToEntityIdMap:
return self.__flashIdToEntityIdMap[flashId]
LOG_ERROR('Wrong notification ScaleForm id', flashId)
# okay decompyling C:\Users\PC\wotmods\files\originals\res\packages\scripts\scripts\client\notification\BaseNotificationView.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.05.04 15:27:34 Střední Evropa (letní čas)
| [
"info@webium.sk"
] | info@webium.sk |
dda5436c28630a5d24d25127b608204ac8621153 | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /125_algorithms/_exercises/templates/_algorithms_challenges/leetcode/LeetcodePythonProject_with_solution/leetcode_0551_0600/LeetCode555_SplitConcatenatedStrings.py | 1611f5635c66d3a1c0372441ef57cf5e8dacf617 | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 874 | py | '''
Created on Aug 24, 2017
@author: MT
'''
c_ Solution(o..
___ splitLoopedString strs
"""
:type strs: List[str]
:rtype: str
"""
res N..
arr [m..(s, s[::-1]) ___ s __ strs]
___ i, s __ e..(arr
___ start __ (s, s[::-1]
___ j __ r..(l..(start)+1
__ n.. res:
res start[j:] + ''.j..(arr[i+1:]+arr[:i]) + start[:j]
____
res m..(res, start[j:] + ''.j..(arr[i+1:]+arr[:i]) + start[:j])
r.. res
___ test
testCases [
'abc', 'xyz' ,
]
___ strs __ testCases:
print('strs: %s' % strs)
result splitLoopedString(strs)
print('result: %s' % result)
print('-='*30+'-')
__ _____ __ _____
Solution().test()
| [
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
e45c68224c72d2987f3a4acb7dbc4ce2ca5d0784 | 7c615414af2591146f2898444fb68f60e00a8482 | /8-20/flask-test/runserver.py | dbb9fd623641a4e7a706257506126f99569bc4a7 | [] | no_license | guulyfox/Demonkeyse-Manell | 15da1db0f0abf734cd638184d46015357de02612 | 745e552ac956c5bf087943dd3f075dede9c212ac | refs/heads/master | 2021-01-01T04:37:29.080726 | 2019-03-11T00:43:40 | 2019-03-11T00:43:40 | 97,210,987 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 233 | py | from main import app
from controller import user_role
from conf.config import listen_port
from main import api
api.add_resource(user_role.Userinfo,'/getpage/')
app.run(debug = True, host ="192.168.0.73", port =5001, threaded = True)
| [
"www.hubiwu.com@qq.com"
] | www.hubiwu.com@qq.com |
4bc1cd2926d306da1aee5a69062e99ace95c5840 | 4ba5b11860b7f046622b3ece7db4e5213efcec6e | /odoo/custom/src/private/faf_sale_project/__manifest__.py | fab3b7e7c338655cd14fe20e027298ca1fb6dcc2 | [
"BSL-1.0",
"AGPL-3.0-only",
"AGPL-3.0-or-later"
] | permissive | newtratip/faf | 94719c3558f65791caf6be4fb084ce1d7bd28fae | bf22486d5d6849c94db9f56f90dd05c0563fce28 | refs/heads/master | 2023-03-29T14:37:54.968201 | 2021-04-08T11:33:26 | 2021-04-08T11:33:26 | 328,696,058 | 0 | 0 | BSL-1.0 | 2021-04-08T11:33:27 | 2021-01-11T14:48:13 | HTML | UTF-8 | Python | false | false | 635 | py | # Copyright 2021 Ecosoft Co., Ltd. (http://ecosoft.co.th)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
{
"name": "FAF - Sale Project",
"summary": "Enhance sale_project module",
"version": "14.0.1.0.0",
"category": "FAF",
"website": "http://ecosoft.co.th",
"author": "Tharathip C., Ecosoft",
"depends": [
"sale_project",
"project_status",
"sale_order_type",
"faf_sale",
],
"data": [
"views/project_views.xml",
"views/sale_views.xml",
],
"license": "AGPL-3",
"installable": True,
"maintainers": ["newtratip"],
}
| [
"tharathip.chaweewongphan@gmail.com"
] | tharathip.chaweewongphan@gmail.com |
3028f4140ed5f9dc3677bd9696eb0a366ad48b9a | a32b09a9a17c081c134d770d1da16d36dfef8951 | /ptah/testing.py | 673d0c5e4b51ab096994e799cd31b39fae0dafd4 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | webmaven/ptah | 8c1b01506736a51d25e09a79dbd648ce4891429b | 98b3afc35e2b21f0b5faed594030ddf9d7297d2e | refs/heads/master | 2021-01-18T03:46:27.575850 | 2012-05-09T05:08:51 | 2012-05-09T05:08:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,828 | py | """ base class """
import sys
import sqlalchemy
import transaction
import pkg_resources
from zope.interface import directlyProvides
from pyramid import testing
from pyramid.interfaces import \
IRequest, IAuthenticationPolicy, IAuthorizationPolicy
from pyramid.interfaces import IRouteRequest
from pyramid.view import render_view, render_view_to_response
from pyramid.path import package_name
from pyramid.authorization import ACLAuthorizationPolicy
from pyramid.authentication import AuthTktAuthenticationPolicy
if sys.version_info[:2] == (2, 6): # pragma: no cover
import unittest2 as unittest
from unittest2 import TestCase
else:
import unittest
from unittest import TestCase
import ptah
from ptah import config
class PtahTestCase(TestCase):
_init_ptah = True
_init_sqla = True
_includes = ()
_auto_commit = True
_settings = {'sqlalchemy.url': 'sqlite://'}
_packages = ()
_trusted_manage = True
_environ = {
'wsgi.url_scheme':'http',
'wsgi.version':(1,0),
'HTTP_HOST': 'example.com',
'SCRIPT_NAME': '',
'PATH_INFO': '/',}
def make_request(self, environ=None, request_iface=IRequest, **kwargs):
if environ is None:
environ=self._environ
request = testing.DummyRequest(environ=environ, **kwargs)
request.request_iface = IRequest
return request
def init_ptah(self, *args, **kw):
self.registry.settings.update(self._settings)
self.config.include('ptah')
for pkg in self._includes: # pragma: no cover
self.config.include(pkg)
pkg = package_name(sys.modules[self.__class__.__module__])
if pkg != 'ptah':
packages = []
parts = self.__class__.__module__.split('.')
for l in range(len(parts)):
pkg = '.'.join(parts[:l+1])
if pkg == 'ptah' or pkg.startswith('ptah.') or \
pkg in self._includes:
continue # pragma: no cover
try:
self.config.include(pkg)
except: # pragma: no cover
pass
self.config.scan(self.__class__.__module__)
self.config.commit()
self.config.autocommit = self._auto_commit
self.config.ptah_init_settings()
ptah.reset_session()
if self._init_sqla:
# create engine
self.config.ptah_init_sql()
# create sql tables
Base = ptah.get_base()
Base.metadata.create_all()
transaction.commit()
if self._trusted_manage:
def trusted(*args):
return True
ptah.manage.set_access_manager(trusted)
def init_pyramid(self):
self.request = request = self.make_request()
self.config = testing.setUp(
request=request, settings=self._settings, autocommit=False)
self.config.get_routes_mapper()
self.registry = self.config.registry
self.request.registry = self.registry
def setUp(self):
self.init_pyramid()
if self._init_ptah:
self.init_ptah()
def tearDown(self):
import ptah.util
ptah.util.tldata.clear()
import ptah.security
ptah.security.DEFAULT_ACL[:] = []
from ptah.config import ATTACH_ATTR
mod = sys.modules[self.__class__.__module__]
if hasattr(mod, ATTACH_ATTR):
delattr(mod, ATTACH_ATTR)
testing.tearDown()
transaction.abort()
def render_route_view(self, context, request, route_name, view=''): # pragma: no cover
directlyProvides(
request, self.registry.getUtility(IRouteRequest, route_name))
return render_view_to_response(context, request, view)
| [
"fafhrd91@gmail.com"
] | fafhrd91@gmail.com |
4df11f774fd9e4ab12f02cd8057cf8221675aafc | 58afefdde86346760bea40690b1675c6639c8b84 | /leetcode/masking-personal-information/408960920.py | 32a0c0921e5ec105e0cd3e61b1685ed224acbd4c | [] | no_license | ausaki/data_structures_and_algorithms | aaa563f713cbab3c34a9465039d52b853f95548e | 4f5f5124534bd4423356a5f5572b8a39b7828d80 | refs/heads/master | 2021-06-21T10:44:44.549601 | 2021-04-06T11:30:21 | 2021-04-06T11:30:21 | 201,942,771 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 565 | py | # title: masking-personal-information
# detail: https://leetcode.com/submissions/detail/408960920/
# datetime: Thu Oct 15 14:44:30 2020
# runtime: 24 ms
# memory: 14.1 MB
class Solution:
def maskPII(self, S: str) -> str:
at = S.find('@')
if at >= 0:
return (S[0] + '*' * 5 + S[at - 1:]).lower()
digits = [c for c in S if c.isdigit()]
if len(digits) == 10:
return '***-***-{}'.format(''.join(digits[-4:]))
return '+{}-***-***-{}'.format('*' * (len(digits) - 10), ''.join(digits[-4:]))
| [
"ljm51689@gmail.com"
] | ljm51689@gmail.com |
b017fa7e894b1aedb746f2fb6f1be61407cce1f2 | 058f6cf55de8b72a7cdd6e592d40243a91431bde | /tests/clang_plugin/dynamic/test_fp32_overflow_found/test_fp32_overflow_found.py | c78364d0304f5e18ea782535a92d541c87185be2 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | LLNL/FPChecker | 85e8ebf1d321b3208acee7ddfda2d8878a238535 | e665ef0f050316f6bc4dfc64c1f17355403e771b | refs/heads/master | 2023-08-30T23:24:43.749418 | 2022-04-14T19:57:44 | 2022-04-14T19:57:44 | 177,033,795 | 24 | 6 | Apache-2.0 | 2022-09-19T00:09:50 | 2019-03-21T22:34:14 | Python | UTF-8 | Python | false | false | 1,816 | py | #!/usr/bin/env python
import subprocess
import os
def setup_module(module):
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
os.chdir(THIS_DIR)
def teardown_module(module):
cmd = ["make clean"]
cmdOutput = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
# returns: tuple (error, op, file, line)
#
#+-------------------------- FPChecker Warning Report --------------------------+
# Error : Underflow
# Operation : ADD
# File : dot_product.cu
# Line : 9
#+------------------------------------------------------------------------------+
#
def getFPCReport(lines):
ret = ("", "", "", "")
for i in range(len(lines)):
l = lines[i]
if "FPChecker" in l and "Report" in l and "+" in l:
err = lines[i+1].split()[2]
op = lines[i+2].split()[2]
f = lines[i+3].split()[2]
line = lines[i+4].split()[2]
ret = (err, op, f, line)
break
return ret
def test_1():
# --- compile code ---
cmd = ["make"]
try:
cmdOutput = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
except subprocess.CalledProcessError as e:
print(e.output)
exit()
# --- run code ---
cmd = ["./main"]
try:
cmdOutput = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
except subprocess.CalledProcessError as e:
print(e.output)
exit()
rep = getFPCReport(cmdOutput.decode('utf-8').split("\n"))
assert rep[0] == 'INF'
assert rep[3] == '8'
| [
"ilaguna@llnl.gov"
] | ilaguna@llnl.gov |
d1ed81e74acd860b2a60472a9908d5c19b953515 | 5dac0010edb884cd6d412954c79b75fa946e252d | /101-AWS-S3-Hacks/last_modified.py | b91aeb13f0761f01f9f3703ebe255acf10820cb1 | [] | no_license | ralic/aws_hack_collection | c1e1a107aa100e73b6e5334ed9345576057bdc9d | 7b22018169e01d79df7416dd149c015605dea890 | refs/heads/master | 2023-01-09T04:31:57.125028 | 2020-02-06T11:21:39 | 2020-02-06T11:21:39 | 90,350,262 | 3 | 1 | null | 2022-12-26T20:03:05 | 2017-05-05T07:39:34 | Python | UTF-8 | Python | false | false | 460 | py | #!/usr/bin/python
"""
- Author : Nag m
- Hack : List all the objects last modified timestamp in Zulu format
- Info : List all the objects last modified timestamp in Zulu format
* 101-s3-aws
"""
import boto
def modified(name):
bucket = conn.get_bucket(name)
lt = bucket.list()
for obj in lt:
print obj.last_modified
if __name__ == "__main__":
conn = boto.connect_s3()
bucketname = "101-s3-aws"
modified(bucketname)
| [
"raliclo@gmail.com"
] | raliclo@gmail.com |
1f3e6552b2041938ee45c8f4cf410cbcd65fad3d | 7ca4838ab8871cb78e2fcf119a252d23e2bc89c5 | /samples/generated_samples/logging_v2_generated_config_service_v2_update_view_sync.py | 33014bf236582bd759d03fd778342ff3867a3036 | [
"Apache-2.0"
] | permissive | googleapis/python-logging | abb25a7a34306527c37bb68e98bfb4d6f1647e1b | 1037afccd1436a152aa229fa98f35ec83c723d06 | refs/heads/main | 2023-08-31T10:06:49.191395 | 2023-08-29T13:28:36 | 2023-08-29T13:28:36 | 226,992,562 | 109 | 56 | Apache-2.0 | 2023-09-12T16:13:25 | 2019-12-10T00:09:45 | Python | UTF-8 | Python | false | false | 1,814 | py | # -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for UpdateView
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-logging
# [START logging_v2_generated_ConfigServiceV2_UpdateView_sync]
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import logging_v2
def sample_update_view():
# Create a client
client = logging_v2.ConfigServiceV2Client()
# Initialize request argument(s)
request = logging_v2.UpdateViewRequest(
name="name_value",
)
# Make the request
response = client.update_view(request=request)
# Handle the response
print(response)
# [END logging_v2_generated_ConfigServiceV2_UpdateView_sync]
| [
"noreply@github.com"
] | googleapis.noreply@github.com |
daaf6b89bb892f00604e3b114f689b37985fdad8 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_34/266.py | 85d39360d2c79a5749c5b6d690f381a6e01efb74 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,340 | py | #!/usr/bin/env python
import sys
input = sys.stdin.readline().strip().split(" ")
l = int(input[0])
d = int(input[1])
n = int(input[2])
words = []
for i in range(d):
words.append(sys.stdin.readline().strip())
for i in range(n):
thisWord = sys.stdin.readline().strip()
cursor = 0
possibilities = []
possibleLetters = []
for j in range(l):
oldPossibilities = possibilities
possibilities = []
possibleWords = []
for word in words:
possibleWords.append(word)
possibleLetters = ""
if thisWord[cursor] == "(":
cursor += 1
while thisWord[cursor] != ")":
possibleLetters += thisWord[cursor]
cursor += 1
cursor += 1
else:
possibleLetters = thisWord[cursor]
cursor += 1
if j == 0:
for letter in possibleLetters:
possibilities.append(letter)
else:
for possibility in oldPossibilities:
for letter in possibleLetters:
for word in possibleWords:
if word.startswith(possibility + letter):
possibilities.append(possibility + letter)
break
print "Case #%d: %d" % (i+1, len(possibilities))
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
57a2d22ba7c99cd7640e45bd7fef33b988099485 | c97b9ae1bf06757ba61f90905e4d9b9dd6498700 | /venv/Lib/site-packages/skimage/transform/pyramids.py | 848756848f446a4d459ec29ee55afc41cb49b718 | [] | no_license | Rahulk1p/image-processor | f7ceee2e3f66d10b2889b937cdfd66a118df8b5d | 385f172f7444bdbf361901108552a54979318a2d | refs/heads/main | 2023-03-27T10:09:46.080935 | 2021-03-16T13:04:02 | 2021-03-16T13:04:02 | 348,115,443 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | version https://git-lfs.github.com/spec/v1
oid sha256:ad3430ef2503a2317891465bf4bcd56b145ec8bf66b8db7ad65262fae38250f1
size 12022
| [
"rksc.k1p@gmail.com"
] | rksc.k1p@gmail.com |
ee93ff8ae4035ffd35c0d86834f7c75f28445031 | 057c525d6fbff928fc0cb0cd6b2930e9494b5d4b | /training-data/py/5-analyse.py | 1f5243bdb3b48c22ec0e2f0011149bf2dc22f64b | [] | no_license | uk-gov-mirror/ukwa.text-id | 0931742d1f2df3091ac52eee6160c177ea98180d | 5f3dcc6436bc46dedb375b37e3fd51c1c0d9b45b | refs/heads/master | 2022-02-26T15:32:15.901527 | 2019-11-19T16:36:06 | 2019-11-19T16:36:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 260 | py | a_a = {}N
a a("函数日志") a a:N
a a a a:N
a = a.a('\a', '')N
a a a a_a.a():N
a_a[a] = a_a[a] + 0N
a:N
a_a[a] = 0N
N
a(a_a)N
N
a_a = a(a_a.a(), a=(a a: a[0]), a=Aa)N
a(a_a)N
a a a a_a:N
a(a[0], a[0])N
| [
"Andrew.Jackson@bl.uk"
] | Andrew.Jackson@bl.uk |
60f032df862af5ff958b8f9bfa750e02502b8da6 | 20a9787564f76ae0fcf2332a8655b21bae0646a3 | /GrokkingCodingInterview/Trees_BFS/level_order_traversal_reversed.py | 94a73523f8bfb2e99f8125589aa943d91a719918 | [] | no_license | nidhiatwork/Python_Coding_Practice | 3b33a40c947413c2695d3ee77728fa69430f14cd | 9d5071a8ddcda19181d3db029fb801d4e3233382 | refs/heads/master | 2023-02-08T20:50:47.522565 | 2023-02-04T10:04:10 | 2023-02-04T10:04:10 | 194,607,759 | 5 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,122 | py | '''
Given a binary tree, populate an array to represent its level-by-level traversal in reverse order, i.e., the lowest level comes first. You should populate the values of all nodes in each level from left to right in separate sub-arrays.
'''
from collections import deque
class TreeNode(object):
def __init__(self, val, left=None, right=None):
self.val = val
self.left = left
self.right = right
def level_order_traversal_reversed(root):
result = deque()
if not root:
return result
queue = deque()
queue.append(root)
while queue:
levelSize = len(queue)
currentLevel = []
for _ in range(levelSize):
currentNode = queue.popleft()
currentLevel.append(currentNode.val)
if currentNode.left:
queue.append(currentNode.left)
if currentNode.right:
queue.append(currentNode.right)
result.appendleft(currentLevel)
return result
root = TreeNode(1, TreeNode(2, TreeNode(4, TreeNode(8), TreeNode(9)), TreeNode(5, TreeNode(10), TreeNode(11))), TreeNode(3, TreeNode(6, TreeNode(12), TreeNode(13)), TreeNode(7, TreeNode(14), TreeNode(15))))
print(str(level_order_traversal_reversed(root))) | [
"“nidhi.bhushan123@gmail.com”"
] | “nidhi.bhushan123@gmail.com” |
0b34fd09a32b61b26a0e91673051e43c03f74504 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/desktopvirtualization/azure-mgmt-desktopvirtualization/generated_samples/scaling_plan_update.py | 4831a97f98926ec6612a858ab2bf88ca82161dc5 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 1,636 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.desktopvirtualization import DesktopVirtualizationMgmtClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-desktopvirtualization
# USAGE
python scaling_plan_update.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = DesktopVirtualizationMgmtClient(
credential=DefaultAzureCredential(),
subscription_id="daefabc0-95b4-48b3-b645-8a753a63c4fa",
)
response = client.scaling_plans.update(
resource_group_name="resourceGroup1",
scaling_plan_name="scalingPlan1",
)
print(response)
# x-ms-original-file: specification/desktopvirtualization/resource-manager/Microsoft.DesktopVirtualization/stable/2022-09-09/examples/ScalingPlan_Update.json
if __name__ == "__main__":
main()
| [
"noreply@github.com"
] | Azure.noreply@github.com |
5118a798d9ebf7c4a8473638ed1aae87c200932f | ea40d872e4d3122387f7a17400c1d2f31cf5bd6a | /Dynamic Programming/221. Maximal Square Medium.py | c2e3c879c1cfbc3c4bffa5bb587537c1006ee3c3 | [] | no_license | dongbo910220/leetcode_ | e4cf6c849986b105d4d5162c5cd2318ffc3fbb67 | e4c02084f26384cedbd87c4c60e9bdfbf77228cc | refs/heads/main | 2023-05-29T11:23:46.865259 | 2021-06-17T03:40:30 | 2021-06-17T03:40:30 | 344,785,694 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 873 | py | '''
https://leetcode.com/problems/maximal-square/
'''
class Solution(object):
def maximalSquare(self, matrix):
"""
:type matrix: List[List[str]]
:rtype: int
"""
if not matrix:
return 0
rows = len(matrix)
cols = len(matrix[0])
maxside = 0
dp = [[0] * (cols + 1) for _ in range(rows+1)]
for i in range(rows):
for j in range(cols):
if matrix[i][j] == '1':
dp[i+1][j+1] = min(dp[i][j], dp[i+1][j], dp[i][j+1]) + 1
if dp[i+1][j+1] > maxside:
maxside = dp[i+1][j+1]
return maxside * maxside
'''
Success
Details
Runtime: 156 ms, faster than 94.83% of Python online submissions for Maximal Square.
Memory Usage: 20.2 MB, less than 12.50% of Python online submissions for Maximal Square.
''' | [
"1275604947@qq.com"
] | 1275604947@qq.com |
ee4a70928260425ed3cea0b9fe08e84f44207b80 | b76c08a4c33245a737fa0e139d212bb424017cd1 | /sandbox/order/utils.py | e2f60215327f7e0f361cf2b48bd1f2f84aaf5d02 | [
"ISC"
] | permissive | thelabnyc/django-oscar-cybersource | 5b09845121ef1c074335c01e86c649c36e4e51e4 | 95b33362adf8ba0217ac73c6f816b544c9faa18d | refs/heads/master | 2023-03-15T15:25:55.388795 | 2023-03-14T16:00:07 | 2023-03-14T16:00:07 | 58,149,620 | 4 | 3 | ISC | 2023-02-07T22:17:15 | 2016-05-05T17:45:52 | Python | UTF-8 | Python | false | false | 159 | py | from oscar.apps.order import utils
from oscarapicheckout.mixins import OrderCreatorMixin
class OrderCreator(OrderCreatorMixin, utils.OrderCreator):
pass
| [
"crgwbr@gmail.com"
] | crgwbr@gmail.com |
b324bb45f046ab2f4143cd271e5684defe01b32d | 7d4d6dc3c897ec7c297bb67f30c3f4e39509b250 | /Python/DailyFlash/27feb2020/MySolutions/program4.py | 4f1ba7e0d83a4b78cd942e0c584bda578e54146f | [] | no_license | kumbharswativ/Core2Web | 48a6ec0275466f4179c502097b1314d04a29e63e | 60949e5461ef103a4ad2c7c39ee9be0be101ec11 | refs/heads/master | 2022-12-24T06:11:45.096063 | 2020-08-09T12:04:07 | 2020-08-09T12:09:13 | 286,219,590 | 0 | 1 | null | 2022-12-11T10:57:50 | 2020-08-09T11:02:18 | Python | UTF-8 | Python | false | false | 199 | py | '''
write a program to print the following pattern
A B D G
G H J
J K
K
'''
a=70
for i in range(4,0,-1):
a=a-i-1
b=0
for j in range(i):
print(chr(a),end=" ")
b=b+1
a=a+b
print(" ")
| [
"“kumbharswativ@gmail.com”"
] | “kumbharswativ@gmail.com” |
641b1d430f4f0766aad0d2b37668f9c06eaf590f | f13f336c42313b9e45a9a497d5737ecff8652731 | /Python/116.Populating Next Right Pointers in Each Node .py | 08c9a38fe8fc369288a53e4552e2c4965c839beb | [] | no_license | whguo/LeetCode | 9b58bfbad07d9c3bfe8c48c74cd52fa6e019c2be | 74cc5aa3743d387213a36c7dcfd37e82ca60473a | refs/heads/master | 2020-02-26T16:02:39.172627 | 2017-07-28T01:59:20 | 2017-07-28T01:59:20 | 57,174,439 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,152 | py | #把二叉树同一深度的节点串联起来(next)
class TreeLinkNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
self.next = None
class Solution(object):
def connect(self, root):
self.dic = {}
self.inorder(root,0)
return root
def inorder(self,p,level):
if p!=None:
if len(self.dic)<=level:
self.dic[level] = p
else:
self.dic[level].next = p
self.dic[level] = p
self.inorder(p.left,level+1) if p.left!=None else None
self.inorder(p.right,level+1) if p.right!=None else None
t1 = TreeLinkNode(1)
t2 = TreeLinkNode(2)
t3 = TreeLinkNode(3)
t4 = TreeLinkNode(4)
t5 = TreeLinkNode(5)
t6 = TreeLinkNode(6)
t7 = TreeLinkNode(7)
t8 = TreeLinkNode(8)
t9 = TreeLinkNode(9)
t1.left = t2
t1.right = t3
t2.left = t4
t2.right = t5
t3.left = t6
t3.right = t7
t4.left = t8
t4.right = t9
s = Solution()
root = s.connect(t1)
while root!=None:
p = root
while p!=None:
print(p.val)
p = p.next
print("next")
root = root.left
| [
"490216194@qq.com"
] | 490216194@qq.com |
7c44ba09652dca939859a751af24723bd6bf41cb | f0e0c1637f3b49fd914410361c3f1f3948462659 | /Python/Sets/set_add.py | 3da84d4b720396943cd15de874d9d726b08a6a21 | [] | no_license | georggoetz/hackerrank-py | 399bcd0599f3c96d456725471708068f6c0fc4b1 | a8478670fcc65ca034df8017083269cb37ebf8b0 | refs/heads/master | 2021-09-18T07:47:32.224981 | 2018-07-11T09:24:49 | 2018-07-11T09:24:49 | 111,611,930 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 194 | py | # http://www.hackerrank.com/contests/python-tutorial/challenges/py-set-add
if __name__ == '__main__':
s = set([])
for _ in range(int(input())):
s.add(input())
print(len(s))
| [
"GeorgG@haufe.com"
] | GeorgG@haufe.com |
1a366362e73d577c2fadcb9ccc06efc6aa64d44e | f3eaf09705b9dcc92f15d9aaa25aa739bd09c161 | /pyti/money_flow.py | 28301668fcc66057d241b31d6761604305963453 | [
"MIT"
] | permissive | BernhardSchlegel/pyti | 9f7171d660d6b4e4450d3b5882132204a4d3a3d7 | bfead587fe49f7662df475a28688d3ce649e2e9b | refs/heads/master | 2021-08-19T22:14:52.721190 | 2017-11-27T14:35:59 | 2017-11-27T14:35:59 | 111,237,071 | 1 | 0 | null | 2017-11-18T20:30:59 | 2017-11-18T20:30:59 | null | UTF-8 | Python | false | false | 387 | py | from pyti import catch_errors
from pyti.typical_price import typical_price as tp
def money_flow(close_data, high_data, low_data, volume):
    """
    Money Flow.

    Formula:
    MF = VOLUME * TYPICAL PRICE
    """
    # All four input series must be the same length.
    catch_errors.check_for_input_len_diff(close_data, high_data, low_data, volume)
    return volume * tp(close_data, high_data, low_data)
| [
"kyle@collectiveidea.com"
] | kyle@collectiveidea.com |
1ace0aa1a9255b961b78176e9f318b2f0adae7a5 | 7a83e536c2ea73e9f0c61928db0f566825b60e7f | /bot/wikidata/clarkart_import.py | dbea5461e95e770004b6cc9d0c1046bf9bf3846e | [] | no_license | multichill/toollabs | 32919377ae1e1bc05608828d30d81fe672569fa5 | 99a96e49cfe6b2d3151da7ad5469792d80171be3 | refs/heads/master | 2023-08-17T19:05:59.936875 | 2023-08-16T15:41:14 | 2023-08-16T15:41:14 | 54,907,129 | 18 | 6 | null | 2021-03-04T13:23:41 | 2016-03-28T16:45:15 | Python | UTF-8 | Python | false | false | 7,012 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Bot to import paintings from the Clark Art Institute to Wikidata.
Just loop over pages like https://www.clarkart.edu/artpiece/search?limit=20&offset=0&collectionIds=1095,1096,1097,1118
This bot does uses artdatabot to upload it to Wikidata.
"""
import artdatabot
import pywikibot
import requests
import re
from html.parser import HTMLParser
import json
def getClarkArtGenerator():
    """
    Generator to return Clark Art Institute paintings

    Yields one metadata dict per painting in the shape artdatabot
    expects (collection/location Q-ids, title, creator, inception,
    acquisition date, medium and a free image URL when available).
    """
    basesearchurl = 'https://www.clarkart.edu/artpiece/search?limit=20&offset=%s&collectionIds=1095,1096,1097,1118'
    htmlparser = HTMLParser()
    session = requests.Session()
    # 545 (to start with), 20 per page
    for i in range(1, 550,20):
        searchurl = basesearchurl % (i,)
        print (searchurl)
        searchPage = session.get(searchurl)
        for item in searchPage.json().get('results'):
            # Main search contains quite a bit, but we're getting the individual objects
            #itemid = '%s' % (item.get('id'),)
            url = 'https://www.clarkart.edu%s' % (item.get('Url'),)
            itempage = session.get(url)
            metadata = {}
            pywikibot.output (url)
            metadata['url'] = url
            # Q1465805 = Clark Art Institute (used as both collection and location).
            metadata['collectionqid'] = 'Q1465805'
            metadata['collectionshort'] = 'Clark Art'
            metadata['locationqid'] = 'Q1465805'
            # Search is for paintings
            metadata['instanceofqid'] = 'Q3305213'
            # Wikidata labels are length-limited, so truncate long titles.
            title = item.get('Title').strip()
            if len(title) > 220:
                title = title[0:200]
            metadata['title'] = { 'en' : title,
                                  }
            creatorname = item.get('Artist').strip()
            metadata['creatorname'] = creatorname
            metadata['description'] = { 'nl' : '%s van %s' % ('schilderij', metadata.get('creatorname'),),
                                        'en' : '%s by %s' % ('painting', metadata.get('creatorname'),),
                                        'de' : '%s von %s' % ('Gemälde', metadata.get('creatorname'), ),
                                        'fr' : '%s de %s' % ('peinture', metadata.get('creatorname'), ),
                                        }
            # P217 = inventory number; scraped from the object page HTML.
            metadata['idpid'] = 'P217'
            invregex = '\<strong\>Object Number\<\/strong\>[\r\n\s\t]*\<\/td\>[\r\n\s\t]*\<td\>[\r\n\s\t]*([^\<]+)[\r\n\s\t]*\<\/td\>'
            invmatch = re.search(invregex, itempage.text)
            metadata['id'] = invmatch.group(1).strip()
            # Year contains the date in various variants
            if item.get('Year'):
                createdate = item.get('Year')
                # One regex per date notation: exact year, "c. YYYY",
                # "YYYY-YYYY", "c. YYYY/YYYY", "YYYY-YY" and "c. YYYY-YY".
                dateregex = '^(\d\d\d\d)\s*$'
                datecircaregex = '^c\.\s*(\d\d\d\d)\s*$'
                periodregex = '^(\d\d\d\d)\s*[-–]\s*(\d\d\d\d)\s*$'
                circaperiodregex = '^c\.\s\s*(\d\d\d\d)[-\/](\d\d\d\d)\s*$'
                shortperiodregex = '^(\d\d)(\d\d)[-–](\d\d)\s*$'
                circashortperiodregex = '^c\.\s*(\d\d)(\d\d)[-–](\d\d)\s*$'
                datematch = re.search(dateregex, createdate)
                datecircamatch = re.search(datecircaregex, createdate)
                periodmatch = re.search(periodregex, createdate)
                circaperiodmatch = re.search(circaperiodregex, createdate)
                shortperiodmatch = re.search(shortperiodregex, createdate)
                circashortperiodmatch = re.search(circashortperiodregex, createdate)
                if datematch:
                    metadata['inception'] = int(datematch.group(1).strip())
                elif datecircamatch:
                    metadata['inception'] = int(datecircamatch.group(1).strip())
                    metadata['inceptioncirca'] = True
                elif periodmatch:
                    metadata['inceptionstart'] = int(periodmatch.group(1))
                    metadata['inceptionend'] = int(periodmatch.group(2))
                elif circaperiodmatch:
                    metadata['inceptionstart'] = int(circaperiodmatch.group(1))
                    metadata['inceptionend'] = int(circaperiodmatch.group(2))
                    metadata['inceptioncirca'] = True
                elif shortperiodmatch:
                    metadata['inceptionstart'] = int('%s%s' % (shortperiodmatch.group(1),shortperiodmatch.group(2),))
                    metadata['inceptionend'] = int('%s%s' % (shortperiodmatch.group(1),shortperiodmatch.group(3),))
                elif circashortperiodmatch:
                    metadata['inceptionstart'] = int('%s%s' % (circashortperiodmatch.group(1),circashortperiodmatch.group(2),))
                    metadata['inceptionend'] = int('%s%s' % (circashortperiodmatch.group(1),circashortperiodmatch.group(3),))
                    metadata['inceptioncirca'] = True
                else:
                    print ('Could not parse date: "%s"' % (createdate,))
            # acquisitiondate is available
            acquisitiondateRegex = '\<strong\>Acquisition\<\/strong\>[\r\n\s\t]*\\<\/td\>[\r\n\s\t]*\<td\>[\r\n\s\t]*[^\<]+, (\d\d\d\d)[\r\n\s\t]*\<\/td\>'
            acquisitiondateMatch = re.search(acquisitiondateRegex, itempage.text)
            if acquisitiondateMatch:
                metadata['acquisitiondate'] = int(acquisitiondateMatch.group(1))
            mediumRegex = '\<strong\>Medium\<\/strong\>[\r\n\s\t]*\\<\/td\>[\r\n\s\t]*\<td\>[\r\n\s\t]*([^\<]+)[\r\n\s\t]*\<\/td\>'
            mediumMatch = re.search(mediumRegex, itempage.text)
            # Artdatabot will sort this out
            if mediumMatch:
                metadata['medium'] = mediumMatch.group(1)
            # Dimensions is a mix of types and also Inches and cm
            # Free images! See https://www.clarkart.edu/museum/collections/image-resources
            imageRegex = '\<h6 class\=\"text-center\"\>TIFF \(up to 500 MB\)\<\/h6\>[\r\n\s\t]*\<a href\=\"#\" data-href\=\"(https\:\/\/media\.clarkart\.edu\/hires\/[^\"]+\.tif)\"'
            imageMatch = re.search(imageRegex, itempage.text)
            if imageMatch:
                metadata['imageurl'] = imageMatch.group(1).replace(' ', '%20')
                metadata['imageurlformat'] = 'Q215106' # TIFF
                metadata['imageoperatedby'] = 'Q1465805'
                # metadata['imageurllicense'] = 'Q6938433' # Just free use
                ## Use this to add suggestions everywhere
                metadata['imageurlforce'] = False
            yield metadata
def main(*args):
    """Command-line entry point: stream Clark Art metadata into artdatabot."""
    generator = getClarkArtGenerator()
    dry_run = False
    create_new = False
    for option in pywikibot.handle_args(args):
        if option.startswith('-dry'):
            dry_run = True
        elif option.startswith('-create'):
            create_new = True
    if dry_run:
        # Just show what would be uploaded, do not touch Wikidata.
        for painting_metadata in generator:
            print(painting_metadata)
    else:
        artdatabot.ArtDataBot(generator, create=create_new).run()
# Run the importer when invoked as a script.
if __name__ == "__main__":
    main()
| [
"maarten@mdammers.nl"
] | maarten@mdammers.nl |
e9fed6137ec3b295fda35dcfe8f083d0c3625be6 | 336f11ee8934581f05ab620c5324c601ba864b05 | /jb_adaptive_python/Problems/Step matrix/Programming/tests.py | 79d6ba9430dae06ac099ab9abf679986502ca197 | [] | no_license | ancient-clever/sandbox | 01adeee2638a23533965cf57ca873a30e7dfad3d | 87dec3bf8860a67a36154ee5d7c826d919d3111b | refs/heads/master | 2022-05-17T04:49:54.703068 | 2020-01-19T17:44:27 | 2020-01-19T17:44:27 | 206,946,679 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 156 | py | from test_helper import check_samples
# Submit the sample answer for the "Step matrix" problem: a 5x5 matrix where
# each cell holds its distance from the border plus one.
if __name__ == '__main__':
    check_samples(samples=[["3","1 1 1 1 1\n1 2 2 2 1\n1 2 3 2 1\n1 2 2 2 1\n1 1 1 1 1"]])
"ancient-clever@outlook.com"
] | ancient-clever@outlook.com |
8ebd7be5762779f7dac2dfa4eb6fc4cd1e08545f | 5ea260271732d5cd3531665b3fefcad0b0b4d1ec | /emovie/settings.py | a675b1f23cb2c26a13fad4e443346a98a8fa82bd | [] | no_license | HettyIsIn/emovie | 1504c9552b48a657fb8bf615eef6a4b444ce4bf8 | 9bc83a6bed02b7fba748866df971ad89c3d2c14e | refs/heads/master | 2020-12-25T21:01:26.250640 | 2015-05-27T15:12:22 | 2015-05-27T15:12:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,633 | py | """
Django settings for emovie project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '7xzti=v(6&bh7+$l5de0a0+p!w!+p7tblv%y5-cd%alvh4t53r'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'crispy_forms',
'xadmin',
'xcms',
'movie',
'movie_session',
'cinema',
'cm',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'emovie.urls'
WSGI_APPLICATION = 'emovie.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
TIME_ZONE = 'Asia/Shanghai'
LANGUAGES = (
('zh-CN', 'Simplified Chinese'),
('en', 'English'),
)
LANGUAGE_CODE = 'zh-CN'
USE_I18N = True
USE_L10N = True
USE_TZ = False
DATE_FORMAT = 'Y-m-d'
DATETIME_FORMAT = 'Y-m-d H:i:s'
TIME_FORMAT = 'H:i:s'
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
ROOT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
STATIC_ROOT = os.path.join(ROOT_DIR, 'static')
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(ROOT_DIR, 'media')
MEDIA_URL = '/media/'
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'templates'),
)
# try:
# from product import *
# except ImportError:
# pass
| [
"lingnck@gmail.com"
] | lingnck@gmail.com |
adab5fb7e004978cbebf3c2330e8eac6f237a263 | d842a95213e48e30139b9a8227fb7e757f834784 | /gcloud/google-cloud-sdk/lib/surface/iot/devices/credentials/create.py | 3f061c8c10c35fbe2b555639f83327eab69ea7f8 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"MIT"
] | permissive | bopopescu/JobSniperRails | f37a15edb89f54916cc272884b36dcd83cdc868a | 39e7f871887176770de0f4fc6789e9ddc7f32b1f | refs/heads/master | 2022-11-22T18:12:37.972441 | 2019-09-20T22:43:14 | 2019-09-20T22:43:14 | 282,293,504 | 0 | 0 | MIT | 2020-07-24T18:47:35 | 2020-07-24T18:47:34 | null | UTF-8 | Python | false | false | 2,204 | py | # -*- coding: utf-8 -*- #
# Copyright 2017 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""`gcloud iot credentials create` command."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.cloudiot import devices
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.iot import flags
from googlecloudsdk.command_lib.iot import resource_args
from googlecloudsdk.command_lib.iot import util
from googlecloudsdk.core import log
class Create(base.CreateCommand):
  """Add a new credential to a device.

  A device may have at most 3 credentials.
  """

  @staticmethod
  def Args(parser):
    resource_args.AddDeviceResourceArg(
        parser, 'for which to create credentials', positional=False)
    flags.AddDeviceCredentialFlagsToParser(parser, combine_flags=False)

  def Run(self, args):
    client = devices.DevicesClient()
    device_ref = args.CONCEPTS.device.Parse()
    # Parse the requested credential first so invalid input fails fast.
    credential = util.ParseCredential(
        args.path, args.type, args.expiration_time, messages=client.messages)
    existing_credentials = client.Get(device_ref).credentials
    if len(existing_credentials) >= util.MAX_PUBLIC_KEY_NUM:
      raise util.InvalidPublicKeySpecificationError(
          'Cannot create a new public key credential for this device; '
          'maximum {} keys are allowed.'.format(util.MAX_PUBLIC_KEY_NUM))
    existing_credentials.append(credential)
    patched = client.Patch(device_ref, credentials=existing_credentials)
    log.CreatedResource(device_ref.Name(), 'credentials for device')
    return patched
| [
"luizfper@gmail.com"
] | luizfper@gmail.com |
e2600c0fed8c5a857f10392c0665bc36c5b1364a | 216a5e05360afcda9f90a2a5154ce8ea33bf8f82 | /utils/permissions.py | 4f347907c4d70eceb2eccb3b16b89c8236a7a182 | [] | no_license | ppark9553/our-web-server | dfa6bdbdd4ced51d11b1d4951255c6618371a83f | a37ba6b27fc1973d8150fd253f6f5543be97ad1c | refs/heads/master | 2021-09-16T17:24:48.021715 | 2018-06-22T12:42:25 | 2018-06-22T12:42:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 479 | py | from django.contrib.auth import get_user_model
from rest_framework import permissions
User = get_user_model()
class IsOwnerOrReadOnly(permissions.BasePermission):
    """Allow read access to anyone; write access only to the object's owner.

    A ``User`` object is considered its own owner; any other model is
    expected to reference its owner through a ``user`` field.
    """

    def has_object_permission(self, request, view, obj):
        # Read-only methods (GET/HEAD/OPTIONS) are always allowed.
        if request.method in permissions.SAFE_METHODS:
            return True
        # isinstance (rather than comparing __class__) also accepts
        # subclasses/proxies of the configured user model.
        if isinstance(obj, User):
            return obj.username == request.user.username
        return obj.user == request.user
| [
"ppark9553@gmail.com"
] | ppark9553@gmail.com |
a692f5fbc1997092e7d8ff1b9ee253f703e4b898 | 35a6f5a26ea97ebed8ab34619a8eec51719d2cc0 | /SpiderLearning/1 RequestBasic/4request_header_cookie.py | bdf907c06f27f2e4af98bfdc27634f2c7fbd9acf | [] | no_license | PandaCoding2020/pythonProject | c3644eda22d993b3b866564384ed10441786e6c5 | 26f8a1e7fbe22bab7542d441014edb595da39625 | refs/heads/master | 2023-02-25T14:52:13.542434 | 2021-02-03T13:42:41 | 2021-02-03T13:42:41 | 331,318,291 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,093 | py | """
@Time : 2021/1/29 9:54
@Author : Steven Chen
@File : 4request_header_cookie.py
@Software: PyCharm
"""
# Goal: fetch a GitHub profile page while sending a logged-in session cookie.
# Method: pass explicit User-Agent and cookie headers to requests.get().
import requests
url = 'https://github.com/PandaCoding2020'
# NOTE(review): the cookie value below is a hard-coded session secret in
# source control; it should be revoked and never committed.
headers = {
    'User-Agent':"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.96 Safari/537.36 Edg/88.0.705.53",
    "cookie": "_octo=GH1.1.358692347.1554840020; _ga=GA1.2.59697938.1554840020; _device_id=60d9769fd3fdc2235abf6fdd29b31a97; user_session=kKzwErWaCQYaFMH0lrjSTfLNpSoXLKhCB3NBaUPYl8FejzWl; __Host-user_session_same_site=kKzwErWaCQYaFMH0lrjSTfLNpSoXLKhCB3NBaUPYl8FejzWl; logged_in=yes; dotcom_user=PandaCoding2020; has_recent_activity=1; tz=Asia%2FShanghai; _gh_sess=rlQUfViTvHnD9iR5XhwxzbrymK7xwYYHJ1vdRGB9vAonJRFKZk2duKjpGhvr4UwZqwRpZeOiDTfwMdnsPAwn6hjm4GNYxY7xzJK05u1%2FdwqhIgZBmGNgG7s4gDvqwiEqSA%2BbA14DGgqRCCYHsFloCToW0e7wLzGrtCMBgMNv8tx67QbyP4BaMyBxgHc%2FO%2F2Z--HcrZEvIzY1UFGgaL--tFLMqsWygO75tY5xHBRqYw%3D%3D"
}
response = requests.get(url, headers = headers)
# Save the raw response body to disk.
# NOTE(review): the filename says "without" although cookies ARE sent here —
# looks inconsistent with the request above; confirm intent.
with open('github_without.html','wb') as f:
    f.write(response.content)
"gzupanda@outlook.com"
] | gzupanda@outlook.com |
21d7f7d408c190688ed8f05e94c0b50134527b88 | 53784d3746eccb6d8fca540be9087a12f3713d1c | /res/packages/scripts/scripts/client/gui/Scaleform/daapi/view/lobby/profile/ProfileAwards.py | 6823a9a78792cbc4ee5a3719776eef9944204c58 | [] | no_license | webiumsk/WOT-0.9.17.1-CT | 736666d53cbd0da6745b970e90a8bac6ea80813d | d7c3cf340ae40318933e7205bf9a17c7e53bac52 | refs/heads/master | 2021-01-09T06:00:33.898009 | 2017-02-03T21:40:17 | 2017-02-03T21:40:17 | 80,870,824 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 3,334 | py | # 2017.02.03 21:50:17 Střední Evropa (běžný čas)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/lobby/profile/ProfileAwards.py
from gui.Scaleform.daapi.view.meta.ProfileAwardsMeta import ProfileAwardsMeta
from gui.Scaleform.locale.PROFILE import PROFILE
from web_stubs import i18n
from gui.Scaleform.daapi.view.AchievementsUtils import AchievementsUtils
from gui.shared.utils.RareAchievementsCache import IMAGE_TYPE
from gui.shared.gui_items.dossier import dumpDossier
class ProfileAwards(ProfileAwardsMeta):
def __init__(self, *args):
super(ProfileAwards, self).__init__(*args)
self.__achievementsFilter = PROFILE.SECTION_AWARDS_DROPDOWN_LABELS_ALL
def setFilter(self, data):
self.__achievementsFilter = data
self.invokeUpdate()
@classmethod
def _getTotalStatsBlock(cls, dossier):
return dossier.getTotalStats()
def _sendAccountData(self, targetData, accountDossier):
super(ProfileAwards, self)._sendAccountData(targetData, accountDossier)
achievements = targetData.getAchievements()
totalItemsList = []
for block in achievements:
totalItemsList.append(len(block))
if self.__achievementsFilter == PROFILE.SECTION_AWARDS_DROPDOWN_LABELS_INPROCESS:
achievements = targetData.getAchievements(isInDossier=True)
elif self.__achievementsFilter == PROFILE.SECTION_AWARDS_DROPDOWN_LABELS_NONE:
achievements = targetData.getAchievements(isInDossier=False)
packedList = []
for achievementBlockList in achievements:
packedList.append(AchievementsUtils.packAchievementList(achievementBlockList, accountDossier.getDossierType(), dumpDossier(accountDossier), self._userID is None))
self.as_responseDossierS(self._battlesType, {'achievementsList': packedList,
'totalItemsList': totalItemsList,
'battlesCount': targetData.getBattlesCount()}, '', '')
return
def _populate(self):
super(ProfileAwards, self)._populate()
initData = {'achievementFilter': {'dataProvider': [self.__packProviderItem(PROFILE.SECTION_AWARDS_DROPDOWN_LABELS_ALL), self.__packProviderItem(PROFILE.SECTION_AWARDS_DROPDOWN_LABELS_INPROCESS), self.__packProviderItem(PROFILE.SECTION_AWARDS_DROPDOWN_LABELS_NONE)],
'selectedItem': self.__achievementsFilter}}
self.as_setInitDataS(initData)
def _onRareImageReceived(self, imgType, rareID, imageData):
if imgType == IMAGE_TYPE.IT_67X71:
stats = self._getNecessaryStats()
achievement = stats.getAchievement(('rareAchievements', rareID))
if achievement is not None:
image_id = achievement.getSmallIcon()[6:]
self.as_setRareAchievementDataS(rareID, image_id)
return
def _dispose(self):
self._disposeRequester()
super(ProfileAwards, self)._dispose()
@staticmethod
def __packProviderItem(key):
return {'label': i18n.makeString(key),
'key': key}
# okay decompyling c:\Users\PC\wotsources\files\originals\res\packages\scripts\scripts\client\gui\Scaleform\daapi\view\lobby\profile\ProfileAwards.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.02.03 21:50:17 Střední Evropa (běžný čas)
| [
"info@webium.sk"
] | info@webium.sk |
50f1bc82e3cd796a76f23afea6bb0124b04e54c2 | f5dbf8b9fc7a67167a966ad842999c5ec41d2363 | /app/migrations/0197_auto_20170209_1130.py | 4e84c7d80b4fb09fbfa2e9dde1a556f70fa6dff0 | [] | no_license | super0605/cogofly-v1 | 324ead9a50eaeea370bf40e6f37ef1372b8990fe | dee0f5db693eb079718b23099992fba3acf3e2dd | refs/heads/master | 2022-11-27T12:16:30.312089 | 2019-10-11T20:35:09 | 2019-10-11T20:35:09 | 214,522,983 | 0 | 0 | null | 2022-11-22T00:57:28 | 2019-10-11T20:25:01 | JavaScript | UTF-8 | Python | false | false | 1,974 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('app', '0196_auto_20170209_1047'),
]
operations = [
migrations.CreateModel(
name='PersonneBlogNewsletter',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('date_creation', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
('date_last_modif', models.DateTimeField(auto_now=True, verbose_name='Last changed')),
('date_v_debut', models.DateTimeField(default=django.utils.timezone.now, verbose_name='V. start')),
('date_v_fin', models.DateTimeField(default=None, null=True, verbose_name='V. end', blank=True)),
('date_sent', models.DateTimeField(default=None, null=True, verbose_name='Sent', blank=True)),
],
options={
'ordering': ['-date_last_modif', '-date_v_debut'],
'abstract': False,
},
),
migrations.AlterField(
model_name='blog',
name='date_envoi_newsletter',
field=models.DateField(default=None, help_text='Blank = never sent. If the date is older than now it will be sent tonight.', null=True, verbose_name='Add this blog into the newsletter', blank=True),
),
migrations.AddField(
model_name='personneblognewsletter',
name='blog',
field=models.ForeignKey(default=None, blank=True, to='app.Blog', null=True, verbose_name='Blog'),
),
migrations.AddField(
model_name='personneblognewsletter',
name='personne',
field=models.ForeignKey(default=None, blank=True, to='app.Personne', null=True, verbose_name='To'),
),
]
| [
"dream.dev1025@gmail.com"
] | dream.dev1025@gmail.com |
76bebcbd53c7a8e9ee54ffe104bf1631e3426098 | 453ca12d912f6498720152342085636ba00c28a1 | /leetcode/backtracking/python/sudoku_solver_leetcode.py | 7ab9498918f52ff1adde13c698c7938033f4934e | [] | no_license | yanbinkang/problem-bank | f9aa65d83a32b830754a353b6de0bb7861a37ec0 | bf9cdf9ec680c9cdca1357a978c3097d19e634ae | refs/heads/master | 2020-06-28T03:36:49.401092 | 2019-05-20T15:13:48 | 2019-05-20T15:13:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,652 | py | """
https://leetcode.com/problems/sudoku-solver/
Write a program to solve a Sudoku puzzle by filling the empty cells.
Empty cells are indicated by the character '.'
You may assume that there will be only one unique solution.
https://discuss.leetcode.com/topic/11327/straight-forward-java-solution-using-backtracking/18
O(9 ^ m) m represents the number of blanks to be filled in since each blank can have 9 choices. (Exponential)
"""
def solve_sudoku(board):
if not board or len(board) == 0:
return
solve(board)
def solve(board):
for i in range(len(board)): # row
for j in range(len(board[0])): # col
if board[i][j] == '.':
for c in '123456789':
if is_valid(board, i, j, c):
board[i][j] = c # put c in this cell
if solve(board):
return True # if its the solution return true
else:
board[i][j] = '.' # else go back
return False # 1..9 cannot be placed on board
return True # entire board is filled
def is_valid(board, row, col, c):
for i in range(9):
if board[i][col] == c:
return False
if board[row][i] == c:
return False
# this is also correct but results in longer runtime
# for j in range(9):
# if board[row][j] == c:
# return False
# check sub-box
for i in range(3):
for j in range(3):
if board[3 * (row / 3) + i][3 * (col / 3) + j] == c:
return False
return True
# solution for 4 x 4 board. Use for testing
def solve_sudoku_4_by_4(board):
if not board or len(board) == 0:
return
solve_4_by_4(board)
def solve_4_by_4(board):
for i in range(len(board)):
for j in range(len(board[0])):
if board[i][j] == '.':
for c in '1234':
if is_valid_4_by_4(board, i, j, c):
board[i][j] = c
if solve_4_by_4(board):
return True
else:
board[i][j] = '.'
return False # 1, 2, 3, 4 cannot be placed on board
return True # entire board is filled
def is_valid_4_by_4(board, row, col, c):
for i in range(4):
if board[i][col] == c:
return False
if board[row][i] == c:
return False
for i in range(2):
for j in range(2):
if board[2 * (row / 2) + i][2 * (col / 2 ) + j] == c:
return False
return True
if __name__ == '__main__':
board = [['.' for i in range(9)] for j in range(9)]
board[0] = list('53..7....')
board[1] = list('6..195...')
board[2] = list('.98....6.')
board[3] = list('8...6...3')
board[4] = list('4..8.3..1')
board[5] = list('7...2...6')
board[6] = list('.6....28.')
board[7] = list('...419..5')
board[8] = list('....8..79')
board_1 = [[None for i in range(4)] for j in range(4)]
board_1[0] = list('1.3.')
board_1[1] = list('..21')
board_1[2] = list('.1.2')
board_1[3] = list('24..')
solve_sudoku(board)
# print board
print '9 x 9 board solution'
print '\n'
for i in range(len(board)):
for j in range(len(board)):
print board[i][j],
print '\n'
print '\n'
solve_sudoku_4_by_4(board_1)
print '4 x 4 board solution'
print '\n'
for i in range(len(board_1)):
for j in range(len(board_1)):
print board_1[i][j],
print '\n'
| [
"albert.agram@gmail.com"
] | albert.agram@gmail.com |
d07657ffb4666e58c4579f7680be4286b481fc9c | a5ea878c1ab822ace8f8ba2b71c525b04dc97dad | /0x04-python-more_data_structures/4-only_diff_elements.py | a053c4f1ad16bff77eb9ad7ddff685c8f984b2bc | [] | no_license | gardenia-homsi/holbertonschool-python | 592c45e742f83695014abc318bf7269712b3a91c | fb7854835669aeffce71cf8fae7bca7d14d2e2f3 | refs/heads/master | 2023-01-22T05:50:00.447106 | 2020-12-03T21:32:55 | 2020-12-03T21:32:55 | 291,767,394 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 150 | py | #!/usr/bin/python3
def only_diff_elements(set_1, set_2):
new_set = set_1.difference(set_2).union(set_2.difference(set_1))
return(new_set)
| [
"noreply@github.com"
] | gardenia-homsi.noreply@github.com |
82bfae90259287144f1f2c3cddb7ab93c5f23692 | c47b68a858e01d5fe51661a8ded5138652d3082e | /src/recommender.py | 970816d03d7c4d5ca6d3f39836420dcaf2de1fe7 | [] | no_license | RitGlv/Practice_Makes_perfect | d2d50efbf810b41d0648f27d02b5710c14c3fcae | 3dcb7ff876e58ade64faed0fa5523cba7461cf8d | refs/heads/master | 2021-03-13T03:51:43.142777 | 2017-06-05T07:32:47 | 2017-06-05T07:32:47 | 91,500,946 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,779 | py | import pandas as pd
import numpy as np
from sklearn.metrics.pairwise import pairwise_distances
import featurize
reload (featurize)
import decomposition
reload (decomposition)
from decomposition import decomposed
import plots
reload (plots)
from plots import plot_pca
import matplotlib.pyplot as plt
class SimilarityRecommender(object):
    '''
    Creates a matrix with recommendation scores based on content boosted collaborative filtering.
    The final recommendation is based on user-user and item-item similarity
    Currently works with static info, future: incorporate feature change over time
    '''
    def __init__(self,features_df,ratings_df):
        #ratings_df = processed matrix of match rating per interview
        self.ratings = ratings_df
        self.sim_matrix = None
        #features_df = processed matrix of features per user,assumes userId as index
        self.features = features_df
        self.baseline = None
        self.recommended = []
        self.false_positive_users = []
        self.true_positive_users = []
        self.all_recommendations = []
        self.count=0
    def fit(self):
        # Build the ratings pivot first; similarity uses features only.
        self.get_ratings_matrix()
        self.get_similarity_score()
    def predict_one(self,user,n):
        '''
        Returns a list of top N matched users (stored in self.recommended).
        '''
        self.recommended = []
        n_most_similar = self.get_most_similar_users(user,n)
        for similar_user in n_most_similar:
            # .max() truthiness: only follow this similar user if they have
            # at least one positive match rating recorded.
            if np.asarray(self.match_matrix.iloc[similar_user]).max():
                matched = np.asarray(self.match_matrix.iloc[similar_user]).argmax()
                matched_id = self.match_matrix.index[matched]
                most_similar = self.get_most_similar_users(matched_id,n)
                for m in most_similar:
                    self.recommended.append(self.match_matrix.index[m])
        self.recommended = set(self.recommended)
    def get_most_similar_users(self,user,n):
        '''
        Ranked list of the most similar users to the requested user
        User defined as a row in the sim_matrix
        Treat users at different point of time as different users
        '''
        # [1:] skips the user itself (distance zero to self sorts first).
        sorted_indices=np.argsort(self.sim_matrix[self.features.index==user])
        n_most_similar= sorted_indices[0][1:(n+1)]
        return n_most_similar
    def get_ratings_matrix(self,index='userId1', columns='matched_user', values='good_match'):
        '''
        Get a matrix with all of the users matching scores
        '''
        # -1 marks "no interview happened between these two users".
        self.match_matrix = self.ratings.pivot(index=index, columns=columns, values=values).fillna(-1)
    def get_similarity_score(self,metric='euclidean'):
        '''
        Calculates similarity between every 2 users
        '''
        self.sim_matrix = pairwise_distances(self.features,metric=metric)
    def model_eval(self,n):
        '''
        Assess model based on AUC for different n for recommendation
        Predict all
        n=2,3,5,10
        '''
        self.eval_mat = np.zeros(self.sim_matrix.shape)*-1.0
        for user in self.match_matrix.index:
            self.predict_one(user,n)
            for predicted_match in self.recommended:
                self.eval_mat[self.match_matrix.index==user][0][self.match_matrix.index==predicted_match]=1
                if self.match_matrix[self.match_matrix.index==predicted_match][user][0] == 0:
                    self.false_positive_users.append((user,predicted_match))
                elif self.match_matrix[self.match_matrix.index==predicted_match][user][0] == 1:
                    self.true_positive_users.append((user,predicted_match))
            self.all_recommendations.append((user,self.recommended))
            self.count+=1
if __name__=="__main__":
    '''
    Load data for all interview match rating
    '''
    path = 'data/full_data_one_row_swap_idsby_userwith_matched_user.csv'
    df_for_rating = pd.read_csv(path)
    #create dataframe for match rating matrix
    min_df = df_for_rating[['userId1','matched_user','totalMatch1','match1']]
    with_match_type = featurize.good_match_bool(min_df)
    interview_rating = featurize.dataframe_for_matrix(with_match_type)
    train_path = 'data/full_data_one_row_swap_idsby_user.csv'
    df = pd.read_csv(train_path).set_index('userId1')
    # sqrt transform to de-skew the experience feature before PCA.
    df['experienceInYears1'] = np.sqrt(df['experienceInYears1'])
    #columns to leave in the static information (pre-interview) grouped user dataframe
    cols_to_leave = ['selfPrep1', 'experienceAreas1','experienceInYears1','degree1', 'status1','studyArea1']
    categories = ['degree1','status1','studyArea1']
    pca = decomposed(df)
    pca.fit(cols_to_leave,categories,6)
    df_pca = pd.DataFrame(pca.X_pca).set_index(pca.processed.index)
    sim = SimilarityRecommender(df_pca,interview_rating)
    sim.fit()
| [
"johndoe@example.com"
] | johndoe@example.com |
a209ed748eac1477a4eedfef2d1ff0311c02deee | 96ec8ea87fb2cfdd2d850a0471c9820f92152847 | /九章算法/基础班LintCode/Subarray Sum Closest.py | 1265ec062dd08f4ffd2b586b41598fd2433598bd | [] | no_license | bitterengsci/algorithm | ae0b9159fd21cc30c9865f981f9c18cf9c6898d7 | bf70d038b70c51edc6ddd6bfef1720fb5f9f2567 | refs/heads/master | 2023-08-10T10:22:18.774232 | 2023-07-31T21:04:11 | 2023-07-31T21:04:11 | 186,261,880 | 95 | 46 | null | 2023-07-31T21:04:12 | 2019-05-12T13:57:27 | Python | UTF-8 | Python | false | false | 861 | py | class Solution:
"""
@param: nums: A list of integers
@return: A list of integers includes the index of the first number and the index of the last number
"""
# 前缀和优化 + 排序贪心
def subarraySumClosest(self, nums):
prefix_sum = [(0, -1)] # sum, index
for i, num in enumerate(nums):
prefix_sum.append((prefix_sum[-1][0] + num, i))
prefix_sum.sort()
closest, answer = sys.maxsize, []
for i in range(1, len(prefix_sum)):
if closest > prefix_sum[i][0] - prefix_sum[i - 1][0]:
closest = prefix_sum[i][0] - prefix_sum[i - 1][0]
left = min(prefix_sum[i - 1][1], prefix_sum[i][1]) + 1
right = max(prefix_sum[i - 1][1], prefix_sum[i][1])
answer = [left, right]
return answer | [
"yanran2012@gmail.com"
] | yanran2012@gmail.com |
d259fbb9ed2f76823094bdb3eca5bd6775fc1343 | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/AKSHAYUBHAT_DeepVideoAnalytics/DeepVideoAnalytics-master/dvaapp/migrations/0005_auto_20170125_1807.py | 15fbaee9b504fad725f3b818bbbeb8551faa310a | [] | no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 658 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-01-25 18:07
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dvaapp', '0004_detection_metadata'),
]
operations = [
migrations.RemoveField(
model_name='frame',
name='bucket',
),
migrations.RemoveField(
model_name='frame',
name='key',
),
migrations.AddField(
model_name='frame',
name='name',
field=models.CharField(max_length=200, null=True),
),
]
| [
"659338505@qq.com"
] | 659338505@qq.com |
164f6b75d371e03cbe09103f8ec9eb0d85a4c5a1 | ac94164dd36b9d7fee5e460a5e115356059bf280 | /src/networks/classification/bert_adapter_owm.py | 8ffc8cb47e3aaa8702657cc57290c013802edf53 | [] | no_license | leducthanguet/PyContinual | 5c6014f64ccd29dc52b05ecc858b282846aa487b | 3325a1c33bfd2eab280f96f423cce59babcfcfc6 | refs/heads/main | 2023-08-30T01:23:04.765429 | 2021-10-28T03:50:24 | 2021-10-28T03:50:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,388 | py | #coding: utf-8
import sys
import torch
from transformers import BertModel, BertConfig
import utils
from torch import nn
sys.path.append("./networks/base/")
from my_transformers import MyBertModel
class Net(torch.nn.Module):
    """BERT-with-adapter network for OWM continual learning.

    The BERT backbone is frozen; only the injected ``adapter_owm`` modules
    (and the LayerNorms adjacent to them) stay trainable.  Supports the
    domain-incremental ('dil', one shared head) and task-incremental
    ('til', one head per task) classification scenarios.
    """

    def __init__(self,taskcla,args):
        # taskcla: list of (task_id, num_classes) pairs; args: experiment config.
        super(Net,self).__init__()
        config = BertConfig.from_pretrained(args.bert_model)
        config.return_dict=False
        self.bert = MyBertModel.from_pretrained(args.bert_model,config=config,args=args)
        #BERT fixed all ===========
        # Freeze the whole backbone first; adapters are re-enabled below.
        for param in self.bert.parameters():
            # param.requires_grad = True
            param.requires_grad = False
        #But adapter is open
        #Only adapters are trainable
        # Collect the adapter (plus adjacent LayerNorm) modules that should
        # remain trainable, depending on where the adapters were inserted.
        if args.apply_bert_output and args.apply_bert_attention_output:
            adaters = \
                [self.bert.encoder.layer[layer_id].attention.output.adapter_owm for layer_id in range(config.num_hidden_layers)] + \
                [self.bert.encoder.layer[layer_id].attention.output.LayerNorm for layer_id in range(config.num_hidden_layers)] + \
                [self.bert.encoder.layer[layer_id].output.adapter_owm for layer_id in range(config.num_hidden_layers)] + \
                [self.bert.encoder.layer[layer_id].output.LayerNorm for layer_id in range(config.num_hidden_layers)]
        elif args.apply_bert_output:
            adaters = \
                [self.bert.encoder.layer[layer_id].output.adapter_owm for layer_id in range(config.num_hidden_layers)] + \
                [self.bert.encoder.layer[layer_id].output.LayerNorm for layer_id in range(config.num_hidden_layers)]
        elif args.apply_bert_attention_output:
            adaters = \
                [self.bert.encoder.layer[layer_id].attention.output.adapter_owm for layer_id in range(config.num_hidden_layers)] + \
                [self.bert.encoder.layer[layer_id].attention.output.LayerNorm for layer_id in range(config.num_hidden_layers)]
        # NOTE(review): if neither flag is set, ``adaters`` is unbound and the
        # loop below raises NameError — presumably the launcher always sets at
        # least one of the two flags; confirm.
        for adapter in adaters:
            for param in adapter.parameters():
                param.requires_grad = True
                # param.requires_grad = False
        self.taskcla=taskcla
        self.dropout = nn.Dropout(args.hidden_dropout_prob)
        self.args = args
        if 'dil' in args.scenario:
            # Domain-incremental: a single classifier shared by all tasks.
            self.last=torch.nn.Linear(args.bert_hidden_size,args.nclasses)
        elif 'til' in args.scenario:
            # Task-incremental: one classifier head per task.
            self.last=torch.nn.ModuleList()
            for t,n in self.taskcla:
                self.last.append(torch.nn.Linear(args.bert_hidden_size,n))
        print('BERT ADAPTER OWM')
        return

    def forward(self,input_ids, segment_ids, input_mask):
        output_dict_ = {} # more flexible
        output_dict = \
            self.bert(input_ids=input_ids, token_type_ids=segment_ids, attention_mask=input_mask)
        sequence_output, pooled_output = output_dict['outputs']
        # x_list / h_list: per-layer records returned by MyBertModel —
        # presumably the layer inputs/activations needed by OWM; confirm.
        x_list = output_dict['x_list']
        h_list = output_dict['h_list']
        pooled_output = self.dropout(pooled_output)
        if 'dil' in self.args.scenario:
            y=self.last(pooled_output)
        elif 'til' in self.args.scenario:
            # One logit tensor per task head.
            y=[]
            for t,i in self.taskcla:
                y.append(self.last[t](pooled_output))
        output_dict_['y'] = y
        output_dict_['x_list'] = x_list
        output_dict_['h_list'] = h_list
        return output_dict_
| [
"iscauzixuanke@gmail.com"
] | iscauzixuanke@gmail.com |
c9cd595e6b21f955b807c5cb083e1cfd285d16a7 | 3d729e2e5b5d486095159c6636fa832fed48bcac | /server/advert/models.py | 27a2710cad10fba08c5e94caa1a30284a002bdac | [] | no_license | UuljanAitnazarova/advert_project | 8e7a590244c930a725916f3d3d89c74549c9ab71 | fa5e054ac2c0990c3b0feadc8e81e23e7d672a0b | refs/heads/master | 2023-07-07T20:51:10.027739 | 2021-08-21T13:35:03 | 2021-08-21T13:35:03 | 398,460,084 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,257 | py | from django.db import models
from django.contrib.auth import get_user_model
class Advert(models.Model):
    """A classified advert or announcement posted by a user."""

    # Closed set of categories; max_length=13 below fits 'announcement'.
    CATEGORY_CHOICE = [
        ('ad', 'ad'),
        ('announcement', 'announcement'),
    ]

    title = models.CharField(max_length=250, blank=False, null=False)
    category = models.CharField(max_length=13, choices=CATEGORY_CHOICE, blank=False, null=False)
    description = models.TextField(max_length=400, blank=False, null=False)
    # Optional illustration; files are stored under MEDIA_ROOT/images.
    image = models.ImageField(upload_to='images', blank=True, null=True)
    # Optional: announcements may carry no price.
    price = models.PositiveIntegerField(blank=True, null=True)
    author = models.ForeignKey(get_user_model(),
                               blank=False,
                               null=False,
                               related_name='advert',
                               on_delete=models.CASCADE)
    created_date = models.DateField(auto_now_add=True)
    # NOTE(review): modified_date and post_date both use auto_now=True, so they
    # always track the latest save identically — confirm whether post_date was
    # meant to be auto_now_add (set once at publication) instead.
    modified_date = models.DateTimeField(auto_now=True)
    post_date = models.DateTimeField(auto_now=True)
    # Moderation workflow flags: adverts start unmoderated and unrejected.
    moderated = models.BooleanField(default=False)
    rejected = models.BooleanField(default=False)

    def __str__(self):
        return f'{self.title}: {self.author}'

    class Meta:
        permissions = [
            # NOTE(review): the first letter of 'сan_approve' appears to be a
            # Cyrillic 'с' homoglyph rather than ASCII 'c' — verify, and keep
            # every has_perm()/permission_required caller in sync before fixing.
            ('сan_approve', 'Can approve')
] | [
"u.aitnazarova@gmail.com"
] | u.aitnazarova@gmail.com |
451585c409af738c6b0ceaaf948a14aeda57bae1 | cc738a180b98d3a48b740a53ed7a1f30604be292 | /src/zeep/wsdl/utils.py | 1951deacf7bc472c557e1cd7d5f93f8a2ddd3221 | [
"MIT",
"BSD-3-Clause"
] | permissive | Easter-eggs/python-zeep | c0cb3e9b71d5a5dabfbeea81eef528e195e92918 | ad6e7ea22bff989b78d3a5b30ab04869626b2565 | refs/heads/master | 2021-01-16T22:32:56.513623 | 2016-05-31T17:52:18 | 2016-05-31T17:52:18 | 60,026,731 | 0 | 0 | null | 2016-05-30T17:13:10 | 2016-05-30T17:13:10 | null | UTF-8 | Python | false | false | 380 | py |
def _soap_element(xmlelement, key):
"""So soap1.1 and 1.2 namespaces can be mixed HAH!"""
namespaces = [
'http://schemas.xmlsoap.org/wsdl/soap/',
'http://schemas.xmlsoap.org/wsdl/soap12/',
]
for ns in namespaces:
retval = xmlelement.find('soap:%s' % key, namespaces={'soap': ns})
if retval is not None:
return retval
| [
"michaelvantellingen@gmail.com"
] | michaelvantellingen@gmail.com |
7feed8b3a53ec0c6af7957788eabe1b4e7195e56 | 134c429df7d5c4d067d9761cb1435992b048adaf | /notes/0922/0922.py | 51d8a7cdbfebf9eb26dda9c7f5c67ef9348a1ca8 | [] | no_license | PaulGuo5/Leetcode-notes | 65c6ebb61201d6f16386062e4627291afdf2342d | 431b763bf3019bac7c08619d7ffef37e638940e8 | refs/heads/master | 2021-06-23T09:02:58.143862 | 2021-02-26T01:35:15 | 2021-02-26T01:35:15 | 177,007,645 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,111 | py | class Solution:
def sortArrayByParityII2(self, A: List[int]) -> List[int]:
odd = []
even = []
for i in range(len(A)):
if A[i] % 2 == 0:
even.append(A[i])
else:
odd.append(A[i])
j = 0
for i in range(0, len(A), 2):
A[i] = even[j]
j += 1
j = 0
for i in range(1, len(A), 2):
A[i] = odd[j]
j += 1
return A
def sortArrayByParityII(self, A: List[int]) -> List[int]:
even = 0
odd = 1
while even < len(A) and odd <len(A):
if A[even] % 2 != 0 and A[odd] % 2 == 0:
temp = A[even]
A[even] = A[odd]
A[odd] = temp
even += 2
odd += 2
elif A[even] % 2 != 0 and A[odd] % 2 != 0:
odd += 2
elif A[even] % 2 == 0 and A[odd] % 2 != 0:
even += 2
odd += 2
elif A[even] % 2 == 0 and A[odd] % 2 == 0:
even += 2
return A
| [
"zhg26@pitt.edu"
] | zhg26@pitt.edu |
8e1737c26a20b1eeebbf9f29a33a37e8ae65e723 | e7917cf00e06331c59799a27ddb57256268941f1 | /ptm/rate_estimator/debug_stuck_particles.py | ecff4a726313711b03e4ae35e6fb20c7875d09db | [] | no_license | rustychris/csc | 75d9f36b9c5ccb614ebab17ff110a7e1c0ad4764 | b29f94be3f2c44c222f3113d5fcadf6cfbbf0df1 | refs/heads/master | 2022-05-01T11:53:50.322733 | 2022-04-13T16:25:10 | 2022-04-13T16:25:10 | 133,419,133 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,646 | py | from stompy.grid import unstructured_grid
from stompy.model.fish_ptm import ptm_tools
##
# NOTE(review): exploratory IPython-style script (the bare ``##`` lines are
# cell separators). ``xr`` (xarray?) and ``plt`` (matplotlib.pyplot?) are used
# below but never imported here — presumably injected by the interactive
# session; confirm before running standalone.
# Hydrodynamic model output and its unstructured grid.
hyd=xr.open_dataset('../../dflowfm/runs/20180807_grid98_17/ptm_hydro.nc')
g=unstructured_grid.UnstructuredGrid.from_ugrid(hyd)
##
# Particle-tracking binary outputs for three release groups.
init=ptm_tools.PtmBin('run_10days/INIT_bin.out')
sac=ptm_tools.PtmBin('run_10days/SAC_bin.out')
srv=ptm_tools.PtmBin('run_10days/SRV_bin.out')
ntimes=init.count_timesteps()
##
# Earlier, wider zoom extents kept for reference:
# zoom=(605889.6569457075, 638002.2586920519, 4217801.158715993, 4241730.226468915)
# zoom=(597913.7274775933, 648118.8262812896, 4217179.54644355, 4301202.344200377)
# zoom=(611280.377359663, 632614.9072567355, 4222938.787804629, 4248182.140275016)
zoom=(626037.7515578158, 626228.6109768279, 4232804.050163795, 4233029.878040465)
# Figure 1: overlay the three particle groups at timestep ``ti`` on the grid.
plt.figure(1).clf()
fig,ax=plt.subplots(num=1)
ti=500
init.plot(ti,ax=ax,zoom=zoom,update=False,ms=4)
sac.plot(ti,ax=ax,zoom=zoom,update=False,color='cyan',ms=4)
srv.plot(ti,ax=ax,zoom=zoom,update=False,color='g',ms=4)
g.plot_edges(color='k',lw=0.4,ax=ax,clip=zoom) # ,labeler='id')
# g.plot_cells(centers=True,labeler=lambda i,r:str(i),clip=zoom,ax=ax)
ax.axis(zoom)
##
# For example,
# Edge and neighboring cell ids under investigation for "stuck" particles.
j=25170
c_deep=21111
c_shallow=51090
##
t=hyd.nMesh2_data_time
# Flow on this edge is 0 for all time.
Qj=hyd.h_flow_avg.isel(nMesh2_edge=j,nMesh2_layer_3d=0)
# 1 for all time
j_bot=hyd.Mesh2_edge_bottom_layer.isel(nMesh2_edge=j)
# 0 for all time.
j_top=hyd.Mesh2_edge_top_layer.isel(nMesh2_edge=j)
# 0 for all time
Aj=hyd.Mesh2_edge_wet_area.isel(nMesh2_edge=j,nMesh2_layer_3d=0)
# shallow cell 51090 is always bottom layer=1, top=0
# deep cell 21111 is always bottom=top=1
##
# Figure 2: time series of the (reportedly zero) edge flow.
plt.figure(2).clf()
plt.plot(t,Qj)
| [
"rustychris@gmail.com"
] | rustychris@gmail.com |
6c5cc7b15fbdc3db75af1c48e02db2934e988e96 | 54bb9ba6d507cd25b2c2ac553665bc5fc95280d1 | /src/onegov/wtfs/layouts/invoice.py | dd78e0508c54baa33f82ff232d7890fd5e0dc050 | [
"MIT"
] | permissive | href/onegov-cloud | 9ff736d968979380edba266b6eba0e9096438397 | bb292e8e0fb60fd1cd4e11b0196fbeff1a66e079 | refs/heads/master | 2020-12-22T07:59:13.691431 | 2020-01-28T08:51:54 | 2020-01-28T08:51:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,176 | py | from cached_property import cached_property
from onegov.core.elements import Link
from onegov.wtfs import _
from onegov.wtfs.collections import PaymentTypeCollection
from onegov.wtfs.layouts.default import DefaultLayout
from onegov.wtfs.security import EditModel
class InvoiceLayout(DefaultLayout):
    """Layout for the invoice-creation view."""

    @cached_property
    def title(self):
        return _("Create invoice")

    @cached_property
    def editbar_links(self):
        # Only users allowed to edit payment types get the management link.
        links = []
        payment_types = PaymentTypeCollection(self.request.session)
        if self.request.has_permission(payment_types, EditModel):
            links.append(Link(
                text=_("Manage payment types"),
                url=self.request.link(payment_types),
                attrs={'class': 'payment-icon'}
            ))
        return links

    @cached_property
    def breadcrumbs(self):
        return [
            Link(_("Homepage"), self.homepage_url),
            Link(self.title, self.request.link(self.model))
        ]

    @cached_property
    def cancel_url(self):
        return self.invoices_url

    @cached_property
    def success_url(self):
        return self.invoices_url
| [
"denis.krienbuehl@seantis.ch"
] | denis.krienbuehl@seantis.ch |
f9a81e96bf7412c530f031c1ba97734e5ad6a5ce | 1543840cd62b4a3301ce4626e3f3dafa1fbe3715 | /parallel_wavegan/layers/pqmf.py | bb31c430d2abe0219f58f153f69d836383e095ef | [
"MIT"
] | permissive | arita37/ParallelWaveGAN | cc4dc10560595bf17e941a4c4576731169bd64ae | bb32b19f9ccb638de670f8b8d3a1dfed13ecf1c3 | refs/heads/master | 2022-11-21T16:45:00.289300 | 2020-07-20T14:09:23 | 2020-07-20T14:09:23 | 283,928,615 | 1 | 0 | MIT | 2020-07-31T03:02:01 | 2020-07-31T03:02:00 | null | UTF-8 | Python | false | false | 4,478 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Tomoki Hayashi
# MIT License (https://opensource.org/licenses/MIT)
"""Pseudo QMF modules."""
import numpy as np
import torch
import torch.nn.functional as F
from scipy.signal import kaiser
def design_prototype_filter(taps=62, cutoff_ratio=0.15, beta=9.0):
    """Design prototype filter for PQMF.

    This method is based on `A Kaiser window approach for the design of prototype
    filters of cosine modulated filterbanks`_.

    Args:
        taps (int): The number of filter taps (must be even).
        cutoff_ratio (float): Cut-off frequency ratio (0 < ratio < 1).
        beta (float): Beta coefficient for kaiser window.

    Returns:
        ndarray: Impulse response of prototype filter (taps + 1,).

    .. _`A Kaiser window approach for the design of prototype filters of cosine modulated filterbanks`:
        https://ieeexplore.ieee.org/abstract/document/681427

    """
    # check the arguments are valid
    assert taps % 2 == 0, "The number of taps must be an even number."
    assert 0.0 < cutoff_ratio < 1.0, "Cutoff ratio must be > 0.0 and < 1.0."

    # make initial filter: ideal low-pass sinc centered on taps/2
    omega_c = np.pi * cutoff_ratio
    with np.errstate(invalid='ignore'):
        h_i = np.sin(omega_c * (np.arange(taps + 1) - 0.5 * taps)) \
            / (np.pi * (np.arange(taps + 1) - 0.5 * taps))
    # The center tap is the 0/0 indeterminate form of the sinc; its limit is
    # omega_c / pi == cutoff_ratio (the original wrote np.cos(0) * cutoff_ratio).
    h_i[taps // 2] = cutoff_ratio

    # apply kaiser window
    w = kaiser(taps + 1, beta)
    h = h_i * w

    return h
class PQMF(torch.nn.Module):
    """PQMF module.

    This module is based on `Near-perfect-reconstruction pseudo-QMF banks`_.

    .. _`Near-perfect-reconstruction pseudo-QMF banks`:
        https://ieeexplore.ieee.org/document/258122

    """

    def __init__(self, subbands=4, taps=62, cutoff_ratio=0.15, beta=9.0):
        """Initialize PQMF module.

        Args:
            subbands (int): The number of subbands.
            taps (int): The number of filter taps.
            cutoff_ratio (float): Cut-off frequency ratio.
            beta (float): Beta coefficient for kaiser window.

        """
        super(PQMF, self).__init__()

        # define filter coefficient
        # Cosine-modulate the shared prototype into per-band analysis and
        # synthesis filters; the two differ only in the sign of the
        # (-1)**k * pi/4 phase term.
        h_proto = design_prototype_filter(taps, cutoff_ratio, beta)
        h_analysis = np.zeros((subbands, len(h_proto)))
        h_synthesis = np.zeros((subbands, len(h_proto)))
        for k in range(subbands):
            h_analysis[k] = 2 * h_proto * np.cos(
                (2 * k + 1) * (np.pi / (2 * subbands)) *
                (np.arange(taps + 1) - ((taps - 1) / 2)) +
                (-1) ** k * np.pi / 4)
            h_synthesis[k] = 2 * h_proto * np.cos(
                (2 * k + 1) * (np.pi / (2 * subbands)) *
                (np.arange(taps + 1) - ((taps - 1) / 2)) -
                (-1) ** k * np.pi / 4)

        # convert to tensor
        analysis_filter = torch.from_numpy(h_analysis).float().unsqueeze(1)
        synthesis_filter = torch.from_numpy(h_synthesis).float().unsqueeze(0)

        # register coefficients as buffer (moves with the module, not trained)
        self.register_buffer("analysis_filter", analysis_filter)
        self.register_buffer("synthesis_filter", synthesis_filter)

        # filter for downsampling & upsampling:
        # identity kernel that keeps one sample per ``subbands`` per band.
        updown_filter = torch.zeros((subbands, subbands, subbands)).float()
        for k in range(subbands):
            updown_filter[k, k, 0] = 1.0
        self.register_buffer("updown_filter", updown_filter)
        self.subbands = subbands

        # keep padding info
        self.pad_fn = torch.nn.ConstantPad1d(taps // 2, 0.0)

    def analysis(self, x):
        """Analysis with PQMF.

        Args:
            x (Tensor): Input tensor (B, 1, T).

        Returns:
            Tensor: Output tensor (B, subbands, T // subbands).

        """
        x = F.conv1d(self.pad_fn(x), self.analysis_filter)
        return F.conv1d(x, self.updown_filter, stride=self.subbands)

    def synthesis(self, x):
        """Synthesis with PQMF.

        Args:
            x (Tensor): Input tensor (B, subbands, T // subbands).

        Returns:
            Tensor: Output tensor (B, 1, T).

        """
        # NOTE(kan-bayashi): Power will be decreased so here multiply by # subbands.
        # Not sure this is the correct way, it is better to check again.
        # TODO(kan-bayashi): Understand the reconstruction procedure
        x = F.conv_transpose1d(x, self.updown_filter * self.subbands, stride=self.subbands)
        return F.conv1d(self.pad_fn(x), self.synthesis_filter)
| [
"hayashi.tomoki@g.sp.m.is.nagoya-u.ac.jp"
] | hayashi.tomoki@g.sp.m.is.nagoya-u.ac.jp |
9f437582fa091c0826b81b2906a1727f5729c925 | ae7ba9c83692cfcb39e95483d84610715930fe9e | /xcv58/LeetCode/Maximum-Depth-of-Binary-Tree/Solution.py | f1999894091385ef5f7a20ac09a37d2cdd716a2f | [] | no_license | xenron/sandbox-github-clone | 364721769ea0784fb82827b07196eaa32190126b | 5eccdd8631f8bad78eb88bb89144972dbabc109c | refs/heads/master | 2022-05-01T21:18:43.101664 | 2016-09-12T12:38:32 | 2016-09-12T12:38:32 | 65,951,766 | 5 | 7 | null | null | null | null | UTF-8 | Python | false | false | 364 | py | # Definition for a binary tree node
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    # @param root, a tree node
    # @return an integer
    def maxDepth(self, root):
        """Return the number of nodes on the longest root-to-leaf path."""
        if root is None:
            return 0
        depth_left = self.maxDepth(root.left)
        depth_right = self.maxDepth(root.right)
        return 1 + max(depth_left, depth_right)
| [
"xenron@outlook.com"
] | xenron@outlook.com |
55974c3062c65bc035880e96b2a781aca322528e | bc9f66258575dd5c8f36f5ad3d9dfdcb3670897d | /lib/surface/compute/url_maps/describe.py | a57dae6a7042b32f18ee5d7c134c3a8aafc00b96 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | google-cloud-sdk-unofficial/google-cloud-sdk | 05fbb473d629195f25887fc5bfaa712f2cbc0a24 | 392abf004b16203030e6efd2f0af24db7c8d669e | refs/heads/master | 2023-08-31T05:40:41.317697 | 2023-08-23T18:23:16 | 2023-08-23T18:23:16 | 335,182,594 | 9 | 2 | NOASSERTION | 2022-10-29T20:49:13 | 2021-02-02T05:47:30 | Python | UTF-8 | Python | false | false | 2,596 | py | # -*- coding: utf-8 -*- #
# Copyright 2014 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for describing url maps."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.compute import flags as compute_flags
from googlecloudsdk.command_lib.compute import scope as compute_scope
from googlecloudsdk.command_lib.compute.url_maps import flags
from googlecloudsdk.command_lib.compute.url_maps import url_maps_utils
def _DetailedHelp():
  """Build the calliope detailed-help dict shared by all release tracks."""
  return {
      'brief':
          'Describe a URL map.',
      'DESCRIPTION':
          """\
          *{command}* displays all data associated with a URL map in a
          project.
          """,
  }
def _Run(args, holder, url_map_arg):
  """Issue the Get request for a (global or regional) URL map."""
  client = holder.client
  url_map_ref = url_map_arg.ResolveAsResource(
      args,
      holder.resources,
      default_scope=compute_scope.ScopeEnum.GLOBAL,
      scope_lister=compute_flags.GetDefaultScopeLister(client))
  # Regional and global URL maps live on different services but take the
  # same identifying fields.
  ref_params = url_map_ref.AsDict()
  if url_maps_utils.IsRegionalUrlMapRef(url_map_ref):
    service = client.apitools_client.regionUrlMaps
    request = client.messages.ComputeRegionUrlMapsGetRequest(**ref_params)
  else:
    service = client.apitools_client.urlMaps
    request = client.messages.ComputeUrlMapsGetRequest(**ref_params)
  return client.MakeRequests([(service, 'Get', request)])[0]
@base.ReleaseTracks(base.ReleaseTrack.ALPHA, base.ReleaseTrack.BETA,
                    base.ReleaseTrack.GA)
class Describe(base.DescribeCommand):
  """Describe a URL map."""

  detailed_help = _DetailedHelp()
  # Populated by Args() with the URL map resource-argument definition.
  URL_MAP_ARG = None

  @classmethod
  def Args(cls, parser):
    # Register the positional URL map argument.
    cls.URL_MAP_ARG = flags.UrlMapArgument()
    cls.URL_MAP_ARG.AddArgument(parser, operation_type='describe')

  def Run(self, args):
    # Build the compute API holder for the active release track and delegate.
    holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
    return _Run(args, holder, self.URL_MAP_ARG)
| [
"cloudsdk.mirror@gmail.com"
] | cloudsdk.mirror@gmail.com |
4647b56d18f408edad9a00c6ac02a82e34d11f08 | 49fa43ae11cd06f68efb65a9f59add168b205f29 | /python/306_additive-number/additiveNumber.py | 2a5375a84a18bf7b1d4fdf22a0ced35681321de9 | [] | no_license | kfrancischen/leetcode | 634510672df826a2e2c3d7cf0b2d00f7fc003973 | 08500c39e14f3bf140db82a3dd2df4ca18705845 | refs/heads/master | 2021-01-23T13:09:02.410336 | 2019-04-17T06:01:28 | 2019-04-17T06:01:28 | 56,357,131 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 681 | py | import itertools
class Solution(object):
    def isAdditiveNumber(self, num):
        """
        :type num: str
        :rtype: bool

        Try every split of ``num`` into two seed numbers and greedily verify
        that the remainder of the string continues their additive sequence.
        Seeds with leading zeros (other than "0" itself) are rejected.
        """
        length = len(num)
        for first_end, second_end in itertools.combinations(range(1, length), 2):
            first = num[:first_end]
            second = num[first_end:second_end]
            # A number with a leading zero does not round-trip through int().
            if first != str(int(first)) or second != str(int(second)):
                continue
            pos = second_end
            while pos < length:
                nxt = str(int(first) + int(second))
                if not num.startswith(nxt, pos):
                    break
                pos += len(nxt)
                first, second = second, nxt
            if pos == length:
                return True
        return False
# Ad-hoc smoke test (note: Python 2 ``print`` statement — this file predates
# Python 3 syntax).
mytest = Solution()
num = "0235813"
print mytest.isAdditiveNumber(num)
| [
"kfrancischen@gmail.com"
] | kfrancischen@gmail.com |
30031f16272956ac941b3b7060bb3e05b133017f | 5210993914691c70076be979aa5c57c33d5d3bc4 | /Programming101-3/Week_1/The_Final_Round/reduce_file_path.py | 5b9b39aeefb744abd25cd76f7c27dce970610b97 | [] | no_license | presian/HackBulgaria | d29f84ab7edc85a4d8dfbf055def7d0be783539e | 8bc95bb31daeb1f5a313d25b928f505013f5f0b0 | refs/heads/master | 2021-01-10T04:38:05.759005 | 2015-10-15T07:05:21 | 2015-10-15T07:05:21 | 36,889,139 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,452 | py | def double_slash_remover(path):
return path.replace("//", "/")
def string_splitter(path):
    """Split *path* on '/' into raw components (empties included)."""
    separator = "/"
    return path.split(separator)
def empty_string_remover(path_entities):
    """Drop the empty strings produced by splitting on repeated slashes."""
    return list(filter(lambda entity: entity != "", path_entities))
def point_checker(path_entity):
    """Return True when *path_entity* is a real segment, i.e. not '.' or '..'."""
    return path_entity not in ("..", ".")
def result_maker(path_entities):
    """Collapse '.' and '..' segments and join the rest into an absolute path.

    Uses a stack so that every '..' cancels the nearest preceding real
    segment.  The previous pairwise scan only cancelled a single level
    (e.g. ['a', 'b', '..', '..', 'c'] wrongly produced '/a/c') and could
    drop a lone trailing segment; this fixes both while keeping all the
    behaviours exercised by main() unchanged.

    :param path_entities: path segments with empty strings already removed
    :return: normalized absolute path; '/' when nothing remains
    """
    result = []
    for entity in path_entities:
        if entity == ".":
            continue  # '.' never changes the path
        elif entity == "..":
            if result:  # '..' above the root is clamped at '/'
                result.pop()
        else:
            result.append(entity)
    return "/" + "/".join(result)
def reduce_file_path(path):
    """Normalize an absolute path: collapse slashes and resolve '.'/'..'."""
    collapsed = double_slash_remover(path)
    entities = empty_string_remover(string_splitter(collapsed))
    return result_maker(entities)
def main():
    """Print the reduced form of a few sample paths (manual smoke test)."""
    samples = [
        "/",
        "/srv/../",
        "/srv///www/htdocs/wtf/",
        "/srv/www/htdocs/wtf",
        "/srv/./././././",
        "/etc//wtf/",
        "/etc/../etc/../etc/../",
        "//////////////",
        "/../",
        "/home//radorado/code/./hackbulgaria/week0/../",
    ]
    for sample in samples:
        print(reduce_file_path(sample))


if __name__ == '__main__':
    main()
| [
"presiandanailov@gmail.com"
] | presiandanailov@gmail.com |
eeb5e3da57cd6e9d5e0b1d2daa31670aea57a886 | 0856f65fdd2c1bd305860eeebd9e51b5d1d1f017 | /xinshuo_images/test/test_image_processing.py | bbecf5ef608a8fe8f08dae2d4e347546dcbd179b | [] | no_license | Fuyaoyao/xinshuo_toolbox | 8a313416b4ce82188015421558d6a89ed526b81e | 9049003d9f8e05ece4ef19ae0beb42b5f9c5731b | refs/heads/master | 2021-08-23T12:15:39.864095 | 2017-12-04T21:48:24 | 2017-12-04T21:48:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,021 | py | # Author: Xinshuo Weng
# email: xinshuo.weng@gmail.com
import os, sys
import pytest
import __init__paths__
from image_processing import *
from check import *
def test_imagecoor2cartesian_center():
    image_shape = (480, 640)
    forward, backward = imagecoor2cartesian_center(image_shape)
    assert isfunction(forward)
    assert isfunction(backward)

    # Each image-coordinate corner must map to the expected centered
    # cartesian point and round-trip back exactly.
    corner_cases = [
        ((0, 0), (-320, 240)),
        ((639, 479), (319, -239)),
        ((0, 479), (-320, -239)),
        ((639, 0), (319, 240)),
    ]
    for image_pts, expected_center in corner_cases:
        centered_pts = forward(image_pts)
        assert centered_pts == expected_center
        assert backward(centered_pts) == image_pts
if __name__ == '__main__':
pytest.main([__file__]) | [
"xinshuo.weng@gmail.com"
] | xinshuo.weng@gmail.com |
cfeb827afdfb015cd4f2721eb31338c72a286d65 | 871690900c8da2456ca2818565b5e8c34818658e | /programmers/level3/72415.py | 481ce98547e56515643755fe9c3e7c61bbc6a4e2 | [] | no_license | kobeomseok95/codingTest | 40d692132e6aeeee32ee53ea5d4b7af8f2b2a5b2 | d628d72d9d0c1aef2b3fa63bfa9a1b50d47aaf29 | refs/heads/master | 2023-04-16T09:48:14.916659 | 2021-05-01T11:35:42 | 2021-05-01T11:35:42 | 311,012,364 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,103 | py | from collections import deque
from itertools import permutations
def ctrl(board, y0, x0, dir_y, dir_x):
    """Slide from (y0, x0) in one direction, Ctrl-move style.

    Returns the first non-empty cell hit within the 4x4 board; otherwise the
    furthest in-bounds cell in that direction.

    Bug fix: ``steps`` is initialized to 0, so a move straight into the wall
    stays on (y0, x0).  The original left the counter unassigned in that case
    and raised UnboundLocalError on the final return.
    """
    steps = 0  # furthest legal slide distance seen so far
    for i in range(1, 4):
        if 0 <= (y1 := y0 + dir_y * i) < 4 and 0 <= (x1 := x0 + dir_x * i) < 4:
            if board[y1][x1] != 0:
                return (y1, x1)
            steps = i
    return (y0 + dir_y * steps, x0 + dir_x * steps)
def move(board, start, end):
    """BFS minimum move count from ``start`` to ``end`` on the 4x4 board.

    Each step is either a one-cell arrow move or a Ctrl-slide (``ctrl``).
    """
    dy, dx = [-1, 1, 0, 0], [0, 0, -1, 1]
    # Distances start at 6, which acts as "infinity" here.
    dist = [[6 for _ in range(4)] for _ in range(4)]
    q = deque([(start, 0)])
    while q:
        [y, x], d = q.popleft()
        # Only expand a coordinate when this path reaches it with a new
        # minimal distance; a non-minimal arrival cannot lie on a shortest
        # path, so there is no reason to compute distances from it.
        if dist[y][x] > d:
            dist[y][x] = d
            for i in range(4):
                ny, nx = y + dy[i], x + dx[i]
                if 0 <= ny < 4 and 0 <= nx < 4:
                    # Enqueue both the single-step move and the Ctrl-slide.
                    q.append(((ny, nx), d + 1))
                    q.append((ctrl(board, y, x, dy[i], dx[i]), d + 1))
    return dist[end[0]][end[1]]
def solution(board, r, c):
    """Minimum key presses to clear all card pairs (programmers problem 72415).

    Tries every ordering of the card pairs; for each ordering it tracks all
    cursor positions that achieve the minimal travel cost so far.
    """
    # location[k] = board coordinates (at most two) of card value k.
    location = {k: [] for k in range(1, 7)}
    for i in range(4):
        for j in range(4):
            if board[i][j]:
                location[board[i][j]].append((i, j))
    answer = int(1e9)
    for per in permutations(filter(lambda v: v, location.values())):
        dist = 0
        cursors = [(r, c)]
        stage = [[v for v in w] for w in board]  # working copy of the board
        for xy1, xy2 in per:
            # Cost of reaching one card of the pair then the other; the second
            # tuple element is the cursor position after clearing the pair.
            vs = [(move(stage, cursor, xy1) + move(stage, xy1, xy2), xy2) for cursor in cursors] + \
                 [(move(stage, cursor, xy2) + move(stage, xy2, xy1), xy1) for cursor in cursors]
            # Remove the matched pair from the board.
            stage[xy1[0]][xy1[1]] = stage[xy2[0]][xy2[1]] = 0
            # +2: presumably the two selection (Enter) presses for the pair.
            dist += 2 + (mvn := min(vs)[0])
            # Candidate cursor positions: only those reached at minimal cost.
            cursors = [pos for d, pos in vs if d == mvn]
        answer = min(answer, dist)
return answer | [
"37062337+kobeomseok95@users.noreply.github.com"
] | 37062337+kobeomseok95@users.noreply.github.com |
280e1dd2cbe2dd3cda6db54f063f203eaddd74d7 | 684f15ab9c10a2c2e378c04009afa33166e049f2 | /cifar_eval.py | 51f6ef0e2248b10c16b3971a8de0b5bfae249f9f | [
"MIT"
] | permissive | sx14/image_classification_imbalance | ac410e88164912ef6bf47a87041d60934becfc1f | 49869e12e9ca424496f7c137b7026bea79fa2f72 | refs/heads/master | 2023-01-09T11:08:30.574937 | 2020-11-05T14:48:52 | 2020-11-05T14:48:52 | 309,088,389 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,679 | py | import argparse
import os
import random
import time
import warnings
import sys
import numpy as np
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import models
from tensorboardX import SummaryWriter
from sklearn.metrics import confusion_matrix
from utils import *
from imbalance_cifar import IMBALANCECIFAR10, IMBALANCECIFAR100
from losses import LDAMLoss, FocalLoss
# All lower-case, callable, non-dunder attributes of ``models`` are selectable
# architectures on the command line.
model_names = sorted(name for name in models.__dict__
                     if name.islower() and not name.startswith("__")
                     and callable(models.__dict__[name]))

parser = argparse.ArgumentParser(description='PyTorch Cifar Training')
parser.add_argument('--dataset', default='cifar10', help='dataset setting')
parser.add_argument('-a', '--arch', metavar='ARCH', default='resnet32',
                    choices=model_names,
                    help='model architecture: ' +
                    ' | '.join(model_names) +
                    ' (default: resnet32)')
parser.add_argument('--loss_type', default="CE", type=str, help='loss type')
parser.add_argument('--imb_type', default="exp", type=str, help='imbalance type')
parser.add_argument('--imb_factor', default=0.01, type=float, help='imbalance factor')
parser.add_argument('--train_rule', default='None', type=str, help='data sampling strategy for train loader')
parser.add_argument('--rand_number', default=0, type=int, help='fix random number for data sampling')
parser.add_argument('--exp_str', default='0', type=str, help='number to indicate which experiment it is')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
                    help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', default=200, type=int, metavar='N',
                    help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
                    help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=128, type=int,
                    metavar='N',
                    help='mini-batch size')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
                    metavar='LR', help='initial learning rate', dest='lr')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
                    help='momentum')
parser.add_argument('--wd', '--weight-decay', default=2e-4, type=float,
                    metavar='W', help='weight decay (default: 1e-4)',
                    dest='weight_decay')
parser.add_argument('-p', '--print-freq', default=10, type=int,
                    metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
                    help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
                    help='evaluate model on validation set')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
                    help='use pre-trained model')
parser.add_argument('--seed', default=None, type=int,
                    help='seed for initializing training. ')
# NOTE(review): the default is the *string* '0'; argparse applies type=int to
# string defaults, so args.gpu ends up as int 0 — confirm this is intended
# (a None default would mean "no specific GPU" in main()/main_worker()).
parser.add_argument('--gpu', default='0', type=int,
                    help='GPU id to use.')
parser.add_argument('--root_log',type=str, default='log')
parser.add_argument('--root_model', type=str, default='checkpoint')

# Best top-1 accuracy seen so far (module-level; declared global in workers).
best_acc1 = 0
def main():
    """Entry point: parse args, prepare output folders, launch the worker."""
    args = parser.parse_args()
    # Experiment identifier used for log/checkpoint folder names.
    args.store_name = '_'.join([args.dataset, args.arch, args.loss_type, args.train_rule, args.imb_type, str(args.imb_factor), args.exp_str])
    prepare_folders(args)
    if args.seed is not None:
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        # Deterministic CUDNN trades speed for reproducibility.
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')

    if args.gpu is not None:
        warnings.warn('You have chosen a specific GPU. This will completely '
                      'disable data parallelism.')

    ngpus_per_node = torch.cuda.device_count()
    main_worker(args.gpu, ngpus_per_node, args)
def main_worker(gpu, ngpus_per_node, args):
    """Build the model, load the best checkpoint, and evaluate on CIFAR."""
    global best_acc1
    args.gpu = gpu

    if args.gpu is not None:
        print("Use GPU: {} for evaluating".format(args.gpu))

    # create model
    print("=> creating model '{}'".format(args.arch))
    num_classes = 100 if args.dataset == 'cifar100' else 10
    # use_norm presumably selects the normalized classifier head paired with
    # the LDAM loss — confirm against the models package.
    use_norm = True if args.loss_type == 'LDAM' else False
    model = models.__dict__[args.arch](num_classes=num_classes, use_norm=use_norm)
    load_best_checkpoint(args, model)

    if args.gpu is not None:
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)
    else:
        # DataParallel will divide and allocate batch_size to all available GPUs
        model = torch.nn.DataParallel(model).cuda()

    cudnn.benchmark = True

    # Data loading code
    transform_val = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])

    if args.dataset == 'cifar10':
        val_dataset = datasets.CIFAR10(root='./data', train=False, download=True, transform=transform_val)
    elif args.dataset == 'cifar100':
        val_dataset = datasets.CIFAR100(root='./data', train=False, download=True, transform=transform_val)
    else:
        warnings.warn('Dataset is not listed')
        return

    # evaluate on validation set
    val_loader = torch.utils.data.DataLoader(
        val_dataset, batch_size=100, shuffle=False,
        num_workers=args.workers, pin_memory=True)
    validate(val_loader, model, args)
def validate(val_loader, model, args, flag='val'):
    """Run one evaluation pass; print top-1/top-5 and per-class accuracy.

    Returns the average top-1 accuracy over the whole loader.
    """
    batch_time = AverageMeter('Time', ':6.3f')
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')

    # switch to evaluate mode
    model.eval()
    all_preds = []
    all_targets = []

    with torch.no_grad():
        end = time.time()
        # ``input`` shadows the builtin here; kept for byte-compatibility.
        for i, (input, target) in enumerate(val_loader):
            if args.gpu is not None:
                input = input.cuda(args.gpu, non_blocking=True)
            target = target.cuda(args.gpu, non_blocking=True)

            # compute output
            output = model(input)

            # measure accuracy and record loss
            acc1, acc5 = accuracy(output, target, topk=(1, 5))
            top1.update(acc1[0], input.size(0))
            top5.update(acc5[0], input.size(0))

            # measure elapsed time (batch_time is tracked but never printed below)
            batch_time.update(time.time() - end)
            end = time.time()

            _, pred = torch.max(output, 1)
            all_preds.extend(pred.cpu().numpy())
            all_targets.extend(target.cpu().numpy())

        # Per-class accuracy = confusion-matrix diagonal over row sums.
        cf = confusion_matrix(all_targets, all_preds).astype(float)
        cls_cnt = cf.sum(axis=1)
        cls_hit = np.diag(cf)
        cls_acc = cls_hit / cls_cnt
        # ``output`` is reused as the summary string (shadows the logits above).
        output = ('{flag} Results: Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
                  .format(flag=flag, top1=top1, top5=top5))
        out_cls_acc = '%s Class Accuracy: %s'%(flag,(np.array2string(cls_acc, separator=',', formatter={'float_kind':lambda x: "%.3f" % x})))
        print(output)
        print(out_cls_acc)
    return top1.avg
if __name__ == '__main__':
main() | [
"1059363093@qq.com"
] | 1059363093@qq.com |
99e7db6e7ff1635636e44793f849c84b4c76d03e | bf13574ef4af42b33ee931d2be0ccf862bd297d4 | /util/weather_analysis.py | ac9b30b57963557883a372fbcb09b35fa9534082 | [] | no_license | Futureword123456/WeatherRecommendationSystem | 0f032cb260415a7786b8c4a3c0b801e6377e7125 | 14489fe27eed985d950b8571a205d56612afc8a4 | refs/heads/master | 2023-04-20T23:52:27.565091 | 2021-05-16T08:52:58 | 2021-05-16T08:52:58 | 285,297,964 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,851 | py | # 该模块用于天气数据分析的相关脚本
from pandas import DataFrame
import pandas as pd
import util
from region.models import Region
from util.normalization import sigmoid, weather_type_normalization, wind_power_normalization
from weather_analysis1.settings import OPTIMUM_MAX_DEGREE, OPTIMUM_MIN_DEGREE, WEIGHTS_DICT
from weather_data.models import WeatherData, WeatherResult
# Fetch the region's weather data for the next six days (list of dicts).
def get_region_weather_data(region: Region):
    # Six most recently *created* WeatherData rows for the region, newest
    # first, restricted to the listed fields.
    # NOTE(review): paired with get_region_weather_date() via a second,
    # separate query — assumes no rows are inserted in between; confirm.
    return WeatherData.objects.filter(region=region).order_by('-created')[:6].values('day_weather', 'day_weather_code',
                                                                                     'day_wind_power', 'max_degree',
                                                                                     'min_degree')
# Fetch the dates matching the region's weather data, in list form.
def get_region_weather_date(region: Region) -> list:
    # values_list('time') yields 1-tuples like (date,). NOTE(review): the
    # annotation says list but a lazy QuerySet is returned; callers in this
    # module only iterate it, so the difference is harmless here.
    return WeatherData.objects.filter(region=region).order_by('-created')[:6].values_list('time')
# Assemble the region's weather records into a date-indexed DataFrame.
def get_region_weather_dataframe(region: Region) -> DataFrame:
    """Return the region's recent weather as a DataFrame indexed by date."""
    return pd.DataFrame(
        get_region_weather_data(region),
        index=get_region_weather_date(region),
    )
# Normalize the region's weather data into comparable score columns.
def normalize_weather_data(region: Region) -> DataFrame:
    df = get_region_weather_dataframe(region)
    new_df = pd.DataFrame()
    # Temperature comfort: assuming `sigmoid` is the logistic function,
    # sigmoid(|t - optimum|) lies in [0.5, 1), so "1.5 - ..." maps it to
    # (0.5, 1] with 1 meaning exactly optimal — TODO confirm.
    new_df['max_degree'] = 1.5 - (df['max_degree'] - OPTIMUM_MAX_DEGREE).abs().apply(sigmoid)
    new_df['min_degree'] = 1.5 - (df['min_degree'] - OPTIMUM_MIN_DEGREE).abs().apply(sigmoid)
    # Categorical fields go through their dedicated normalization helpers.
    new_df['day_weather_code'] = df['day_weather_code'].apply(weather_type_normalization)
    new_df['day_wind_power'] = df['day_wind_power'].apply(wind_power_normalization)
    return new_df
# Compute the recommendation score for a given city.
def caculate_region_result(region: Region):
    """Return the weighted recommendation score for `region`.

    The normalized weather columns are combined with WEIGHTS_DICT via a
    matrix product and summed over the days.  Returns -1 when the score
    cannot be computed (missing region, malformed weather data, ...).
    """
    try:
        df = normalize_weather_data(region)
        series = pd.Series(WEIGHTS_DICT)
        return (df @ series).sum()
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate instead of being swallowed into a -1 score.
        return -1
# Compute and persist the recommendation result for every displayed city.
def save_display_region_result():
    region_list = Region.objects.filter(is_display=True)
    for r in region_list:
        # A result of -1 (see caculate_region_result) marks a failed region.
        WeatherResult.objects.create(region=r, result=caculate_region_result(r))
        print("%s的结果保存成功!" % r.name)  # "<name>'s result saved successfully"
if __name__ == '__main__':
    # Ad-hoc manual check against the Guiyang region, then persist every
    # display-region's result.
    region = Region.objects.get(name='贵阳市')
    print(get_region_weather_data(region))
    print(get_region_weather_date(region))
    # NOTE(review): this first DataFrame is immediately overwritten below.
    df = pd.DataFrame(get_region_weather_data(region))
    df = get_region_weather_dataframe(region)
    print(df)
    # new = normalize_weather_data(region)
    # print(new)
    # print(caculate_region_result(region))
    save_display_region_result()
| [
"2635681517@qq.com"
] | 2635681517@qq.com |
ba9cab5d111516ba4e1fd9eba0a8fa8c5bc19eeb | 98d61512fdf7f8426d4634a86edd25669944ab9e | /algorithms/BestTimeToBuyAndSellStock/solution.py | 2c6f64b803cdbd44d2e66e2168c317b6104e54ea | [] | no_license | P-ppc/leetcode | 145102804320c6283fa653fc4a7ae89bf745b2fb | 0d90db3f0ca02743ee7d5e959ac7c83cdb435b92 | refs/heads/master | 2021-07-12T02:49:15.369119 | 2018-11-13T05:34:51 | 2018-11-24T12:34:07 | 132,237,265 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 365 | py | class Solution(object):
def maxProfit(self, prices):
"""
:type prices: List[int]
:rtype: int
"""
max_profit = 0
min_price = sys.maxint
for price in prices:
min_price = min(min_price, price)
max_profit = max(max_profit, price - min_price)
return max_profit | [
"ppc-user@foxmail.com"
] | ppc-user@foxmail.com |
c3a1d237da21bec7b9c7408ef0008689bbdd134d | 88ae8695987ada722184307301e221e1ba3cc2fa | /printing/backend/PRESUBMIT.py | c400aabee9f16585e38e2c421c765eb7291eb5c0 | [
"BSD-3-Clause"
] | permissive | iridium-browser/iridium-browser | 71d9c5ff76e014e6900b825f67389ab0ccd01329 | 5ee297f53dc7f8e70183031cff62f37b0f19d25f | refs/heads/master | 2023-08-03T16:44:16.844552 | 2023-07-20T15:17:00 | 2023-07-23T16:09:30 | 220,016,632 | 341 | 40 | BSD-3-Clause | 2021-08-13T13:54:45 | 2019-11-06T14:32:31 | null | UTF-8 | Python | false | false | 1,984 | py | # Copyright 2020 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Presubmit script for the printing backend.
See https://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API.
"""
USE_PYTHON3 = True  # tell the presubmit framework to run this file under Python 3


def _CheckForStringViewFromNullableIppApi(input_api, output_api):
  """
  Looks for all affected lines in CL where one constructs either
  base::StringPiece or std::string_view from any ipp*() CUPS API call.
  Assumes over-broadly that all ipp*() calls can return NULL.
  Returns affected lines as a list of presubmit errors.
  """
  # Attempts to detect source lines like:
  #  * base::StringPiece foo = ippDoBar();
  #  * base::StringPiece foo(ippDoBar());
  # and the same for std::string_view.
  string_view_re = input_api.re.compile(
      r"^.+(base::StringPiece|std::string_view)\s+\w+( = |\()ipp[A-Z].+$")
  # The rule callback must return True for *acceptable* lines, hence the
  # negation of "C++ source/header line matching the pattern".
  violations = input_api.canned_checks._FindNewViolationsOfRule(
      lambda extension, line:
      not (extension in ("cc", "h") and string_view_re.search(line)),
      input_api, None)
  bulleted_violations = [" * {}".format(entry) for entry in violations]
  if bulleted_violations:
    return [output_api.PresubmitError(
        ("Possible construction of base::StringPiece or std::string_view "
         "from CUPS IPP API (that can probably return NULL):\n{}").format(
            "\n".join(bulleted_violations))),]
  return []
def _CommonChecks(input_api, output_api):
  """Shared presubmit implementation for both upload and commit checks."""
  return list(_CheckForStringViewFromNullableIppApi(input_api, output_api))
def CheckChangeOnUpload(input_api, output_api):
  """Mandatory presubmit entry point (runs on upload)."""
  results = _CommonChecks(input_api, output_api)
  return results
def CheckChangeOnCommit(input_api, output_api):
  """Mandatory presubmit entry point (runs on commit)."""
  results = _CommonChecks(input_api, output_api)
  return results
| [
"jengelh@inai.de"
] | jengelh@inai.de |
3248006133f1a39fee5c6ed3e1d8983387dae879 | 17ef6c9ead83c2a2c18fe029ae3f6ba90d57b8f4 | /unsupervised_learning/0x01-clustering/8-EM.py | c0e7abe3c2039e1dca58a146d909b3d122c2d090 | [] | no_license | shincap8/holbertonschool-machine_learning | ede0c2be6df44f91c125c4497cf5ac1b90f654fe | cfc519b3290a1b8ecd6dc94f70c5220538ee7aa0 | refs/heads/master | 2023-03-26T07:00:10.238239 | 2021-03-18T04:39:01 | 2021-03-18T04:39:01 | 279,436,819 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,875 | py | #!/usr/bin/env python3
"""Function that performs the expectation maximization for a GMM"""
import numpy as np
initialize = __import__('4-initialize').initialize
expectation = __import__('6-expectation').expectation
maximization = __import__('7-maximization').maximization
def expectation_maximization(X, k, iterations=1000, tol=1e-5, verbose=False):
    """Run the expectation-maximization algorithm for a GMM.

    Args:
        X: numpy.ndarray of shape (n, d) containing the data set.
        k: positive int, number of clusters.
        iterations: positive int, maximum number of EM iterations.
        tol: non-negative float; stop early once the absolute change in
            log likelihood between iterations is <= tol.
        verbose: bool; if True, print 'Log Likelihood after {i} iterations:
            {l}' every 10 iterations and after the final iteration.

    Returns:
        (pi, m, S, g, ll) on success — pi: (k,) priors, m: (k, d) centroid
        means, S: (k, d, d) covariances, g: (k, n) responsibilities,
        ll: final log likelihood — or (None, None, None, None, None) on
        any invalid input.
    """
    # Validate inputs; any malformed argument yields the failure tuple.
    if type(X) is not np.ndarray or len(X.shape) != 2:
        return (None, None, None, None, None)
    if type(k) is not int or type(iterations) is not int:
        return (None, None, None, None, None)
    if k <= 0 or iterations <= 0:
        return (None, None, None, None, None)
    if type(tol) is not float or tol < 0:
        return (None, None, None, None, None)
    if type(verbose) is not bool:
        return (None, None, None, None, None)
    # (The original unpacked `n, d = X.shape` here but never used them.)
    pi, m, S = initialize(X, k)
    g, ll = expectation(X, pi, m, S)
    ll_old = 0
    text = 'Log Likelihood after {} iterations: {}'
    for i in range(iterations):
        if verbose and i % 10 == 0:
            print(text.format(i, ll.round(5)))
        # M-step, then E-step; the E-step also yields the new log likelihood.
        pi, m, S = maximization(X, g)
        g, ll = expectation(X, pi, m, S)
        if np.abs(ll_old - ll) <= tol:
            break
        ll_old = ll
    if verbose:
        print(text.format(i + 1, ll.round(5)))
    return (pi, m, S, g, ll)
| [
"shincap8@gmail.com"
] | shincap8@gmail.com |
f8bd1fc30da97b0917edc5644b68c14a92de65b1 | 3b50605ffe45c412ee33de1ad0cadce2c5a25ca2 | /python/paddle/fluid/tests/unittests/test_monitor.py | a5d5e30176fb07753a1ec9e47c4031fccf825b92 | [
"Apache-2.0"
] | permissive | Superjomn/Paddle | f5f4072cf75ac9ecb0ff528876ee264b14bbf8d1 | 7a0b0dab8e58b6a3b28b3b82c43d55c9bd3d4188 | refs/heads/develop | 2023-02-04T20:27:54.244843 | 2023-01-26T15:31:14 | 2023-01-26T15:31:14 | 66,896,049 | 4 | 1 | Apache-2.0 | 2023-04-14T02:29:52 | 2016-08-30T01:45:54 | C++ | UTF-8 | Python | false | false | 3,566 | py | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
TestCases for Monitor
"""
import paddle
paddle.enable_static()
import os
import tempfile
import unittest
import paddle.fluid as fluid
import paddle.fluid.core as core
class TestDatasetWithStat(unittest.TestCase):
    """TestCases for Dataset."""

    def setUp(self):
        # Toggle between DataLoader-based and train_from_dataset-based runs.
        self.use_data_loader = False
        self.epoch_num = 10
        self.drop_last = False

    def test_dataset_run_with_stat(self):
        """End-to-end InMemoryDataset run, then read the feasign counters."""
        temp_dir = tempfile.TemporaryDirectory()
        path_a = os.path.join(temp_dir.name, "test_in_memory_dataset_run_a.txt")
        path_b = os.path.join(temp_dir.name, "test_in_memory_dataset_run_b.txt")
        # Two small slot-data files: each line is 12 space-separated feasigns.
        with open(path_a, "w") as f:
            data = "1 1 2 3 3 4 5 5 5 5 1 1\n"
            data += "1 2 2 3 4 4 6 6 6 6 1 2\n"
            data += "1 3 2 3 5 4 7 7 7 7 1 3\n"
            f.write(data)
        with open(path_b, "w") as f:
            data = "1 4 2 3 3 4 5 5 5 5 1 4\n"
            data += "1 5 2 3 4 4 6 6 6 6 1 5\n"
            data += "1 6 2 3 5 4 7 7 7 7 1 6\n"
            data += "1 7 2 3 6 4 8 8 8 8 1 7\n"
            f.write(data)

        # Four sparse int64 slots, each embedded into a 4-dim table.
        slots = ["slot1", "slot2", "slot3", "slot4"]
        slots_vars = []
        for slot in slots:
            var = paddle.static.data(
                name=slot, shape=[-1, 1], dtype="int64", lod_level=1
            )
            slots_vars.append(var)
        embs = []
        for x in slots_vars:
            emb = fluid.layers.embedding(x, is_sparse=True, size=[100001, 4])
            embs.append(emb)

        dataset = paddle.distributed.InMemoryDataset()
        dataset._set_batch_size(32)
        dataset._set_thread(3)
        dataset.set_filelist([path_a, path_b])
        dataset._set_pipe_command("cat")
        dataset._set_use_var(slots_vars)
        dataset.load_into_memory()
        # Enable feature evaluation so slots_shuffle is permitted.
        dataset._set_fea_eval(1, True)
        dataset.slots_shuffle(["slot1"])

        exe = fluid.Executor(fluid.CPUPlace())
        exe.run(fluid.default_startup_program())
        if self.use_data_loader:
            data_loader = fluid.io.DataLoader.from_dataset(
                dataset, fluid.cpu_places(), self.drop_last
            )
            for _ in range(self.epoch_num):
                for data in data_loader():
                    exe.run(fluid.default_main_program(), feed=data)
        else:
            for _ in range(self.epoch_num):
                try:
                    exe.train_from_dataset(
                        fluid.default_main_program(),
                        dataset,
                        fetch_list=[embs[0], embs[1]],
                        fetch_info=["emb0", "emb1"],
                        print_period=1,
                    )
                except Exception as e:
                    # Fail with the underlying error; the original
                    # `self.assertTrue(False)` discarded `e` entirely.
                    self.fail("train_from_dataset raised: {}".format(e))

        int_stat = core.get_int_stats()
        # total 56 keys
        print(int_stat["STAT_total_feasign_num_in_mem"])
        temp_dir.cleanup()


if __name__ == '__main__':
    unittest.main()
| [
"noreply@github.com"
] | Superjomn.noreply@github.com |
3e1979aafdbea3e2c81383a48cfe6a7cafe0f013 | fb78fd824e904705fb1ee09db8b3c20cc3902805 | /django-guestbook/guestbook/migrations/0002_auto_20191016_1315.py | ef5f841831a597352e01e1f4517998095c6e161b | [] | no_license | Roderich25/mac | 8469833821ac49c539a744db29db5a41d755ad55 | 4f7fe281c88f0199b85d0ac99ce41ffb643d6e82 | refs/heads/master | 2023-01-12T05:55:12.753209 | 2021-11-26T01:16:24 | 2021-11-26T01:16:24 | 207,029,750 | 0 | 0 | null | 2023-01-07T11:49:23 | 2019-09-07T21:51:53 | Jupyter Notebook | UTF-8 | Python | false | false | 431 | py | # Generated by Django 2.2.6 on 2019-10-16 13:15
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated by `makemigrations`: redefine Comment.date_added."""

    dependencies = [
        ('guestbook', '0001_initial'),
    ]

    operations = [
        # Alter date_added to a DateField defaulting to the current time.
        migrations.AlterField(
            model_name='comment',
            name='date_added',
            field=models.DateField(default=django.utils.timezone.now),
        ),
    ]
| [
"rodrigoavilasolis@gmail.com"
] | rodrigoavilasolis@gmail.com |
0922c9ad0dfb9694fc3319082b15404f0dfd3dd1 | 3bb0fe3babe9f30e05a181722c476504e0ab56df | /tests/contrib/permissions/test_models.py | a264941db8418b287856b6f52bb671120629e476 | [
"MIT"
] | permissive | inonit/django-chemtrails | 6616aa121afe70da42a2a237b88b671ee2cddd74 | e8bd97dc68852902b57d314250e616b505db0e16 | refs/heads/master | 2021-01-20T10:55:30.979439 | 2017-12-11T21:32:06 | 2017-12-11T21:32:06 | 80,567,669 | 14 | 2 | MIT | 2017-12-04T05:43:13 | 2017-01-31T22:11:51 | Python | UTF-8 | Python | false | false | 907 | py | # -*- coding: utf-8 -*-
from django.contrib.contenttypes.models import ContentType
from django.test import TestCase
from chemtrails.contrib.permissions.models import (
AccessRule,
get_node_relations_choices, get_node_permissions_choices
)
class ChoicesHelperFunctionsTestCase(TestCase):
    """
    Test various functions for getting choices based on Neo4j data.
    """

    def test_get_node_relations_choices(self):
        # Each choice must be a Django-style (value, label) 2-tuple.
        choices = get_node_relations_choices()
        self.assertIsInstance(choices, list)
        for item in choices:
            self.assertIsInstance(item, tuple)
            self.assertEqual(len(item), 2)

    def test_get_node_permissions_choices(self):
        # Same contract as above, for the permission-choices helper.
        choices = get_node_permissions_choices()
        self.assertIsInstance(choices, list)
        for item in choices:
            self.assertIsInstance(item, tuple)
            self.assertEqual(len(item), 2)
| [
"rhblind@gmail.com"
] | rhblind@gmail.com |
1022e667a73f39aa64e68623e43a4c620ca0b8a2 | 47fb8f2ed2510a3777799e1b704bfcfce18789c3 | /challenge.py | dac506dffc3950144d67f5e7ba52f53f0399e6f0 | [] | no_license | ehiaig/learn_python | e8d476c228451fb91be40ec64b5f7700fa12db74 | 205119a15d49dd81d98fca4623e0df99963aece7 | refs/heads/master | 2022-12-25T02:00:32.428483 | 2020-09-19T19:02:32 | 2020-09-19T19:02:32 | 103,679,402 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 557 | py | # print ("input is {}".format(input))
# ALTERNATE HUMAN AND DOG CHALLENGE
# Pair every human ('h') with the nearest following dog ('d') by moving
# that dog to the slot right after the human.
stirr = "hd...h...d..d..hd...h..d..h.d"


def pair_dogs_with_humans(chars):
    """Pair each 'h' in `chars` (a list) with the closest later 'd'.

    For every 'h' not already followed by 'd', the nearest 'd' appearing
    later in the list is swapped with the character directly after the
    'h'.  The list is modified in place and returned.

    Bug fixed from the original: the swap there was written as
    `ch, stir[line+1] = stir[line+1], ch`, which only rebinds the loop
    variable `ch` — the dog's own slot was never cleared, so the 'd' was
    duplicated instead of moved.
    """
    for i, ch in enumerate(chars):
        # Skip non-humans, a trailing 'h', and humans already paired.
        if ch != 'h' or i + 1 >= len(chars) or chars[i + 1] == 'd':
            continue
        for j in range(i + 2, len(chars)):
            if chars[j] == 'd':
                chars[i + 1], chars[j] = chars[j], chars[i + 1]
                break
    return chars


print(''.join(pair_dogs_with_humans(list(stirr))))
# HUMAN AND DOG CHALLENGE
# Put all dogs one step in front of humans
| [
"ehiagheaigg@gmail.com"
] | ehiagheaigg@gmail.com |
6d76dfb817a6e4b6b701679da6ac15531301e20f | b32121fbf9cdbb7043fe255ebc01bc367c78ac73 | /backend/react_native_hook_ex_4479/settings.py | 0e34266f386b5732b712a0a471d9bba374dc8b07 | [] | no_license | crowdbotics-apps/react-native-hook-ex-4479 | 1883330009525d866c06b3b5e8135de5fe6c18a1 | b2f5d65eccbfdf427158ce05c57fba8efc6a0914 | refs/heads/master | 2022-12-12T03:09:00.171484 | 2019-06-10T10:08:40 | 2019-06-10T10:08:40 | 191,143,962 | 0 | 0 | null | 2022-12-09T05:45:04 | 2019-06-10T10:08:22 | Python | UTF-8 | Python | false | false | 4,603 | py | """
Django settings for react_native_hook_ex_4479 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; it should be
# rotated and loaded from the environment for any real deployment.
SECRET_KEY = '12nhr61!$2hgc7msxz)-u!imhrof5a@zua&mm91le2@z$kn6n0'

# SECURITY WARNING: don't run with debug turned on in production!
# NOTE: DEBUG and ALLOWED_HOSTS set here are both re-assigned by the
# environment-driven section further down this file.
DEBUG = True

ALLOWED_HOSTS = []

# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.sites'
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'react_native_hook_ex_4479.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'react_native_hook_ex_4479.wsgi.application'

# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
# (Overridden below when DATABASE_URL is present in the environment.)

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/

STATIC_URL = '/static/'

# --- Crowdbotics-generated, environment-driven overrides below ---
# Note the mid-file import; env vars re-define DEBUG, ALLOWED_HOSTS and
# DATABASES defined above.
import environ
env = environ.Env()

ALLOWED_HOSTS = ['*']
SITE_ID = 1
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)

if env.str("DATABASE_URL", default=None):
    DATABASES = {
        'default': env.db()
    }

AUTHENTICATION_BACKENDS = (
    'django.contrib.auth.backends.ModelBackend',
    'allauth.account.auth_backends.AuthenticationBackend'
)

STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, 'static')
]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'

LOCAL_APPS = [
    'home',
]
THIRD_PARTY_APPS = [
    'rest_framework',
    'rest_framework.authtoken',
    'bootstrap4',
    'allauth',
    'allauth.account',
    'allauth.socialaccount',
    'allauth.socialaccount.providers.google',
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS

# allauth
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = None
LOGIN_REDIRECT_URL = '/'

if DEBUG:
    # output email to console instead of sending
    EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
EMAIL_HOST = "smtp.sendgrid.net"
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
"team@crowdbotics.com"
] | team@crowdbotics.com |
217e500f19ff35a819836f710b2c6bcdc325240c | 7b1a5db0a067766a9805fe04105f6c7f9ff131f3 | /pysal/explore/segregation/tests/test_multi_gini_seg.py | c1c1475ea19bacb1f18c2f5aae2607416cb87a5b | [] | permissive | ocefpaf/pysal | 2d25b9f3a8bd87a7be3f96b825995a185624e1d0 | 7e397bdb4c22d4e2442b4ee88bcd691d2421651d | refs/heads/master | 2020-06-26T17:13:06.016203 | 2019-07-31T19:54:35 | 2019-07-31T19:54:35 | 199,696,188 | 0 | 0 | BSD-3-Clause | 2019-07-30T17:17:19 | 2019-07-30T17:17:18 | null | UTF-8 | Python | false | false | 563 | py | import unittest
import pysal.lib
import geopandas as gpd
import numpy as np
from pysal.explore.segregation.aspatial import MultiGiniSeg
class Multi_Gini_Seg_Tester(unittest.TestCase):
    """Regression test for the multigroup Gini segregation index."""

    def test_Multi_Gini_Seg(self):
        # Sacramento tract data shipped with libpysal's example datasets.
        s_map = gpd.read_file(pysal.lib.examples.get_path("sacramentot2.shp"))
        groups_list = ['WHITE_', 'BLACK_', 'ASIAN_','HISP_']
        df = s_map[groups_list]
        index = MultiGiniSeg(df, groups_list)
        # Known-good value; guards against regressions in the estimator.
        np.testing.assert_almost_equal(index.statistic, 0.5456349992598081)
if __name__ == '__main__':
unittest.main() | [
"sjsrey@gmail.com"
] | sjsrey@gmail.com |
b5b118a49469fea04673123a5d2e2352799f59f3 | 35517b6f40a0672a9c355fa42c899a03735b7c46 | /rooms/urls.py | 58db40c531cc40b09fe0e3eb62ee8b4d64f1f47f | [] | no_license | byungsujeong/airbnb-clone | 45a1bd074897f97faa5c10a85ae103301cbd9de1 | 158bcae353105c90ad2b1899367c90a67bbac6af | refs/heads/master | 2023-04-24T06:35:04.902040 | 2021-05-09T13:24:34 | 2021-05-09T13:24:34 | 355,564,701 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 835 | py | from django.urls import path
from . import views
app_name = "rooms"  # URL namespace: reverse with "rooms:<name>"

urlpatterns = [
    # path("<int:pk>", views.room_detail, name="detail"),
    path("create/", views.CreateRoomView.as_view(), name="create"),
    path("<int:pk>", views.RoomDetail.as_view(), name="detail"),
    path("<int:pk>/edit/", views.EditRoomView.as_view(), name="edit"),
    path("<int:pk>/photos/", views.RoomPhotosView.as_view(), name="photos"),
    # NOTE(review): "AddPthotoView" is a typo in the view class name,
    # defined elsewhere — rename there first, then here.
    path("<int:pk>/photos/add", views.AddPthotoView.as_view(), name="add-photo"),
    path(
        "<int:room_pk>/photos/<int:photo_pk>/delete",
        views.delete_photo,
        name="delete-photo",
    ),
    path(
        "<int:room_pk>/photos/<int:photo_pk>/edit",
        views.EditPhotoView.as_view(),
        name="edit-photo",
    ),
    # NOTE(review): trailing slashes are inconsistent across routes
    # ("create/" vs "<int:pk>", "add"); left as-is since changing URLs
    # breaks existing links.
    path("search", views.SearchView.as_view(), name="search"),
]
| [
"byungsu.jeong88@gmail.com"
] | byungsu.jeong88@gmail.com |
fe2fcd6a9850105452a96addca96d68626b41407 | 330dbbefb2e7d6283c812888c89e58498f0b4188 | /_Dist/NeuralNetworks/b_TraditionalML/SVM.py | fd8bc5c1269e79328815274d2214640054952c91 | [
"MIT"
] | permissive | leoatchina/MachineLearning | 93dd3e6c91911f5743617dde1873bf60493171a2 | 071f2c0fc6f5af3d9550cfbeafe8d537c35a76d3 | refs/heads/master | 2023-04-23T02:36:36.809015 | 2021-04-29T15:06:00 | 2021-04-29T15:06:00 | 286,432,406 | 0 | 1 | MIT | 2020-08-10T09:30:34 | 2020-08-10T09:30:34 | null | UTF-8 | Python | false | false | 5,527 | py | import os
import sys
root_path = os.path.abspath("../../../")
if root_path not in sys.path:
sys.path.append(root_path)
import numpy as np
import tensorflow as tf
from _Dist.NeuralNetworks.DistBase import Base, AutoBase, AutoMeta, DistMixin, DistMeta
class LinearSVM(Base):
    """Linear soft-margin SVM trained with hinge loss + L2 regularization.

    Callers supply {0, 1} labels; they are remapped to {-1, +1} when fed
    to the graph, and predictions are the sign of the linear projection.
    """

    def __init__(self, *args, **kwargs):
        super(LinearSVM, self).__init__(*args, **kwargs)
        self._name_appendix = "LinearSVM"
        self.c = None  # soft-margin penalty C; set in init_model_param_settings

    def init_from_data(self, x, y, x_test, y_test, sample_weights, names):
        super(LinearSVM, self).init_from_data(x, y, x_test, y_test, sample_weights, names)
        # Binary classifier with a single output unit, so force a binary metric.
        metric = self.model_param_settings.setdefault("metric", "binary_acc")
        if metric == "acc":
            self.model_param_settings["metric"] = "binary_acc"
        self.n_class = 1

    def init_model_param_settings(self):
        self.model_param_settings.setdefault("lr", 0.01)
        self.model_param_settings.setdefault("n_epoch", 10 ** 3)
        self.model_param_settings.setdefault("max_epoch", 10 ** 6)
        super(LinearSVM, self).init_model_param_settings()
        self.c = self.model_param_settings.get("C", 1.)

    def _build_model(self, net=None):
        # Single linear projection to one output: w^T x + b.
        self._model_built = True
        if net is None:
            net = self._tfx
        current_dimension = net.shape[1].value
        self._output = self._fully_connected_linear(
            net, [current_dimension, 1], "_final_projection"
        )

    def _define_loss_and_train_step(self):
        # C * sum_i max(0, 1 - y_i * f(x_i))  +  L2 penalty on the weights.
        self._loss = self.c * tf.reduce_sum(
            tf.maximum(0., 1 - self._tfy * self._output)
        ) + tf.nn.l2_loss(self._ws[0])
        with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
            self._train_step = self._optimizer.minimize(self._loss)

    def _get_feed_dict(self, x, y=None, weights=None, is_training=False):
        if y is not None:
            # Remap {0, 1} labels to {-1, +1} on a copy; the original wrote
            # `y[y == 0] = -1`, silently mutating the caller's array in place.
            y = np.where(y == 0, -1, y)
        return super(LinearSVM, self)._get_feed_dict(x, y, weights, is_training)

    def predict_classes(self, x):
        # Decision function >= 0 -> class 1, else class 0.
        return (self._calculate(x, tensor=self._output, is_training=False) >= 0).astype(np.int32)
class SVM(LinearSVM):
    """Kernel SVM: a LinearSVM trained on the precomputed Gram matrix."""

    def __init__(self, *args, **kwargs):
        super(SVM, self).__init__(*args, **kwargs)
        self._name_appendix = "SVM"
        # Kernel hyper-parameters: p = polynomial degree, gamma = rbf width.
        self._p = self._gamma = None
        # Cached training data, its Gram matrix and the kernel selector.
        self._x = self._gram = self._kernel_name = None

    @property
    def kernel(self):
        # Resolve the configured kernel name to a callable k(x, y).
        if self._kernel_name == "linear":
            return self.linear
        if self._kernel_name == "poly":
            return lambda x, y: self.poly(x, y, self._p)
        if self._kernel_name == "rbf":
            return lambda x, y: self.rbf(x, y, self._gamma)
        raise NotImplementedError("Kernel '{}' is not implemented".format(self._kernel_name))

    @staticmethod
    def linear(x, y):
        return x.dot(y.T)

    @staticmethod
    def poly(x, y, p):
        return (x.dot(y.T) + 1) ** p

    @staticmethod
    def rbf(x, y, gamma):
        # exp(-gamma * ||x_i - y_j||^2) for every pair (i, j), via broadcasting.
        return np.exp(-gamma * np.sum((x[..., None, :] - y) ** 2, axis=2))

    def init_from_data(self, x, y, x_test, y_test, sample_weights, names):
        # Cache the training data and replace features by kernel features:
        # the model then trains on K(X, X) and evaluates on K(X_test, X).
        self._x, y = np.atleast_2d(x).astype(np.float32), np.asarray(y, np.float32)
        self._p = self.model_param_settings.setdefault("p", 3)
        self._gamma = self.model_param_settings.setdefault("gamma", 1 / self._x.shape[1])
        self._kernel_name = self.model_param_settings.setdefault("kernel_name", "rbf")
        self._gram, x_test = self.kernel(self._x, self._x), self.kernel(x_test, self._x)
        super(SVM, self).init_from_data(self._gram, y, x_test, y_test, sample_weights, names)

    def init_model_param_settings(self):
        super(SVM, self).init_model_param_settings()
        self._p = self.model_param_settings["p"]
        self._gamma = self.model_param_settings["gamma"]
        self._kernel_name = self.model_param_settings["kernel_name"]

    def _define_py_collections(self):
        # Persist the raw training data and Gram matrix alongside the model.
        super(SVM, self)._define_py_collections()
        self.py_collections += ["_x", "_gram"]

    def _define_loss_and_train_step(self):
        # Objective: C * hinge loss + 0.5 * w^T K w (kernelized regularizer).
        self._loss = self.c * tf.reduce_sum(tf.maximum(0., 1 - self._tfy * self._output)) + 0.5 * tf.matmul(
            self._ws[0], tf.matmul(self._gram, self._ws[0]), transpose_a=True
        )[0]
        with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
            self._train_step = self._optimizer.minimize(self._loss)

    def _evaluate(self, x=None, y=None, x_cv=None, y_cv=None, x_test=None, y_test=None, metric=None):
        # Inputs may arrive either as raw features or already kernelized;
        # a raw matrix is detected by its column count differing from the
        # number of training samples.  NOTE(review): this heuristic breaks
        # when n_features happens to equal n_samples — confirm acceptable.
        n_sample = self._x.shape[0]
        cv_feat_dim = None if x_cv is None else x_cv.shape[1]
        test_feat_dim = None if x_test is None else x_test.shape[1]
        x_cv = None if x_cv is None else self.kernel(x_cv, self._x) if cv_feat_dim != n_sample else x_cv
        x_test = None if x_test is None else self.kernel(x_test, self._x) if test_feat_dim != n_sample else x_test
        return super(SVM, self)._evaluate(x, y, x_cv, y_cv, x_test, y_test)

    def predict(self, x):
        # noinspection PyTypeChecker
        return self._predict(self.kernel(x, self._x))

    def predict_classes(self, x):
        return (self.predict(x) >= 0).astype(np.int32)

    def evaluate(self, x, y, x_cv=None, y_cv=None, x_test=None, y_test=None, metric=None):
        return self._evaluate(self.kernel(x, self._x), y, x_cv, y_cv, x_test, y_test, metric)
class AutoLinearSVM(AutoBase, LinearSVM, metaclass=AutoMeta):
    """LinearSVM with automatic data handling mixed in via AutoBase/AutoMeta."""
    pass


class DistLinearSVM(AutoLinearSVM, DistMixin, metaclass=DistMeta):
    """AutoLinearSVM with distributed-training support via DistMixin/DistMeta."""
    pass
| [
"syameimaru.saki@gmail.com"
] | syameimaru.saki@gmail.com |
0ff04073a92aeec8f89f0c43ffbb6fcfa390b892 | 0d0cf0165ca108e8d94056c2bae5ad07fe9f9377 | /20_Introduction_to_Deep_Learning_in_Python/4_Fine-tuning_keras_models/experimentingWithWiderNetworks.py | 0f882da2de07c8d71246cb278cc2a850532cb36e | [] | no_license | MACHEIKH/Datacamp_Machine_Learning_For_Everyone | 550ec4038ebdb69993e16fe22d5136f00101b692 | 9fe8947f490da221430e6dccce6e2165a42470f3 | refs/heads/main | 2023-01-22T06:26:15.996504 | 2020-11-24T11:21:53 | 2020-11-24T11:21:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,421 | py | # Experimenting with wider networks
# Now you know everything you need to begin experimenting with different models!
# A model called model_1 has been pre-loaded. You can see a summary of this model printed in the IPython Shell. This is a relatively small network, with only 10 units in each hidden layer.
# In this exercise you'll create a new model called model_2 which is similar to model_1, except it has 100 units in each hidden layer.
# After you create model_2, both models will be fitted, and a graph showing both models loss score at each epoch will be shown. We added the argument verbose=False in the fitting commands to print out fewer updates, since you will look at these graphically instead of as text.
# Because you are fitting two models, it will take a moment to see the outputs after you hit run, so be patient.

# Instructions
# 100 XP
# Create model_2 to replicate model_1, but use 100 nodes instead of 10 for the first two Dense layers you add with the 'relu' activation. Use 2 nodes for the Dense output layer with 'softmax' as the activation.
# Compile model_2 as you have done with previous models: Using 'adam' as the optimizer, 'categorical_crossentropy' for the loss, and metrics=['accuracy'].
# Hit 'Submit Answer' to fit both the models and visualize which one gives better results! Notice the keyword argument verbose=False in model.fit(): This prints out fewer updates, since you'll be evaluating the models graphically instead of through text.

# NOTE: DataCamp exercise snippet — EarlyStopping, Sequential, Dense,
# model_1, predictors, target, input_shape and plt are all provided by
# the exercise environment; this file is not runnable standalone.

# Define early_stopping_monitor
early_stopping_monitor = EarlyStopping(patience=2)

# Create the new model: model_2
model_2 = Sequential()

# Add the first and second layers
model_2.add(Dense(100, activation='relu', input_shape=input_shape))
model_2.add(Dense(100, activation='relu'))

# Add the output layer
model_2.add(Dense(2, activation='softmax'))

# Compile model_2
model_2.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])

# Fit model_1
model_1_training = model_1.fit(predictors, target, epochs=15, validation_split=0.2, callbacks=[early_stopping_monitor], verbose=False)

# Fit model_2
model_2_training = model_2.fit(predictors, target, epochs=15, validation_split=0.2, callbacks=[early_stopping_monitor], verbose=False)

# Create the plot
plt.plot(model_1_training.history['val_loss'], 'r', model_2_training.history['val_loss'], 'b')
plt.xlabel('Epochs')
plt.ylabel('Validation score')
plt.show()
| [
"noreply@github.com"
] | MACHEIKH.noreply@github.com |
a5951f67f2d24f9eb6ee99d86c9191910a281899 | 493e4405c421a897304c4d1227e7d91b83eb890f | /douappbook/spiders/rating.py | 354499ca355f524682ab2213dff63c86eb0add07 | [] | no_license | stipid/douappbook | 7f94d2bde5e3ce1af87acb7636d0a038a39352ba | c9fac02e6713c0781f10ebcd985aa25370389432 | refs/heads/master | 2020-12-24T07:53:57.018981 | 2015-03-29T16:06:48 | 2015-03-29T16:06:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,871 | py | # -*- coding: utf-8 -*-
import random
try:
import simplejson as json
except ImportError:
import json
import furl
from scrapy import Request
from douappbook.spiders import DoubanAppSpider
from douappbook.items import RatingItem
from douappbook.models import CrawledBook
class RatingSpider(DoubanAppSpider):
    """Crawls per-book rating ("interest") records from the Douban API."""

    name = "rating"
    allowed_domains = ["douban.com"]

    def start_requests(self):
        # Seed one paginated request (50 records per page) per known book.
        book_ids = CrawledBook.get_book_ids()
        # randomize book ids
        random.shuffle(book_ids)
        for book_id in book_ids:
            endpoint = 'book/%d/interests' % book_id
            url = self.get_api_url(
                endpoint,
                start=0,
                count=50
            )
            yield Request(url, callback=self.parse)
            if self.settings['DEBUG']:
                # Debug mode: crawl only the first book.
                break

    def parse(self, response):
        # Recover the book id from the request URL.
        # NOTE(review): taken from path segment index 3 — depends on the
        # URL layout produced by get_api_url; confirm if that changes.
        api_url = furl.furl(response.url)
        book_id = int(api_url.path.segments[3])
        res = json.loads(response.body_as_unicode())
        start = res['start']
        count = res['count']
        total = res['total']
        interests = res['interests']
        # Map each API record onto a flat RatingItem.
        for item in interests:
            rating = RatingItem()
            rating['id'] = item['id']
            rating['book_id'] = book_id
            rating['user_id'] = item['user']['id']
            rating['username'] = item['user']['uid']
            rating['rating'] = item['rating']['value']
            rating['vote'] = item['vote_count']
            rating['comment'] = item['comment']
            yield rating
        # Follow the next page while records remain (skipped in debug mode).
        if start + count < total and not self.settings['DEBUG']:
            endpoint = 'book/%d/interests' % book_id
            url = self.get_api_url(
                endpoint,
                start=start + count,
                count=50
            )
            yield Request(url, callback=self.parse)
"messense@icloud.com"
] | messense@icloud.com |
2af8e8f2d3a6794386959b990b732044f55ab12a | acd41dc7e684eb2e58b6bef2b3e86950b8064945 | /res/packages/scripts/scripts/common/Lib/plat-irix5/GLWS.py | 307029b979ba39dee61a455577a638429f9d3b14 | [] | no_license | webiumsk/WoT-0.9.18.0 | e07acd08b33bfe7c73c910f5cb2a054a58a9beea | 89979c1ad547f1a1bbb2189f5ee3b10685e9a216 | refs/heads/master | 2021-01-20T09:37:10.323406 | 2017-05-04T13:51:43 | 2017-05-04T13:51:43 | 90,268,530 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 635 | py | # 2017.05.04 15:33:48 Střední Evropa (letní čas)
# Embedded file name: scripts/common/Lib/plat-irix5/GLWS.py
from warnings import warnpy3k
warnpy3k('the GLWS module has been removed in Python 3.0', stacklevel=2)
del warnpy3k
# GL window-system status codes: 0 is success, negative values are errors.
NOERROR = 0
NOCONTEXT = -1
NODISPLAY = -2
NOWINDOW = -3
NOGRAPHICS = -4
NOTTOP = -5
NOVISUAL = -6
BUFSIZE = -7
BADWINDOW = -8
# Binding-related failures use a separate -100 range.
ALREADYBOUND = -100
BINDFAILED = -101
SETFAILED = -102
# okay decompyling C:\Users\PC\wotmods\files\originals\res\packages\scripts\scripts\common\Lib\plat-irix5\GLWS.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.05.04 15:33:48 Střední Evropa (letní čas)
| [
"info@webium.sk"
] | info@webium.sk |
9d6fabcf0453c8517213f483a0dd28f5050d0ae6 | 0a11a15cf64e25585d28f484bb2118e8f858cfeb | /알고리즘/알고리즘문제/5097_회전.py | 945a166c127ee3c6ff9b7e8a6fcbc6b7122dddeb | [] | no_license | seoul-ssafy-class-2-studyclub/GaYoung_SSAFY | 7d9a44afd0dff13fe2ba21f76d0d99c082972116 | 23e0b491d95ffd9c7a74b7f3f74436fe71ed987d | refs/heads/master | 2021-06-30T09:09:00.646827 | 2020-11-30T14:09:03 | 2020-11-30T14:09:03 | 197,476,649 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 291 | py | for t in range(int(input())):
N, M = map(int, input().split())
data = list(map(int, input().split()))
queue = [data.pop(0)]
for m in range(M):
data.append(queue.pop(0))
queue.append(data.pop(0))
result = queue.pop()
print('#{} {}'.format(t+1, result)) | [
"gyyoon4u@naver.com"
] | gyyoon4u@naver.com |
585733c3996bda61a1e80b9902673d6f8d8a8733 | aa49120740b051eed9b7199340b371a9831c3050 | /sum_submatrix.py | 6b338ee7bb7f248c32506d5981c498d60b9ed51e | [] | no_license | ashutosh-narkar/LeetCode | cd8d75389e1ab730b34ecd860b317b331b1dfa97 | b62862b90886f85c33271b881ac1365871731dcc | refs/heads/master | 2021-05-07T08:37:42.536436 | 2017-11-22T05:18:23 | 2017-11-22T05:18:23 | 109,366,819 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 956 | py | #!/usr/bin/env python
'''
Given a matrix, calculate the sum of a sub matrix given the start and end indices of the submatrix
'''
def sumSubMatrix(matrix, start_row, start_col, end_row, end_col):
    """Return the sum of matrix[start_row..end_row][start_col..end_col], inclusive.

    Out-of-range bounds are clamped to the matrix; if the clamped range is
    empty (or the matrix is), the sum is 0.

    :param matrix: rectangular list of lists of numbers
    :param start_row, start_col: top-left corner of the submatrix (inclusive)
    :param end_row, end_col: bottom-right corner of the submatrix (inclusive)
    :return: sum of all elements in the submatrix
    """
    if not matrix or not matrix[0]:
        return 0
    nrows = len(matrix)
    ncols = len(matrix[0])
    # Clamp indices into [0, n-1]. The original code only rejected fully
    # out-of-range queries; a negative start index slipped through and
    # wrapped around via Python's negative indexing, summing wrong cells.
    start_row = max(start_row, 0)
    start_col = max(start_col, 0)
    end_row = min(end_row, nrows - 1)
    end_col = min(end_col, ncols - 1)
    if start_row > end_row or start_col > end_col:
        return 0
    # Sum row slices instead of a manual double loop.
    return sum(sum(row[start_col:end_col + 1])
               for row in matrix[start_row:end_row + 1])
if __name__ == '__main__':
    # Demo cases (Python 2 print statements — this snippet is Python 2 only).
    input = [[1, 2, 3, 4],
             [5, 6, 7, 8],
             [9, 10, 11, 12],
             [13, 14, 15, 16]]
    # entire matrix
    print sumSubMatrix(input, 0, 0, 3, 3)
    # same row
    print sumSubMatrix(input, 1, 1, 1, 3)
    # col
    print sumSubMatrix(input, 2, 1, 3, 1)
    # range
    print sumSubMatrix(input, 1, 0, 2, 2)
| [
"ashutosh.narkar@one.verizon.com"
] | ashutosh.narkar@one.verizon.com |
66b191caad4cf439c094f78b09e6827e3a792f22 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02614/s684487359.py | 15d0f66c289bf45b788f38685224357fa8c988e3 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 368 | py | H, W, K = map(int,input().split())
c = list(list(input()) for _ in range(H))
ans = 0
#bit演算
for i in range(2 ** H):
for j in range(2 ** W):
b = 0
for k in range(H):
for l in range(W):
#縦も横も塗らない色が黒のマスを数える
if i >> k & 1 and j >> l & 1 and c[k][l] == "#": b += 1
if b == K: ans += 1
print(ans) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
aad1886a583a2c25a51b9c079489b8e629b94068 | 6fa0d5d3b61fbce01fad5a7dd50258c09298ee00 | /Web/04_django/Open_API/MYFORM/articles/models.py | a6d857d2f7321697638dc293f00c179220437d9b | [] | no_license | athletejuan/TIL | c8e6bd9f7e2c6f999dbac759adcdb6b2959de384 | 16b854928af2f27d91ba140ebc1aec0007e5eb04 | refs/heads/master | 2023-02-19T13:59:06.495110 | 2022-03-23T15:08:04 | 2022-03-23T15:08:04 | 188,750,527 | 1 | 0 | null | 2023-02-15T22:54:50 | 2019-05-27T01:27:09 | Python | UTF-8 | Python | false | false | 989 | py | from django.db import models
from imagekit.models import ProcessedImageField
from imagekit.processors import ResizeToFill
class Article(models.Model):
    """Article with a title, body text and a processed cover image."""
    title = models.CharField(max_length=30)
    content = models.TextField()
    # imagekit re-encodes uploads: resized to fill 200x300, JPEG at 90% quality.
    image = ProcessedImageField(
        upload_to = 'articles/images',
        processors = [ResizeToFill(200,300)],
        format = 'jpeg',
        options = {'quality': 90}
    )
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    class Meta:
        # Newest articles first by default
        ordering = ['-pk']
    def __str__(self):
        return f'No.{self.id} - {self.title}'
class Comment(models.Model):
    """Comment attached to an Article; removed when its article is deleted."""
    content = models.CharField(max_length=200)
    created_at = models.DateTimeField(auto_now_add=True)
    article = models.ForeignKey(Article, on_delete=models.CASCADE)
    class Meta:
        # Newest comments first by default
        ordering = ['-pk']
    def __str__(self):
        return f'<Article({self.article_id}) : Comment({self.id})> - {self.content}'
"vanillasky84.0627@gmail.com"
] | vanillasky84.0627@gmail.com |
fb076470f17938090d47bcac17fbac9b550f005e | 150d9e4cee92be00251625b7f9ff231cc8306e9f | /largestNumberAtLeastTwiceofOthers.py | 3318a94d71cdb4b6b3e595f6c4ced9a862df489b | [] | no_license | JerinPaulS/Python-Programs | 0d3724ce277794be597104d9e8f8becb67282cb0 | d0778178d89d39a93ddb9b95ca18706554eb7655 | refs/heads/master | 2022-05-12T02:18:12.599648 | 2022-04-20T18:02:15 | 2022-04-20T18:02:15 | 216,547,245 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,346 | py | '''
747. Largest Number At Least Twice of Others
You are given an integer array nums where the largest integer is unique.
Determine whether the largest element in the array is at least twice as much as every other number in the array. If it is, return the index of the largest element, or return -1 otherwise.
Example 1:
Input: nums = [3,6,1,0]
Output: 1
Explanation: 6 is the largest integer.
For every other number in the array x, 6 is at least twice as big as x.
The index of value 6 is 1, so we return 1.
Example 2:
Input: nums = [1,2,3,4]
Output: -1
Explanation: 4 is less than twice the value of 3, so we return -1.
Example 3:
Input: nums = [1]
Output: 0
Explanation: 1 is trivially at least twice the value as any other number because there are no other numbers.
Constraints:
1 <= nums.length <= 50
0 <= nums[i] <= 100
The largest element in nums is unique.
'''
class Solution(object):
    def dominantIndex(self, nums):
        """Return the index of the largest element if it is at least twice
        every other element, else -1.

        Single-pass scan for the maximum plus one verification pass — O(n)
        time, O(1) extra space (the original sorted a copy and also left a
        Python-2-only debug `print` statement in place, which is a syntax
        error under Python 3).

        :type nums: List[int]
        :rtype: int
        """
        if len(nums) == 1:
            return 0
        # Locate the (unique) largest element.
        largest_idx = 0
        for i, v in enumerate(nums):
            if v > nums[largest_idx]:
                largest_idx = i
        # It dominates iff no other element exceeds half of it.
        for i, v in enumerate(nums):
            if i != largest_idx and v * 2 > nums[largest_idx]:
                return -1
        return largest_idx
obj = Solution()
print(obj.dominantIndex([3,6,1,0])) | [
"jerinsprograms@gmail.com"
] | jerinsprograms@gmail.com |
bd70f4a9e1704eac28e96fd1bf95f8f4f712a2b9 | 7453e69cda5f4d331ef5b6bb437c27c24579d62d | /event/migrations/0001_initial.py | 9d6f8fcb08dc929724c9120d4e10448d7aeffe20 | [] | no_license | jerinisready/learndjangogrouppehia1-eventmgt | f4ac4c24adae9b600ab45b3b4ab9b086fd3e2e00 | 6c5950169b98c97853ae467009cd51df01b855e0 | refs/heads/master | 2023-05-28T15:08:38.408981 | 2021-06-23T16:10:17 | 2021-06-23T16:10:17 | 379,661,553 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,584 | py | # Generated by Django 3.2.4 on 2021-06-22 15:16
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial schema for the event app.

    Creates Event (title/description/date, registration flag, capacity) and
    Registration (user <-> event link with points and a podium position).
    """
    initial = True
    dependencies = [
        # Depends on whichever user model the project has configured
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Event',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=256)),
                ('description', models.TextField(blank=True, null=True)),
                ('event_datetime', models.DateTimeField()),
                ('is_registration_closed', models.BooleanField(default=False)),
                ('max_no_participants', models.IntegerField(default=1)),
            ],
        ),
        migrations.CreateModel(
            name='Registration',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('point', models.IntegerField(default=0)),
                ('position', models.CharField(choices=[('First', 'First'), ('Second', 'Second'), ('Third', 'Third')], default='Participant', max_length=12)),
                ('event', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='event.event')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"="
] | = |
9e2bc48ca7987ee41405b2bb7640150b642f890c | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/devhub/azure-mgmt-devhub/generated_samples/workflow_create_or_update_with_artifact_gen.py | ecfb1c28f4ed24b756066d1017b0909a8f5eae51 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 3,628 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.devhub import DevHubMgmtClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-devhub
# USAGE
python workflow_create_or_update_with_artifact_gen.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
    """Create or update an Azure DevHub workflow with artifact generation.

    Network side effects only: authenticates via DefaultAzureCredential,
    calls the DevHub management API with hard-coded sample values, and
    prints the service response.
    """
    client = DevHubMgmtClient(
        credential=DefaultAzureCredential(),
        subscription_id="subscriptionId1",
    )
    response = client.workflow.create_or_update(
        resource_group_name="resourceGroup1",
        workflow_name="workflow1",
        parameters={
            "location": "location1",
            "properties": {
                # Dockerfile/manifest generation settings for the sample app
                "artifactGenerationProperties": {
                    "appName": "my-app",
                    "dockerfileGenerationMode": "enabled",
                    "dockerfileOutputDirectory": "./",
                    "generationLanguage": "javascript",
                    "imageName": "myimage",
                    "imageTag": "latest",
                    "languageVersion": "14",
                    "manifestGenerationMode": "enabled",
                    "manifestOutputDirectory": "./",
                    "manifestType": "kube",
                    "namespace": "my-namespace",
                    "port": "80",
                },
                # GitHub Actions workflow wiring: ACR registry, AKS cluster,
                # repo/branch, and OIDC credentials for federated auth
                "githubWorkflowProfile": {
                    "acr": {
                        "acrRegistryName": "registry1",
                        "acrRepositoryName": "repo1",
                        "acrResourceGroup": "resourceGroup1",
                        "acrSubscriptionId": "subscriptionId1",
                    },
                    "aksResourceId": "/subscriptions/subscriptionId1/resourcegroups/resourceGroup1/providers/Microsoft.ContainerService/managedClusters/cluster1",
                    "branchName": "branch1",
                    "deploymentProperties": {
                        "kubeManifestLocations": ["/src/manifests/"],
                        "manifestType": "kube",
                        "overrides": {"key1": "value1"},
                    },
                    "dockerBuildContext": "repo1/src/",
                    "dockerfile": "repo1/images/Dockerfile",
                    "oidcCredentials": {
                        "azureClientId": "12345678-3456-7890-5678-012345678901",
                        "azureTenantId": "66666666-3456-7890-5678-012345678901",
                    },
                    "repositoryName": "repo1",
                    "repositoryOwner": "owner1",
                },
            },
            "tags": {"appname": "testApp"},
        },
    )
    print(response)
# x-ms-original-file: specification/developerhub/resource-manager/Microsoft.DevHub/preview/2022-10-11-preview/examples/Workflow_CreateOrUpdate_WithArtifactGen.json
if __name__ == "__main__":
main()
| [
"noreply@github.com"
] | Azure.noreply@github.com |
9e6f9df4f4afbe859d1c4cf2103cc13eeaa21eab | 1b5546e1fede94587fd2dabee0ef2695699cbab8 | /hyperengine/impl/tensorflow/tensorflow_solver.py | d60c70b21aa3a2b44987826f248958282ca9fb84 | [
"Apache-2.0"
] | permissive | 4thepoch/hyper-engine | 1998ba3f82d23eef732d278655ce1eb293478c20 | 5b73c5fd12ce0ca3f5038b41fd98bc52a1eccb34 | refs/heads/master | 2021-05-09T11:39:42.473874 | 2018-01-25T19:18:02 | 2018-01-25T19:18:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,304 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'maxim'
import tensorflow as tf
from hyperengine.model import BaseSolver
from tensorflow_model_io import TensorflowModelIO
from tensorflow_runner import TensorflowRunner
from tf_util import is_gpu_available
class TensorflowSolver(BaseSolver):
    """BaseSolver specialization that drives a TensorflowRunner.

    Manages the tf.Session lifecycle and persists the best model (session,
    hyper-params, results) through a TensorflowModelIO.
    """
    def __init__(self, data, model=None, hyper_params=None, augmentation=None, model_io=None, result_metric='max', **params):
        # Accept either a ready-made runner or a raw model to wrap.
        if isinstance(model, TensorflowRunner):
            runner = model
        else:
            runner = TensorflowRunner(model)
        self._session = None
        self._model_io = model_io if model_io is not None else TensorflowModelIO(**params)
        # Minimum validation accuracy required before anything is saved.
        self._save_accuracy_limit = params.get('save_accuracy_limit', 0)
        # Flexible evaluation only when a GPU is present (too slow on CPU).
        params['eval_flexible'] = params.get('eval_flexible', True) and is_gpu_available()
        super(TensorflowSolver, self).__init__(runner, data, hyper_params, augmentation, result_metric, **params)
    def create_session(self):
        """Create a tf.Session bound to the runner's graph and keep it."""
        self._session = tf.Session(graph=self._runner.graph())
        return self._session
    def init_session(self):
        """Init variables, restore a saved session if any, and return its
        previously recorded validation accuracy (0 if none)."""
        self._runner.init(session=self._session)
        results = self._load(directory=self._model_io.load_dir, log_level=1)
        return results.get('validation_accuracy', 0)
    def terminate(self):
        """Delegate shutdown to the runner."""
        self._runner.terminate()
    def on_best_accuracy(self, accuracy, eval_result):
        """Persist results/hyper-params/session when a new best accuracy
        clears the save threshold."""
        if accuracy >= self._save_accuracy_limit:
            self._model_io.save_results({'validation_accuracy': accuracy, 'model_size': self._runner.model_size()})
            self._model_io.save_hyper_params(self._hyper_params)
            self._model_io.save_session(self._session)
            self._model_io.save_data(eval_result.get('data'))
    def _evaluate_test(self):
        # Load the best session if available before test evaluation
        current_results = self._load(directory=self._model_io.save_dir, log_level=0)
        eval_ = super(TensorflowSolver, self)._evaluate_test()
        if not current_results:
            return eval_
        # Update the current results with the test accuracy and re-save
        current_results['test_accuracy'] = eval_.get('accuracy', 0)
        self._model_io.save_results(current_results)
        return eval_
    def _load(self, directory, log_level):
        """Restore session + stored results from `directory`; {} if absent."""
        self._model_io.load_session(self._session, directory, log_level)
        results = self._model_io.load_results(directory, log_level)
        return results or {}
| [
"Maxim.Podkolzine@jetbrains.com"
] | Maxim.Podkolzine@jetbrains.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.