blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ebc299fc609c9f70d3b6bb73526b7c530b6aad85 | f65be296b831982b187cb3c3a1c82740fec15b5a | /ineco_tax_include/account.py | bd62769cf187b9dbca6b460bc4eff50915aaa2f6 | [] | no_license | nitikarnh/bpe_module | ab05af81f7dae10129ec584233423d4e5c3c7f3d | 6b1057495b277dc69023554d5d4e7bf172ba07c1 | refs/heads/master | 2020-05-21T16:40:05.291099 | 2017-10-24T09:11:01 | 2017-10-24T09:11:01 | 64,814,809 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,151 | py | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import itertools
from lxml import etree
from openerp import models, fields, api, _
from openerp.exceptions import except_orm, Warning, RedirectWarning
from openerp.tools import float_compare
import openerp.addons.decimal_precision as dp
class AccountInvoice(models.Model):
    """Recompute invoice totals assuming one (possibly price-included) tax rate."""
    _inherit = 'account.invoice'
    @api.one
    @api.depends('invoice_line.price_subtotal', 'tax_line.amount')
    def _compute_amount(self):
        """Compute amount_untaxed / amount_tax / amount_total.

        Sums quantity * price_unit over the invoice lines that carry at
        least one tax, then derives the tax portion from the last tax
        seen: if that tax is price-included the subtotal is treated as
        tax-inclusive and the tax is split out, otherwise the tax is
        added on top.

        NOTE(review): lines without any tax are excluded from the totals,
        and when lines carry different taxes only the last tax's rate and
        price_include flag are used -- presumably every line shares one
        tax in this module's use case; TODO confirm.
        """
        tax_included = False
        subtotal = 0.0
        percent = 0.0
        for line in self.invoice_line:
            if line.invoice_line_tax_id:
                for tax in line.invoice_line_tax_id:
                    tax_included = tax.price_include
                    percent = tax.amount
                subtotal += line.quantity * line.price_unit
        if tax_included:
            # Subtotal already contains the tax: split it out.
            amount_total = subtotal
            amount_tax = subtotal * (percent / (1 + percent))
            amount_untaxed = amount_total - amount_tax
        else:
            # Tax is added on top of the untaxed subtotal.
            amount_untaxed = subtotal
            amount_tax = subtotal * percent
            amount_total = amount_untaxed + amount_tax
        self.amount_untaxed = amount_untaxed
        self.amount_tax = amount_tax
        self.amount_total = amount_total
class AccountInvoiceTax(models.Model):
    _inherit = 'account.invoice.tax'
    @api.v8
    def compute(self, invoice):
        """Build the grouped tax lines for *invoice*.

        Mirrors the stock OpenERP grouping (one entry per
        (tax_code, base_code, account) key), but afterwards every grouped
        line's base/amount figures are overwritten with totals recomputed
        from quantity * price_unit of the taxed invoice lines, honouring
        price-included taxes.

        NOTE(review): ``tax_included`` and ``percent`` keep only the values
        of the last tax seen, and ``alltotal`` is accumulated but never
        used -- presumably all lines share a single tax; TODO confirm.
        """
        tax_grouped = {}
        # Currency pinned to the invoice date for date-dependent conversion.
        currency = invoice.currency_id.with_context(date=invoice.date_invoice or fields.Date.context_today(invoice))
        company_currency = invoice.company_id.currency_id
        tax_included = False
        subtotal = alltotal = 0.0
        percent = 0.0
        for line in invoice.invoice_line:
            alltotal += line.quantity * line.price_unit
            if line.invoice_line_tax_id:
                for tax in line.invoice_line_tax_id:
                    tax_included = tax.price_include == True
                    percent = tax.amount
                # Only taxed lines contribute to the recomputed subtotal.
                subtotal += line.quantity * line.price_unit
            taxes = line.invoice_line_tax_id.compute_all(
                (line.price_unit * (1 - (line.discount or 0.0) / 100.0)),
                line.quantity, line.product_id, invoice.partner_id)['taxes']
            for tax in taxes:
                val = {
                    'invoice_id': invoice.id,
                    'name': tax['name'],
                    'amount': tax['amount'],
                    'manual': False,
                    'sequence': tax['sequence'],
                    'base': currency.round(tax['price_unit'] * line['quantity']),
                }
                if invoice.type in ('out_invoice','in_invoice'):
                    # Regular invoices use the "collected" codes/accounts/signs.
                    val['base_code_id'] = tax['base_code_id']
                    val['tax_code_id'] = tax['tax_code_id']
                    val['base_amount'] = currency.compute(val['base'] * tax['base_sign'], company_currency, round=False)
                    val['tax_amount'] = currency.compute(val['amount'] * tax['tax_sign'], company_currency, round=False)
                    val['account_id'] = tax['account_collected_id'] or line.account_id.id
                    val['account_analytic_id'] = tax['account_analytic_collected_id']
                else:
                    # Refunds use the "ref"/paid codes/accounts/signs.
                    val['base_code_id'] = tax['ref_base_code_id']
                    val['tax_code_id'] = tax['ref_tax_code_id']
                    val['base_amount'] = currency.compute(val['base'] * tax['ref_base_sign'], company_currency, round=False)
                    val['tax_amount'] = currency.compute(val['amount'] * tax['ref_tax_sign'], company_currency, round=False)
                    val['account_id'] = tax['account_paid_id'] or line.account_id.id
                    val['account_analytic_id'] = tax['account_analytic_paid_id']
                # If the taxes generate moves on the same financial account as the invoice line
                # and no default analytic account is defined at the tax level, propagate the
                # analytic account from the invoice line to the tax line. This is necessary
                # in situations were (part of) the taxes cannot be reclaimed,
                # to ensure the tax move is allocated to the proper analytic account.
                if not val.get('account_analytic_id') and line.account_analytic_id and val['account_id'] == line.account_id.id:
                    val['account_analytic_id'] = line.account_analytic_id.id
                key = (val['tax_code_id'], val['base_code_id'], val['account_id'])
                if not key in tax_grouped:
                    tax_grouped[key] = val
                else:
                    tax_grouped[key]['base'] += val['base']
                    tax_grouped[key]['amount'] += val['amount']
                    tax_grouped[key]['base_amount'] += val['base_amount']
                    tax_grouped[key]['tax_amount'] += val['tax_amount']
        amount_total = amount_tax = amount_untaxed = 0.0
        if tax_included:
            # Subtotal already contains the tax: split it out.
            amount_total = currency.round(subtotal)
            amount_tax = currency.round(subtotal * (percent / (1 + percent)))
            amount_untaxed = currency.round(amount_total - amount_tax)
        else:
            amount_untaxed = currency.round(subtotal)
            amount_tax = currency.round(subtotal * percent)
            amount_total = currency.round(amount_untaxed + amount_tax)
        for t in tax_grouped.values():
            #t['base'] = currency.round(t['base'])
            #t['amount'] = currency.round(t['amount'])
            #t['base_amount'] = currency.round(t['base_amount'])
            #t['tax_amount'] = currency.round(t['tax_amount'])
            # Override every group's figures with the recomputed totals.
            t['base'] = amount_untaxed
            t['amount'] = amount_tax
            t['base_amount'] = amount_untaxed
            t['tax_amount'] = amount_tax
        return tax_grouped
| [
"thitithup@gmail.com"
] | thitithup@gmail.com |
a534743075eff3bc484d4ecb49af2297db59755c | 8ded62b315fc3e325833d91caa885776e46ebead | /geatpy/demo/single_objective_demo5/main.py | 3fea0027833b9470ad3502501703584c2e785879 | [] | no_license | CL-White/geatpy | 46079c501d72763b629b9654e07d38adefa2f848 | 3921306b310c91f0bb7aab22f718ab0ba44d600b | refs/heads/master | 2020-04-13T23:59:03.770713 | 2018-12-21T06:55:07 | 2018-12-21T06:55:07 | 163,521,396 | 1 | 0 | null | 2018-12-29T15:30:55 | 2018-12-29T15:30:55 | null | UTF-8 | Python | false | false | 1,629 | py | # -*- coding: utf-8 -*-
"""
执行脚本main.py
描述:
该demo是展示如何计算带约束的任务指派问题
其中目标函数和约束条件写在aimfuc.py文件中
问题如下:
设有5个人,5个任务。
已知这5个人每小时工作要求的工资分别是1,2,3,4,5元,
而这5个任务分别耗时1,2,3,4,5小时。
此外,已知工人1无法完成第2和第4个任务;工人3无法完成第1和第4个任务。
现要求给每个人分配去完成不同的任务,要求老板一共支付工人的工钱数最少。
因为问题需要用排列编码的染色体来解决,因此本案例调用了“sga_new_permut_templet”这个算法模板,其详细用法可利用help命令查看,或是在github下载并查看源码
调用算法模板时可以设置drawing=2,此时算法模板将在种群进化过程中绘制动画,但注意执行前要在Python控制台执行命令matplotlib qt5。
"""
import geatpy as ga
# 获取函数接口地址
AIM_M = __import__('aimfuc')
# 参数设置
NVAR = 5 # 排列编码的染色体长度
VarLen = 5 # 排列集合的大小,等于5表示排列集合为{1,2,3,4,5}
# 调用编程模板,其中recombinStyle要设置为'xovpm',对于排列编码问题,要采用特殊的xovpm(部分匹配交叉)的重组方式
[pop_trace, var_trace, times] = ga.sga_new_permut_templet(AIM_M, 'aimfuc', None, None, NVAR, VarLen, maxormin = 1, MAXGEN = 100, NIND = 10, SUBPOP = 1, GGAP = 0.9, selectStyle = 'etour', recombinStyle = 'xovpm', recopt = 0.9, pm = 0.1, distribute = True, drawing = 1)
| [
"jazzbin@geatpy.com"
] | jazzbin@geatpy.com |
ac0e03f089553008be7076ba6ad23656ff3032d2 | 00540621f1ce1038a6fd5550b868931d9d02829a | /IPProxyPool/util/compatibility.py | 9e21c93b7aaeba3210dd4c79fda013c91eea4603 | [
"Apache-2.0"
] | permissive | MisterZhouZhou/pythonLearn | 392a3c0f31d0d3a61a43ae27b4a24c6d15316722 | 8933c7a6d444d3d86a173984e6cf4c08dbf84039 | refs/heads/master | 2020-05-19T18:53:08.451360 | 2019-07-18T07:01:51 | 2019-07-18T07:01:51 | 185,164,702 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 442 | py | # coding:utf-8
import sys
# True when running under Python 3.
PY3 = sys.version_info[0] == 3
if PY3:
    text_type = str
    binary_type = bytes
else:
    # Python 2: ``unicode``/``str`` play the text/bytes roles.
    text_type = unicode
    binary_type = str
def text_(s, encoding='utf-8', errors='strict'):
    """Coerce *s* to the text type, decoding byte strings with *encoding*."""
    decoded = s.decode(encoding, errors) if isinstance(s, binary_type) else s
    return decoded
def bytes_(s, encoding='utf-8', errors='strict'):
    # Coerce *s* to bytes, encoding text strings with *encoding*;
    # non-text values pass through unchanged.
    if isinstance(s, text_type):
        return s.encode(encoding, errors)
    return s | [
"m15038960016@163.com"
] | m15038960016@163.com |
179e8aa96fbd0a11d883b2da40dc69431c00f40e | f8da830331428a8e1bbeadf23345f79f1750bd98 | /msgraph-cli-extensions/v1_0/devicescorpmgt_v1_0/azext_devicescorpmgt_v1_0/vendored_sdks/devicescorpmgt/aio/operations_async/_device_app_management_managed_app_registration_applied_policy_operations_async.py | 8fb43691c6645917a280dbc32ead62781233dd0b | [
"MIT"
] | permissive | ezkemboi/msgraph-cli | e023e1b7589461a738e42cbad691d9a0216b0779 | 2ceeb27acabf7cfa219c8a20238d8c7411b9e782 | refs/heads/main | 2023-02-12T13:45:03.402672 | 2021-01-07T11:33:54 | 2021-01-07T11:33:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,249 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar
import warnings
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class DeviceAppManagementManagedAppRegistrationAppliedPolicyOperations:
    """DeviceAppManagementManagedAppRegistrationAppliedPolicyOperations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~devices_corporate_management.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = models

    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    async def target_app(
        self,
        managed_app_registration_id: str,
        managed_app_policy_id: str,
        apps: Optional[List["models.MicrosoftGraphManagedMobileApp"]] = None,
        **kwargs
    ) -> None:
        """Invoke action targetApps.

        Invoke action targetApps.

        :param managed_app_registration_id: key: id of managedAppRegistration.
        :type managed_app_registration_id: str
        :param managed_app_policy_id: key: id of managedAppPolicy.
        :type managed_app_policy_id: str
        :param apps:
        :type apps: list[~devices_corporate_management.models.MicrosoftGraphManagedMobileApp]
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))

        # Wrap the app list in the generated request-body schema type.
        _body = models.PathsVf2Dh9DeviceappmanagementManagedappregistrationsManagedappregistrationIdAppliedpoliciesManagedapppolicyIdMicrosoftGraphTargetappsPostRequestbodyContentApplicationJsonSchema(apps=apps)
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL: substitute both path keys into the action URL.
        url = self.target_app.metadata['url']  # type: ignore
        path_format_arguments = {
            'managedAppRegistration-id': self._serialize.url("managed_app_registration_id", managed_app_registration_id, 'str'),
            'managedAppPolicy-id': self._serialize.url("managed_app_policy_id", managed_app_policy_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters (this action takes no query parameters).
        query_parameters = {}  # type: Dict[str, Any]

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(_body, 'PathsVf2Dh9DeviceappmanagementManagedappregistrationsManagedappregistrationIdAppliedpoliciesManagedapppolicyIdMicrosoftGraphTargetappsPostRequestbodyContentApplicationJsonSchema')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # The service replies 204 No Content on success; anything else is an error.
        if response.status_code not in [204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.OdataError, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})
    target_app.metadata = {'url': '/deviceAppManagement/managedAppRegistrations/{managedAppRegistration-id}/appliedPolicies/{managedAppPolicy-id}/microsoft.graph.targetApps'}  # type: ignore
| [
"japhethobalak@gmail.com"
] | japhethobalak@gmail.com |
b2545483337881d35bc202af30352ec1f39361c6 | a44b918826fcbeccb6b70a295d98ca0a31007c13 | /Koudai/Server/release/Script/PyScript/Action/action12057.py | 568de94028d8a9ba6a5488d3dd22f347debf6ba1 | [] | no_license | Pattentively/Scut-samples | a5eff14506f137e409aa7f6139825fbff09e40a2 | 824037857450ca6e580d2bf6710679331132e6fe | refs/heads/master | 2021-01-22T01:05:22.578877 | 2015-04-27T01:53:53 | 2015-04-27T01:53:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,223 | py | import clr, sys
import random
import time
import datetime
import ReferenceLib
from lang import Lang
from action import *
from System import *
from System.Collections.Generic import *
from ZyGames.Framework.Common.Log import *
from ZyGames.Tianjiexing.Model import *
from ZyGames.Tianjiexing.BLL import *
from ZyGames.Tianjiexing.BLL.Base import *
from ZyGames.Tianjiexing.Lang import *
from ZyGames.Framework.Game.Cache import *
from ZyGames.Framework.Game.Service import *
from ZyGames.Framework.Common import *
from ZyGames.Framework.Cache.Generic import *
from ZyGames.Tianjiexing.Model.Config import *
from ZyGames.Tianjiexing.BLL.Combat import *
from ZyGames.Tianjiexing.Model.Enum import *
# 12057 - map (plot) list interface
class UrlParam(HttpParam):
    """Request parameters for action 12057 (map list)."""
    def __init__(self):
        """Initialise with a default plot id of 0."""
        HttpParam.__init__(self)
        self.plotID = 0
class ActionResult(DataResult):
    """Response payload for action 12057."""
    def __init__(self):
        DataResult.__init__(self)
        # Plot definitions to send down, plus the player's plot package.
        self.mapList = []
        self.userPlotPackage = None
def getUrlElement(httpGet, parent):
    """Parse the request into a UrlParam (no extra fields are read here)."""
    return UrlParam()
def takeAction(urlParam, parent):
    """Action 12057: return the archaeology (KaoGu) map list for the player.

    Requires player level >= 20; on the first qualifying request the
    player's archaeology map data is initialised.
    """
    actionResult = ActionResult();
    userId = parent.Current.User.PersonalId;
    contextUser = parent.Current.User;
    # Helper: report a data-load failure back to the client.
    def loadError():
        parent.ErrorCode = Lang.getLang("ErrorCode");
        parent.ErrorInfo = Lang.getLang("LoadError");
        actionResult.Result = False;
        return actionResult;
    # Check whether the player has reached level 20.
    if contextUser.UserLv < 20:
        parent.ErrorCode = Lang.getLang("ErrorCode");
        parent.ErrorInfo = Lang.getLang("St12057_UserLvNotEnough");
        actionResult.Result = False;
        return actionResult;
    userPlotPackage = GameDataCacheSet[UserPlotPackage]().FindKey(userId);
    # Load the map list to send down.
    plotList = ConfigCacheSet[PlotInfo]().FindAll(match=lambda x:x.PlotType == PlotType.KaoGuPlot); # archaeology map definitions
    if not plotList or not userPlotPackage:
        return loadError();
    # When the player has reached level 20 but has no map data yet, initialise it.
    plotMapList = userPlotPackage.PlotPackage.FindAll(match=lambda x:x.PlotType == PlotType.KaoGuPlot);
    if not plotMapList and contextUser.UserLv >= 20:
        UserArchaeologyHelper.InitializeMapInfo(userId);
        userPlotPackage = GameDataCacheSet[UserPlotPackage]().FindKey(userId);
    actionResult.mapList = plotList;
    actionResult.userPlotPackage = userPlotPackage.PlotPackage;
    return actionResult;
def buildPacket(writer, urlParam, actionResult):
    """Serialise the action 12057 response: map list plus per-map owned flag."""
    mapList = actionResult.mapList
    userPlotPackage = actionResult.userPlotPackage;
    # Map list.
    writer.PushIntoStack(len(mapList));
    for info in mapList:
        dsItem = DataStruct();
        dsItem.PushIntoStack(info.PlotID);
        dsItem.PushIntoStack(info.PlotName);
        dsItem.PushIntoStack(info.BossHeadID);
        dsItem.PushIntoStack(info.KgScene);
        mapInfo = userPlotPackage.Find(match=lambda x:x.PlotID == info.PlotID);
        # 1 when the player already has this plot in his package, else 0.
        dsItem.PushShortIntoStack(1 if mapInfo else 0);
        writer.PushIntoStack(dsItem);
    return True;
return True; | [
"wzf_88@qq.com"
] | wzf_88@qq.com |
d9d2239e1b1af794739b1c6def6e3cfe648785d9 | ddda55fcfc84ac5cd78cfc5c336a3df0b9096157 | /projects/gd32f303-demo/board/SConscript | 060557bb0a4e56f4bc0b79aa63de2f2cbaa77954 | [
"Apache-2.0"
] | permissive | liu-delong/lu_xing_xiang_one_os | 701b74fceb82dbb2806518bfb07eb85415fab43a | 0c659cb811792f2e190d5a004a531bab4a9427ad | refs/heads/master | 2023-06-17T03:02:13.426431 | 2021-06-28T08:12:41 | 2021-06-28T08:12:41 | 379,661,507 | 2 | 2 | Apache-2.0 | 2021-06-28T10:08:10 | 2021-06-23T16:11:54 | C | UTF-8 | Python | false | false | 1,037 | import os
import osconfig
from build_tools import *
# Make the GD32 HAL helper scripts importable.
sys.path.append(Env['OS_ROOT'] + '/drivers/hal/gd/scripts/')
pwd = PresentDir()
LIBS = []
LIBPATH = []
# Board support sources (general drivers).
src = Split('''
board.c
CubeMX_Config/Src/gd32f30x_it.c
CubeMX_Config/Src/system_gd32f30x.c
''')
# Include paths for this project.
path = [pwd]
path += [pwd + '/ports']
path += [pwd + '/../../../drivers/hal/gd/GD32F30x_HAL/GD32F30x_standard_peripheral/Include']
path += [pwd + '/../../../drivers/hal/gd/GD32F30x_HAL/CMSIS/GD/GD32F30x/Include']
path += [pwd + '/CubeMX_Config/Inc']
path += [pwd + '/../../../drivers/hal/gd/GD32F30x_HAL/CMSIS']
# Pick the startup assembly file matching the configured toolchain.
if osconfig.CROSS_TOOL == 'gcc':
    src += [pwd + '/startup/startup_gd32f30x_hd_gcc.s']
elif osconfig.CROSS_TOOL == 'keil':
    src += [pwd + '/startup/startup_gd32f30x_hd_arm.s']
elif osconfig.CROSS_TOOL == 'iar':
    src += [pwd + '/startup/startup_gd32f30x_hd_iar.s']
CPPDEFINES = ['GD32F30X_HD']
group = AddCodeGroup('bsp', src, depend = [''], CPPPATH = path, CPPDEFINES = CPPDEFINES)
Return('group')
| [
"cmcc_oneos@cmiot.chinamobile.com"
] | cmcc_oneos@cmiot.chinamobile.com | |
d9ae372f842f837ad2873725740adc882eeccff2 | 684a9016bf00e132eab3c9cf4534639ae096cfc5 | /Main/dlnaupnpserver/ServerClass.py | ab0249376c9d385dcead51ad84cea188e86f85d2 | [
"BSD-3-Clause",
"MIT"
] | permissive | pszafer/dlna_upnp_invention | d97a0c641d1d7b170378f0fad5b978d8e5576966 | 497d173a9e3883412dbbb17cafa826a0394ff849 | refs/heads/master | 2021-01-02T09:35:04.628203 | 2012-07-10T15:02:53 | 2012-07-10T15:02:53 | 1,980,638 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,922 | py | '''
Created on 05-07-2011
@copyright: 2011,
@author: Pawel Szafer
@license: Licensed under the BSD license
http://www.opensource.org/licenses/bsd-license.php
@contact: pszafer@gmail.com
@version: 0.8
@note:this is only test module
'''
from twisted.internet import reactor
from modCoherence.base import Coherence
import gnome.ui
import gnomevfs
import gettext
import os
import struct
import sys
class ServerClass(object):
'''
classdocs
'''
def __init__(self):
'''
Constructor
'''
print "initek"
def check_device(self, device):
print "check device"
print "found device %s of type %s - %r" %(device.get_friendly_name(),
device.get_device_type(),
device.client)
def start(self):
print "I'm started"
config = {'logmode':'warning'}
c = Coherence(config)
print "to connect"
c.connect(self.check_device, 'Coherence.UPnP.Device.detection_completed')
print "start"
#myClass = ServerClass()
#reactor.callWhenRunning(ServerClass().start)
#reactor.run()
#header = {}
#header['user-agent'] = 'Microsoft-Windows/6.1 UPnP/1.0 Windows-Media-Player/12.0.7601.17514 DLNADOC/1.50 (MS-DeviceCaps/1024)'
#test = header['user-agent'].find('blee')
#print test
#filename = "file:///home/xps/Wideo/test/test2/Friends_S06_E20.avi"
#filename = "/home/xps/.thumbnails/normal/f1d2e7cf33db9de55a6fe49b91a63b1b.png"
#hash_from_path = str(id(filename))
#print hash_from_path
import subprocess
def getFileMetadata(filename):
    """Return the 'Duration' value reported by ffprobe for *filename*.

    Runs ``ffprobe`` and extracts the text between 'Duration: ' and
    ', start' from the first matching output line, e.g. '00:21:55.64'.
    Raises IndexError if ffprobe reports no duration line.
    """
    result = subprocess.Popen(["ffprobe", filename],
                              stdout = subprocess.PIPE, stderr = subprocess.STDOUT)
    duration_lines = [x for x in result.stdout.readlines() if "Duration" in x]
    # BUG FIX: the original sliced/called .index() on the *list* of lines
    # instead of a single line, which raised ValueError at runtime.
    line = duration_lines[0]
    start = line.index('Duration: ') + len('Duration: ')
    end = line.index(', start')
    return line[start:end]
def create_thumbnail_via_gnome(uri):
mimetype = gnomevfs.get_mime_type(uri)
thumbFactory = gnome.ui.ThumbnailFactory(gnome.ui.THUMBNAIL_SIZE_NORMAL)
if thumbFactory.can_thumbnail(uri, mimetype,0):
thumbnail = thumbFactory.generate_thumbnail(uri, mimetype)
print "here"
if thumbnail != None:
thumbFactory.save_thumbnail(thumbnail, uri, 0)
print "passed"
#uri = "file:///home/xps/Wideo/test/test2/Friends_S06_E20.avi"
#create_thumbnail_via_gnome(uri)
#Duration: 00:21:55.64, start: 0.000000, bitrate: 1485 kb/s
# print "test"
#import Image
#im = Image.open("/home/xps/Obrazy/toyota_public/toyota_1.jpg")
#print im.size
#int = "aaa"
#b= None
#for i in im.size:
# b += str(i)+"x"
#b = b[:len(b)-1]
#print b
#a = [66.25, 333, 335, 1, 1234.5]
#
#
#print a
#print a[:2]
#
#for i in range(0, 2):
# print a[i]
#itemmimetype = "x-mkv"
#itemmimetype = "avi"
#print itemmimetype.replace("x-", "")
#
#zara = {}
#
#zara['test'] = "aaa"
#zara['test2'] = "bbb"
#
#for i in zara:
# print i[0]
# print i[1]
from StringIO import StringIO
APP="dlnaupnpserver"
DIR=os.path.dirname (__file__) + '/locale'
#locale.setlocale(locale.LC_ALL, '')
#gettext.bindtextdomain(APP, DIR)
#gettext.textdomain(APP)
#_ = gettext.gettext
#gettext.install(APP, './locale', unicode=1)
#translations = gettext.translation(APP, "./locale", languages=['pl'])
#translations.install()
#print _("Image")
y = "hehehehe"
#b =
PROJECT_DIR = os.path.normpath(os.path.dirname(__file__))
new_dir, _ = os.path.split(PROJECT_DIR)
print sys.path
sys.path.insert(0, new_dir)
print sys.path
#liczba = round(286/72,4)
#
#liczba = (286 + 72 // 2) // 72
#print liczba
#print round(liczba,0)
#dur = str(getFileMetadata("/home/xps/Wideo/test/test2/Friends_S06_E20.avi"))
#ind = dur.index(', start')
#print ind
#print max(dur)
#dur1 = dur[(dur.index('Duration: ',) + len('Duration: ')):dur.index(', start')]
#print dur1
#print "stop" | [
"pszafer@gmail.com"
] | pszafer@gmail.com |
15ba5830a4ff5c10b5f9b04726ad9ef517dec34e | 0e647273cffc1fb6cbd589fa3c7c277b221ba247 | /configs/hpt-pretrain/chexpert_rm_color/base-chexpert_rm_color-config.py | bf0ba57feabab69c163a32da34d296ce91df454b | [
"Apache-2.0"
] | permissive | Berkeley-Data/OpenSelfSup | e9976bf011b69ebf918506ba184f464b1073ec13 | 221191b88d891de57725b149caf237ffef72e529 | refs/heads/master | 2023-05-12T07:34:52.268476 | 2021-04-08T00:58:37 | 2021-04-08T00:58:37 | 343,654,823 | 0 | 1 | Apache-2.0 | 2021-04-08T00:58:37 | 2021-03-02T05:20:27 | Python | UTF-8 | Python | false | false | 2,019 | py | _base_ = '../../base.py'
# model settings: MoCo v2-style contrastive model with a ResNet-50 backbone.
model = dict(
    type='MOCO',
    pretrained=None,
    queue_len=65536,
    feat_dim=128,
    momentum=0.999,
    backbone=dict(
        type='ResNet',
        depth=50,
        in_channels=3,
        out_indices=[4], # 0: conv-1, x: stage-x
        norm_cfg=dict(type='BN')),
    neck=dict(
        type='NonLinearNeckV1',
        in_channels=2048,
        hid_channels=2048,
        out_channels=128,
        with_avg_pool=True),
    head=dict(type='ContrastiveHead', temperature=0.2))
# dataset settings: CheXpert images served through the ImageNet-style source.
data_source_cfg = dict(
    type='ImageNet',
    memcached=False,
    mclient_path='/not/used')
data_train_list = "data/chexpert/meta/train-val.txt"
data_train_root = "data/chexpert"
dataset_type = 'ContrastiveDataset'
# Grayscale-like CheXpert statistics replicated across the 3 channels.
img_norm_cfg = dict(mean=[0.5028, 0.5028, 0.5028], std=[0.2919, 0.2919, 0.2919])
# MoCo v2 augmentation pipeline WITHOUT ColorJitter (hence "rm_color").
train_pipeline = [
    dict(type='RandomResizedCrop', size=224, scale=(0.2, 1.)),
    dict(type='RandomGrayscale', p=0.2),
    dict(
        type='RandomAppliedTrans',
        transforms=[
            dict(
                type='GaussianBlur',
                sigma_min=0.1,
                sigma_max=2.0)
        ],
        p=0.5),
    dict(type='RandomHorizontalFlip'),
    dict(type='ToTensor'),
    dict(type='Normalize', **img_norm_cfg),
]
data = dict(
    imgs_per_gpu=128, # batch size per GPU; NOTE(review): the original note "total 64*4=256" is stale -- total depends on GPU count
    workers_per_gpu=4,
    drop_last=True,
    train=dict(
        type=dataset_type,
        data_source=dict(
            list_file=data_train_list, root=data_train_root,
            **data_source_cfg),
        pipeline=train_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.03, weight_decay=0.0001, momentum=0.9)
# learning policy
lr_config = dict(policy='CosineAnnealing', min_lr=0.)
# cjrd added this flag, since OSS didn't support training by iters(?)
by_iter=True
log_config = dict(
    interval=25,
    by_epoch=False,
    hooks=[
        dict(type='TextLoggerHook', by_epoch=False),
        dict(type='TensorboardLoggerHook', by_epoch=False)
    ])
| [
"taeil.goh@gmail.com"
] | taeil.goh@gmail.com |
ce2901dd98d7ea783a9d14e70295121f5c69db4c | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-dli/huaweicloudsdkdli/v1/model/export_sql_job_result_request.py | ce6445aee0752af2e044cada575e4d159696ae5d | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 3,877 | py | # coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ExportSqlJobResultRequest:
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attribute names whose values must be masked in to_dict() output.
    sensitive_list = []

    openapi_types = {
        'job_id': 'str',
        'body': 'ExportSqlResultRequestBody'
    }

    attribute_map = {
        'job_id': 'job_id',
        'body': 'body'
    }

    def __init__(self, job_id=None, body=None):
        """ExportSqlJobResultRequest

        The model defined in huaweicloud sdk

        :param job_id: Job ID
        :type job_id: str
        :param body: Body of the ExportSqlJobResultRequest
        :type body: :class:`huaweicloudsdkdli.v1.ExportSqlResultRequestBody`
        """

        self._job_id = None
        self._body = None
        self.discriminator = None

        self.job_id = job_id
        if body is not None:
            self.body = body

    @property
    def job_id(self):
        """Gets the job_id of this ExportSqlJobResultRequest.

        Job ID

        :return: The job_id of this ExportSqlJobResultRequest.
        :rtype: str
        """
        return self._job_id

    @job_id.setter
    def job_id(self, job_id):
        """Sets the job_id of this ExportSqlJobResultRequest.

        Job ID

        :param job_id: The job_id of this ExportSqlJobResultRequest.
        :type job_id: str
        """
        self._job_id = job_id

    @property
    def body(self):
        """Gets the body of this ExportSqlJobResultRequest.

        :return: The body of this ExportSqlJobResultRequest.
        :rtype: :class:`huaweicloudsdkdli.v1.ExportSqlResultRequestBody`
        """
        return self._body

    @body.setter
    def body(self, body):
        """Sets the body of this ExportSqlJobResultRequest.

        :param body: The body of this ExportSqlJobResultRequest.
        :type body: :class:`huaweicloudsdkdli.v1.ExportSqlResultRequestBody`
        """
        self._body = body

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # Mask sensitive attributes instead of exposing their values.
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """For `print`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ExportSqlJobResultRequest):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
19788b4aba6724c127c6db8862d91a21a8586c99 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/applicationinsights/azure-mgmt-applicationinsights/azure/mgmt/applicationinsights/v2021_08_01/models/__init__.py | e592b28040ea7a510acc836e19f07934db25ff4d | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 2,077 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._models_py3 import ManagedServiceIdentity
from ._models_py3 import Resource
from ._models_py3 import SystemData
from ._models_py3 import TrackedResource
from ._models_py3 import UserAssignedIdentity
from ._models_py3 import Workbook
from ._models_py3 import WorkbookError
from ._models_py3 import WorkbookErrorDefinition
from ._models_py3 import WorkbookInnerErrorTrace
from ._models_py3 import WorkbookResource
from ._models_py3 import WorkbookResourceIdentity
from ._models_py3 import WorkbookUpdateParameters
from ._models_py3 import WorkbooksListResult
from ._application_insights_management_client_enums import CategoryType
from ._application_insights_management_client_enums import CreatedByType
from ._application_insights_management_client_enums import Kind
from ._application_insights_management_client_enums import ManagedServiceIdentityType
from ._application_insights_management_client_enums import SharedTypeKind
from ._patch import __all__ as _patch_all
from ._patch import * # pylint: disable=unused-wildcard-import
from ._patch import patch_sdk as _patch_sdk
__all__ = [
"ManagedServiceIdentity",
"Resource",
"SystemData",
"TrackedResource",
"UserAssignedIdentity",
"Workbook",
"WorkbookError",
"WorkbookErrorDefinition",
"WorkbookInnerErrorTrace",
"WorkbookResource",
"WorkbookResourceIdentity",
"WorkbookUpdateParameters",
"WorkbooksListResult",
"CategoryType",
"CreatedByType",
"Kind",
"ManagedServiceIdentityType",
"SharedTypeKind",
]
__all__.extend([p for p in _patch_all if p not in __all__])
_patch_sdk()
| [
"noreply@github.com"
] | Azure.noreply@github.com |
96855b7975e0f1672164b652aaf3d0254e458dfe | 271c7959a39f3d7ff63dddf285004fd5badee4d9 | /venv/Lib/site-packages/alembic/testing/mock.py | 08a756cbc27e1fab3cda7021d4cbb7b54f3f0187 | [
"MIT"
] | permissive | natemellendorf/configpy | b6b01ea4db1f2b9109fd4ddb860e9977316ed964 | 750da5eaef33cede9f3ef532453d63e507f34a2c | refs/heads/master | 2022-12-11T05:22:54.289720 | 2019-07-22T05:26:09 | 2019-07-22T05:26:09 | 176,197,442 | 4 | 1 | MIT | 2022-12-08T02:48:51 | 2019-03-18T03:24:12 | Python | UTF-8 | Python | false | false | 791 | py | # testing/mock.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Import stub for mock library.
NOTE: copied/adapted from SQLAlchemy master for backwards compatibility;
this should be removable when Alembic targets SQLAlchemy 1.0.0
"""
from __future__ import absolute_import
from ..util.compat import py3k
if py3k:
from unittest.mock import MagicMock, Mock, call, patch, ANY
else:
try:
from mock import MagicMock, Mock, call, patch, ANY # noqa
except ImportError:
raise ImportError(
"SQLAlchemy's test suite requires the "
"'mock' library as of 0.8.2.")
| [
"nate.mellendorf@gmail.com"
] | nate.mellendorf@gmail.com |
ea7556a795f4b376364386c416a52e7d6026666c | ace30d0a4b1452171123c46eb0f917e106a70225 | /filesystems/vnx_rootfs_lxc_ubuntu64-16.04-v025-openstack-compute/rootfs/usr/lib/python2.7/dist-packages/nova/conf/notifications.py | 0fbb4593cda237954c01736736ed53ac75382305 | [
"Python-2.0"
] | permissive | juancarlosdiaztorres/Ansible-OpenStack | e98aa8c1c59b0c0040c05df292964520dd796f71 | c01951b33e278de9e769c2d0609c0be61d2cb26b | refs/heads/master | 2022-11-21T18:08:21.948330 | 2018-10-15T11:39:20 | 2018-10-15T11:39:20 | 152,568,204 | 0 | 3 | null | 2022-11-19T17:38:49 | 2018-10-11T09:45:48 | Python | UTF-8 | Python | false | false | 3,947 | py | # Copyright (c) 2016 Intel, Inc.
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
notifications_group = cfg.OptGroup(
name='notifications',
title='Notifications options',
help="""
Most of the actions in Nova which manipulate the system state generate
notifications which are posted to the messaging component (e.g. RabbitMQ) and
can be consumed by any service outside the Openstack. More technical details
at http://docs.openstack.org/developer/nova/notifications.html
""")
ALL_OPTS = [
cfg.StrOpt(
'notify_on_state_change',
choices=(None, 'vm_state', 'vm_and_task_state'),
deprecated_group='DEFAULT',
help="""
If set, send compute.instance.update notifications on
instance state changes.
Please refer to
https://docs.openstack.org/nova/latest/reference/notifications.html for
additional information on notifications.
Possible values:
* None - no notifications
* "vm_state" - notifications are sent with VM state transition information in
the ``old_state`` and ``state`` fields. The ``old_task_state`` and
``new_task_state`` fields will be set to the current task_state of the
instance.
* "vm_and_task_state" - notifications are sent with VM and task state
transition information.
"""),
cfg.BoolOpt(
'notify_on_api_faults',
default=False,
deprecated_group='DEFAULT',
deprecated_name='notify_api_faults',
help="""
If enabled, send api.fault notifications on caught exceptions in the
API service.
"""),
cfg.StrOpt(
'default_level',
default='INFO',
choices=('DEBUG', 'INFO', 'WARN', 'ERROR', 'CRITICAL'),
deprecated_group='DEFAULT',
deprecated_name='default_notification_level',
help="Default notification level for outgoing notifications."),
cfg.StrOpt(
'default_publisher_id',
default='$my_ip',
deprecated_group='DEFAULT',
help="""
Default publisher_id for outgoing notifications. If you consider routing
notifications using different publisher, change this value accordingly.
Possible values:
* Defaults to the IPv4 address of this host, but it can be any valid
oslo.messaging publisher_id
Related options:
* my_ip - IP address of this host
"""),
cfg.StrOpt(
'notification_format',
choices=['unversioned', 'versioned', 'both'],
default='both',
deprecated_group='DEFAULT',
help="""
Specifies which notification format shall be used by nova.
The default value is fine for most deployments and rarely needs to be changed.
This value can be set to 'versioned' once the infrastructure moves closer to
consuming the newer format of notifications. After this occurs, this option
will be removed (possibly in the "P" release).
Possible values:
* unversioned: Only the legacy unversioned notifications are emitted.
* versioned: Only the new versioned notifications are emitted.
* both: Both the legacy unversioned and the new versioned notifications are
emitted. (Default)
The list of versioned notifications is visible in
http://docs.openstack.org/developer/nova/notifications.html
"""),
]
def register_opts(conf):
    """Register the [notifications] option group and all its options on conf."""
    conf.register_group(notifications_group)
    conf.register_opts(ALL_OPTS, group=notifications_group)
def list_opts():
    """Return the mapping of the notifications option group to its options."""
    return {notifications_group: ALL_OPTS}
| [
"jcdiaztorres96@gmail.com"
] | jcdiaztorres96@gmail.com |
a446f60b5e3cc2db0a5399fdccafb587ed11a532 | c8c855a6ebb3b3101e5c3a80b94514c36b103495 | /semana_2/serie_tv.py | b9a2da939fc0ae024cc6661b2e2a9305923e2f70 | [] | no_license | K-A-R-L-A-Robles/poo-1719110219 | 835965c0e3100c9d6770678eb67920945942fa80 | 7d1fc57cd4157e5b52a153210311821d8290144d | refs/heads/master | 2022-11-03T04:54:42.675869 | 2020-06-15T03:46:42 | 2020-06-15T03:46:42 | 265,970,361 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,053 | py | class serie_tv:
"Atríbuto"
genero= "drama"
actores= "30"
horario= "7-8pm"
canal= "308"
difinicion = "hd"
edad= "mayores_13"
capitulos = "10"
idioma= "inglés-español"
duracion= "1hora"
trama="tristeza"
"Métodos"
def entretener(self):
print("entretener")
def emociones(self):
print("emociones")
def aprendizaje(self):
print("aprendizaje")
def dinero(self):
print("dinero")
def audencia(self):
print("audencia")
def _init_(self):
print("atributos serie_tv")
print("genero="+str(self.genero))
print("actores"+str(self.actores))
print("horario="+str (self.horario))
print("canal="+str(self.canal))
print("definicion"+str(self.difinicion))
print("edad="+str(self.edad))
print("capítulos="+str(self.capitulos))
print("idioma="+str(self.idioma))
print("duracion="+str(self.duracion))
print("trama="+str(self.trama))
objeto = serie_tv()
objeto.entretener()
objeto.emociones()
objeto.aprendizaje()
objeto.dinero()
objeto.audencia()
objeto._init_() | [
"replituser@example.com"
] | replituser@example.com |
fa88201c4c9b2211e6fc5b0e29819c8d8cb30a1e | 4ff94bdde94640d65c5a429be78cff5c8169689f | /spacescoops/compile.py | 371b1473878434d170213dfa66c1a19ebe30e789 | [] | no_license | unawe/spacescoop | db91058af55fcc51bb6535e89bb3b5f29fb75493 | 35caab11c556c124d04ea8fcb3ad012af7e5e39f | refs/heads/main | 2021-07-24T18:39:09.931385 | 2021-06-14T16:09:38 | 2021-06-14T16:09:38 | 315,893,040 | 2 | 1 | null | 2021-06-14T16:01:23 | 2020-11-25T09:40:44 | JavaScript | UTF-8 | Python | false | false | 717 | py | import os
from django.conf import settings
from django_ext.compiler import PdfCompiler
from .models import Article
OUT_PATH = os.path.join(settings.MEDIA_ROOT, 'articles', 'download')
OUT_URL = os.path.join(settings.MEDIA_URL, 'articles', 'download')
PRINT_PREVIEW_URLPATH = 'scoops:print-preview'
def pdf_filename(obj):
    """Download filename for an article PDF:
    spacescoop-<code><language_code>-<slug>.pdf
    """
    return 'spacescoop-{}{}-{}.pdf'.format(obj.code, obj.language_code, obj.slug)
compiler = PdfCompiler(Article, OUT_PATH, OUT_URL, pdf_filename, PRINT_PREVIEW_URLPATH)
def make_pdf(code, lang, site_url=None):
    """Generate the article PDF; site_url falls back to settings.SITE_URL."""
    # `or` reproduces the original "if not site_url" falsy check exactly.
    compiler.make_pdf(code, lang, site_url or settings.SITE_URL)
def get_pdf(code, lang):
    """Return the PDF download for the given article code and language."""
    # 'scoops' -- presumably the url namespace used by PdfCompiler; confirm
    # against PdfCompiler.get_pdf's signature.
    return compiler.get_pdf(code, lang, 'scoops')
| [
"edward@gomez.me.uk"
] | edward@gomez.me.uk |
a29a1f36d0199651c392bdc7122776de7e49a978 | d1f1f05e4713c4011634f0e4247798f31d58c3a6 | /scanning/base_raster_slow_scan.py | 3fc9545e482a9605f3f36e04c9fb64000623039b | [
"BSD-3-Clause"
] | permissive | patrickurban/ScopeFoundry | 8463695277bfb9c0b267b3b6f02fe5a1d20b4fd9 | 4c5a9430fad0b63d39014bed81a6ffc0af07c4df | refs/heads/master | 2021-01-01T16:48:24.432671 | 2017-06-08T23:26:26 | 2017-06-08T23:26:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,841 | py | from .base_raster_scan import BaseRaster2DScan
from ScopeFoundry import h5_io
import numpy as np
import time
import os
class BaseRaster2DSlowScan(BaseRaster2DScan):
    """Base class for slow (software-timed) 2D raster scans.

    run() walks the precomputed scan positions pixel by pixel, moving the
    stage, timestamping every pixel, and delegating the actual acquisition to
    collect_pixel(). Subclasses override the pre_scan_setup / collect_pixel /
    post_scan_cleanup hooks (the defaults only print a notice).
    """
    name = "base_raster_2Dslowscan"
    def run(self):
        """Execute the raster scan loop, optionally saving results to HDF5."""
        S = self.settings
        #Hardware
        # self.apd_counter_hc = self.app.hardware_components['apd_counter']
        # self.apd_count_rate = self.apd_counter_hc.apd_count_rate
        # self.stage = self.app.hardware_components['dummy_xy_stage']
        # Data File
        # H5
        # Compute data arrays
        # Scan geometry (h_array, v_array, scan_*_positions, scan_shape, ...)
        # is prepared by the parent class.
        self.compute_scan_arrays()
        self.initial_scan_setup_plotting = True
        self.display_image_map = np.zeros(self.scan_shape, dtype=float)
        # One full frame per loop iteration; repeats while 'continuous_scan'
        # is enabled and the measurement has not been interrupted.
        while not self.interrupt_measurement_called:
            try:
                # h5 data file setup
                self.t0 = time.time()
                if self.settings['save_h5']:
                    self.h5_file = h5_io.h5_base_file(self.app, measurement=self)
                    self.h5_file.attrs['time_id'] = self.t0
                    H = self.h5_meas_group = h5_io.h5_create_measurement_group(self, self.h5_file)
                    #create h5 data arrays
                    H['h_array'] = self.h_array
                    H['v_array'] = self.v_array
                    H['range_extent'] = self.range_extent
                    H['corners'] = self.corners
                    H['imshow_extent'] = self.imshow_extent
                    H['scan_h_positions'] = self.scan_h_positions
                    H['scan_v_positions'] = self.scan_v_positions
                    H['scan_slow_move'] = self.scan_slow_move
                    H['scan_index_array'] = self.scan_index_array
                # start scan
                self.pixel_i = 0
                self.current_scan_index = self.scan_index_array[0]
                # Per-pixel wall-clock timestamp, same shape as the scan.
                self.pixel_time = np.zeros(self.scan_shape, dtype=float)
                if self.settings['save_h5']:
                    self.pixel_time_h5 = H.create_dataset(name='pixel_time', shape=self.scan_shape, dtype=float)
                self.pre_scan_setup()
                self.move_position_start(self.scan_h_positions[0], self.scan_v_positions[0])
                for self.pixel_i in range(self.Npixels):
                    if self.interrupt_measurement_called: break
                    i = self.pixel_i
                    # kk, jj, ii index into scan_shape -- presumably
                    # (frame, v, h); confirm in BaseRaster2DScan.
                    self.current_scan_index = self.scan_index_array[i]
                    kk, jj, ii = self.current_scan_index
                    h,v = self.scan_h_positions[i], self.scan_v_positions[i]
                    # dh/dv: position deltas from the previous pixel.
                    if self.pixel_i == 0:
                        dh = 0
                        dv = 0
                    else:
                        dh = self.scan_h_positions[i] - self.scan_h_positions[i-1]
                        dv = self.scan_v_positions[i] - self.scan_v_positions[i-1]
                    if self.scan_slow_move[i]:
                        self.move_position_slow(h,v, dh, dv)
                        if self.settings['save_h5']:
                            self.h5_file.flush() # flush data to file every slow move
                        #self.app.qtapp.ProcessEvents()
                        time.sleep(0.01)
                    else:
                        self.move_position_fast(h,v, dh, dv)
                    self.pos = (h,v)
                    # each pixel:
                    # acquire signal and save to data array
                    pixel_t0 = time.time()
                    self.pixel_time[kk, jj, ii] = pixel_t0
                    if self.settings['save_h5']:
                        self.pixel_time_h5[kk, jj, ii] = pixel_t0
                    self.collect_pixel(self.pixel_i, kk, jj, ii)
                    S['progress'] = 100.0*self.pixel_i / (self.Npixels)
            finally:
                # Always run the subclass cleanup and close the h5 file,
                # even when interrupted or on error.
                self.post_scan_cleanup()
                if hasattr(self, 'h5_file'):
                    print('h5_file', self.h5_file)
                    try:
                        self.h5_file.close()
                    except ValueError as err:
                        self.log.warning('failed to close h5_file: {}'.format(err))
            if not self.settings['continuous_scan']:
                break
        print(self.name, 'done')
    def move_position_start(self, h,v):
        """Move the stage to the scan's starting position (h, v)."""
        self.stage.settings.x_position.update_value(h)
        self.stage.settings.y_position.update_value(v)
    def move_position_slow(self, h,v, dh, dv):
        """Move for a slow step; dh/dv are deltas from the previous pixel."""
        self.stage.settings.x_position.update_value(h)
        self.stage.settings.y_position.update_value(v)
    def move_position_fast(self, h,v, dh, dv):
        """Move for a fast (in-line) step; default is the same as a slow move."""
        self.stage.settings.x_position.update_value(h)
        self.stage.settings.y_position.update_value(v)
        #x = self.stage.settings['x_position']
        #y = self.stage.settings['y_position']
        #x = self.stage.settings.x_position.read_from_hardware()
        #y = self.stage.settings.y_position.read_from_hardware()
        #print(x,y)
    def pre_scan_setup(self):
        """Hook called once before the pixel loop; override in subclasses."""
        print(self.name, "pre_scan_setup not implemented")
        # hardware
        # create data arrays
        # update figure
    def collect_pixel(self, pixel_num, k, j, i):
        """Hook called once per pixel to acquire data; override in subclasses."""
        # collect data
        # store in arrays
        print(self.name, "collect_pixel", pixel_num, k,j,i, "not implemented")
    def post_scan_cleanup(self):
        """Hook called after the scan (always runs, via finally); override it."""
        print(self.name, "post_scan_setup not implemented")
    def new_pt_pos(self, x,y):
        """Jump the stage to an arbitrary point (x, y) outside the scan loop."""
        self.move_position_start(x, y)
| [
"esbarnard@lbl.gov"
] | esbarnard@lbl.gov |
4666470fddd067baae63e9674af988c217531a52 | ff7d3116024c9df01b94191ddfa334e4a6782ae6 | /arbeid/wsgi.py | 7e7bcbe2174f9dbca3af07aa337ee738a4dfd3fc | [
"MIT"
] | permissive | jhnnsrs/arbeider | f5f708ee1026a9e9573a6f8a87c3b9e2fd6b5e33 | 4c5637913331c998a262ae0deca516b236845200 | refs/heads/master | 2021-05-26T10:31:16.279628 | 2020-04-08T13:40:26 | 2020-04-08T13:40:26 | 254,095,863 | 0 | 0 | MIT | 2020-04-08T13:40:28 | 2020-04-08T13:29:31 | null | UTF-8 | Python | false | false | 389 | py | """
WSGI config for arbeid project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'arbeid.settings')
application = get_wsgi_application()
| [
"jhnnsrs@gmail..com"
] | jhnnsrs@gmail..com |
cea6a9d224a588d0e6741186eb9b225d866a0cf1 | 06292f96cba132ca57777672a447cfff7c5abee6 | /week5/tut/submit/2.py | 965de3af9b37ce5f09db1489af166faccca72298 | [] | no_license | kietteik/ppl | 1746440b12affe71e67d6f958922b32b1fdaab5c | 2ee60582e81595b8d8b5d0f8212d20151cfe9264 | refs/heads/master | 2023-03-01T00:24:36.969189 | 2021-01-31T05:15:13 | 2021-01-31T05:15:13 | 305,802,556 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 315 | py | from functools import reduce
def flatten(lst):
    '''2. a -- flatten one nesting level.'''
    out = []
    for inner in lst:
        out.extend(inner)
    return out
def flatten(lst):
    '''2. b -- recursive flatten of one nesting level.'''
    return lst[0] + flatten(lst[1:]) if lst else []
def flatten(lst):
    '''2. c -- fold the sub-lists together with reduce.'''
    concat = lambda acc, inner: acc + inner
    return list(reduce(concat, lst, []))
| [
"kietteikdoi@gmail.com"
] | kietteikdoi@gmail.com |
c5e0d6a51c84cd160f45fb4691d64910fd51cf86 | 2579f37a13cfbb47944c5b81c6e83ca710b29f88 | /Server/core/Server_commication_User.py | df8157c2a18a3c9ad89863727fd336831b399315 | [] | no_license | YangQian1992/FTP | 932f32d5ed934bae295bd674757f7af23d0ad1ba | 87d3a78522e7eb8058def1d74d7f32f0f61f1b86 | refs/heads/master | 2020-03-31T16:28:35.146329 | 2018-10-10T06:53:12 | 2018-10-10T06:53:12 | 152,376,641 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,942 | py | import sys
import os
import hashlib
from Server.config.Server_config_info import server_config_info
#建立基础常量
IP_PORT = (server_config_info['SERVER_IP'],server_config_info['SERVER_PORT'])
class user_info:
    """Server-side user account store backed by a plain-text password file.

    Passwords are stored as salted MD5 hex digests (see get_pwd); each user
    also gets a personal FTP home directory under FTP_ROOT.
    """
    def __init__(self):
        # sys.path[0] is <project>/Server/core; drop the last component to get
        # the Server/ directory, then descend into the password file.
        server_path_li = sys.path[0].split(os.sep)[:-1]
        server_path_li.extend(['usr&pwd', 'username&password'])
        # Absolute path of the username/password text file.
        self.server_path = (os.sep).join(server_path_li)
        self.ftp_root = server_config_info['FTP_ROOT']  # base dir of user homes
        self.auth_key = server_config_info['AUTH_KEY']  # MD5 salt
        self.coding = server_config_info['CODING']
    def get_pwd(self, pwd):
        """Return the salted MD5 hex digest of a clear-text password."""
        md5_obj = hashlib.md5(pwd.encode(self.coding))
        md5_obj.update(self.auth_key.encode(self.coding))
        return md5_obj.hexdigest()
    def load_user_info(self):
        """Load the password file into self.user_info_dic.

        Shape: {username: {'password': md5_hex, 'times': failed_attempts,
                           'root_path': home_dir}}.
        Also creates each user's home directory if it does not exist yet.
        """
        self.user_info_dic = {}
        with open(self.server_path, encoding=self.coding, mode='r') as f:
            for info in f:
                username, password = info.split()
                root_path = '%s%s%s' % (self.ftp_root, os.sep, username)
                self.user_info_dic[username] = {'password': password,
                                                'times': 0,
                                                'root_path': root_path}
                if not os.path.exists(root_path):
                    os.mkdir(root_path)
    def login(self, usr, pwd):
        """Validate a login attempt.

        Returns [True, msg, root_path] on success, otherwise [False, msg, ...].
        Three consecutive wrong passwords lock the account.
        """
        pwd = self.get_pwd(pwd)
        if usr not in self.user_info_dic:
            return [False, '登录失败', '用户名不存在,请注册!']
        record = self.user_info_dic[usr]
        # Check the lock first so a locked account stays locked even when the
        # password is correct (previously this case fell through to a
        # misleading "user does not exist" message).
        if record['times'] >= 3:
            return [False, '登录失败,账户被锁,隔一段时间再登录吧!']
        if record['password'] == pwd:
            # Successful login resets the failed-attempt counter
            # (was a no-op "+= 0" typo before).
            record['times'] = 0
            return [True, '登录成功!', record['root_path']]
        record['times'] += 1
        return [False, '登录失败,密码错误,还剩%d次机会!' % (3 - record['times'])]
    def register(self, usr, pwd):
        """Register a new user; returns [ok, msg, (root_path on success)]."""
        if usr in self.user_info_dic:
            return [False, '你注册的用户名以存在,请更换']
        pwd = self.get_pwd(pwd)
        self.user_info_dic[usr] = {'password': pwd, 'times': 0}
        # Persist the new credentials to the password file.
        with open(self.server_path, encoding=self.coding, mode='a') as f:
            f.write('\n%s %s' % (usr, pwd))
        root_path = '%s%s%s' % (self.ftp_root, os.sep, usr)
        os.mkdir(root_path)  # create the user's personal home directory
        self.user_info_dic[usr]['root_path'] = root_path  # record the new home
        return [True, '注册成功!', root_path]
| [
"1289089651@qq.com"
] | 1289089651@qq.com |
863e8c47634a573e339090752dad971e80cb3be0 | 795b68819d51af14dfabb8dbe40c9e8153029188 | /make_spiral.py | e65ca25352a9beeefce09d476853f079e5c97963 | [] | no_license | MotazBellah/Code-Challenge | 507f1fd3d5b3265e54905979c80d609afd81c54d | c38c95239193e26c1a88f6736d2ab9ee37185964 | refs/heads/master | 2022-02-25T02:54:10.216892 | 2022-02-19T19:28:05 | 2022-02-19T19:28:05 | 193,115,018 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,032 | py | # https://www.codewars.com/kata/534e01fbbb17187c7e0000c6/train/python
# Your task, is to create a NxN spiral with a given size.
# For example, spiral with size 5 should look like this:
# 00000
# ....0
# 000.0
# 0...0
# 00000
# Return value should contain array of arrays, of 0 and 1, for example for given size 5 result should be:
# [[1,1,1,1,1],[0,0,0,0,1],[1,1,1,0,1],[1,0,0,0,1],[1,1,1,1,1]]
# Because of the edge-cases for tiny spirals, the size will be at least 5.
# General rule-of-a-thumb is, that the snake made with '1' cannot touch to itself.
def spiralize(size):
    # Build an NxN grid where 1s form a spiral "snake" on a background of 0s.
    if size <= 0:
        return []
    # Seed patterns for size % 4 == 0, 1, 2, 3 respectively. The growth loop
    # below adds 4 to the side length per pass, so size % 4 is invariant and
    # the matching seed always grows to exactly `size`.
    core = [ [[1,1,1,1], [0,0,0,1], [1,0,0,1], [1,1,1,1]], [[1]], [[1,1],[0,1]], [[1,1,1],[0,0,1],[1,1,1]] ][size%4]
    while len(core) < size:
        for x in [0,1]:
            # Wrap the current core in a full ring of x (first a 0-ring, then
            # a 1-ring), growing the grid by 2 in each dimension per wrap.
            core.insert(0, [ x for i in core[0] ] )
            core.append([ x for i in core[0] ])
            for line in core:
                line.insert(0, x)
                line.append(x)
            # Flip the first cell of the second row so the new ring connects
            # to the inner spiral instead of forming a closed square.
            core[1][0] = int(not x)
    return core
| [
"engineer.android@yahoo.com"
] | engineer.android@yahoo.com |
6036c62127661ec04d262385fa810421752a0bde | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02647/s587144244.py | 32c73c2e731aa2116ac14b30639794e570dcf3d6 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 465 | py | def action(A, N):
B = [0] * N
for i, bright in enumerate(A):
s = max(0, i - bright)
B[s] += 1
e = min(N, i + bright)
if e < N - 1:
B[e + 1] -= 1
for i in range(N - 1):
B[i + 1] += B[i]
return B
def main():
N, K = map(int, input().split())
A = list(map(int, input().split()))
for i in range(K):
A = action(A, N)
if (A[0] == N) & all(A):
break
print(*A, sep=" ")
if __name__ == '__main__':
main() | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
99fd7c389db08acda41a846ad6ffa8cc4039453c | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /D6XfxhRobdQvbKX4v_13.py | 5561fb6cf05642ceb60cb7daef49927c53d87e6a | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,088 | py | """
You are given three inputs: a string, one letter, and a second letter.
Write a function that returns `True` if every instance of the first letter
occurs **before** every instance of the second letter.
### Examples
first_before_second("a rabbit jumps joyfully", "a", "j") ➞ True
# Every instance of "a" occurs before every instance of "j".
first_before_second("knaves knew about waterfalls", "k", "w") ➞ True
first_before_second("happy birthday", "a", "y") ➞ False
# The "a" in "birthday" occurs after the "y" in "happy".
first_before_second("precarious kangaroos", "k", "a") ➞ False
### Notes
* All strings will be in lower case.
* All strings will contain the first and second letters at least **once**.
"""
def first_before_second(s, first, second):
    """Return True if every occurrence of `first` in `s` comes before every
    occurrence of `second`.

    Both letters are assumed to occur at least once in `s`; like the original
    (max()/min() of an empty index list), a missing letter raises ValueError.
    """
    # Equivalent to max(all indices of first) < min(all indices of second):
    # the LAST `first` must precede the FIRST `second`.
    return s.rindex(first) < s.index(second)
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
89a3190c1a4237cb81fa42e43e1b2a8bf7ff2925 | 27d92b640d3814fa5dc8040b79a99d077cba3aae | /cpython/Tools/scripts/combinerefs.py | e10e49ad7c72b37991ca927e805f40223ee75636 | [
"GPL-1.0-or-later",
"Python-2.0",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-python-cwi"
] | permissive | ms-iot/python | 99a0f4d3dd3926703d49b75910c78c69cdb7aed7 | a8f8fba1214289572713520f83409762a4446fea | refs/heads/develop | 2022-12-07T23:26:31.339811 | 2017-11-17T02:24:32 | 2017-11-17T02:24:32 | 31,045,533 | 73 | 39 | BSD-3-Clause | 2022-11-16T20:24:24 | 2015-02-20T01:01:09 | Python | UTF-8 | Python | false | false | 4,414 | py | #! /usr/bin/env python3
"""
combinerefs path
A helper for analyzing PYTHONDUMPREFS output.
When the PYTHONDUMPREFS envar is set in a debug build, at Python shutdown
time Py_Finalize() prints the list of all live objects twice: first it
prints the repr() of each object while the interpreter is still fully intact.
After cleaning up everything it can, it prints all remaining live objects
again, but the second time just prints their addresses, refcounts, and type
names (because the interpreter has been torn down, calling repr methods at
this point can get into infinite loops or blow up).
Save all this output into a file, then run this script passing the path to
that file. The script finds both output chunks, combines them, then prints
a line of output for each object still alive at the end:
address refcnt typename repr
address is the address of the object, in whatever format the platform C
produces for a %p format code.
refcnt is of the form
"[" ref "]"
when the object's refcount is the same in both PYTHONDUMPREFS output blocks,
or
"[" ref_before "->" ref_after "]"
if the refcount changed.
typename is object->ob_type->tp_name, extracted from the second PYTHONDUMPREFS
output block.
repr is repr(object), extracted from the first PYTHONDUMPREFS output block.
CAUTION: If object is a container type, it may not actually contain all the
objects shown in the repr: the repr was captured from the first output block,
and some of the containees may have been released since then. For example,
it's common for the line showing the dict of interned strings to display
strings that no longer exist at the end of Py_Finalize; this can be recognized
(albeit painfully) because such containees don't have a line of their own.
The objects are listed in allocation order, with most-recently allocated
printed first, and the first object allocated printed last.
Simple examples:
00857060 [14] str '__len__'
The str object '__len__' is alive at shutdown time, and both PYTHONDUMPREFS
output blocks said there were 14 references to it. This is probably due to
C modules that intern the string "__len__" and keep a reference to it in a
file static.
00857038 [46->5] tuple ()
46-5 = 41 references to the empty tuple were removed by the cleanup actions
between the times PYTHONDUMPREFS produced output.
00858028 [1025->1456] str '<dummy key>'
The string '<dummy key>', which is used in dictobject.c to overwrite a real
key that gets deleted, grew several hundred references during cleanup. It
suggests that stuff did get removed from dicts by cleanup, but that the dicts
themselves are staying alive for some reason. """
import re
import sys
# Generate lines from fileiter. If whilematch is true, continue reading
# while the regexp object pat matches line. If whilematch is false, lines
# are read so long as pat doesn't match them. In any case, the first line
# that doesn't match pat (when whilematch is true), or that does match pat
# (when whilematch is false), is lost, and fileiter will resume at the line
# following it.
def read(fileiter, pat, whilematch):
    """Yield successive lines from fileiter.

    With whilematch true, lines are yielded while pat matches them; with
    whilematch false, while pat does NOT match them. The first
    non-conforming line is consumed and dropped, so fileiter resumes at the
    line after it.
    """
    for line in fileiter:
        if bool(pat.match(line)) != whilematch:
            break
        yield line
def combine(fname):
    """Parse PYTHONDUMPREFS output in `fname` and print one merged line per
    surviving object: address, refcount (before[->after]), type name, repr.

    Uses `with open(...)` so the file is closed even if parsing raises
    (the original only closed it on the success path).
    """
    with open(fname) as f:
        fi = iter(f)
        # Skip everything up to (and including) the "Remaining objects:" header.
        for line in read(fi, re.compile(r'^Remaining objects:$'), False):
            pass
        # address [refcount] payload
        crack = re.compile(r'([a-zA-Z\d]+) \[(\d+)\] (.*)')
        addr2rc = {}
        addr2guts = {}
        before = 0
        # First block: repr() lines -- record refcount and repr per address.
        for line in read(fi, re.compile(r'^Remaining object addresses:$'), False):
            m = crack.match(line)
            if m:
                # Left-to-right tuple assignment: addr is bound first, then
                # used as the dict key for the other two targets.
                addr, addr2rc[addr], addr2guts[addr] = m.groups()
                before += 1
            else:
                print('??? skipped:', line)
        after = 0
        # Second block: address/refcount/typename lines -- merge with the first.
        for line in read(fi, crack, True):
            after += 1
            m = crack.match(line)
            assert m
            addr, rc, guts = m.groups()  # guts is type name here
            if addr not in addr2rc:
                print('??? new object created while tearing down:', line.rstrip())
                continue
            print(addr, end=' ')
            if rc == addr2rc[addr]:
                print('[%s]' % rc, end=' ')
            else:
                print('[%s->%s]' % (addr2rc[addr], rc), end=' ')
            print(guts, addr2guts[addr])
    print("%d objects before, %d after" % (before, after))
if __name__ == '__main__':
combine(sys.argv[1])
| [
"juanyaw@exchange.microsoft.com"
] | juanyaw@exchange.microsoft.com |
d521134ee74c8fcdabfbf01d11310000cd770fd8 | 5e55858ef75c62921f8be2f2f6d19ebfe98a2d88 | /5kyu/two_strings.py | 3d70a10b12c4e90f6d10caddd13f76cfbe11546d | [] | no_license | Uthaeus/codewars_python | 63abd96b66cb81f86e05b244a24c2c4de2f321e4 | 4b00c74ce0173bcf8527da7e4ef381d6802dde16 | refs/heads/master | 2021-06-01T10:34:30.688941 | 2020-11-01T19:17:32 | 2020-11-01T19:17:32 | 134,450,817 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 998 | py | # Input Strings a and b: For every character in string a swap the casing of every occurrence of the same character in string b. Then do the same casing swap with the inputs reversed. Return a single string consisting of the changed version of a followed by the changed version of b. A char of a is in b regardless if it's in upper or lower case - see the testcases too.
def arrayify(s):
    """Return the characters of s as a list."""
    return list(s)
def work_on_strings(a, b):
    """Return changed-a + changed-b, where each character's case is swapped
    once for every case-insensitive occurrence of the same character in the
    other string.

    Swapping case twice restores the original, so a character ends up swapped
    exactly when its matching character occurs an ODD number of times in the
    other string -- which lets us replace the original nested index loops and
    manual list building with one pass per string.
    """
    a_low = a.lower()
    b_low = b.lower()
    # Swap chars of a that have an odd number of matches in b...
    new_a = ''.join(c.swapcase() if b_low.count(c.lower()) % 2 else c for c in a)
    # ...and vice versa for b against a (pass 1's case flips in b never
    # affected pass 2's matching, since comparison was case-insensitive).
    new_b = ''.join(c.swapcase() if a_low.count(c.lower()) % 2 else c for c in b)
    return new_a + new_b
print(work_on_strings("abc","cde")) #, "abCCde" | [
"romanlavery@gmail.com"
] | romanlavery@gmail.com |
69bbcb51206c9a2dcbff2e6cc1eaf45346263a7a | 191a7f83d964f74a2b3c7faeb4fc47d9c63d521f | /.history/main_20210529113936.py | 21e3cf9871b43012cc06f98807e2c0817b4cbdec | [] | no_license | AndreLiu1225/Kinder-Values-Survey | 2a317feee8d5b17c27da2b2116742656e35d8ab9 | 090c27da0c822abb7dfc0ec6e13ae1b3dcb7bbf3 | refs/heads/master | 2023-05-03T00:26:00.481423 | 2021-06-04T03:24:19 | 2021-06-04T03:24:19 | 371,989,154 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,921 | py | from flask import Flask, render_template, redirect, url_for, flash, request
from flask_sqlalchemy import SQLAlchemy
from flask_wtf import FlaskForm
from wtforms import StringField, TextField, SubmitField, IntegerField, SelectField, RadioField
from wtforms.validators import DataRequired, Email, EqualTo, Length, ValidationError
import datetime
import matplotlib.pyplot as plt
app = Flask(__name__)
app.config['SECRET_KEY'] = "0c8973c8a5e001bb0c816a7b56c84f3a"
app.config['SQLALCHEMY_DATABASE_URI'] = "sqlite:///site.db"
db = SQLAlchemy(app)
class Survey(db.Model):
    """A single submitted survey response.

    NOTE(review): `age` is the primary key, so two respondents with the same
    age cannot both be stored -- a dedicated auto-increment id column would
    be safer; confirm intended behaviour before changing the schema.
    """
    age = db.Column(db.Integer, nullable=False, primary_key=True)
    email = db.Column(db.String(50), unique=True, nullable=False)
    profession = db.Column(db.String(50), nullable=False)
    # One integer score per value dimension (the form collects a 1-4 scale).
    power = db.Column(db.Integer, nullable=False)
    tradition = db.Column(db.Integer, nullable=False)
    achievement = db.Column(db.Integer, nullable=False)
    stimulation = db.Column(db.Integer, nullable=False)
    hedonism = db.Column(db.Integer, nullable=False)
    conformity = db.Column(db.Integer, nullable=False)
    security = db.Column(db.Integer, nullable=False)
    self_direction = db.Column(db.Integer, nullable=False)
    benevolence = db.Column(db.Integer, nullable=False)
    universalism = db.Column(db.Integer, nullable=False)
    date_posted = db.Column(db.DateTime, nullable=False, default=datetime.datetime.utcnow)
    def __repr__(self):
        # Bug fix: the model has no `name` column, so the previous
        # f"...'{self.name}'..." raised AttributeError; report email instead.
        return f"Survey('{self.age}', '{self.email}', '{self.date_posted}')"
class MCQ(FlaskForm):
    """Survey form: demographics plus ten value questions, each answered on a
    1-4 integer scale (4 = utmost priority, 1 = not even a thought). The
    groupings below appear to follow Schwartz's four higher-order values.

    NOTE(review): nothing enforces the 1-4 range on the integer fields; a
    NumberRange(min=1, max=4) validator would -- confirm that is desired.
    """
    email = StringField("What is your email?", validators=[DataRequired(), Email(message=('Not a valid email address')), Length(max=50)])
    age = IntegerField("Please enter your age", validators=[DataRequired()])
    profession = StringField("What is your profession?", validators=[DataRequired(), Length(max=30)])
    # Self-Enhancement
    power = IntegerField("Do you desire a higher social status and dominance over others? (4- It is my utmost priority, 3-It is important, 2-Doesn't bother me, 1-Not even a thought)", validators=[DataRequired()])
    hedonism = IntegerField("Is personal gratification the most important? (4- It is my utmost priority, 3-It is important, 2-Doesn't bother me, 1-Not even a thought)", validators=[DataRequired()])
    achievement = IntegerField("Is achievement according to social standards important? (4- It is my utmost priority, 3-It is important, 2-Doesn't bother me, 1-Not even a thought)", validators=[DataRequired()])
    # Conservation
    tradition = IntegerField("Do you care about preserving traditions? (4- It is my utmost priority, 3-It is important, 2-Doesn't bother me, 1-Not even a thought)", validators=[DataRequired()])
    conformity = IntegerField("Do you think restraint of actions against social norms is important? (4- It is my utmost priority, 3-It is important, 2-Doesn't bother me, 1-Not even a thought)", validators=[DataRequired()])
    security = IntegerField("Do you value safety, harmony and stability of society, of relationships, and of self? (4- It is my utmost priority, 3-It is important, 2-Doesn't bother me, 1-Not even a thought)", validators=[DataRequired()])
    # Openness to change
    stimulation = IntegerField("Do you prefer novel and exciting challenges in life? (4- It is my utmost priority, 3-It is important, 2-Doesn't bother me, 1-Not even a thought)", validators=[DataRequired()])
    self_direction = IntegerField("Do you think independent thought and action are important (4- It is my utmost priority, 3-It is important, 2-Doesn't bother me, 1-Not even a thought)", validators=[DataRequired()])
    # Self-transcendence
    benevolence = IntegerField("Are preserving and enhancing the welfare of your friends and family the most important? (4- It is my utmost priority, 3-It is important, 2-Doesn't bother me, 1-Not even a thought)", validators=[DataRequired()])
    universalism = IntegerField("I find it important to understand, tolerate, appreciate and protect all ethnicities and people. (4- It is my utmost priority, 3-It is important, 2-Doesn't bother me, 1-Not even a thought)", validators=[DataRequired()])
    submit = SubmitField("Submit")
@app.route('/', methods=['POST','GET'])
def values_quiz():
    """Serve the survey form; persist a Survey row on a valid submission."""
    form = MCQ()
    if form.validate_on_submit():
        # NOTE(review): the form's 'security' answer is collected but never
        # stored on the Survey row — confirm whether that is intentional.
        post = Survey(age=form.age.data, email=form.email.data, profession=form.profession.data, power=form.power.data,
                      tradition=form.tradition.data, achievement=form.achievement.data, stimulation=form.stimulation.data,
                      hedonism=form.hedonism.data, conformity=form.conformity.data, self_direction=form.self_direction.data,
                      benevolence=form.benevolence.data, universalism=form.universalism.data)
        # if Survey.is_email_in_database(form.email.data):
        #     flash(f"The user with {form.email.data} has already filled the survey", "danger")
        db.session.add(post)
        db.session.commit()
        flash(f'Survey is completed by {form.email.data}', 'success')
        return redirect(url_for('data_dashboard'))
    else:
        # NOTE(review): this branch also runs on the initial GET request, so
        # the warning is flashed before the user has submitted anything.
        flash('Ensure all questions are answered correctly', 'warning')
    return render_template('MCQ.html', form=form)
@app.route('/results', methods=['GET'])
def data_dashboard():
    """Aggregate the survey answers into four value groups and plot them.

    NOTE(review): this GET route reads ``request.form``, which is empty on a
    plain GET, so every field below comes back ``None``; the values are also
    never cast from str to int, so ``sum(values)`` raises ``TypeError``.
    The data presumably should be loaded from the ``Survey`` table — verify.
    """
    power = request.form.get('power')
    tradition = request.form.get('tradition')
    achievement = request.form.get('achievement')
    stimulation = request.form.get('stimulation')
    hedonism = request.form.get('hedonism')
    conformity = request.form.get('conformity')
    security = request.form.get('security')
    self_direction = request.form.get('self_direction')
    benevolence = request.form.get('benevolence')
    universalism = request.form.get('universalism')
    values = [power, tradition, achievement, stimulation, hedonism, conformity, security, self_direction, benevolence, universalism]
    values_labels = ['Openness to Change', 'Self-Transcendence',
                     'Conservation', 'Self-Enchancement']
    # NOTE(review): hedonism is counted in both 'openness' and
    # 'self_enhancement', so the four percentages can total more than 100.
    openness = [hedonism, stimulation, self_direction]
    self_enhancement = [hedonism, achievement, power]
    conservation = [tradition, conformity, security]
    self_trans = [universalism, benevolence]
    total_sum = sum(values)
    open_sum = round(sum(openness)/total_sum*100)
    enhance_sum = round(sum(self_enhancement)/total_sum*100)
    trans_sum = round(sum(self_trans)/total_sum*100)
    cons_sum = round(sum(conservation)/total_sum*100)
    # NOTE(review): sum_v is [open, enhance, trans, cons] but values_labels
    # orders the middle two the other way round — bars may be mislabelled.
    sum_v = [open_sum, enhance_sum, trans_sum, cons_sum]
    # initiating the range of y ticks
    ran = [20,40,60,80,100]
    plt.xticks(ran, values_labels)
    # Calling bar plot function
    plt.bar(ran, sum_v)
    plt.title('Percentage obtained on each dynamic values')
    plt.ylabel('Percentage')
    plt.xlabel('Dynamic value types')
    # NOTE(review): plt.show() returns None, so the template gets image=None.
    return render_template('data_dashboard.html', image=plt.show())
if __name__ == "__main__":
app.run(debug=True)
| [
"andreliu2004@gmail.com"
] | andreliu2004@gmail.com |
98dea60f0782c91c98ee5568018f96a7c2856a04 | 532e4cdd9c0b72c444b13ef669b788f3c629074d | /expo/scheduler/data_transfer.py | c893cfa4ba5327b4d2a2c5fedbf8f0ed2460ac61 | [] | no_license | bethlakshmi/GBE2 | 3b2b5a677637759fd22220f3272336d9dfc5750e | e8e030a12946901ccb9f56a9a3c6a022a6a8c5c8 | refs/heads/master | 2021-01-23T09:02:40.253304 | 2019-05-01T01:39:51 | 2019-05-01T01:39:51 | 17,325,575 | 7 | 3 | null | 2019-04-29T03:52:18 | 2014-03-01T22:14:43 | Python | UTF-8 | Python | false | false | 4,791 | py |
class Person(object):
    """Transfer object describing a person attached to a scheduling booking.

    May be built either from explicit keyword arguments or from a ``worker``
    resource, whose identity fields take precedence when supplied.
    """

    def __init__(self,
                 booking_id=None,
                 user=None,
                 public_id=None,
                 public_class="Performer",
                 role=None,
                 label=None,
                 worker=None):
        # A truthy worker resource overrides the explicit identity arguments.
        if worker:
            self.role = worker.role
            self.user = worker._item.as_subtype.user_object
            self.public_class = worker._item.as_subtype.__class__.__name__
            self.public_id = worker._item.pk
        else:
            self.user = user
            self.role = role
            self.public_id = public_id
            self.public_class = public_class
        self.booking_id = booking_id
        self.label = label
class Casting(object):
    """Transfer object exposing how an act resource is cast in a booking."""

    def __init__(self, booking):
        # Pull the act resource once; it carries both the role and the act.
        act_resource = booking.resource.actresource
        self.booking_id = booking.pk
        self.role = act_resource.role
        self.act = act_resource._item
class ScheduleItem(object):
    """Transfer object tying a user or group to a scheduled event and role."""

    def __init__(self, user=None, group_id=None, event=None, role=None,
                 label=None, booking_id=None):
        self.user = user
        self.group_id = group_id
        self.event = event
        self.role = role
        self.label = label
        self.booking_id = booking_id
class Answer(object):
    """A single (question, value) evaluation response pair."""

    def __init__(self, question=None, value=None):
        self.question = question
        self.value = value
class Warning(object):
    """Transfer object describing a non-fatal problem tied to a user and/or
    an occurrence.

    NOTE(review): shadows the builtin ``Warning`` exception within this
    module; renaming would change the public interface, so it is only noted.
    """

    def __init__(self,
                 code=None,
                 user=None,
                 occurrence=None,
                 details=None):
        self.code = code
        self.user = user
        self.occurrence = occurrence
        self.details = details
class Error(object):
    """Transfer object for a fatal problem: a machine code plus details."""

    def __init__(self, code=None, details=None):
        self.code = code
        self.details = details
class GeneralResponse(object):
    """Base API response carrying non-fatal warnings and fatal errors.

    Defaults are ``None`` (normalized to fresh lists) rather than ``[]``:
    a mutable default argument is created once at function definition time
    and would be shared — and mutated — across every response instance.
    """

    def __init__(self,
                 warnings=None,
                 errors=None):
        self.warnings = [] if warnings is None else warnings
        self.errors = [] if errors is None else errors


class OccurrenceResponse(GeneralResponse):
    """Response carrying a single occurrence."""

    def __init__(self,
                 occurrence=None,
                 warnings=None,
                 errors=None):
        self.occurrence = occurrence
        super(OccurrenceResponse, self).__init__(warnings, errors)


class OccurrencesResponse(GeneralResponse):
    """Response carrying a list of occurrences."""

    def __init__(self,
                 occurrences=None,
                 warnings=None,
                 errors=None):
        self.occurrences = [] if occurrences is None else occurrences
        super(OccurrencesResponse, self).__init__(warnings, errors)


class PersonResponse(GeneralResponse):
    """Response carrying the booking id of a person that was booked."""

    def __init__(self,
                 booking_id=None,
                 warnings=None,
                 errors=None):
        self.booking_id = booking_id
        super(PersonResponse, self).__init__(warnings, errors)


class PeopleResponse(GeneralResponse):
    """Response carrying a list of Person transfer objects."""

    def __init__(self,
                 people=None,
                 warnings=None,
                 errors=None):
        self.people = [] if people is None else people
        super(PeopleResponse, self).__init__(warnings, errors)


class CastingResponse(GeneralResponse):
    """Response carrying a list of Casting transfer objects."""

    def __init__(self,
                 castings=None,
                 warnings=None,
                 errors=None):
        self.castings = [] if castings is None else castings
        super(CastingResponse, self).__init__(warnings, errors)


class ScheduleResponse(GeneralResponse):
    """Response carrying a list of ScheduleItem transfer objects."""

    def __init__(self,
                 schedule_items=None,
                 warnings=None,
                 errors=None):
        self.schedule_items = [] if schedule_items is None else schedule_items
        super(ScheduleResponse, self).__init__(warnings, errors)


class RolesResponse(GeneralResponse):
    """Response carrying a list of role names."""

    def __init__(self,
                 roles=None,
                 warnings=None,
                 errors=None):
        self.roles = [] if roles is None else roles
        super(RolesResponse, self).__init__(warnings, errors)


class EvalInfoResponse(GeneralResponse):
    """Response carrying occurrences plus their evaluation questions and
    answers."""

    def __init__(self,
                 occurrences=None,
                 questions=None,
                 answers=None,
                 warnings=None,
                 errors=None):
        self.occurrences = [] if occurrences is None else occurrences
        self.questions = [] if questions is None else questions
        self.answers = [] if answers is None else answers
        super(EvalInfoResponse, self).__init__(warnings, errors)


class EvalSummaryResponse(GeneralResponse):
    """Response carrying aggregated evaluation summaries per question."""

    def __init__(self,
                 occurrences=None,
                 questions=None,
                 summaries=None,
                 count=None,
                 warnings=None,
                 errors=None):
        self.occurrences = [] if occurrences is None else occurrences
        self.questions = [] if questions is None else questions
        self.summaries = {} if summaries is None else summaries
        self.count = count
        super(EvalSummaryResponse, self).__init__(warnings, errors)
| [
"bethlakshmi@gmail.com"
] | bethlakshmi@gmail.com |
965cf3b5c4d6b07442c90621692a3c7c1c91d249 | 729243a020efed22445849c5cd95e78506f9845d | /Semana02/prog07.py | e9f57ada5268f62c217a8b7dbd7690afb0d86dd5 | [] | no_license | yuri-almeid/SEII-YuriLimaAlmeida | 6f031667943f469827bcb89db968d5b7a0188c2f | 81fbf275fcc74a99d8b3630c953aece416546416 | refs/heads/main | 2023-08-10T19:49:18.162469 | 2021-10-07T11:02:25 | 2021-10-07T11:02:25 | 347,435,913 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,005 | py |
def pass_func():
pass
def hello_func():
return 'Hello Function'
def hello_func_greeting(greeting, name='You'):
return '{}, {}'.format(greeting, name)
print(pass_func)
print(hello_func())
print(hello_func().upper())
print(hello_func_greeting('Bye'))
def student_info(*args, **kwargs):
print(args)
print(kwargs)
student_info('Math', 'Art', name='John', age=22)
courses = ['Math', 'Art']
info = {'name': 'John', 'age': 22}
student_info(courses, info)
student_info(*courses, **info)
month_days = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
def is_leap(year):
"""Return True for leap years, False for non-leap years."""
return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
def days_in_month(year, month):
"""Return number of days in that month in that year."""
if not 1 <= month <= 12:
return 'Invalid Month'
if month == 2 and is_leap(year):
return 29
return month_days[month]
print(days_in_month(2017, 2))
| [
"yurilima95@gmail.com"
] | yurilima95@gmail.com |
906f027f40c8a6910f73ee7bc7f3b9e2a224bb64 | 7bc54bae28eec4b735c05ac7bc40b1a8711bb381 | /src/tlm/training/debugging_model/run_ck_lr_opt_copied.py | f06da55e5bada644bbbda70ee07688dfe9b8d142 | [] | no_license | clover3/Chair | 755efd4abbd5f3f2fb59e9b1bc6e7bc070b8d05e | a2102ebf826a58efbc479181f1ebb5de21d1e49f | refs/heads/master | 2023-07-20T17:29:42.414170 | 2023-07-18T21:12:46 | 2023-07-18T21:12:46 | 157,024,916 | 0 | 0 | null | 2023-02-16T05:20:37 | 2018-11-10T21:55:29 | Python | UTF-8 | Python | false | false | 1,400 | py |
from my_tf import tf
from taskman_client.wrapper import report_run
from tf_util.tf_logging import tf_logging, MuteEnqueueFilter
from tlm.model.base import BertConfig
from tlm.training.debugging_model.classification_opt_copied import model_fn_classification
from tlm.training.flags_wrapper import get_input_files_from_flags, show_input_files
from tlm.training.input_fn import input_fn_builder_use_second_input
from tlm.training.train_config import TrainConfigEx
from tlm.training.train_flags import *
from trainer.tpu_estimator import run_estimator
def run_classification_w_second_input():
input_files = get_input_files_from_flags(FLAGS)
bert_config = BertConfig.from_json_file(FLAGS.bert_config_file)
train_config = TrainConfigEx.from_flags(FLAGS)
show_input_files(input_files)
model_fn = model_fn_classification(
bert_config,
train_config,
)
input_fn = input_fn_builder_use_second_input(FLAGS)
if FLAGS.do_predict:
tf_logging.addFilter(MuteEnqueueFilter())
result = run_estimator(model_fn, input_fn)
return result
@report_run
def main(_):
return run_classification_w_second_input()
if __name__ == "__main__":
flags.mark_flag_as_required("input_file")
flags.mark_flag_as_required("bert_config_file")
flags.mark_flag_as_required("output_dir")
flags.mark_flag_as_required("run_name")
tf.compat.v1.app.run()
| [
"lesterny@gmail.com"
] | lesterny@gmail.com |
c40b20a7acd6aaddf25f265954ebe8ee03ced8d1 | 396d5838873d84145e5b125a8c1bc3db2313ac8f | /tests/conftest.py | cbca1a49f99b4a6ca40353b42289414df64a8a97 | [
"MIT"
] | permissive | magnologan/sqlalchemy_aio | 96480d2be46bae804b6bdc0e59568fb240d12b0e | 915b00bd024b29afa712749695c0ad4ced0e9c37 | refs/heads/master | 2023-05-26T18:44:48.870565 | 2016-12-19T01:36:00 | 2016-12-19T01:36:00 | 77,687,272 | 0 | 0 | NOASSERTION | 2023-05-17T01:59:44 | 2016-12-30T13:17:13 | Python | UTF-8 | Python | false | false | 1,110 | py | import pytest
from sqlalchemy import Column, Integer, MetaData, Table, create_engine, event
from sqlalchemy_aio import ASYNCIO_STRATEGY
def fix_pysqlite_transactions(engine):
"""See http://docs.sqlalchemy.org/en/latest/dialects/
sqlite.html#serializable-isolation-savepoints-transactional-ddl
"""
@event.listens_for(engine, 'connect')
def connect(dbapi_connection, connection_record):
# disable pysqlite's emitting of the BEGIN statement entirely.
# also stops it from emitting COMMIT before any DDL.
dbapi_connection.isolation_level = None
@event.listens_for(engine, 'begin')
def begin(conn):
# emit our own BEGIN
conn.execute('BEGIN')
@pytest.fixture
def engine(event_loop):
engine = create_engine('sqlite://', strategy=ASYNCIO_STRATEGY,
loop=event_loop)
fix_pysqlite_transactions(engine._engine)
return engine
@pytest.fixture
def mytable():
metadata = MetaData()
mytable = Table(
'mytable', metadata,
Column('id', Integer, primary_key=True),
)
return mytable
| [
"frazer@frazermclean.co.uk"
] | frazer@frazermclean.co.uk |
cbc9e2fa617c023d59f637da868347726bef60c7 | 7ba5ec9aa9ddca3f9b3384fc4457b0a865c2a0a1 | /src/397.py | 536dc63ec74a520ba95396ed731a8cdb9ac5918e | [] | no_license | ecurtin2/Project-Euler | 71f79ee90a9abd0943421677d78a6c087419e500 | 79479da7a45b3ae67c0c7ea24da5f7d43c6f25d3 | refs/heads/master | 2021-03-19T14:52:57.045443 | 2018-04-12T22:05:37 | 2018-04-12T22:05:37 | 100,059,180 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 355 | py | """
On the parabola y = x2/k, three points A(a, a2/k), B(b, b2/k) and C(c, c2/k) are chosen.
Let F(K, X) be the number of the integer quadruplets (k, a, b, c) such that at least one angle of the triangle ABC is 45-degree, with 1 ≤ k ≤ K and -X ≤ a < b < c ≤ X.
For example, F(1, 10) = 41 and F(10, 100) = 12492.
Find F(106, 109).
""" | [
"ecurtin2@illinois.edu"
] | ecurtin2@illinois.edu |
fb1fc36e9c6ef029516926c94dcb77ea38e889f8 | d3efc82dfa61fb82e47c82d52c838b38b076084c | /StructuredFund/creationRedemption/YW_GPMM_SZXJ_068.py | b95472014ee745c0484b0c9227976c740b7b3614 | [] | no_license | nantongzyg/xtp_test | 58ce9f328f62a3ea5904e6ed907a169ef2df9258 | ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f | refs/heads/master | 2022-11-30T08:57:45.345460 | 2020-07-30T01:43:30 | 2020-07-30T01:43:30 | 280,388,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,515 | py | #!/usr/bin/python
# -*- encoding: utf-8 -*-
import sys
sys.path.append("/home/yhl2/workspace/xtp_test/xtp/api")
from xtp_test_case import *
sys.path.append("/home/yhl2/workspace/xtp_test/StructuredFund/serviceCreationRedemption")
from mainService import *
from QueryStructuredFundInfo import *
sys.path.append("/home/yhl2/workspace/xtp_test/service")
from log import *
class YW_GPMM_SZXJ_068(xtp_test_case):
# YW_GPMM_SZXJ_068
def test_YW_GPMM_SZXJ_068(self):
title='限价委托卖-部成撤单 '
#定义当前测试用例的期待值
#期望状态:初始、未成交、部成、全成、部撤已报、部撤、已报待撤、已撤、废单、撤废、内部撤单
#xtp_ID和cancel_xtpID默认为0,不需要变动
case_goal = {
'期望状态': '部撤',
'errorID': 0,
'errorMSG': '',
'是否生成报单': '是',
'xtp_ID': 0,
'cancel_xtpID': 0,
}
logger.warning(title)
# 定义委托参数信息------------------------------------------
# 参数:证券代码、市场、证券类型、证券状态、交易状态、买卖方向(B买S卖)、期望状态、Api
stkparm = QueryStructuredFundInfo('999999','2','0','2','0','S',case_goal['期望状态'],Api)
# 如果下单参数获取失败,则用例失败
if stkparm['返回结果'] is False:
rs = {
'用例测试结果':stkparm['返回结果'],
'测试错误原因':'获取下单参数失败,'+stkparm['错误原因'],
}
self.assertEqual(rs['用例测试结果'], True)
else:
wt_reqs = {
'business_type':Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_CASH'],
'market': Api.const.XTP_MARKET_TYPE['XTP_MKT_SZ_A'],
'ticker': stkparm['证券代码'],
'side': Api.const.XTP_SIDE_TYPE['XTP_SIDE_SELL'],
'price_type': Api.const.XTP_PRICE_TYPE['XTP_PRICE_LIMIT'],
'quantity': 200
}
ParmIni(Api,case_goal['期望状态'],wt_reqs['price_type'])
rs = serviceTest(Api, case_goal, wt_reqs)
logger.warning('执行结果为' + str(rs['用例测试结果']) + ',' + str(rs['用例错误源']) + ',' + str(rs['用例错误原因']))
self.assertEqual(rs['用例测试结果'], True) # 0
if __name__ == '__main__':
unittest.main()
| [
"418033945@qq.com"
] | 418033945@qq.com |
fcd715b6160b971e53992a3d28a0a9732a3fc8b2 | a893537a71aa285071a68035c968ba6f5c0ca57d | /ch08/79/79_one_layer_linear.py | 920abd33cdaa3671c3f03e7b8b86fca8e0ed3ea2 | [] | no_license | sinchir0/2020_NLP100 | 0a1810b0c299c29fa1a811f68fa87be74f9b3cf9 | 772123da5b5ac4094c26fdce2e192637dc55190a | refs/heads/main | 2023-07-18T04:03:04.123302 | 2021-09-08T22:54:44 | 2021-09-08T22:54:44 | 257,416,868 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,181 | py | # 79. 多層ニューラルネットワーク
# 問題78のコードを改変し,バイアス項の導入や多層化など,ニューラルネットワークの形状を変更しながら,高性能なカテゴリ分類器を構築せよ.
from typing import Union
import matplotlib.pyplot as plt
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, Dataset
from tqdm import tqdm
class TextDataset(Dataset):
    """Minimal map-style dataset pairing feature rows with their labels."""

    def __init__(self, X, y):
        self.X = X
        self.y = y

    def __len__(self):
        # Length follows the feature side; X and y are assumed aligned.
        return len(self.X)

    def __getitem__(self, idx):
        return (self.X[idx], self.y[idx])
class Net(nn.Module):
    """A single linear layer followed by a softmax over the class dimension.

    :param in_shape: number of input features per example.
    :param out_shape: number of output classes.
    """

    def __init__(self, in_shape: int, out_shape: int):
        super().__init__()
        # Use the constructor arguments instead of the previous hard-coded
        # 300/4, so the network adapts to whatever feature/class counts it
        # is built with (identical for the existing caller: 300 -> 4).
        self.fc = nn.Linear(in_shape, out_shape, bias=True)
        # NOTE(review): the training loop pairs this Softmax output with
        # nn.CrossEntropyLoss, which applies log-softmax itself — the logits
        # are double-normalized. Kept to preserve the original behavior.
        self.softmax = nn.Softmax(dim=1)

    def forward(self, x):
        x = self.fc(x)
        x = self.softmax(x)
        return x
def train_fn(model, loader, optimizer, loss) -> "tuple[float, float]":
    """Run one epoch of mini-batch training and return the mean losses.

    Returns ``(train_loss, valid_loss)``, each averaged over the batches.

    NOTE(review): reads the module-level globals ``valid_x``/``valid_y``
    (defined under ``__main__``) and re-evaluates the model on the full
    validation set once per batch, which is expensive — confirm intended.
    """
    train_running_loss = 0.0
    valid_running_loss = 0.0
    for dataloader_x, dataloader_y in loader:
        optimizer.zero_grad()
        dataloader_y_pred_prob = model(dataloader_x)
        # Loss on this mini-batch of dataset_x.
        dataloader_loss = loss(dataloader_y_pred_prob, dataloader_y)
        dataloader_loss.backward()
        # Accumulate batch-averaged training and validation losses.
        train_running_loss += dataloader_loss.item() / len(loader)
        valid_running_loss += loss(model(valid_x), valid_y).item() / len(loader)
        optimizer.step()
    return train_running_loss, valid_running_loss
def calc_acc(model, train_x, y_true) -> float:
    """Return the percentage of rows of ``train_x`` that ``model`` assigns
    to the label in ``y_true`` (argmax over the class dimension)."""
    # Take the highest-probability class per row as the prediction.
    _, predictions = torch.max(model(train_x), 1)
    matches = (predictions == y_true).sum().item()
    total = y_true.size(0)
    return matches / total * 100
def make_graph(value_dict: dict, value_name: str, method: str) -> None:
    """Plot the train/valid curves in ``value_dict`` and save the figure to
    ``{method}_{value_name}.png``."""
    for split in ("train", "valid"):
        plt.plot(value_dict[split], label=split)
    plt.xlabel("epoch")
    plt.ylabel(value_name)
    plt.title(f"{value_name} per epoch")
    plt.legend()
    plt.savefig(f"{method}_{value_name}.png")
    # Close the figure so the next call starts from a clean canvas.
    plt.close()
if __name__ == "__main__":
    METHOD = "one_layer_linear"
    if not torch.cuda.is_available():
        print("No cuda")
    PATH = ".."
    device = (
        torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu")
    )
    # Load the training split (feature vectors and labels from task 70).
    train_x = torch.tensor(
        np.load(f"{PATH}/70/train_vector.npy"), requires_grad=True
    ).to(device)
    train_y = torch.tensor(np.load(f"{PATH}/70/train_label.npy")).to(device)
    # Load the validation split (train_fn reads valid_x/valid_y as globals).
    valid_x = torch.tensor(
        np.load(f"{PATH}/70/valid_vector.npy"), requires_grad=True
    ).to(device)
    valid_y = torch.tensor(np.load(f"{PATH}/70/valid_label.npy")).to(device)
    # Model setup.
    model = Net(in_shape=train_x.shape[1], out_shape=4).to(device)
    # Loss and optimizer setup.
    loss = nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
    # Build the Dataset/DataLoader.
    dataset = TextDataset(train_x, train_y)
    # Parameter updates.
    BATCHSIZE = 32
    loader = DataLoader(dataset, batch_size=BATCHSIZE, shuffle=True)
    train_losses = []
    train_accs = []
    valid_losses = []
    valid_accs = []
    EPOCH = 10
    for epoch in tqdm(range(EPOCH)):
        # Train for one epoch.
        train_running_loss, valid_running_loss = train_fn(
            model, loader, optimizer, loss
        )
        # Record the training loss.
        train_losses.append(train_running_loss)
        # Compute the accuracy on the training data.
        train_acc = calc_acc(model, train_x, train_y)
        # Record the training accuracy.
        train_accs.append(train_acc)
        # Record the validation loss.
        valid_losses.append(valid_running_loss)
        # Compute the accuracy on the validation data.
        valid_acc = calc_acc(model, valid_x, valid_y)
        # Record the validation accuracy.
        valid_accs.append(valid_acc)
        # Write a checkpoint every 20 epochs (only epoch 0 with EPOCH == 10).
        if epoch % 20 == 0:
            torch.save(model.state_dict(), f"79_model_bs_epoch{epoch}.pth")
            torch.save(
                optimizer.state_dict(),
                f"79_optimizer_epoch{epoch}.pth",
            )
    # Plot the loss/accuracy curves.
    losses = {"train": train_losses, "valid": valid_losses}
    accs = {"train": train_accs, "valid": valid_accs}
    make_graph(losses, "losses", METHOD)
    make_graph(accs, "accs", METHOD)
    print(f"train_acc: {train_acc}")
    print(f"valid_acc: {valid_acc}")
    # train_acc: 76.90217391304348
    # valid_acc: 78.71064467766116
| [
"s-saito@chic.ocn.ne.jp"
] | s-saito@chic.ocn.ne.jp |
fd004e23d8775a14f91d226e6f75843c058b5f43 | 369b7f114f9bd9b45dd5fef77a070cb73abb68d1 | /handle/itl/temp/handleForWh.py | adbc5ffe74a6e805871bce67b303dfa25a1570e6 | [] | no_license | lyjloveabc/thor_handle | d790ee25317f724825c94a6b346a034ec0ae6e3d | 8b9eda97ec873f3bf1732a428898a04d6a55c0af | refs/heads/master | 2021-12-27T10:15:16.668264 | 2021-08-16T13:45:34 | 2021-08-16T13:45:34 | 84,824,162 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 318 | py | base_sql = 'INSERT INTO itl_job_title VALUES ("{id}", now(), now(), "{name}", "", "{remark}", "公司总部");'
with open('hehe.txt', 'r') as f:
index = 19
for line in f.readlines():
hehe = line[:-1].split(' ')
print(base_sql.format(id=index, name=hehe[0], remark=hehe[1]))
index += 1
| [
"546223592@qq.com"
] | 546223592@qq.com |
ae227652a174fe7076a5a971d1021bb31d494c08 | e2c84bbefe728e20042a6befdf9effd0480b6cf0 | /Text_processing/2. Character Multiplier.py | e2632389cf8db53afacd592a70c798b43c0c9eee | [] | no_license | vmakksimov/PythonFundamentals | ffe5f606f592a9f0650f45f225936f13f4659992 | 4a5b74e40cfaa19f777187404428e0dff9d66a16 | refs/heads/main | 2023-08-22T03:33:15.002041 | 2021-10-15T00:07:58 | 2021-10-15T00:07:58 | 410,258,308 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,112 | py | strings = input().split()
string1 = strings[0]
string2 = strings[1]
total_sum = 0
first_sting = []
second_digit = []
for chr in string1:
first_sting.append(ord(chr))
for chr in string2:
second_digit.append(ord(chr))
cycle_iterations = min(len(first_sting), len(second_digit))
max_cycle = max(len(first_sting), len(second_digit)) - cycle_iterations
if len(first_sting) != len(second_digit):
if len(first_sting) > len(second_digit):
for i in range(0, cycle_iterations):
product = first_sting[i] * second_digit[i]
total_sum += product
for m in range(cycle_iterations, len(first_sting)):
total_sum += first_sting[m]
elif len(first_sting) < len(second_digit):
for i in range(0, cycle_iterations):
product = first_sting[i] * second_digit[i]
total_sum += product
for m in range(cycle_iterations, len(second_digit)):
total_sum += second_digit[m]
else:
for i in range(0, cycle_iterations):
product = first_sting[i] * second_digit[i]
total_sum += product
print(total_sum)
| [
"vmakksimov@gmail.com"
] | vmakksimov@gmail.com |
911acf65537c8dd5a41f4d656e1c3818bae26905 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2307/60581/234662.py | d8aff6f4b42dcf471b1cc7a0261a810c5e9f8fef | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 602 | py | import sys
lst = []
for line in sys.stdin:
if line.strip() == '':
break
lst.append(line)
number = lst[0]
wholeCount = 0
beginNumber = 1
while wholeCount < int(number):
answer = []
numberOfList = int(lst[beginNumber])
numberList = lst[beginNumber+1].split()
for i in range(0,len(numberList)) :
if numberList.count(numberList[i]) >= numberOfList/2 :
if answer.count(numberList[i])==0 :
answer.append(numberList[i])
if len(answer) == 0:
answer.append(-1)
print(int(answer[0]))
beginNumber += 2
wholeCount += 1
| [
"1069583789@qq.com"
] | 1069583789@qq.com |
a3a2782843a392afae3025bc78ecd02479d01184 | 19d9d25bf1de4007f42abc43acaa23d66c3428ab | /apps/about/admin.py | 8eeb24983365004463d91b92632619725eadc495 | [] | no_license | od-5/enjoy-sa | aa567268941d818212cf1e0b3358df470f4c29e9 | 5e3600aaab3e4f405680f9f32bffb4be0f2bf601 | refs/heads/master | 2021-01-10T05:48:06.442696 | 2016-03-23T11:48:49 | 2016-03-23T11:48:49 | 45,059,910 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 204 | py | # coding=utf-8
from django.contrib import admin
from .models import About
__author__ = 'alexy'
class AboutAdmin(admin.ModelAdmin):
list_display = ('title',)
admin.site.register(About, AboutAdmin)
| [
"od-5@yandex.ru"
] | od-5@yandex.ru |
ba7472cea52dbd9a622962746d153fa5d21696f7 | 4723fed48970c7bcc50eaa3acbe1c66577f8f3cb | /ss/download/ss_coroutine.py | b939bdb56235bd2575c73012a01e8d8d6d4be429 | [] | no_license | myetc4/spider | c03157f2203ea3c0ae0af30dc66e810f0ca20d06 | 4ffb36bc2551c1494ebdf0357da69ebb1c6c524d | refs/heads/master | 2020-04-02T01:44:23.358947 | 2018-10-20T03:43:35 | 2018-10-20T03:43:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 544 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/10/18 22:07
# @Author : SmallStrong
# @Des :
# @File : ss_coroutine.py
# @Software: PyCharm
import sys
import os
# 被逼无奈
sys.path.append(os.getcwd().replace('/ss', ''))
from ss.spider_core import go
from ss.common.func import exe_time
from gevent import monkey, pool
import ss.config
monkey.patch_all()
@exe_time
def main():
    """Keep spawning crawler coroutines (``go``) onto a bounded gevent pool
    until the shared ``ss.config.FLAG`` is cleared elsewhere."""
    p = pool.Pool(ss.config.COROUTINE_LIMIT_NUM)
    while ss.config.FLAG:
        p.spawn(go)


if __name__ == '__main__':
    main()
| [
"393019766@qq.com"
] | 393019766@qq.com |
4374be84930a4f11eb60ad2e0cdd4aaf8ed777ac | bbf744bfbfd9a935bd98c7cf54152a5d41194161 | /chapter_05/e5-7_favorite_fruit.py | 784ea00cac661066d715db00f0f223d403b8582c | [] | no_license | terranigmark/python-crash-course-projects | 65a7863be2d26fe8b91ac452b12203386eb0259a | 79ed9ed8e6a1bf015990a9556689379274231d13 | refs/heads/master | 2022-12-05T21:59:00.352140 | 2020-08-21T04:59:50 | 2020-08-21T04:59:50 | 266,263,493 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 326 | py |
def main():
    """Ask the user five times whether a fruit is on the favorites list,
    echoing a confirmation for each match."""
    favorite_fruits = ['melon', 'watermelon', 'apple', 'pineapple', 'banana', 'cranberry']
    for _ in range(5):
        # input() already returns str, so the previous str(...) wrapper
        # was redundant; the loop counter was also unused.
        fruit = input("Pick a fruit to know if it's on my list: ")
        if fruit in favorite_fruits:
            print(f"You really like {fruit}")


if __name__ == "__main__":
    main()
"linnk99@gmail.com"
] | linnk99@gmail.com |
ae46917b4e5e1ee29516b367b756951f1ce8df78 | f60b964dc39ba54bb84f1c4949be3b91a92b8346 | /issue_order/tasks.py | c521a2ede5ab11c2f598a398a3bee9e017fc01e6 | [
"Apache-2.0"
] | permissive | jiejiang/courier | 4b0b4fc56c5510228ffcc4de51b074c7aff9502f | 6fdeaf041c77dba0f97e206adb7b0cded9674d3d | refs/heads/master | 2022-11-30T14:24:53.950502 | 2019-12-06T16:42:00 | 2019-12-06T16:42:00 | 195,387,643 | 0 | 0 | Apache-2.0 | 2022-11-22T01:22:33 | 2019-07-05T10:08:19 | Python | UTF-8 | Python | false | false | 2,650 | py | # *- coding: utf-8 -*
from __future__ import absolute_import
from django.utils.translation import ugettext as _
import datetime, sys
from celery import shared_task
from django.db import transaction
from mezzanine.conf import settings
from .models import CourierBatch, Profile
from .courier_systems import query_courier_batch
@shared_task
def sample_task():
    # Debug task: dump the configured courier systems and every batch still
    # in the initial (waiting) state. Python 2 print statements.
    print settings.COURIER_SYSTEMS
    print CourierBatch.objects.filter(state=CourierBatch.STATUS[0][0]).all()
@shared_task
def sync_waiting_courier_batches():
    """Poll the remote courier system for every batch still in the initial
    (waiting) state and mirror the remote status onto the local row.

    Failed batches refund any reserved credit to the owning user inside an
    atomic transaction. Python 2 module (``print >>`` / ``raise E, msg``).
    """
    for courier_batch in CourierBatch.objects.filter(state=CourierBatch.STATUS[0][0]):
        try:
            if not courier_batch.system in settings.COURIER_SYSTEMS:
                raise Exception, "System not configured: %s" % courier_batch.system
            system_config = settings.COURIER_SYSTEMS[courier_batch.system]
            if not 'url_base' in system_config or not 'user_name' in system_config or not 'password' in system_config:
                raise Exception, "Invalid system_config: %s" % str(system_config)
            batch_obj = query_courier_batch(system_config['url_base'], system_config['user_name'],
                                            system_config['password'], courier_batch.uuid)
            # Map the remote status onto the local state/status fields.
            if batch_obj['status'] == "Waiting":
                courier_batch.status = _(u"等待中")
            elif batch_obj['status'] == "Processing":
                courier_batch.status = _(u"处理中")
            elif batch_obj['status'] == "Completed":
                courier_batch.state, courier_batch.status = CourierBatch.STATUS[1]
            elif batch_obj['status'] == "Failed":
                courier_batch.state, courier_batch.status = CourierBatch.STATUS[2]
                # Refund the reserved credit to the user atomically, using a
                # row lock so concurrent refunds cannot race.
                if courier_batch.credit is not None:
                    with transaction.atomic():
                        profile = Profile.objects.select_for_update().get(user=courier_batch.user)
                        profile.credit += courier_batch.credit
                        profile.save()
                        courier_batch.credit = 0
            elif batch_obj['status'] == "Deleted":
                courier_batch.state, courier_batch.status = CourierBatch.STATUS[3]
            else:
                raise Exception, "Batch obj status invalid: %s" % str(batch_obj)
            courier_batch.percentage = batch_obj['percentage']
            courier_batch.message = batch_obj['message']
            courier_batch.save()
        except Exception, inst:
            # Best effort: log the traceback and continue with the next batch.
            import traceback
            traceback.print_exc(sys.stderr)
            print >> sys.stderr, "Failed to sync batch: %s" % courier_batch.uuid
"mail.jie.jiang@gmail.com"
] | mail.jie.jiang@gmail.com |
f75e0e32e45291a28fc904599882abe66c9d2750 | e8ae11e5017507da59e2e92d423b6a1994490de4 | /env/lib/python2.7/site-packages/azure/mgmt/compute/models/virtual_machine_scale_set_ip_configuration.py | 191b9e2ebb24995c979775a4515a352dcd782cd6 | [] | no_license | teopeurt/ansible-ubuntu-server | 613d00cea28bc6531acf4a39aeeb9cd0baa2a391 | b5b6127d2ee9723c5088443efe2ffb8ae30cfea7 | refs/heads/master | 2021-06-28T12:49:50.935753 | 2017-07-31T17:34:33 | 2017-07-31T17:34:33 | 98,912,808 | 0 | 1 | null | 2020-07-24T00:05:31 | 2017-07-31T17:32:56 | Makefile | UTF-8 | Python | false | false | 3,399 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft and contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource import SubResource
class VirtualMachineScaleSetIPConfiguration(SubResource):
    """
    Describes a virtual machine scale set network profile's IP configuration.

    :param id: Resource Id
    :type id: str
    :param name: Gets or sets the IP configuration name.
    :type name: str
    :param subnet: Gets or sets the subnet.
    :type subnet: :class:`ApiEntityReference
     <azure.mgmt.compute.models.ApiEntityReference>`
    :param application_gateway_backend_address_pools: Gets or sets the
     application gateway backend address pools.
    :type application_gateway_backend_address_pools: list of
     :class:`SubResource <azure.mgmt.compute.models.SubResource>`
    :param load_balancer_backend_address_pools: Gets or sets the load
     balancer backend address pools.
    :type load_balancer_backend_address_pools: list of :class:`SubResource
     <azure.mgmt.compute.models.SubResource>`
    :param load_balancer_inbound_nat_pools: Gets or sets the load balancer
     inbound nat pools.
    :type load_balancer_inbound_nat_pools: list of :class:`SubResource
     <azure.mgmt.compute.models.SubResource>`
    """

    # Serialization metadata: constructor arguments that must be supplied.
    _validation = {
        'name': {'required': True},
        'subnet': {'required': True},
    }

    # Serialization metadata: attribute -> {'key': wire path, 'type': kind}.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'subnet': {'key': 'properties.subnet', 'type': 'ApiEntityReference'},
        'application_gateway_backend_address_pools': {'key': 'properties.applicationGatewayBackendAddressPools', 'type': '[SubResource]'},
        'load_balancer_backend_address_pools': {'key': 'properties.loadBalancerBackendAddressPools', 'type': '[SubResource]'},
        'load_balancer_inbound_nat_pools': {'key': 'properties.loadBalancerInboundNatPools', 'type': '[SubResource]'},
    }

    def __init__(self, name, subnet, id=None, application_gateway_backend_address_pools=None, load_balancer_backend_address_pools=None, load_balancer_inbound_nat_pools=None):
        super(VirtualMachineScaleSetIPConfiguration, self).__init__(id=id)
        self.name = name
        self.subnet = subnet
        self.application_gateway_backend_address_pools = application_gateway_backend_address_pools
        self.load_balancer_backend_address_pools = load_balancer_backend_address_pools
        self.load_balancer_inbound_nat_pools = load_balancer_inbound_nat_pools
| [
"me@teopeurt.com"
] | me@teopeurt.com |
a1de48a0569407301769af6688ace732cfce8112 | f9248ec00b661ee4790a780b7adaec79c0d68ec8 | /sumDigit.py | ed3440baa63aed50363a7e23b2a7dcc418c05215 | [] | no_license | michaelzh17/6001_Python | 0ec463f02840bf3162cd3247d76494d1592e82e3 | 53833604db4d769f71e63044813e3500f3e0fb6f | refs/heads/master | 2021-01-11T22:31:17.832117 | 2018-12-21T10:34:18 | 2018-12-21T10:34:18 | 78,979,863 | 0 | 0 | null | 2017-04-06T02:54:57 | 2017-01-15T00:07:38 | Python | UTF-8 | Python | false | false | 366 | py | #!/usr/bin/env python3
def sumDigits(s):
    """Return the sum of the decimal digits appearing in string ``s``.

    Non-digit characters are ignored; e.g. sumDigits('a2b3c') == 5.
    """
    total = 0
    for ch in s:
        # isdecimal() accepts exactly the single characters int() can parse.
        if ch.isdecimal():
            total += int(ch)
    return total
s = 'x5a9n2'
a = sumDigits(s)
print(a)
| [
"macalzhang@gmail.com"
] | macalzhang@gmail.com |
be37b4349655bde35cdd96eb797c2a72b4d8bc78 | 704976ea552111c6a5af9cd7cb62b9d9abaf3996 | /pypy/module/test_lib_pypy/ctypes_tests/test_cfuncs.py | 5d3d816ee52701354d65e960dff269715097ef56 | [
"BSD-3-Clause"
] | permissive | mesalock-linux/mesapy | 4f02c5819ce7f2f6e249d34840f1aa097577645d | ed546d59a21b36feb93e2309d5c6b75aa0ad95c9 | refs/heads/mesapy2.7 | 2023-08-16T21:33:02.239581 | 2019-08-13T10:29:43 | 2019-08-13T18:06:45 | 136,080,721 | 396 | 33 | NOASSERTION | 2020-04-01T03:05:18 | 2018-06-04T20:45:17 | Python | UTF-8 | Python | false | false | 6,882 | py | # A lot of failures in these tests on Mac OS X.
# Byte order related?
from ctypes import *
import py
from .support import BaseCTypesTestChecker
def setup_module(mod):
    """py.test hook: locate the compiled _ctypes_test library before tests run.

    Stores the shared-object path as a module global and opens it as the
    class-level CDLL used by TestCFunctions.
    """
    import conftest
    mod._ctypes_test = str(conftest.sofile)
    # this means you cannot run tests directly without invoking this
    mod.TestCFunctions._dll = CDLL(_ctypes_test)
class TestCFunctions(BaseCTypesTestChecker):
    """Round-trips C scalar types through the tf_* functions exported by the
    compiled _ctypes_test library.

    Judging by the asserts, each tf_<type> helper returns its argument divided
    by 3 and records the raw argument in the last_tf_arg_s / last_tf_arg_u C
    globals, which S() and U() read back to check the value survived the FFI
    boundary unmodified. The tf_b<type> variants take an extra leading
    byte/char argument.
    """

    def S(self):
        """Signed value of the last argument seen by the C side."""
        return c_longlong.in_dll(self._dll, "last_tf_arg_s").value

    def U(self):
        """Unsigned value of the last argument seen by the C side."""
        return c_ulonglong.in_dll(self._dll, "last_tf_arg_u").value

    def test_byte(self):
        self._dll.tf_b.restype = c_byte
        self._dll.tf_b.argtypes = (c_byte,)
        assert self._dll.tf_b(-126) == -42
        assert self.S() == -126

    def test_byte_plus(self):
        self._dll.tf_bb.restype = c_byte
        self._dll.tf_bb.argtypes = (c_byte, c_byte)
        assert self._dll.tf_bb(0, -126) == -42
        assert self.S() == -126

    def test_ubyte(self):
        self._dll.tf_B.restype = c_ubyte
        self._dll.tf_B.argtypes = (c_ubyte,)
        assert self._dll.tf_B(255) == 85
        assert self.U() == 255

    def test_ubyte_plus(self):
        self._dll.tf_bB.restype = c_ubyte
        self._dll.tf_bB.argtypes = (c_byte, c_ubyte)
        assert self._dll.tf_bB(0, 255) == 85
        assert self.U() == 255

    def test_short(self):
        self._dll.tf_h.restype = c_short
        self._dll.tf_h.argtypes = (c_short,)
        assert self._dll.tf_h(-32766) == -10922
        assert self.S() == -32766

    def test_short_plus(self):
        self._dll.tf_bh.restype = c_short
        self._dll.tf_bh.argtypes = (c_byte, c_short)
        assert self._dll.tf_bh(0, -32766) == -10922
        assert self.S() == -32766

    def test_ushort(self):
        self._dll.tf_H.restype = c_ushort
        self._dll.tf_H.argtypes = (c_ushort,)
        assert self._dll.tf_H(65535) == 21845
        assert self.U() == 65535

    def test_ushort_plus(self):
        self._dll.tf_bH.restype = c_ushort
        self._dll.tf_bH.argtypes = (c_byte, c_ushort)
        assert self._dll.tf_bH(0, 65535) == 21845
        assert self.U() == 65535

    def test_int(self):
        self._dll.tf_i.restype = c_int
        self._dll.tf_i.argtypes = (c_int,)
        assert self._dll.tf_i(-2147483646) == -715827882
        assert self.S() == -2147483646

    def test_int_plus(self):
        self._dll.tf_bi.restype = c_int
        self._dll.tf_bi.argtypes = (c_byte, c_int)
        assert self._dll.tf_bi(0, -2147483646) == -715827882
        assert self.S() == -2147483646

    def test_uint(self):
        self._dll.tf_I.restype = c_uint
        self._dll.tf_I.argtypes = (c_uint,)
        assert self._dll.tf_I(4294967295) == 1431655765
        assert self.U() == 4294967295

    def test_uint_plus(self):
        self._dll.tf_bI.restype = c_uint
        self._dll.tf_bI.argtypes = (c_byte, c_uint)
        assert self._dll.tf_bI(0, 4294967295) == 1431655765
        assert self.U() == 4294967295

    def test_long(self):
        self._dll.tf_l.restype = c_long
        self._dll.tf_l.argtypes = (c_long,)
        assert self._dll.tf_l(-2147483646) == -715827882
        assert self.S() == -2147483646

    def test_long_plus(self):
        self._dll.tf_bl.restype = c_long
        self._dll.tf_bl.argtypes = (c_byte, c_long)
        assert self._dll.tf_bl(0, -2147483646) == -715827882
        assert self.S() == -2147483646

    def test_ulong(self):
        self._dll.tf_L.restype = c_ulong
        self._dll.tf_L.argtypes = (c_ulong,)
        assert self._dll.tf_L(4294967295) == 1431655765
        assert self.U() == 4294967295

    def test_ulong_plus(self):
        # Note: the leading argument here is a c_char (' '), not a c_byte.
        self._dll.tf_bL.restype = c_ulong
        self._dll.tf_bL.argtypes = (c_char, c_ulong)
        assert self._dll.tf_bL(' ', 4294967295) == 1431655765
        assert self.U() == 4294967295

    def test_longlong(self):
        self._dll.tf_q.restype = c_longlong
        self._dll.tf_q.argtypes = (c_longlong, )
        assert self._dll.tf_q(-9223372036854775806) == -3074457345618258602
        assert self.S() == -9223372036854775806

    def test_longlong_plus(self):
        self._dll.tf_bq.restype = c_longlong
        self._dll.tf_bq.argtypes = (c_byte, c_longlong)
        assert self._dll.tf_bq(0, -9223372036854775806) == -3074457345618258602
        assert self.S() == -9223372036854775806

    def test_ulonglong(self):
        self._dll.tf_Q.restype = c_ulonglong
        self._dll.tf_Q.argtypes = (c_ulonglong, )
        assert self._dll.tf_Q(18446744073709551615) == 6148914691236517205
        assert self.U() == 18446744073709551615

    def test_ulonglong_plus(self):
        self._dll.tf_bQ.restype = c_ulonglong
        self._dll.tf_bQ.argtypes = (c_byte, c_ulonglong)
        assert self._dll.tf_bQ(0, 18446744073709551615) == 6148914691236517205
        assert self.U() == 18446744073709551615

    def test_float(self):
        self._dll.tf_f.restype = c_float
        self._dll.tf_f.argtypes = (c_float,)
        assert self._dll.tf_f(-42.) == -14.
        assert self.S() == -42

    def test_float_plus(self):
        self._dll.tf_bf.restype = c_float
        self._dll.tf_bf.argtypes = (c_byte, c_float)
        assert self._dll.tf_bf(0, -42.) == -14.
        assert self.S() == -42

    def test_double(self):
        self._dll.tf_d.restype = c_double
        self._dll.tf_d.argtypes = (c_double,)
        assert self._dll.tf_d(42.) == 14.
        assert self.S() == 42

    def test_double_plus(self):
        self._dll.tf_bd.restype = c_double
        self._dll.tf_bd.argtypes = (c_byte, c_double)
        assert self._dll.tf_bd(0, 42.) == 14.
        assert self.S() == 42

    def test_callwithresult(self):
        # restype may be a Python callable: it post-processes the C return value.
        def process_result(result):
            return result * 2
        self._dll.tf_i.restype = process_result
        self._dll.tf_i.argtypes = (c_int,)
        assert self._dll.tf_i(42) == 28
        assert self.S() == 42
        assert self._dll.tf_i(-42) == -28
        assert self.S() == -42

    def test_void(self):
        # restype=None maps a void C function to a Python None result.
        self._dll.tv_i.restype = None
        self._dll.tv_i.argtypes = (c_int,)
        assert self._dll.tv_i(42) == None
        assert self.S() == 42
        assert self._dll.tv_i(-42) == None
        assert self.S() == -42
# The following repeats the above tests with stdcall functions (where
# they are available)
try:
    WinDLL  # only defined on Windows; elsewhere the NameError skips these tests
except NameError:
    pass
else:
    class stdcall_dll(WinDLL):
        """WinDLL that resolves attribute lookups to the 's_'-prefixed stdcall
        exports of the test library, caching each resolved function."""
        def __getattr__(self, name):
            if name[:2] == '__' and name[-2:] == '__':
                raise AttributeError(name)
            func = self._FuncPtr(("s_" + name, self))
            setattr(self, name, func)
            return func

    class TestStdcallCFunctions(TestCFunctions):
        """Re-runs the TestCFunctions suite against the stdcall entry points."""
        def setup_class(cls):
            TestCFunctions.setup_class.im_func(cls)
            cls._dll = stdcall_dll(_ctypes_test)
| [
"mssun@mesalock-linux.org"
] | mssun@mesalock-linux.org |
d3f4dc5383a82d4848dd5b8c53976b56998b176c | cbed9822648c05601fb84803c4cbfb63b3f9f6f5 | /supervised_learning/0x06-keras/5-main.py | e105ddb4ab40795846f405606c414ac52ea1abb9 | [] | no_license | thomasmontoya123/holbertonschool-machine_learning | 793ba1f732ccaf2e08b832f3c57e10d12eabe808 | 8f0f2ce67339e574b21d7dfbba7f88545adac807 | refs/heads/master | 2022-12-26T09:55:07.302952 | 2020-10-04T23:38:23 | 2020-10-04T23:38:23 | 279,631,927 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,087 | py | #!/usr/bin/env python3
import numpy as np
import tensorflow as tf
build_model = __import__('1-input').build_model
optimize_model = __import__('2-optimize').optimize_model
one_hot = __import__('3-one_hot').one_hot
train_model = __import__('5-train').train_model
if __name__ == '__main__':
    # Load the dataset split and flatten each image into a feature vector
    # (784 = 28x28, per the build_model input size below).
    datasets = np.load('supervised_learning/data/MNIST.npz')
    X_train = datasets['X_train']
    X_train = X_train.reshape(X_train.shape[0], -1)
    Y_train = datasets['Y_train']
    Y_train_oh = one_hot(Y_train)
    X_valid = datasets['X_valid']
    X_valid = X_valid.reshape(X_valid.shape[0], -1)
    Y_valid = datasets['Y_valid']
    Y_valid_oh = one_hot(Y_valid)
    # Fix both RNG seeds for reproducible runs.
    np.random.seed(0)
    tf.set_random_seed(0)
    # presumably L2 weight (lambtha) and dropout keep probability -- confirm
    # against build_model's signature.
    lambtha = 0.0001
    keep_prob = 0.95
    network = build_model(784, [256, 256, 10], ['relu', 'relu', 'softmax'], lambtha, keep_prob)
    # Optimizer hyperparameters (alpha/beta1/beta2, Adam-style -- confirm in
    # optimize_model).
    alpha = 0.001
    beta1 = 0.9
    beta2 = 0.999
    optimize_model(network, alpha, beta1, beta2)
    batch_size = 64
    epochs = 5
train_model(network, X_train, Y_train_oh, batch_size, epochs, validation_data=(X_valid, Y_valid_oh)) | [
"tomasmontoya123@gmail.com"
] | tomasmontoya123@gmail.com |
0ecc04c7a3bebfd4f58ae6f8a34d8d47d464687f | 05e590af914370c3fe02526794ce9b41be893d2c | /day03/BMI多筆計算.py | c2dc0a100071d9bad093d8913fc909a910f75bed | [] | no_license | vincenttuan/yzu_python_20210414 | bf0f1d9f8549086008fe15701204dfc3a9ebf85a | b464c4691ce12e9076c8c2ab74158aeb4edc5bc7 | refs/heads/master | 2023-06-09T19:30:24.930453 | 2021-06-30T13:31:29 | 2021-06-30T13:31:29 | 362,433,389 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 425 | py | import math
# 有三組資料 170, 50 ; 180, 70; 160, 60
def printBMI(h, w):
    """Compute BMI from height h (cm) and weight w (kg) and print one report line.

    Categories: bmi > 23 -> 過重, 18 < bmi <= 23 -> 正常, otherwise 過輕.
    """
    bmi = w / math.pow(h / 100, 2)
    if bmi > 23:
        category = "過重"
    elif bmi > 18:
        category = "正常"
    else:
        category = "過輕"
    print("h= %.1f w=%.1f bmi=%.2f result=%s" % (h, w, bmi, category))
printBMI(170, 50)
printBMI(180, 70)
printBMI(160, 60) | [
"vincentjava@yahoo.com.tw"
] | vincentjava@yahoo.com.tw |
b453077a97c2032779a9f82f95ec352f4048fa05 | 065e4cdb3b79c3697f323cbc3d29a79ca696b47f | /src/stomp/exc.py | d2e6aa7b1c92d9d1e93fd4c0dc3bd58ed9ee7f1b | [] | no_license | sousouindustries/python-stomp | 59aaa47884013ebdc3bfb6c7f4756ef3ee03547e | b2de7aa2f1658eaa49bffd977bd1c9630ef58f0c | refs/heads/master | 2021-01-10T03:35:04.103347 | 2016-01-08T16:35:10 | 2016-01-08T16:35:10 | 44,618,975 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 308 | py |
class FatalException(Exception):
    """Base class for unrecoverable STOMP protocol errors."""
    pass


class InvalidCommandType(FatalException):
    """Raised for a frame whose command is not a known STOMP command."""
    pass


class MalformedFrame(FatalException):
    """Raised when a frame cannot be parsed."""
    pass


class StompException(FatalException):
    """Fatal error carried by a server frame."""
    @classmethod
    def fromframe(cls, frame):
        # Alternate constructor: wrap the offending frame in the exception.
        return cls(frame)


class FrameNotConfirmed(Exception):
    """Raised when a frame requiring acknowledgement was not confirmed."""
    pass
| [
"cochiseruhulessin@gmail.com"
] | cochiseruhulessin@gmail.com |
236b411cb4481023898e177f51e21412dc57c3ff | 48a7b266737b62da330170ca4fe4ac4bf1d8b663 | /molsysmt/form/openmm_Topology/to_openmm_System.py | ec0c582b7538b5b681e118cc0f7c1a1197793d15 | [
"MIT"
] | permissive | uibcdf/MolSysMT | ddab5a89b8ec2377f383884c5169d147cab01322 | c3d713ba63db24eb8a2426115cf8d9cb3665d225 | refs/heads/main | 2023-08-08T15:04:16.217967 | 2023-08-04T05:49:56 | 2023-08-04T05:49:56 | 137,937,243 | 15 | 3 | MIT | 2023-06-04T20:27:06 | 2018-06-19T19:38:44 | Python | UTF-8 | Python | false | false | 1,223 | py | from molsysmt._private.digestion import digest
@digest(form='openmm.Topology')
def to_openmm_System(item, atom_indices='all', forcefield=None, water_model=None, implicit_solvent=None,
                     non_bonded_method=None, constraints=None, switch_distance=None,
                     dispersion_correction=None, ewald_error_tolerance=None):
    """Build an OpenMM System from an OpenMM Topology.

    :param item: the openmm.app.Topology to build the System from
    :param atom_indices: accepted for interface compatibility (currently unused)
    :param forcefield: force field specification, translated for OpenMM by
        forcefield_to_engine()
    :param water_model: optional water model forwarded to forcefield_to_engine()
    :param implicit_solvent: optional implicit solvent forwarded to
        forcefield_to_engine()
    :param non_bonded_method: 'no cutoff' maps to app.NoCutoff; other values
        are passed through to createSystem()
    :param constraints: 'hbonds' maps to app.HBonds; other values are passed
        through to createSystem()
    :param switch_distance: accepted for interface compatibility (currently unused)
    :param dispersion_correction: if truthy, enable the long-range dispersion
        correction on the NonbondedForce
    :param ewald_error_tolerance: if truthy, set the Ewald error tolerance on
        the NonbondedForce
    :return: the constructed openmm.System
    """
    from openmm import app
    from molsysmt.molecular_mechanics import forcefield_to_engine

    forcefield = forcefield_to_engine(forcefield,
                                      water_model=water_model, implicit_solvent=implicit_solvent,
                                      engine='OpenMM')
    forcefield = app.ForceField(*forcefield)

    # Translate string options into OpenMM constants.
    if non_bonded_method == 'no cutoff':
        non_bonded_method = app.NoCutoff
    if constraints == 'hbonds':
        # Bug fix: this line previously assigned to a misspelled name
        # ('contraints'), and createSystem() below hard-coded
        # constraints=app.HBonds, silently ignoring the caller's argument.
        constraints = app.HBonds

    system = forcefield.createSystem(item, nonbondedMethod=non_bonded_method, constraints=constraints)

    if dispersion_correction or ewald_error_tolerance:
        forces = {ii.__class__.__name__: ii for ii in system.getForces()}
        if dispersion_correction:
            forces['NonbondedForce'].setUseDispersionCorrection(True)
        if ewald_error_tolerance:
            forces['NonbondedForce'].setEwaldErrorTolerance(ewald_error_tolerance)

    return system
| [
"prada.gracia@gmail.com"
] | prada.gracia@gmail.com |
6a23f800b0642149d86c1b273e549390add95953 | 09fd456a6552f42c124c148978289fae1af2d5c3 | /Greedy/1046.py | 38cf41d0ce78d7ec9ca4fb81ad2f34904cf89bd5 | [] | no_license | hoang-ng/LeetCode | 60b4e68cbcf54cbe763d1f98a70f52e628ab32fb | 5407c6d858bfa43325363503c31134e560522be3 | refs/heads/master | 2021-04-10T11:34:35.310374 | 2020-07-28T10:22:05 | 2020-07-28T10:22:05 | 248,932,393 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,380 | py | # 1046. Last Stone Weight
# We have a collection of stones, each stone has a positive integer weight.
# Each turn, we choose the two heaviest stones and smash them together. Suppose the stones have weights x and y with x <= y. The result of this smash is:
# If x == y, both stones are totally destroyed;
# If x != y, the stone of weight x is totally destroyed, and the stone of weight y has new weight y-x.
# At the end, there is at most 1 stone left. Return the weight of this stone (or 0 if there are no stones left.)
# Example 1:
# Input: [2,7,4,1,8,1]
# Output: 1
# Explanation:
# We combine 7 and 8 to get 1 so the array converts to [2,4,1,1,1] then,
# we combine 2 and 4 to get 2 so the array converts to [2,1,1,1] then,
# we combine 2 and 1 to get 1 so the array converts to [1,1,1] then,
# we combine 1 and 1 to get 0 so the array converts to [1] then that's the value of last stone.
# Note:
# 1 <= stones.length <= 30
# 1 <= stones[i] <= 1000
import heapq
class Solution(object):
    def lastStoneWeight(self, stones):
        """Repeatedly smash the two heaviest stones; return the last weight (or 0).

        Uses a min-heap of negated weights so the two largest stones are the
        two smallest heap entries.
        """
        heap = [-weight for weight in stones]
        heapq.heapify(heap)
        while len(heap) > 1:
            heaviest = heapq.heappop(heap)
            second = heapq.heappop(heap)
            if heaviest != second:
                # Equal stones annihilate; otherwise the difference survives.
                heapq.heappush(heap, heaviest - second)
        return -heap[0] if heap else 0
| [
"hoang2109@gmail.com"
] | hoang2109@gmail.com |
a54e35e05166aadbfab0c2e00094afb31d34ea9e | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/38/usersdata/74/15433/submittedfiles/decimal2bin.py | 2762553aa515ae3fd3d920e2479efa3f6bf9e8d9 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 271 | py | # -*- coding: utf-8 -*-
from __future__ import division
n = input('Digite o numero binario: ')  # Python 2 input(): evaluates the typed digits to a number
k = n  # unused copy of the original input
cont = 0
d = 0
# First pass: repeatedly divide by 10 (true division, per the __future__
# import) to count digit positions; cont ends as the index of the most
# significant digit.
while n>1:
    n = n/10
    cont = cont+1
# Second pass: peel digits back off one at a time via float multiply/floor and
# accumulate digit * 2**position, i.e. interpret the typed digits as binary.
# NOTE(review): despite the filename (decimal2bin) this converts binary ->
# decimal, and for many inputs the first loop drives n below 1 so this loop
# never runs; the float arithmetic also loses precision for long inputs --
# verify the algorithm before reuse.
while n>=1:
    n = n*10
    j = n//1
    d = d+j*2**cont
    cont = cont-1
    n = n-j
print('%d'% d)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
f630dbcad916b681dfd873c5ff8309f1c6505c2d | 82ed0cacf82da9a89fb65d9ddda6e603070aa639 | /reaper/__init__.py | 379855b02338d5c05153e42e2ad57c947ba033cc | [
"MIT"
] | permissive | pombredanne/reaper | 243dd08e8f570f9493c007face1cf2d4a7413f27 | 52f64d3e7d9a658df54475973796b48267f5c8a1 | refs/heads/master | 2021-01-24T03:36:30.781446 | 2015-11-12T03:48:46 | 2015-11-12T03:48:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,629 | py | """
Reaper
Deprecation warnings that turns automatically to Exception once your package version is bumped.
"""
__version__ = '0.0.1'
import warnings
import semver
import traitlets
class DeprecationException(DeprecationWarning):
    """Raised once a deprecated API's removal version spec is satisfied."""
class DeprecationReaper:
    """
    Decorator for a function to be deprecated and remove.

    The function will trigger a DeprecationWarning when called while the `versionspec` is not satisfied,
    then raise once the version spec is satisfied.

    Deprecation warning Example:

        In [1]: from reaper import deprecate

        In [2]: @deprecate("IPython",">=5.0.0")
           ...: def function(value):
           ...:     return value
           ...:

        In [3]: function(1)
        DeprecationWarning: Support of `function` will end with IPython>=5.0.0
        Out[3]: 1

    Deprecation Error Example:

        In [4]: import IPython

        In [5]: IPython.__version__='5.0.0'

        In [6]: @deprecate("IPython",">=5.0.0")
           ...: def function(value):
           ...:     return value
           ...:
        ---------------------------------------------------------------------------
        DeprecationWarning                        Traceback (most recent call last)
        <ipython-input-6-52c92c195b7c> in <module>()
        ----> 1 @deprecate("IPython",">=5.0.0")
              2 def function(value):
              3     return value
              4

        DeprecationWarning: `function` is not supported on IPython>=5.0.0
    """

    def __init__(self, package, versionspec):
        # if something deprecated '>=4.1.0' we want it to raise during the 4.1.0-dev, and 4.1.0-rc,
        # not just when we release 4.1.0, so remove any extra-tags.
        versionspec = versionspec.split('-')[0]
        # Read the target package's __version__ via traitlets' import helper.
        current_version = traitlets.import_item(package+'.__version__')
        # True once the installed version satisfies the removal spec.
        self.match = semver.match(current_version, versionspec)
        self.package = package
        self.spec = versionspec

    def __call__(self, wrapped):
        data = {
            'name':wrapped.__qualname__,
            'p':self.package,
            's':self.spec,
        }
        if self.match:
            # Removal version reached: fail loudly at decoration time.
            raise DeprecationException("`{name}` is not supported on {p}{s}".format(**data))
        else:
            # Not yet removed: warn on every call, then delegate.
            def _wrap(*args, **kwargs):
                warnings.warn("Support of `{name}` will end with {p}{s}".format(**data), DeprecationWarning, stacklevel=2)
                return wrapped(*args, **kwargs)
            return _wrap
deprecate = DeprecationReaper  # public decorator alias: @deprecate(package, versionspec)
| [
"bussonniermatthias@gmail.com"
] | bussonniermatthias@gmail.com |
dde1a97b3865fb7a6766da26d1bc744f1ce5fca6 | 500047f47a6b372fa7ff1e96b11315ee26acf5ef | /Chapter-07/text_ctrl.py | bbf85947f6e09f04be5a8a4f152c8b65751618ee | [] | no_license | ra2003/Tkinter-In-Action | 9f3a80bb2cab8dccf78621915f234f80cf79c58d | 2a35ae029c2cfabb53adee8dae5fd0a7c6db817f | refs/heads/master | 2022-03-02T16:25:26.146299 | 2019-10-07T06:36:41 | 2019-10-07T06:36:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 916 | py | #!/usr/bin/python3
import tkinter as tk
class TextFrame(tk.Frame):
    """Frame demonstrating a plain Entry widget and a masked password Entry."""

    def __init__(self, parent):
        super().__init__()
        parent.title('Text Entry Example')
        parent.geometry("300x100")
        panel = tk.Frame()
        # Bug fix: the labels were previously assigned from .grid(), which
        # always returns None -- the misleading dead bindings are removed.
        tk.Label(panel, text="Basic Control:").grid(row=0, column=0, sticky=tk.W)
        basicText = tk.Entry(panel, bg="white")
        basicText.grid(row=0, column=1, sticky=tk.W + tk.E)
        basicText.insert(0, "I've entered some text!")
        tk.Label(panel, text="Password:").grid(row=1, column=0, sticky=tk.W)
        # show="*" masks every typed character.
        pwdText = tk.Entry(panel, bg="white", show="*")
        pwdText.grid(row=1, column=1, sticky=tk.W + tk.E)
        pwdText.insert(0, "password")
        panel.pack(fill=tk.BOTH, expand=1)
def main():
    """Create the Tk root window, attach the TextFrame demo and run the event loop."""
    app = tk.Tk()
    TextFrame(app)
    app.mainloop()


if __name__ == '__main__':
    main()
| [
"noreply@github.com"
] | ra2003.noreply@github.com |
21e477ad9b77ff873b0306759123da5fac5fd96d | 3a6d382503e11753dd81b291145847a2eabb8ec6 | /experimental/dsmith/lab/autotest/opencl.py | 7607641290ba1f03b1ffb994ec921ded7b2218a1 | [] | no_license | QuXing9/phd | 7e6f107c20e0b3b1de2b25eb99e0b640a4a0bfcf | 58ba53b6d78515ed555e40527f6923e28941cc19 | refs/heads/master | 2022-02-27T03:29:05.126378 | 2019-10-22T02:46:57 | 2019-10-22T02:46:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,609 | py | #!/usr/bin/env python3
import json
import logging
import sys
from pathlib import Path
from typing import List
import autotest
import cldrive
from dsmith.opencl import clsmith
from labm8 import crypto
class OpenCLTestcase(object):
    """A single OpenCL test case backed by a kernel source file on disk."""

    def __init__(self, path: Path):
        self.path = path

    @property
    def src(self):
        """Read and return the kernel source text from disk."""
        with open(self.path) as source_file:
            return source_file.read()

    def __repr__(self):
        # Represent the testcase by its kernel source.
        return self.src
class CLSmithGenerator(autotest.Generator):
    """Generates OpenCL test programs by invoking the CLSmith binary."""

    def __init__(self, exec: Path):
        self.exec = exec
        exec_checksum = crypto.sha1_file(self.exec)
        logging.debug(f"CLSmith binary '{self.exec}' {exec_checksum}")

    def _clsmith(self, path: Path, *flags, attempt_num=1) -> Path:
        """ Generate a program using CLSmith, retrying until one succeeds.

        :param path: output path for the generated kernel
        :param flags: extra CLSmith command line flags
        :param attempt_num: current retry count; gives up after 1000 attempts
        :raises autotest.GeneratorError: if CLSmith keeps failing
        """
        if attempt_num >= 1000:
            raise autotest.GeneratorError(
                f"failed to generate a program using CLSmith after {attempt_num} attempts")
        args = ['-o', path, *flags]
        logging.debug(" ".join([self.exec] + args))
        _, returncode, stdout, stderr = clsmith.clsmith(
            *args, exec_path=self.exec)
        # A non-zero returncode of clsmith implies that no program was
        # generated. Try again.
        if returncode:
            logging.debug(f"CLSmith call failed with returncode {returncode}:")
            logging.debug(stdout)
            # Bug fix: the retry previously re-used the mutated flag list
            # (duplicating '-o <path>' on every attempt) and discarded the
            # recursive call's result.
            return self._clsmith(path, *flags, attempt_num=attempt_num + 1)
        return path

    def next_batch(self, batch_size: int) -> List[OpenCLTestcase]:
        """Generate batch_size fresh testcases, one kernel file each."""
        outbox = []
        for i in range(batch_size):
            generated_kernel = self._clsmith(f"clsmith-{i}.cl")
            outbox.append(OpenCLTestcase(generated_kernel))
        return outbox
class DeviceUnderTest(object):
    """An OpenCL platform/device pair on which generated kernels are executed."""

    def __init__(self, platform: str, device: str, flags: List[str]):
        self.device = device
        self.platform = platform
        self.flags = flags  # extra command line flags for cl_launcher
        self.env = cldrive.make_env(self.platform, self.device)
        self.ids = self.env.ids()  # numeric ids passed to cl_launcher

    def run(self, testcase: autotest.testcase_t) -> autotest.output_t:
        """Execute the testcase's kernel with cl_launcher and dump the result.

        NOTE(review): despite the annotated return type this currently only
        prints (truncated) results and returns None -- confirm whether a real
        output_t should be returned to the autotest pipeline.
        """
        runtime, returncode, stdout, stderr = clsmith.cl_launcher(
            testcase.path, *self.ids, *self.flags)
        print(runtime)
        print(returncode)
        print(stdout[:200])
        print(stderr[:200])
class StaticAnalyzer(object):
    """Stub: pre-flight (static) validity check for generated testcases.

    NOTE(review): unimplemented -- is_valid() returns None (falsy), and main()
    constructs these with **kwargs that __init__ does not accept; confirm the
    intended interface.
    """
    def __init__(self):
        pass

    def is_valid(self, testcase: autotest.testcase_t) -> bool:
        pass


class DynamicAnalyzer(object):
    """Stub: post-flight validity check over the outputs collected from all DUTs."""
    def __init__(self):
        pass

    def is_valid(self, testcase: autotest.testcase_t,
                 duts: List[autotest.DeviceUnderTest],
                 outputs: List[autotest.output_t]) -> bool:
        pass


class Reducer(object):
    """Stub: reduces a failing testcase to a minimal reproducer on one DUT."""
    def __init__(self):
        pass

    def reduce(self, testcase: autotest.testcase_t,
               dut: autotest.DeviceUnderTest) -> autotest.output_t:
        pass
def main(args):
    """Entry point: run the autotest pipeline from a JSON configuration.

    :param args: [config_path, num_batches] -- path to a JSON config file and
        the number of generation/test batches to run.
    """
    assert len(args) == 2
    logging.basicConfig(format='%(asctime)s [%(levelname)s] %(message)s',
                        level=logging.DEBUG)
    with open(args[0]) as infile:
        json_config = json.loads(infile.read())
    logging.debug(f"parsed config file '{args[0]}'")
    num_batches = int(args[1])
    # Assemble the pipeline components from the config sections.
    generator = CLSmithGenerator(clsmith.exec_path)
    preflight_checks = [
        StaticAnalyzer(**x) for x in json_config["preflight_checks"]]
    duts = [DeviceUnderTest(**x) for x in json_config["duts"]]
    comparator = autotest.Comparator(**json_config["comparator"])
    postflight_checks = [
        DynamicAnalyzer(**x) for x in json_config["postflight_checks"]]
    reducer = Reducer(**json_config["reducer"])
    autotest.autotest(num_batches, generator, preflight_checks, duts,
                      comparator, postflight_checks, reducer)


if __name__ == "__main__":
    main(sys.argv[1:])
| [
"chrisc.101@gmail.com"
] | chrisc.101@gmail.com |
e82d51daf8991cd5a31f5ce012cfc827ab74503f | a829617f9ad158df80a569dd02a99c53639fa2c6 | /test/hep/cut2.py | 889f316c9b7b192c2057c2917c3f6a0e06e5ca90 | [] | no_license | alexhsamuel/pyhep | 6db5edd03522553c54c8745a0e7fe98d96d2b7ae | c685756e9065a230e2e84c311a1c89239c5d94de | refs/heads/master | 2021-01-10T14:24:08.648081 | 2015-10-22T13:18:50 | 2015-10-22T13:18:50 | 44,745,881 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,409 | py | #-----------------------------------------------------------------------
# imports
#-----------------------------------------------------------------------
import cPickle
import hep.cuts
from hep.draw import Line
import hep.hist
from hep.hist import ezplot
from numarray import array, Float32
from random import normalvariate, random
import os
#-----------------------------------------------------------------------
# test
#-----------------------------------------------------------------------
# Generate (or reload from cache) toy signal/background samples: four
# Gaussian-distributed variables per event (Python 2 / numarray-era code).
if not os.path.isfile("cut2.pickle"):
    sig_values = array(shape=(1000, 4), type=Float32)
    for i in range(sig_values.shape[0]):
        sig_values[i, 0] = normalvariate(0, 1)
        sig_values[i, 1] = normalvariate(0, 1)
        sig_values[i, 2] = normalvariate(0, 1)
        sig_values[i, 3] = normalvariate(0, 1)
    bkg_values = array(shape=(5000, 4), type=Float32)
    for i in range(bkg_values.shape[0]):
        bkg_values[i, 0] = normalvariate( 0, 2)
        bkg_values[i, 1] = normalvariate( 1, 1)
        bkg_values[i, 2] = normalvariate(-1, 1)
        bkg_values[i, 3] = normalvariate(-1, 1)
    cPickle.dump((sig_values, bkg_values), file("cut2.pickle", "w"), 1)
else:
    sig_values, bkg_values = cPickle.load(file("cut2.pickle"))

# Seed cuts: (variable index, cut sense, random starting threshold).
cuts = [
    (0, "<", random()),
    (0, ">", random()),
    (1, "<", random()),
    (2, ">", random()),
    (3, ">", random()),
]
# Optimize the cut thresholds iteratively for the S^2/(S+B) figure of merit.
fom_fn = hep.cuts.s_squared_over_s_plus_b
cuts, fom = hep.cuts.iterativeOptimize(sig_values, bkg_values, cuts, fom_fn)
fom_curves = hep.cuts.makeFOMCurves(sig_values, bkg_values, cuts, fom_fn)

gallery = ezplot.Gallery(3 * (1, ), border=0.03)
print "optimal cuts:"
for (var_index, cut_sense, cut_value), fom_curve in zip(cuts, fom_curves):
    print " variable #%d %s %f" % (var_index, cut_sense, cut_value)
    # Histogram the cut variable for signal and background, overlay the FoM
    # curve, and mark the optimized cut position with a vertical line.
    sig_hist = hep.hist.Histogram1D(120, (-5.0, 5.0), name="signal")
    map(sig_hist.accumulate, sig_values[:, var_index])
    bkg_hist = hep.hist.Histogram1D(120, (-5.0, 5.0), name="background")
    map(bkg_hist.accumulate, bkg_values[:, var_index])
    fom_curve.name = "cut FoM after other cuts"
    plot = ezplot.curves1D(sig_hist, bkg_hist, fom_curve)
    range = hep.hist.function.getRange(fom_curve, sig_hist.axis.range)  # shadows builtin 'range'
    plot.annotations.append(Line(
        ((cut_value, 0), (cut_value, range[1]))))
    gallery << plot
print "figure of merit =", fom
gallery.toPSFile("cut2.ps")
| [
"alex@alexsamuel.net"
] | alex@alexsamuel.net |
34ca8c73e475540d5a16ca14550bb83107603dd5 | 7eb67443c603719458f67f7ea369b55c6854bccb | /assignment7/mapper.py | 1d63425582c2c6ca752f28a5d700538581efaf32 | [
"MIT"
] | permissive | IITDU-BSSE06/ads-demystifying-the-logs-Arafat123-iit | bf85971cb35543734118d744a419baf8b722f886 | 2c7b9132eab2161162fc7ac0e9761990ffbaea8f | refs/heads/master | 2021-08-07T12:51:11.703193 | 2017-11-08T07:04:34 | 2017-11-08T07:04:34 | 109,237,783 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | py | #!/usr/bin/python
import urlparse
import sys
# Hadoop-streaming mapper: emit field 5 as both key and value for every
# well-formed record on stdin.
for line in sys.stdin:
    fields = line.strip().split(" ")
    # Only records with exactly 10 space-separated fields are emitted.
    if len(fields) == 10:
        field_five = fields[5]
        # path = urlparse.urlparse(fields[6]).path
        print("{0}\t{1}".format(field_five, field_five))
| [
"noreply@github.com"
] | IITDU-BSSE06.noreply@github.com |
887284504418d7fb5752e642ca499b54970ebb22 | 82352d4737a87f62328aa061853d544820f417f7 | /utils/sample_statistics.py | 4605586f1da692068bb8f9b2fa3487316847c40c | [] | no_license | zuiyueyin/python-learning-notes | 054162c25db881f0f2133088099b5ca217143952 | f45f0879cc70eb59de67a270a6ec8dbb2cf8e742 | refs/heads/master | 2023-05-29T21:50:04.702395 | 2020-06-09T04:00:19 | 2020-06-09T04:00:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,434 | py | # -*-coding: utf-8 -*-
"""
@Project: python-learning-notes
@File : sample_statistics.py
@Author : panjq
@E-mail : pan_jinquan@163.com
@Date : 2019-07-13 15:09:33
"""
from utils import file_processing, plot_utils
import numpy as np
import pandas as pd
from modules.pandas_json import pandas_tools
def count_data_info(data_list, _print=True, plot=True, title="data count info", line_names="data"):
    '''
    Count how many times each distinct value occurs in data_list.

    :param data_list: sequence of hashable values
    :param _print: kept for interface compatibility (unused, as in the original)
    :param plot: when True, draw a bar chart of the counts
    :param title: title used for the plot
    :param line_names: kept for interface compatibility (unused)
    :return: (count_list, data_set) -- the occurrence counts and the sorted
        distinct values, index-aligned
    '''
    labels = sorted(set(data_list))
    counts = [data_list.count(label) for label in labels]
    print("mean count :{}/{}={}".format(len(data_list), len(labels), len(data_list) / len(labels)))
    if plot:
        plot_utils.plot_bar(x_data=labels, y_data=counts, title=title, xlabel="ID", ylabel="COUNT")
    return counts, labels
def count_data_dict(data_list):
    '''
    Count how many times each distinct value occurs in data_list.

    Bug fix: count_dict was initialised as a list, so the item assignment
    below raised TypeError on any non-empty input; it must be a dict.

    :param data_list: sequence of hashable values
    :return: dict mapping each distinct value to its occurrence count
    '''
    data_set = sorted(set(data_list))
    count_dict = {}
    for s in data_set:
        count_dict[s] = data_list.count(s)
    return count_dict
def count_data_info_pd(data_list, _print=True, plot=True, title="data count info", line_names="data"):
    '''
    Count occurrences of each distinct value in data_list using pandas.

    :param data_list: sequence of hashable values
    :param _print: when True, print the value-counts Series
    :param plot: when True, draw a bar chart of the counts
    :param title: title used for the plot
    :param line_names: kept for interface compatibility (unused)
    :return: pandas Series mapping each distinct value to its count, in order
        of first appearance (sort=False)
    '''
    # Modernized: the module-level pd.value_counts() is deprecated (removed in
    # newer pandas); build a Series explicitly instead.
    p = pd.Series(data_list).value_counts(sort=False)
    if _print:
        print(p)
    data_set = list(p.index)
    count_list = list(p.values)
    print("mean count :{}/{}={}".format(len(data_list), len(data_set), len(data_list) / len(data_set)))
    if plot:
        # Plot against positional indices rather than the (possibly
        # non-numeric) labels, as in the original.
        data_range = list(range(0, len(data_set)))
        plot_utils.plot_bar(x_data=data_range, y_data=count_list, title=title, xlabel="ID", ylabel="COUNT")
    return p
if __name__ == "__main__":
    # Example: label face images by parent-directory name, encode the labels
    # to integer ids, and report the per-class counts.
    # (Alternative datasets kept below for reference.)
    # image_dir = "/media/dm/dm2/project/dataset/face_recognition/NVR/facebank/NVR_3_20190605_1005_VAL"
    # dataset="/media/dm/dm2/project/dataset/face_recognition/CASIA-FaceV5/"
    # image_dir = dataset+"CASIA-Faces"
    # dataset="/media/dm/dm2/project/dataset/face_recognition/celebs_add_movies/"
    # image_dir = dataset+"Asian_Faces"
    image_dir = '/media/dm/dm1/project/dataset/face_recognition/X2T/X2T_Face233/val'
    # image_dir = '/media/dm/dm1/project/dataset/face_recognition/NVR/face/NVR1/trainval'
    image_list, label_list = file_processing.get_files_labels(image_dir)
    name_table = list(set(label_list))
    label_list = file_processing.encode_label(name_list=label_list, name_table=name_table)
    label_list = [int(l) for l in label_list]
    label_list.sort()
    # count = Counter(label_list)
    # count = label_list.count()
    # print(count)
    pd_data = count_data_info_pd(label_list)
    filename = "my_test2.csv"
    # NOTE(review): this rebinding of 'pd' shadows the pandas module alias
    # imported at the top of the file for the rest of this script.
    pd = pandas_tools.construct_pd(index=None, columns_name=["A"], content=pd_data, filename=filename)
    print(pd)
| [
"panjinquan@dm-ai.cn"
] | panjinquan@dm-ai.cn |
feaa2e232a12eb04ca4a8f06a89234c1a1fb0dbc | e34ba843cf682892462aec8b477d4a708968286d | /dlgo/reinforce/experience_test.py | 1d6a67e69a72733b4a649f9e599cc802f65cb0df | [] | no_license | mecha2k/mygo | e088e4abff292aa225dd22655ef9032cd89ddabc | db77aeade0ef25b9cd8d0097aff7dd7cc7d78ef6 | refs/heads/master | 2023-01-21T21:37:57.930762 | 2020-11-26T14:02:33 | 2020-11-26T14:02:33 | 303,343,049 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,744 | py | import unittest
import numpy as np
from dlgo.reinforce import experience
class ExperienceTest(unittest.TestCase):
    """Tests for combining multiple ExperienceCollectors into one buffer."""

    def test_combine_experience(self):
        """Episodes from both collectors concatenate in order, and every
        decision in an episode receives that episode's final reward."""
        # Collector 1, episode 1: two decisions, completed with reward 1.
        collector1 = experience.ExperienceCollector()
        collector1.begin_episode()
        collector1.record_decision(
            state=np.array(
                [
                    [1, 1],
                    [1, 1],
                ]
            ),
            action=1,
        )
        collector1.record_decision(
            state=np.array(
                [
                    [2, 2],
                    [2, 2],
                ]
            ),
            action=2,
        )
        collector1.complete_episode(reward=1)
        # Collector 1, episode 2: one decision, completed with reward 2.
        collector1.begin_episode()
        collector1.record_decision(
            state=np.array(
                [
                    [3, 3],
                    [3, 3],
                ]
            ),
            action=3,
        )
        collector1.complete_episode(reward=2)
        # Collector 2: a single one-decision episode with reward 3.
        collector2 = experience.ExperienceCollector()
        collector2.begin_episode()
        collector2.record_decision(
            state=np.array(
                [
                    [4, 4],
                    [4, 4],
                ]
            ),
            action=4,
        )
        collector2.complete_episode(reward=3)
        combined = experience.combine_experience([collector1, collector2])
        # 4 decisions. Each state is a 2x2 matrix
        self.assertEqual((4, 2, 2), combined.states.shape)
        self.assertEqual((4,), combined.actions.shape)
        self.assertEqual([1, 2, 3, 4], list(combined.actions))
        self.assertEqual((4,), combined.rewards.shape)
        # Rewards 1,1 (episode 1), 2 (episode 2), 3 (collector 2's episode).
        self.assertEqual([1, 1, 2, 3], list(combined.rewards))
| [
"mecha2k@naver.com"
] | mecha2k@naver.com |
37e9ec85ea551c5a0f77ba61a24f955da77d0426 | 6b3b61d2c5ba4998e7390c76be87be569c713f7a | /Exercicio_022/desafio_022.py | 7091a3a066c9a597d33f70c27ebb69bd37711884 | [] | no_license | loc-dev/CursoEmVideo-Python-Exercicios | 22a8b4621eb4bd95ddfca2553693eccca4a0786e | 9bceec567e653c1fbaa01b9668cd0e7a828e53a9 | refs/heads/master | 2022-12-02T18:46:18.506684 | 2020-08-22T20:50:15 | 2020-08-22T20:50:15 | 256,840,996 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 662 | py | # Desafio 022 - Referente aula Fase09
# Create a program that reads a person's full name and shows:
# - The name in all upper-case letters and in all lower-case letters.
# - How many letters it has in total (not counting spaces).
# - How many letters the first name has.
nome = input("Digite o seu nome completo: ")
print('')
print("Analisando o seu nome...")
print("Seu nome em letras maiúsculas é: {}".format(nome.upper()))
print("Seu nome em letras minúsculas é: {}".format(nome.lower()))
print("Seu nome tem ao todo {} letras".format(len(nome.replace(" ", ""))))
print("Seu primeiro nome é {} e ele tem {} letras".format(nome.split()[0], len(nome.split()[0])))
| [
"leonardoc.developer@gmail.com"
] | leonardoc.developer@gmail.com |
60994e422d1fdc9199f7d90ae6cd7856ef8e3102 | c1f60f28cbd74a639dc89b22518ae33765267af4 | /ravem/indico_ravem/__init__.py | cbc2ba2885adce70b301cb3a688236fbcc23aee4 | [
"MIT"
] | permissive | DalavanCloud/indico-plugins-cern | b0a97dbbd7fb4dc272977b121ec92931ee316ad7 | bb67d2fb9e3d24faeeff2b78a5e9bcff52ac5f26 | refs/heads/master | 2020-04-22T08:58:17.025951 | 2019-01-21T14:25:08 | 2019-01-21T14:25:54 | 170,255,750 | 1 | 0 | null | 2019-02-12T05:08:40 | 2019-02-12T05:08:40 | null | UTF-8 | Python | false | false | 376 | py | # This file is part of the CERN Indico plugins.
# Copyright (C) 2014 - 2018 CERN
#
# The CERN Indico plugins are free software; you can redistribute
# them and/or modify them under the terms of the MIT License; see
# the LICENSE file for more details.
from __future__ import unicode_literals
from indico.util.i18n import make_bound_gettext
_ = make_bound_gettext('ravem')  # translation function bound to the 'ravem' text domain
| [
"adrian.moennich@cern.ch"
] | adrian.moennich@cern.ch |
d37b2518c4bf1cc6aac3deab5e80bfaa2e4e17a7 | f897f0e594a9157b9e56cee8b24f68bb9c9221f8 | /aml_workspace/src/AML/aml_io/src/aml_io/tf_io.py | 7edd96ff4f34235b45528fe63e5db611b68b9503 | [] | no_license | HDClark94/Honeycomb | b1056a0f8b41312d0a87d5cd80803a2baf613bae | 96899e6b362358d0ce1e3671cbc77f30856c80ac | refs/heads/master | 2020-03-29T19:48:29.330790 | 2018-10-22T07:50:01 | 2018-10-22T07:50:01 | 150,281,789 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 352 | py | import tensorflow as tf
def load_tf_check_point(session, filename):
    """Restore every saved variable of `session` from the checkpoint at `filename`."""
    restorer = tf.train.Saver()
    restorer.restore(session, filename)
    print("Model restored.")
def save_tf_check_point(session, filename):
    """Write all variables of `session` to a checkpoint at `filename` and report the path."""
    checkpoint_writer = tf.train.Saver()
    save_path = checkpoint_writer.save(session, filename)
    print("tf checkpoint saved in file: %s" % save_path)
"harrydclark91@gmail.com"
] | harrydclark91@gmail.com |
202679d52735c93032e4fa601cba695539c3a1ba | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /AaSXX4SKNdZ7mgqK7_14.py | 83c2bdb8a06c274f5040c0a84f78e2868910eb94 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,822 | py | """
Check the principles of minimalist code in the [intro to the first
challenge](https://edabit.com/challenge/2XLjgZhmACph76Pkr).
In the **Code** tab you will find a code that is missing a single character in
order to pass the tests. However, your goal is to submit a function as
**minimalist** as possible. Use the tips in the tips section below.
Write a function that returns the **first truthy argument** passed to the
function. If all arguments are falsy, return the string `"not found"`. The
function will be called with a **minimum of one** and a **maximum of four**
arguments: `a`, `b`, `c`, `d`.
### Tips
The operator `or` can be used to assign or return the first truthy value among
two or more elements. If no truthy value is found, the last element will be
returned.
For example, the code:
def one_of_these(a, b, c):
return a if a else b if b else c
Can be simplified to:
def one_of_these(a, b, c):
return a or b or c
### Bonus
Once a truthy value is found, the rest of the elements will not be checked.
This can be used to define a sort of default value that will be returned if
all of the previous elements happen to be false or empty:
txt1 = ""
txt2 = "Edabit"
txt1 or "Empty string" ➞ "Empty string"
txt2 or "Empty string" ➞ "Edabit"
### Notes
* This is an open series: there isn't a definite list of features for the challenges. Please, do not hesitate to leave your **suggestions** in the **Comments**.
* _ **Readability**_ is indeed a subjective concept. **Let's discuss it!** Feel free to leave your opinion in the **Comments**.
* You can find all the exercises in this series [over here](https://edabit.com/collection/8F3LA2Mwrf5bp7kse).
"""
def first_one(a, b=None, c=None, d=None):
    """Return the first truthy argument, or 'not found' if all four are falsy."""
    for candidate in (a, b, c, d):
        if candidate:
            return candidate
    return 'not found'
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
8ca9448dc3b945debb5c81ca77e931b7ffa80336 | 683d81b0d0ac10e3782b42f1ea6007124d72a663 | /1. Problems/c. Array/a. 1D 2 - Sequence - Split Array into Conscutive Subsequences.py | 02ae577f943539e02833f37b9170ed94defdd809 | [] | no_license | valleyceo/code_journal | 4b5e6fcbd792fedc639f773ca2bbf6725a9b9146 | 0191a6623e7a467c2c0070c4545358301a5e42ba | refs/heads/master | 2022-09-16T17:47:55.343712 | 2022-09-03T23:46:38 | 2022-09-03T23:46:38 | 129,997,935 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,435 | py | # LC 659. Split Array into Consecutive Subsequences
'''
You are given an integer array nums that is sorted in non-decreasing order.
Determine if it is possible to split nums into one or more subsequences such that both of the following conditions are true:
Each subsequence is a consecutive increasing sequence (i.e. each integer is exactly one more than the previous integer).
All subsequences have a length of 3 or more.
Return true if you can split nums according to the above conditions, or false otherwise.
A subsequence of an array is a new array that is formed from the original array by deleting some (can be none) of the elements without disturbing the relative positions of the remaining elements. (i.e., [1,3,5] is a subsequence of [1,2,3,4,5] while [1,3,2] is not).
Example 1:
Input: nums = [1,2,3,3,4,5]
Output: true
Explanation: nums can be split into the following subsequences:
[1,2,3,3,4,5] --> 1, 2, 3
[1,2,3,3,4,5] --> 3, 4, 5
Example 2:
Input: nums = [1,2,3,3,4,4,5,5]
Output: true
Explanation: nums can be split into the following subsequences:
[1,2,3,3,4,4,5,5] --> 1, 2, 3, 4, 5
[1,2,3,3,4,4,5,5] --> 3, 4, 5
'''
class Solution:
    def isPossible(self, nums: List[int]) -> bool:
        """Return True if nums splits into consecutive increasing runs of length >= 3."""
        return self.optimizedSolution(nums)

    # O(n) time | O(n) space
    def optimizedSolution(self, nums: List[int]) -> bool:
        remaining = Counter(nums)   # values not yet placed into a run
        run_ends = Counter()        # run_ends[v]: runs currently ending at value v
        for value in nums:
            if remaining[value] == 0:
                continue
            remaining[value] -= 1
            if run_ends[value - 1]:
                # Extend an existing run that ends just before this value.
                run_ends[value - 1] -= 1
                run_ends[value] += 1
            elif remaining[value + 1] and remaining[value + 2]:
                # Start a brand-new run of length three.
                remaining[value + 1] -= 1
                remaining[value + 2] -= 1
                run_ends[value + 2] += 1
            else:
                # Cannot extend a run and cannot start one — split impossible.
                return False
        return True
"""
Insight:
* You cannot check for more maximum sequence (ex: [1,2,3,3,4,4,5] -> [1,2,3,4,5], [3,4] is wrong)
- You still can solve greedily:
- Create a counter and a last seq poimarkernter.
- For each number sequence, if there is a prior sequence then add it
- If not, then create a new sequence (check if beginning sequence is larger than 2)
- Why do you need a prev seq marker?
- Because seq needs to stop at 3 and see if new array is formed (ex. [1, 2, 3, 3, 4, 5]).
- Checking new sequence comes first, and checking prev marker comes first on next iteration
"""
| [
"ericjkim9@gmail.com"
] | ericjkim9@gmail.com |
7192fd53903767f99c7e0de95b447794230dc45d | 658849ce4adc682e403631df460d886c21c55146 | /pyQt/02_pdfText.py | 19a44069fc908e8973ddfdbe273a462d091b29ad | [] | no_license | gsrr/Python | eec6b6d1189a5a08ab913925cc5428e9cac0e4ce | d412a7a40bc7875d2fce58311f099945698569f5 | refs/heads/master | 2023-02-11T12:36:49.346289 | 2023-02-01T17:28:52 | 2023-02-01T17:28:52 | 40,101,541 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 314 | py | from PyQt4.QtGui import *
import sys
# A QApplication must exist before any GUI objects are constructed.
app = QApplication(sys.argv)

# Bug fix: the original did `open('02_sample.txt').read()` (handle never
# closed) and bound the contents to a name that suggested a file *path*.
with open('02_sample.txt') as text_file:
    text_content = text_file.read()

doc = QTextDocument(text_content)

# Render the document to PDF through the high-resolution print pipeline.
printer = QPrinter(QPrinter.HighResolution)
printer.setOutputFormat(QPrinter.PdfFormat)
printer.setOutputFileName('02_sample.pdf')
doc.print_(printer)
"jerrycheng1128@gmail.com"
] | jerrycheng1128@gmail.com |
10b44701f84e0e8b55cff95887fbae486fc399ff | a6fae33cdf3d3cb0b0d458c2825a8d8cc010cd25 | /l3/z3/.history/moves_manager_20200522163947.py | 86bb3edd1e5a58526cd512533dea3531b6a94f81 | [] | no_license | Qabrix/optimization_amh | 12aab7c7980b38812ec38b7e494e82452a4176b4 | 6a4f5b897a4bef25f6e2acf535ba20ace7351689 | refs/heads/main | 2022-12-28T10:57:00.064130 | 2020-10-17T22:57:27 | 2020-10-17T22:57:27 | 304,983,340 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,169 | py | from random import choice, randint
class MovesManager():
    """Helpers for walking a maze grid.

    ``grid`` is a list of rows of single-character strings where '1' marks a
    wall and '8' marks an exit; a position is a mutable ``[row, col]`` list.
    """

    def __init__(self, grid=None, start_pos=None, n=0, m=0):
        # None-sentinels instead of mutable default arguments, so instances
        # never share the same list objects.
        self.n = n  # grid height (rows) — presumably; confirm against callers
        self.m = m  # grid width (cols)
        self.grid = grid if grid is not None else []
        self.start_pos = start_pos if start_pos is not None else []
        self.possible_moves = ['U', 'D', 'L', 'R']

    def validate_move(self, step, pos=None, grid=None):
        """Return True if taking `step` from `pos` does not walk into a wall ('1').

        Bug fix: the defaults were written as ``pos=start_pos, grid=self.grid``,
        which raises NameError the moment the class body is executed (neither
        name exists at class-definition time). The instance attributes are now
        substituted at call time instead.
        """
        if pos is None:
            pos = self.start_pos
        if grid is None:
            grid = self.grid
        row, col = pos
        if step == 'U':
            return grid[row - 1][col] != '1'
        if step == 'D':
            return grid[row + 1][col] != '1'
        if step == 'L':
            return grid[row][col - 1] != '1'
        if step == 'R':
            return grid[row][col + 1] != '1'
        return False  # unknown step letters are never valid

    def move(self, step, grid):
        """Apply `step` in place.

        NOTE: despite its name (kept for backward compatibility), ``grid`` is
        a ``[row, col]`` position list that is mutated in place.
        """
        if step == 'U':
            grid[0] -= 1
        elif step == 'D':
            grid[0] += 1
        elif step == 'L':
            grid[1] -= 1
        elif step == 'R':
            grid[1] += 1

    def explore(self, pos, path, grid):
        """Replay `path` from `pos`, keeping only the steps that are valid.

        Returns ``(walked_path, found_exit)``; `pos` is mutated in place.
        """
        new_path = []
        for step in path:
            if self.validate_move(step, pos, grid):
                self.move(step, pos)
                new_path += [step]
            if self.check_for_exit(pos, grid):
                return new_path, True
        return new_path, False

    def random_moves(self, pos, grid, n, m, step_limit):
        """Random-walk from `pos` until an exit is adjacent or step_limit is hit.

        Each iteration picks a random valid direction and slides up to
        randint(1, min(n, m)) cells along it, stopping early at walls.
        NOTE(review): if no direction is ever valid this loops forever —
        callers appear to guarantee at least one open neighbour; confirm.
        """
        path = []
        step = ''
        while len(path) <= step_limit:
            step = choice(self.possible_moves)
            while not self.validate_move(step, pos, grid):
                step = choice(self.possible_moves)
            for _ in range(randint(1, min(n, m))):
                if not self.validate_move(step, pos, grid):
                    break
                self.move(step, pos)
                path += [step]
                if self.check_for_exit(pos, grid):
                    return path
        return path

    def check_for_exit(self, pos, grid):
        """True if `pos` is on an exit cell ('8') or orthogonally adjacent to one.

        NOTE(review): neighbour lookups do no bounds checking, so border
        positions may raise IndexError or wrap via negative indices — the
        mazes seem to carry a wall ring around the edge; confirm.
        """
        return grid[pos[0]][pos[1]] == '8' or (
            grid[pos[0]][pos[1] + 1] == '8' or
            grid[pos[0] + 1][pos[1]] == '8' or
            grid[pos[0]][pos[1] - 1] == '8' or
            grid[pos[0] - 1][pos[1]] == '8'
        )
| [
"kubabalicki@gmail.com"
] | kubabalicki@gmail.com |
49cff15efb1486afccf82fc66324d9b682b7fe42 | 1a2828536c57242cd72e96ed887dfea48f250715 | /divide_dataset.py | ca7d0a9cba3b47d3402e698566ee56132108c56c | [] | no_license | Frostmoune/FaseSR | 2f5f4dd421b2b6e5315d4ee55977015198dff5ff | 137837e2c79957f70f4c2546e27733290506459e | refs/heads/master | 2020-08-10T18:40:07.722703 | 2019-10-11T09:28:30 | 2019-10-11T09:28:30 | 214,397,691 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,832 | py | from models import networks
from data import create_dataloader, create_dataset
import argparse
import torch
from sklearn.cluster import MiniBatchKMeans
import os
import numpy as np
import shutil
import pickle
if __name__ == '__main__':
    # Static options for the feature network (SphereFace) and the LR/HR
    # face dataset loader; dataroot_* entries are filled in from the CLI below.
    opt = {
        'gpu_ids': [0, 1, 2, 3],
        'network_F': {
            'mode': 'Sphere20a',
            'path': '/GPUFS/nsccgz_yfdu_16/ouyry/SISRC/FaceSR-ESRGAN/pretrained/sphere20a_20171020.pth'
        },
        'dataset': {
            'name': 'CelebA',
            'mode': 'LRHR',
            'subset_file': None,
            'phase': 'train',
            'data_type': 'img',
            'scale': 4,
            'HR_size': 96,
            'use_shuffle': True,
            'use_flip': False,
            'use_rot': False,
            'batch_size': 40,
            'n_workers': 4,
            'color': False
        }
    }
    parser = argparse.ArgumentParser()
    parser.add_argument('--HR_Root', type = str, default = "/GPUFS/nsccgz_yfdu_16/ouyry/SISRC/FaceSR-ESRGAN/dataset/FFHQ/HR",
        help = 'Path to val HR.')
    parser.add_argument('--LR_Root', type = str, default = "/GPUFS/nsccgz_yfdu_16/ouyry/SISRC/FaceSR-ESRGAN/dataset/FFHQ/LR",
        help = 'Path to val LR.')
    parser.add_argument('--Clusters', type = int, default = 3, help = 'Number of clusters')
    parser.add_argument('--Train', type = int, default = 0, help = 'Train or not')
    parser.add_argument('--Model_Path', type = str, default = "/GPUFS/nsccgz_yfdu_16/ouyry/SISRC/FaceSR-ESRGAN/dataset/FFHQ/cluster.model",
        help = 'Path to Cluster model')
    args = parser.parse_args()
    Root = '/GPUFS/nsccgz_yfdu_16/ouyry/SISRC/FaceSR-ESRGAN/dataset/FFHQ'
    opt['dataset']['dataroot_LR'] = args.LR_Root
    opt['dataset']['dataroot_HR'] = args.HR_Root
    test_set = create_dataset(opt['dataset'])
    test_loader = create_dataloader(test_set, opt['dataset'])
    device = torch.device('cuda' if opt['gpu_ids'] is not None else 'cpu')
    # Feature extractor used to embed each HR face image.
    sphere = networks.define_F(opt).to(device)
    # One output directory per cluster, e.g. HR0, HR1, ... / LR0, LR1, ...
    for i in range(args.Clusters):
        try:
            os.makedirs(args.HR_Root + str(i))
            os.makedirs(args.LR_Root + str(i))
        except:
            pass  # directory already exists — best-effort creation
    # Embed every HR image; keep the LR/HR file paths aligned with the rows
    # of `vectors` so labels can be mapped back to files later.
    vectors = None
    LR_paths = []
    HR_paths = []
    for i, data in enumerate(test_loader):
        HR = data['HR'].to(device)
        HR_vec = sphere(HR).to('cpu').numpy()
        if vectors is None:
            vectors = HR_vec
        else:
            vectors = np.concatenate((vectors, HR_vec), axis = 0)
        LR_paths += data['LR_path']
        HR_paths += data['HR_path']
        print("Sphere %d batch"%i)
    print(vectors.shape)
    print("Sphere Done ...")
    # Standardize features (zero mean, unit variance) before clustering.
    mean = np.mean(vectors, axis = 0, keepdims = True)
    std = np.std(vectors, axis = 0, keepdims = True)
    vectors = (vectors - mean) / std
    if args.Train:
        # Fit MiniBatchKMeans incrementally in chunks of 2000 rows, then persist it.
        model = MiniBatchKMeans(n_clusters = args.Clusters, batch_size = 2000, random_state = 0, max_iter = 5000)
        for i in range(0, vectors.shape[0], 2000):
            model.partial_fit(vectors[i:i+2000, :])
        with open(args.Model_Path, 'wb') as f:
            pickle.dump(model, f)
    else:
        # Reuse a previously trained clustering model.
        with open(args.Model_Path, 'rb') as f:
            model = pickle.load(f)
    labels = model.predict(vectors)
    print("Cluster Done ...")
    # Copy each LR/HR pair into the directory of its assigned cluster.
    for i, label in enumerate(labels):
        print(i)
        shutil.copy(LR_paths[i], args.LR_Root + str(label))
        shutil.copy(HR_paths[i], args.HR_Root + str(label))
    print("Done")
"810343087@qq.com"
] | 810343087@qq.com |
1f0d272239f6f020cfd64030a8469292477a9b34 | 1800155dcdb48bf956fa423858a8cc20ed27e6cb | /game-of-life.py | 97937b7282f8adfcc28d8545dbd7e53f5206f1d9 | [] | no_license | gitprouser/LeetCode-3 | 1cc2d1dbbf439af4b3768da388dafd514cc5432b | 530ea79f0377e1fc3fbfb5c5cfe7768159144e57 | refs/heads/master | 2021-06-06T16:30:14.795093 | 2016-08-22T21:40:01 | 2016-08-22T21:40:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,380 | py | """
According to the Wikipedia's article: "The Game of Life, also known simply as Life, is a cellular automaton devised by the British mathematician John Horton Conway in 1970."
Given a board with m by n cells, each cell has an initial state live (1) or dead (0). Each cell interacts with its eight neighbors (horizontal, vertical, diagonal) using the following four rules (taken from the above Wikipedia article):
Any live cell with fewer than two live neighbors dies, as if caused by under-population.
Any live cell with two or three live neighbors lives on to the next generation.
Any live cell with more than three live neighbors dies, as if by over-population..
Any dead cell with exactly three live neighbors becomes a live cell, as if by reproduction.
Write a function to compute the next state (after one update) of the board given its current state.
Follow up:
Could you solve it in-place? Remember that the board needs to be updated at the same time: You cannot update some cells first and then use their updated values to update other cells.
In this question, we represent the board using a 2D array. In principle, the board is infinite, which would cause problems when the active area encroaches the border of the array. How would you address these problems?
Credits:
Special thanks to @jianchao.li.fighter for adding this problem and creating all test cases.
"""
| [
"tohaowu@gmail.com"
] | tohaowu@gmail.com |
416bc0ae6a2a36ec1c764d24e4f594644b0a7bec | 13800b7827598e76428a335559b7bf11867ec2f0 | /examples/py/binance-fetch-all-trades.py | ac56fca50cf98cd41c5da62c1b4310aafb80abd7 | [
"MIT"
] | permissive | ccxt/ccxt | b40a0466f5c430a3c0c6026552ae697aa80ba6c6 | e4065f6a490e6fc4dd7a72b375428b2faa570668 | refs/heads/master | 2023-09-04T03:41:29.787733 | 2023-09-03T19:25:57 | 2023-09-03T19:25:57 | 91,253,698 | 30,798 | 8,190 | MIT | 2023-09-14T21:59:09 | 2017-05-14T15:41:56 | Python | UTF-8 | Python | false | false | 1,686 | py | # -*- coding: utf-8 -*-
import os
import sys
import csv
root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(root + '/python')
import ccxt # noqa: E402
exchange = ccxt.binance()
markets = exchange.load_markets()
symbol = 'ETH/BTC'
market = exchange.market(symbol)  # market['id'] is the exchange-native symbol
one_hour = 3600 * 1000  # pagination fallback step, in milliseconds
since = exchange.parse8601('2018-12-12T00:00:00')  # start of the crawl window
now = exchange.milliseconds()
end = exchange.parse8601(exchange.ymd(now) + 'T00:00:00')  # today at midnight
previous_trade_id = None  # detects pages that made no forward progress
filename = exchange.id + '_' + market['id'] + '.csv'
with open(filename, mode="w") as csv_f:
    csv_writer = csv.DictWriter(csv_f, delimiter=",", fieldnames=["timestamp", "size", "price", "side"])
    csv_writer.writeheader()
    # Walk forward through trade history one page at a time.
    while since < end:
        try:
            trades = exchange.fetch_trades(symbol, since)
            print(exchange.iso8601(since), len(trades), 'trades')
            if len(trades):
                last_trade = trades[-1]
                if previous_trade_id != last_trade['id']:
                    # New data: advance the cursor to the last trade seen
                    # and append the whole page to the CSV.
                    since = last_trade['timestamp']
                    previous_trade_id = last_trade['id']
                    for trade in trades:
                        csv_writer.writerow({
                            'timestamp': trade['timestamp'],
                            'size': trade['amount'],
                            'price': trade['price'],
                            'side': trade['side'],
                        })
                else:
                    # Same page as last time: jump one hour ahead to avoid looping.
                    since += one_hour
            else:
                # Empty page: skip forward one hour.
                since += one_hour
        except ccxt.NetworkError as e:
            # Transient network failure: log, back off a minute, retry.
            print(type(e).__name__, str(e))
            exchange.sleep(60000)
"igor.kroitor@gmail.com"
] | igor.kroitor@gmail.com |
a7177ae8e232a76fe17c2554369f3bd1be1b0acb | 4951103d1a112fbb90059a977582f0642546c4cb | /mitmproxy/__init__.py | 9697de8780ca420e99c33da5d4d5716bcbe6ce7d | [
"MIT"
] | permissive | takeratta/mitmproxy | 9d114f221e99e7c522fd8bdd51561753c974ae6e | 569d275d763f499cce9673fcf118dcc8d59d2eeb | refs/heads/master | 2022-01-22T05:23:46.978493 | 2017-10-22T16:06:44 | 2017-10-22T16:06:44 | 108,022,306 | 0 | 0 | MIT | 2019-07-29T09:05:59 | 2017-10-23T18:26:23 | Python | UTF-8 | Python | false | false | 137 | py | # https://github.com/mitmproxy/mitmproxy/issues/1809
# import script here so that pyinstaller registers it.
from . import script # noqa
| [
"git@maximilianhils.com"
] | git@maximilianhils.com |
ded616fccd10707347213e409fa9449335737283 | 3f576989246ddadc3ae9c9d48007a4866245a5f2 | /music_controller/api/urls.py | ae82696637729eec3a595c489018492417018065 | [] | no_license | raghavendra-musubi/django-react-house-party-app | d536fbc92508656685e36974a1cec7dca6d4b07d | b90780f993dbd90a66f6b83564854a8e6dbae3cc | refs/heads/main | 2023-03-05T14:37:59.809471 | 2021-02-17T21:52:10 | 2021-02-17T21:52:10 | 339,756,020 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 180 | py | from django.urls import path
from .views import RoomView, CreateRoomView
urlpatterns = [
    # Room lookup endpoint (GET with a room code).
    path('room/',RoomView.as_view()),
    # Room creation endpoint (POST host settings).
    path('create-room/',CreateRoomView.as_view())
]
| [
"raghavendra@techis.io"
] | raghavendra@techis.io |
c374f75d167d32f720369055aac7da3cfd415292 | d5af5459d0a68d8934219cdd516a23d73c7c52fb | /examples/08 functions/search-with-fns.py | 39a2639219179d057192c5701011f4efaf2e778a | [] | no_license | flathunt/pylearn | 1e5b147924dca792eb1cddbcbee1b8da0fc3d055 | 006f37d67343a0288e7efda359ed9454939ec25e | refs/heads/master | 2022-11-23T23:31:32.016146 | 2022-11-17T08:20:57 | 2022-11-17T08:20:57 | 146,803,991 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,589 | py | #!/usr/local/bin/python
import re
import sys
UNDERLINE = '='
def get_arguments():
    """Parse the two required command-line arguments.

    Returns (terms_file, data_file); prints a usage message to stderr and
    exits with status 1 when the argument count is wrong.
    """
    if len(sys.argv) != 3:
        print("Usage: review2-search.py search_terms_file data_file", file=sys.stderr)
        sys.exit(1)
    return sys.argv[1], sys.argv[2]
def build_pattern(terms_file):
    """Build an alternation regex ("term1|term2|...") from the lines of `terms_file`."""
    with open(terms_file) as search_words:
        pattern = '|'.join(entry.rstrip() for entry in search_words)
    return pattern
def search_file(pattern, data_file):
    """Scan `data_file` line by line, printing each matching line with an underline.

    Matching lines print as "NNNN <line>", followed by a second line that
    underlines the matched span with UNDERLINE characters (offset by the
    5-character line-number prefix).
    """
    with open(data_file) as data:
        for line_no, line in enumerate(data, start=1):
            match = re.search(pattern, line)
            if not match:
                continue
            print("{:04d} {:s}".format(line_no, line), end='')
            span = match.end() - match.start()
            print(' ' * (5 + match.start()) + UNDERLINE * span)
# =====================================================================
# main processing: Search a file for terms provided by another file.
# Usage: search-with-fns.py search_terms_file file_to_search
# =====================================================================
def main():
    # Orchestrate: parse CLI args, build the alternation regex, scan the file.
    terms_file, data_file = get_arguments()
    pattern = build_pattern(terms_file)
    search_file(pattern, data_file)
main()
| [
"porkpie@gmail.com"
] | porkpie@gmail.com |
3b3b79f20e26193d04e460d59f4bf45b4d13b244 | 5453dee97da45be8e316150a65d3308c408dd3c7 | /backend/satchel_wallet_24918/urls.py | 8241ee333f54e43e7463188a70a915a993844224 | [] | no_license | crowdbotics-apps/satchel-wallet-24918 | 22d4599b199e99297a3ccf2236917b951db9fe38 | 524e2432f97334bc25760aa3c18e464f972998a0 | refs/heads/master | 2023-03-13T07:56:43.770159 | 2021-03-09T01:11:51 | 2021-03-09T01:11:51 | 345,825,869 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,247 | py | """satchel_wallet_24918 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include, re_path
from django.views.generic.base import TemplateView
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
urlpatterns = [
    path("", include("home.urls")),
    path("accounts/", include("allauth.urls")),
    path("modules/", include("modules.urls")),
    path("api/v1/", include("home.api.v1.urls")),
    path("admin/", admin.site.urls),
    path("users/", include("users.urls", namespace="users")),
    path("rest-auth/", include("rest_auth.urls")),
    # Override email confirm to use allauth's HTML view instead of rest_auth's API view
    path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
    path("rest-auth/registration/", include("rest_auth.registration.urls")),
]
# Branding for the Django admin site.
admin.site.site_header = "Satchel Wallet"
admin.site.site_title = "Satchel Wallet Admin Portal"
admin.site.index_title = "Satchel Wallet Admin"
# swagger: interactive API documentation, restricted to authenticated users.
api_info = openapi.Info(
    title="Satchel Wallet API",
    default_version="v1",
    description="API documentation for Satchel Wallet App",
)
schema_view = get_schema_view(
    api_info,
    public=True,
    permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
    path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
# Serve the single-page app shell at the root...
urlpatterns += [path("", TemplateView.as_view(template_name='index.html'))]
# ...and let the SPA's client-side router handle any other unmatched path.
urlpatterns += [re_path(r"^(?:.*)/?$",
                        TemplateView.as_view(template_name='index.html'))]
"team@crowdbotics.com"
] | team@crowdbotics.com |
80d41ea8cf356fba8438c715a87477396443a76b | 9d81d0484cd0954abb1a83079904e65c850f88e6 | /plugins/tmp/client.py | 1d84a120b5721fe9d0e227077192d3c262789c02 | [
"MIT"
] | permissive | GPrathap/OpenBCIPython | 802db7e1591769e7f3e3ca1f347bf78083d7579f | 0f5be167fb09d31c15885003eeafec8cdc08dbfa | refs/heads/master | 2021-09-04T01:12:04.106419 | 2018-01-13T21:52:49 | 2018-01-13T21:52:49 | 82,319,921 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 593 | py | import json
import cPickle as pickle
import socket
import sys
# Create a UDP socket
# NOTE: Python 2 script (print-statement syntax; cPickle imported above but unused).
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
server_address = ('localhost', 5678)
message = [45,67,89]
try:
    # Serialize the payload as JSON text before sending over UDP.
    data_string = json.dumps(message)
    # Send data
    print >>sys.stderr, 'sending "%s"' % data_string
    sent = sock.sendto(data_string, server_address)
    # # Receive response
    # print >>sys.stderr, 'waiting to receive'
    # data, server = sock.recv(4096)
    # print >>sys.stderr, 'received "%s"' % data
finally:
    # Always release the socket, even if sending raised.
    print >>sys.stderr, 'closing socket'
    sock.close()
"ggeesara@gmail.com"
] | ggeesara@gmail.com |
1a9bb4a9ec638420d1a783e974812f7852f907a5 | 9b96c37db1f61065094d42bc5c8ad6eb3925961b | /level1/touching_keypad.py | 2df7bd27291cf9f98825fbcaf987970825dc0d85 | [] | no_license | Taeheon-Lee/Programmers | a97589498c866c498c1aa9192fdf8eec9f8e31f4 | c38b1c7dc4114c99191b77e5d19af432eaf6177e | refs/heads/master | 2023-07-09T21:10:25.064947 | 2021-08-30T05:17:49 | 2021-08-30T05:17:49 | 394,327,802 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,851 | py | "카패드 누르기"
# 문제 링크 "https://programmers.co.kr/learn/courses/30/lessons/67256"
dic_loc = {"left":[1,4,7],"right":[3,6,9],"middle":[2,5,8,0]} # 위치 딕셔너리
dic_dis_2 = {1:1,2:0,3:1,4:2,5:1,6:2,7:3,8:2,9:3,0:3,'*':4,'#':4} # 2 키버튼 기준 각 버튼의 거리
dic_dis_5 = {1:2,2:1,3:2,4:1,5:0,6:1,7:2,8:1,9:2,0:2,'*':3,'#':3} # 5 키버튼 기준 각 버튼의 거리
dic_dis_8 = {1:3,2:2,3:3,4:2,5:1,6:2,7:1,8:0,9:1,0:1,'*':2,'#':2} # 8 키버튼 기준 각 버튼의 거리
dic_dis_0 = {1:4,2:3,3:4,4:3,5:2,6:3,7:2,8:1,9:2,0:0,'*':1,'#':1} # 0 키버튼 기준 각 버튼의 거리
def solution(numbers, hand):
answer = '' # 정답 문자열
left_loc = '*' # 왼손 위치 초기화
right_loc = '#' # 오른손 위치 초기화
for key in numbers:
if key in dic_loc["left"]: # 눌러야할 버튼이 왼쪽 위치일 경우
answer += "L"
left_loc = key # 왼손 위치를 해당 키로 이동
elif key in dic_loc["right"]: # 눌러야할 버튼이 오른쪽 위치일 경우
answer += "R"
right_loc = key # 오른손 위치를 해당 키로 이동
else: # 눌러야할 버튼이 중간 위치일 경우
dic = dic_dis_2 if key == 2 else dic_dis_5 if key == 5 else dic_dis_8 if key == 8 else dic_dis_0 # 키에 따라 딕셔너리 선택
if dic[left_loc] < dic[right_loc]: # 왼손 거리값이 오른손 거리값보다 작은 경우
answer += "L"
left_loc = key # 왼손 위치를 해당 키로 이동
elif dic[left_loc] > dic[right_loc]: # 오른손 거리값이 왼손 거리값보다 작은 경우
answer += "R"
right_loc = key # 오른손 위치를 해당 키로 이동
else: # 양손의 거리값이 같은 경우
if hand == "right": # 오른손 잡이일 경우
answer += "R"
right_loc = key # 오른손 위치를 해당 키로 이동
else: # 왼손 잡이일 경우
answer += "L"
left_loc = key # 왼손 위치를 해당 키로 이동
return answer | [
"taeheon714@gmail.com"
] | taeheon714@gmail.com |
9fc68de1b0bedb730f5d41555ddc9324c38376a3 | 91d1a6968b90d9d461e9a2ece12b465486e3ccc2 | /sagemaker_write_2/code-repository_create.py | 5da5051aff1f842a5568c68ab8c4d651925c6d51 | [] | no_license | lxtxl/aws_cli | c31fc994c9a4296d6bac851e680d5adbf7e93481 | aaf35df1b7509abf5601d3f09ff1fece482facda | refs/heads/master | 2023-02-06T09:00:33.088379 | 2020-12-27T13:38:45 | 2020-12-27T13:38:45 | 318,686,394 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,391 | py | #!/usr/bin/python
# -*- codding: utf-8 -*-
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import write_two_parameter
# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/sagemaker/create-code-repository.html
if __name__ == '__main__':
    # Reference list of related sub-commands (this script only wraps "create");
    # kept as a bare string expression, it has no runtime effect.
    """
    delete-code-repository : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/sagemaker/delete-code-repository.html
    describe-code-repository : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/sagemaker/describe-code-repository.html
    list-code-repositories : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/sagemaker/list-code-repositories.html
    update-code-repository : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/sagemaker/update-code-repository.html
    """
    # Help text describing the two required parameters, shown to the user.
    parameter_display_string = """
    # code-repository-name : The name of the Git repository. The name must have 1 to 63 characters. Valid characters are a-z, A-Z, 0-9, and - (hyphen).
    # git-config :
    """
    add_option_dict = {}
    add_option_dict["parameter_display_string"] = parameter_display_string
    # ex: add_option_dict["no_value_parameter_list"] = "--single-parameter"
    # Delegate to the shared two-parameter wrapper around the AWS CLI command.
    write_two_parameter("sagemaker", "create-code-repository", "code-repository-name", "git-config", add_option_dict)
| [
"hcseo77@gmail.com"
] | hcseo77@gmail.com |
a4d8767d31d8277c1bad4b172829568c721c2d11 | 130a98632d2ab4c171503b79e455b7aa27a1dda4 | /models/official/modeling/optimization/configs/learning_rate_config.py | 520a0b96141526382f8e29e733505d62273d5a1f | [
"Apache-2.0",
"MIT"
] | permissive | aboerzel/German_License_Plate_Recognition | d7fc0314295f5cf0c9d7ae9c93a795e3ef1c5787 | 6fc53292b1d3ce3c0340ce724c2c11c77e663d27 | refs/heads/master | 2023-01-30T18:08:37.339542 | 2023-01-07T07:41:36 | 2023-01-07T07:41:36 | 245,586,430 | 34 | 12 | MIT | 2023-01-07T07:41:37 | 2020-03-07T07:16:51 | Python | UTF-8 | Python | false | false | 7,705 | py | # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dataclasses for learning rate schedule config."""
from typing import List, Optional
import dataclasses
from official.modeling.hyperparams import base_config
@dataclasses.dataclass
class ConstantLrConfig(base_config.Config):
  """Configuration for constant learning rate.

  This class is a container for the constant learning rate decay configs.

  Attributes:
    name: The name of the learning rate schedule. Defaults to Constant.
    learning_rate: A float. The learning rate. Defaults to 0.1.
  """
  name: str = 'Constant'
  learning_rate: float = 0.1
@dataclasses.dataclass
class StepwiseLrConfig(base_config.Config):
  """Configuration for stepwise learning rate decay.

  This class is a container for the piecewise constant learning rate scheduling
  configs. It will configure an instance of PiecewiseConstantDecay keras
  learning rate schedule.

  An example (from keras docs): use a learning rate that's 1.0 for the first
  100000 steps, 0.5 for the next 10000 steps, and 0.1 for any additional steps.
  ```python
  boundaries: [100000, 110000]
  values: [1.0, 0.5, 0.1]
  ```

  Attributes:
    name: The name of the learning rate schedule. Defaults to PiecewiseConstant.
    boundaries: A list of ints of strictly increasing entries. Defaults to None.
    values: A list of floats that specifies the values for the intervals defined
      by `boundaries`. It should have one more element than `boundaries`.
      The learning rate is computed as follows:
        [0, boundaries[0]] -> values[0]
        [boundaries[0], boundaries[1]] -> values[1]
        [boundaries[n-1], boundaries[n]] -> values[n]
        [boundaries[n], end] -> values[n+1]
      Defaults to None.
  """
  name: str = 'PiecewiseConstantDecay'
  boundaries: Optional[List[int]] = None
  values: Optional[List[float]] = None
@dataclasses.dataclass
class ExponentialLrConfig(base_config.Config):
  """Configuration for exponential learning rate decay.

  This class is a container for the exponential learning rate decay configs.

  Attributes:
    name: The name of the learning rate schedule. Defaults to ExponentialDecay.
    initial_learning_rate: A float. The initial learning rate. Defaults to None.
    decay_steps: A positive integer that is used for decay computation. Defaults
      to None.
    decay_rate: A float. Defaults to None.
    staircase: A boolean, if true, learning rate is decreased at discrete
      intervals. Defaults to None.
  """
  name: str = 'ExponentialDecay'
  initial_learning_rate: Optional[float] = None
  decay_steps: Optional[int] = None
  decay_rate: Optional[float] = None
  staircase: Optional[bool] = None
@dataclasses.dataclass
class PolynomialLrConfig(base_config.Config):
  """Configuration for polynomial learning rate decay.

  This class is a container for the polynomial learning rate decay configs.

  Attributes:
    name: The name of the learning rate schedule. Defaults to PolynomialDecay.
    initial_learning_rate: A float. The initial learning rate. Defaults to None.
    decay_steps: A positive integer that is used for decay computation. Defaults
      to None.
    end_learning_rate: A float. The minimal end learning rate.
    power: A float. The power of the polynomial. Defaults to linear, 1.0.
    cycle: A boolean, whether or not it should cycle beyond decay_steps.
      Defaults to False.
  """
  name: str = 'PolynomialDecay'
  initial_learning_rate: Optional[float] = None
  decay_steps: Optional[int] = None
  end_learning_rate: float = 0.0001
  power: float = 1.0
  cycle: bool = False
@dataclasses.dataclass
class CosineLrConfig(base_config.Config):
  """Configuration for Cosine learning rate decay.

  This class is a container for the cosine learning rate decay configs,
  tf.keras.experimental.CosineDecay.

  Attributes:
    name: The name of the learning rate schedule. Defaults to CosineDecay.
    initial_learning_rate: A float. The initial learning rate. Defaults to None.
    decay_steps: A positive integer that is used for decay computation. Defaults
      to None.
    alpha: A float. Minimum learning rate value as a fraction of
      initial_learning_rate.
  """
  name: str = 'CosineDecay'
  initial_learning_rate: Optional[float] = None
  decay_steps: Optional[int] = None
  alpha: float = 0.0
@dataclasses.dataclass
class DirectPowerLrConfig(base_config.Config):
  """Configuration for DirectPower learning rate decay.

  This class configures a schedule that follows lr * (step)^power.

  Attributes:
    name: The name of the learning rate schedule. Defaults to DirectPowerDecay.
    initial_learning_rate: A float. The initial learning rate. Defaults to None.
    power: A float. Defaults to -0.5, for sqrt decay.
  """
  name: str = 'DirectPowerDecay'
  initial_learning_rate: Optional[float] = None
  power: float = -0.5
@dataclasses.dataclass
class PowerAndLinearDecayLrConfig(base_config.Config):
"""Configuration for DirectPower learning rate decay.
This class configures a schedule following follows lr * (step)^power for the
first total_decay_steps * (1 - linear_decay_fraction) steps, and follows
lr * (step)^power * (total_decay_steps - step) / (total_decay_steps *
linear_decay_fraction) for the rest of the steps.
Attributes:
name: The name of the learning rate schedule. Defaults to DirectPowerDecay.
initial_learning_rate: A float. The initial learning rate. Defaults to None.
power: A float. Defaults to -0.5, for sqrt decay.
"""
name: str = 'PowerAndLinearDecay'
initial_learning_rate: Optional[float] = None
total_decay_steps: Optional[int] = None
power: float = -0.5
linear_decay_fraction: float = 0.1
@dataclasses.dataclass
class LinearWarmupConfig(base_config.Config):
"""Configuration for linear warmup schedule config.
This class is a container for the linear warmup schedule configs.
Warmup_learning_rate is the initial learning rate, the final learning rate of
the warmup period is the learning_rate of the optimizer in use. The learning
rate at each step linearly increased according to the following formula:
warmup_learning_rate = warmup_learning_rate +
step / warmup_steps * (final_learning_rate - warmup_learning_rate).
Using warmup overrides the learning rate schedule by the number of warmup
steps.
Attributes:
name: The name of warmup schedule. Defaults to linear.
warmup_learning_rate: Initial learning rate for the warmup. Defaults to 0.
warmup_steps: Warmup steps. Defaults to None.
"""
name: str = 'linear'
warmup_learning_rate: float = 0
warmup_steps: Optional[int] = None
@dataclasses.dataclass
class PolynomialWarmupConfig(base_config.Config):
"""Configuration for linear warmup schedule config.
This class is a container for the polynomial warmup schedule configs.
Attributes:
name: The name of warmup schedule. Defaults to Polynomial.
power: Polynomial power. Defaults to 1.
warmup_steps: Warmup steps. Defaults to None.
"""
name: str = 'polynomial'
power: float = 1
warmup_steps: Optional[int] = None
| [
"andreas.boerzel@gmx.de"
] | andreas.boerzel@gmx.de |
03b84161a9f04f4dc964e751a7e9211dfacfe61b | 9fa8c280571c099c5264960ab2e93255d20b3186 | /algorithm/mobo/solver/parego/parego.py | 490b74696692c48610a556675780573a6df09a4e | [
"MIT"
] | permissive | thuchula6792/AutoOED | 8dc97191a758200dbd39cd850309b0250ac77cdb | 272d88be7ab617a58d3f241d10f4f9fd17b91cbc | refs/heads/master | 2023-07-23T16:06:13.820272 | 2021-09-08T14:22:18 | 2021-09-08T14:22:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,560 | py | import numpy as np
from ..base import Solver
from pymoo.optimize import minimize
from pymoo.algorithms.so_cmaes import CMAES
from pymoo.decomposition.tchebicheff import Tchebicheff
from .utils import ScalarizedEvaluator
from multiprocess import Process, Queue
def optimization(problem, x, weights, queue):
'''
Parallel worker for single-objective CMA-ES optimization.
'''
evaluator = ScalarizedEvaluator(decomposition=Tchebicheff(), weights=weights)
res = minimize(problem, CMAES(x), evaluator=evaluator)
queue.put([res.X[0], res.F[0]])
class ParEGOSolver(Solver):
'''
Solver based on ParEGO.
'''
def __init__(self, *args, **kwargs):
self.pop_size = kwargs['pop_size']
self.n_process = kwargs.pop('n_process')
super().__init__(*args, algo=CMAES, **kwargs)
def solve(self, problem, X, Y):
'''
Solve the multi-objective problem by multiple scalarized single-objective solvers.
Parameters
----------
problem: mobo.surrogate_problem.SurrogateProblem
The surrogate problem to be solved.
X: np.array
Current design variables.
Y: np.array
Current performance values.
Returns
-------
solution: dict
A dictionary containing information of the solution.\n
- solution['x']: Proposed design samples.
- solution['y']: Performance of proposed design samples.
'''
# initialize population
sampling = self._get_sampling(X, Y)
if not isinstance(sampling, np.ndarray):
sampling = sampling.do(problem, self.pop_size)
# generate scalarization weights
weights = np.random.random((self.pop_size, Y.shape[1]))
weights /= np.expand_dims(np.sum(weights, axis=1), 1)
# optimization
xs, ys = [], []
queue = Queue()
n_active_process = 0
for i, x0 in enumerate(sampling):
Process(target=optimization, args=(problem, x0, weights[i], queue)).start()
n_active_process += 1
if n_active_process >= self.n_process:
x, y = queue.get()
xs.append(x)
ys.append(y)
n_active_process -= 1
# gather result
for _ in range(n_active_process):
x, y = queue.get()
xs.append(x)
ys.append(y)
# construct solution
self.solution = {'x': np.array(xs), 'y': np.array(ys)}
return self.solution | [
"yunsheng@mit.edu"
] | yunsheng@mit.edu |
78a6f5a2d7d2673a30b6fdfc1170f493c07e7a3d | 87eb72edb890c22cc230a3e5511a4d745f2e6f72 | /evaluation/workflow/utils.py | b1a72074d62716c97d62f632372232bdb527a091 | [
"MIT"
] | permissive | QuantumMisaka/GLUE | a16c84ec9978daa75117f607b06c1d52259c5d13 | e84cb6483971dcb1e2485080f812899baaf31b5b | refs/heads/master | 2023-07-27T17:53:51.720504 | 2021-09-14T08:21:45 | 2021-09-14T08:21:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,301 | py | r"""
Utility functions for snakemake files
"""
# pylint: disable=missing-function-docstring, redefined-outer-name
from functools import reduce
from operator import add
from pathlib import Path
def conf_expand_pattern(conf, placeholder="null"):
expand_pattern = "-".join(f"{key}:{{{key}}}" for key in conf)
return expand_pattern if expand_pattern else placeholder
def expand(pattern, **wildcards):
from snakemake.io import expand
has_default_choices = False
for val in wildcards.values(): # Sanity check
if isinstance(val, dict):
if "default" not in val or "choices" not in val:
print(val)
raise ValueError("Invalid default choices!")
has_default_choices = True
if not has_default_choices:
return expand(pattern, **wildcards)
expand_set = set()
for key, val in wildcards.items():
if isinstance(val, dict):
wildcards_use = {key: val["choices"]}
for other_key, other_val in wildcards.items():
if other_key == key:
continue
if isinstance(other_val, dict):
wildcards_use[other_key] = other_val["default"]
else:
wildcards_use[other_key] = other_val
expand_set = expand_set.union(expand(pattern, **wildcards_use))
return list(expand_set)
def seed2range(config):
for key, val in config.items():
if isinstance(val, dict):
seed2range(val)
elif key.endswith("seed") and val != 0:
config[key] = range(val)
def target_directories(config):
seed2range(config)
dataset = config["dataset"].keys()
subsample_conf = config["subsample"] or {}
subsample_conf = expand(
conf_expand_pattern(subsample_conf, placeholder="original"),
**subsample_conf
)
def per_method(method):
prior_conf = config["prior"] or {}
prior_conf = {} if method in ("UnionCom", "iNMF_FiG", "LIGER_FiG") else prior_conf # Methods that do not use prior feature matching
prior_conf = expand(
conf_expand_pattern(prior_conf, placeholder="null"),
**prior_conf
)
hyperparam_conf = config["method"][method] or {}
hyperparam_conf = expand(
conf_expand_pattern(hyperparam_conf, placeholder="default"),
**hyperparam_conf
)
seed = 0 if method in ("bindSC", ) else config["seed"] # Methods that are deterministic
return expand(
"results/raw/{dataset}/{subsample_conf}/{prior_conf}/{method}/{hyperparam_conf}/seed:{seed}",
dataset=dataset,
subsample_conf=subsample_conf,
prior_conf=prior_conf,
method=method,
hyperparam_conf=hyperparam_conf,
seed=seed
)
return reduce(add, map(per_method, config["method"]))
def target_files(directories):
def per_directory(directory):
directory = Path(directory)
if (directory / ".blacklist").exists():
return []
return [
directory / "metrics.yaml",
directory / "cell_type.pdf",
directory / "domain.pdf"
]
return reduce(add, map(per_directory, directories))
| [
"caozj@mail.cbi.pku.edu.cn"
] | caozj@mail.cbi.pku.edu.cn |
4414de7c5cbc56534a03fc689dfd90f5e8a113b2 | 7952f66758b685f4bf045c7eb28efa3a22412a89 | /HackerRank/sol5-BigSorting.py | 88a72735750e92995e64feed6799a635fc038d8a | [] | no_license | PingPingE/Algorithm | b418fa13528c27840bb220e305933800c5b4c00a | 89a55309c44320f01d2d6fe5480181a4c5816fd2 | refs/heads/master | 2023-08-31T01:43:09.690729 | 2023-08-27T13:12:22 | 2023-08-27T13:12:22 | 172,465,200 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 546 | py | #!/bin/python3
import math
import os
import random
import re
import sys
def sol(arr):
return [a for a in sorted(arr, key= lambda x: [len(x),x])]#각 숫자의 길이 정보 추가
if __name__ == '__main__':
unsorted = [ input() for _ in range(int(input()))]
for s in sol(unsorted):
print(s)
#드디어 성공!!
#하지만 discussions를 참고했다. 그래도 다양한 사람들의 의견, 코드 등을 보면서 많이 배웠다.
#컴터가 더 연산을 쉽고 빠르게 할 수 있도록 더 많은 정보를 주자!
| [
"ds03023@gmail.com"
] | ds03023@gmail.com |
860c310e893e8bb4727c39195f129cd71807aabb | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/pa3/sample/str_cat-135.py | d0cec0551a65ecaa83019d37eaa5fd104c69b8f3 | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 280 | py | a:str = "Hello"
b:str = "World"
c:str = "ChocoPy"
def cat2(a:str, b:str) -> str:
return a + b
def cat3(a:str, b:str, c:str) -> str:
return a + b + c
print(cat2(a, b))
print(cat2("", c))
print($ID(a, " ", c))
print(len(a))
print(len(cat2(a,a)))
print(len(cat2("","")))
| [
"647530+Virtlink@users.noreply.github.com"
] | 647530+Virtlink@users.noreply.github.com |
3eb38bf3600a44172b0241a3218341a0d711cdea | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03732/s795942587.py | 80267c847271bc1bc6a9c984f1299245239a189a | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,363 | py | # https://atcoder.jp/contests/abc060/tasks/arc073_b
# 典型的なナップサック。だけど配列が大きいので素直に実装するとTLEになる
# 成約により、w1以上は必ず前のjを見ることに注意するとテーブルのサイズがぐっと減ることに気がつくがこれを実装するのはなかなかめんどくさそう。
# defaltdictを利用した再帰メモ化なら比較的実装可能では?
import sys
sys.setrecursionlimit(1 << 25)
read = sys.stdin.readline
def read_ints():
return list(map(int, read().split()))
def read_col(H, n_cols):
'''
H is number of rows
n_cols is number of cols
A列、B列が与えられるようなとき
'''
ret = [[] for _ in range(n_cols)]
for _ in range(H):
tmp = list(map(int, read().split()))
for col in range(n_cols):
ret[col].append(tmp[col])
return ret
N, W = read_ints()
w, v = read_col(N, 2)
from collections import defaultdict
dp = defaultdict(lambda: -1)
def f(i, j): # i番目を含んで考慮したとき重さjまでで達成できる価値の最大値
if dp[i, j] != -1:
return dp[i, j]
if i == -1:
return 0
if j - w[i] < 0:
return f(i - 1, j)
ret = max(f(i - 1, j - w[i]) + v[i], f(i - 1, j))
dp[i, j] = ret
return ret
print(f(N - 1, W))
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
9046a3da2df23840c71d7b015bc6bd1ebb645ffe | 070ede418be70e376da2fd1ed18a567098c951c9 | /junk/src/libs/alphavantage/windowed_dataset.py | 6599e7882ed075cb2d742f3861492b2d5ba1fcee | [] | no_license | jmnel/neuralsort | b647f745c7c7e33f4d79400493fb974aeb818426 | 9efbeac8c8c98895f2bf930e33d45ebfeffb54c7 | refs/heads/master | 2020-12-30T03:13:18.135533 | 2020-09-21T02:51:40 | 2020-09-21T02:51:40 | 245,709,197 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,444 | py | import sys
from pathlib import Path
sys.path.append(str(Path(__file__).absolute().parents[1]))
from random import randint
from pathlib import Path
import torch
from torch.utils.data import Dataset
import numpy as np
from pprint import pprint
from db_connectors import SQLite3Connector
class WindowedDataset(Dataset):
def __init__(self,
data_path: Path,
train_size,
test_size,
prediction_window,
num_stocks,
is_train,
transform=None):
super().__init__()
self._transform = transform
db = SQLite3Connector.connect(data_path / 'clean.db')
table = 'adj_returns_clean'
# Get list of symbols by picking first (n=num_stocks) column names.
schema = db.get_schema(table)
symbols = [s['name'] for s in schema[1:]][0:num_stocks]
# Get actual price time series.
raw = db.select(table, symbols)
db.close()
k_folds = 4
fold_len = len(raw) // k_folds
print(len(raw))
print(fold_len)
# print(fold_len *
data_path = Path(__file__).absolute().parents[3] / 'data'
print(data_path)
foo = WindowedDataset(data_path,
train_size=600,
test_size=200,
prediction_window=10,
num_stocks=5,
is_train=True)
| [
"jmnel92@gmail.com"
] | jmnel92@gmail.com |
6429729d36074089835ef04f458ea4cf6e124765 | 5f4aab3f1aef88e57bf1676af6ee4d7fd0ec4f08 | /src/SConscript | bc3d26df08583e242278f4869e8687651f95b506 | [
"BSD-3-Clause"
] | permissive | chunkified/kl-iostream | 38167841c781c0052c08c1a5342da31592b6ba81 | b9f4c90b09e0b353971a35d8adc779822e186f03 | refs/heads/master | 2021-01-20T09:41:30.729656 | 2014-05-07T08:03:41 | 2014-05-07T08:03:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 855 | #
# Copyright 2010-2014 Fabric Software Inc. All rights reserved.
#
Import('parentEnv', 'kl2edk', 'kl', 'extSuffix')
extName = 'kliostream'
env = parentEnv.Clone()
env.Append(CPPPATH = [env.Dir('.').abspath])
sources = [
env.File('kliostream.fpm.json'),
env.File('kliostream.codegen.json')
]
sources += env.Glob('*.kl')
cppFiles = [
env.File('extension.cpp'),
env.File('IFStream_functions.cpp'),
env.File('OFStream_functions.cpp')
]
extensionFiles = env.Install(env.Dir('#stage'), [env.File(extName+'.fpm.json')] + env.Glob('*.kl'))
kl2edkResults = env.RunKL2EDK(cppFiles, sources)
extLibFileName = env.File(extName + '-' + extSuffix)
libraryFiles = Flatten([env.SharedLibrary(extLibFileName, cppFiles)])
env.Depends(libraryFiles, kl2edkResults)
extensionFiles += env.Install(env.Dir('#stage'), libraryFiles[0])
Return('extensionFiles')
| [
"helge.mathee@fabricengine.com"
] | helge.mathee@fabricengine.com | |
e33fa54f4a66204c553c8ba94a758e368c1d509b | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03606/s347687358.py | de597f3f689f815591d9348faf868bf8955f2a95 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 803 | py | import sys, re, os
from collections import deque, defaultdict, Counter
from math import ceil, sqrt, hypot, factorial, pi, sin, cos, radians
from itertools import permutations, combinations, product, accumulate
from operator import itemgetter, mul
from copy import deepcopy
from string import ascii_lowercase, ascii_uppercase, digits
from fractions import gcd
def input(): return sys.stdin.readline().strip()
def INT(): return int(input())
def MAP(): return map(int, input().split())
def S_MAP(): return map(str, input().split())
def LIST(): return list(map(int, input().split()))
def S_LIST(): return list(map(str, input().split()))
sys.setrecursionlimit(10 ** 9)
INF = float('inf')
mod = 10 ** 9 + 7
n = INT()
L = []
ans = 0
for i in range(n):
a, b = LIST()
ans += b - a + 1
print(ans)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
843ed043d892d76779ec0a0ceb2832bd406da3c6 | 6fa0c051f742c3f9c99ee2800cd132db5ffb28c7 | /src/Collective/forms.py | ff624db36622f3fa0e9d6b70808263ea96555afe | [] | no_license | MCN10/NXTLVL | 9c37bf5782bfd8f24d0fb0431cb5885c585369b0 | 76d8818b7961e4f0362e0d5f41f48f53ce1bfdc5 | refs/heads/main | 2023-06-02T13:51:34.432668 | 2021-06-02T14:19:21 | 2021-06-02T14:19:21 | 328,625,042 | 1 | 0 | null | 2021-06-16T10:16:17 | 2021-01-11T10:19:44 | Python | UTF-8 | Python | false | false | 664 | py | from django.forms import ModelForm
from .models import *
class CollectiveOrderForm(ModelForm):
class Meta:
model = CollectiveOrder
fields = '__all__'
exclude = ['customer', 'transaction_id']
class CollectiveOrderItemsForm(ModelForm):
class Meta:
model = CollectiveOrderItem
fields = '__all__'
class CollectiveShippingDetailsForm(ModelForm):
class Meta:
model = CollectiveShippingAddress
fields = '__all__'
class CollectiveProductsForm(ModelForm):
class Meta:
model = CollectiveProduct
fields = '__all__'
class CollectiveCategoriesForm(ModelForm):
class Meta:
model = CollectiveCategory
fields = '__all__'
exclude = ['slug']
| [
"mcn10.foxx@gmail.com"
] | mcn10.foxx@gmail.com |
2454d230d571ade8339803b76c3950c86b824968 | ff6248be9573caec94bea0fa2b1e4b6bf0aa682b | /StudentProblem/10.21.9.70/2/1569574502.py | e1c6a7512ff955c929417dcb142233ae751ca36e | [] | no_license | LennartElbe/codeEvo | 0e41b1a7705204e934ef71a5a28c047366c10f71 | e89b329bc9edd37d5d9986f07ca8a63d50686882 | refs/heads/master | 2020-12-21T17:28:25.150352 | 2020-03-26T10:22:35 | 2020-03-26T10:22:35 | 236,498,032 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,040 | py | import functools
import typing
import string
import random
import pytest
## Lösung Teil 1.
a = 123
b = list(str(a))
print (b)
def is_palindromic(n):
"""
Funktion testet ob eine positive ganze Zahl n>0 ein Palindrom ist.
Definition Palimdrom:
Eine natürliche Zahl ist ein Palindrom, falls die Ziffernfolge ihrerDezimaldarstellung vorwärts und
rückwärts gelesen gleich ist.
args:
n: int (n > 0)
return:
bool (True, wenn n ein Palimdrom ist False wenn n kein Palimdrom ist)
"""
if type(n) != int or n < 0:
return "Nanana"
string_int = str(n)
compare = []
compare2 = list(string_int)
for index in range(len(string_int) - 1, -1, -1):
compare.append(compare2[index])
if compare == compare2:
return True
else:
return False
######################################################################
## Lösung Teil 2. (Tests)
def test_is_palindromic():
a = 123
b = 123321
c = 45654
d = 0
e = 9.09
assert is_palindromic(a) == False
assert is_palindromic(b) == True
assert is_palindromic(c) == True
assert is_palindromic(d) == True
assert is_palindromic(e) == "Nanana"
######################################################################
## Lösung Teil 3.
## Lösung Teil 4.
######################################################################
## test code
pytest.main (["-v", "--assert=plain", "-p", "no:cacheprovider"])
from inspect import getfullargspec
class TestNames:
def test_is_palindromic(self):
assert is_palindromic
assert 'n' in getfullargspec(is_palindromic).args
def test_gen_palindromic(self):
assert gen_palindromic
assert 'n' in getfullargspec(gen_palindromic).args
def test_represent(self):
assert represent
assert 'n' in getfullargspec(represent).args
class TestGrades:
def test_docstring_present(self):
assert is_palindromic.__doc__ is not None
assert gen_palindromic.__doc__ is not None
assert represent.__doc__ is not None
def test_typing_present(self):
assert is_palindromic.__hints__ == typing.get_type_hints(self.is_palindromic_oracle)
assert typing.get_type_hints (gen_palindromic) == typing.get_type_hints (self.gen_palindromic_oracle)
assert typing.get_type_hints (represent) == typing.get_type_hints (self.represent_oracle)
def test_coverage(self):
assert coverage("achieved") == coverage("required")
def is_palindromic_oracle(self, n:int)->list:
s = str(n)
while len (s) > 1:
if s[0] != s[-1]:
return False
s = s[1:-1]
return True
def gen_palindromic_oracle (self, n:int):
return (j for j in range (n + 1, 0, -1) if self.is_palindromic_oracle (j))
def represent_oracle (self, n:int) -> list:
for n1 in self.gen_palindromic_oracle (n):
if n1 == n:
return [n1]
for n2 in self.gen_palindromic_oracle (n - n1):
if n2 == n - n1:
return [n1, n2]
for n3 in self.gen_palindromic_oracle (n - n1 - n2):
if n3 == n - n1 - n2:
return [n1, n2, n3]
# failed to find a representation
return []
def test_is_palindromic(self):
## fill in
for i in range (100):
self.check_divisors (i)
n = random.randrange (10000)
self.check_divisors (n)
def test_gen_palindromic(self):
## fill in
pass
def test_represent (self):
def check(n, r):
for v in r:
assert self.is_palindromic_oracle (v)
assert n == sum (r)
for n in range (1,100):
r = represent (n)
check (n, r)
for i in range (100):
n = random.randrange (10000)
r = represent (n)
check (n, r)
| [
"lenni.elbe@gmail.com"
] | lenni.elbe@gmail.com |
6950bd92117c53aac7dea84e5af24b34e63e4288 | 244ecfc2017a48c70b74556be8c188e7a4815848 | /res/scripts/client/gui/wgnc/actions.py | d0262b6d2850bb671b76223b5c7361d4da1ffa7e | [] | no_license | webiumsk/WOT-0.9.12 | c1e1259411ba1e6c7b02cd6408b731419d3174e5 | 5be5fd9186f335e7bae88c9761c378ff5fbf5351 | refs/heads/master | 2021-01-10T01:38:36.523788 | 2015-11-18T11:33:37 | 2015-11-18T11:33:37 | 46,414,438 | 1 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 5,812 | py | # 2015.11.18 11:57:06 Střední Evropa (běžný čas)
# Embedded file name: scripts/client/gui/wgnc/actions.py
import BigWorld
from adisp import process
from debug_utils import LOG_CURRENT_EXCEPTION, LOG_ERROR, LOG_WARNING, LOG_DEBUG
from gui.game_control import getBrowserCtrl
from gui.shared.utils.decorators import ReprInjector
from gui.wgnc.events import g_wgncEvents
from gui.wgnc.settings import WGNC_GUI_TYPE
@ReprInjector.simple(('_name', 'name'))
class _Action(object):
__slots__ = ('_name',)
def __init__(self, name):
super(_Action, self).__init__()
self._name = name
def getName(self):
return self._name
def validate(self, itemsHolder):
return True
def invoke(self, notID, actor = None):
raise NotImplementedError
@ReprInjector.withParent(('_purge', 'purge'), ('_isInvoked', 'isInvoked'))
class Callback(_Action):
__slots__ = ('_purge', '_isInvoked')
def __init__(self, name, purge = True):
super(Callback, self).__init__(name)
self._purge = purge
self._isInvoked = False
def doPurge(self):
return self._purge
def invoke(self, notID, actor = None):
if self._purge and self._isInvoked:
LOG_DEBUG('Callback with purge=true has been invoked, it is skipped', self._name)
return
self._isInvoked = True
try:
BigWorld.player().sendNotificationReply(notID, self._purge, self._name)
except (AttributeError, TypeError):
LOG_CURRENT_EXCEPTION()
@ReprInjector.withParent(('_url', 'url'))
class _OpenBrowser(_Action):
__slots__ = ('_url',)
def __init__(self, name, url):
super(_OpenBrowser, self).__init__(name)
self._url = url
def getURL(self):
return self._url
@ReprInjector.withParent()
class OpenInternalBrowser(_OpenBrowser):
__slots__ = ('_browserID',)
def __init__(self, name, url):
super(OpenInternalBrowser, self).__init__(name, url)
self._browserID = None
return
def invoke(self, notID, actor = None):
ctrl = getBrowserCtrl()
if ctrl:
if actor:
title = actor.getTopic()
else:
title = None
self.__doInvoke(ctrl, title)
else:
LOG_ERROR('Browser controller is not found')
return
@process
def __doInvoke(self, ctrl, title):
self._browserID = yield ctrl.load(self._url, browserID=self._browserID, title=title)
@ReprInjector.withParent()
class OpenExternalBrowser(_OpenBrowser):
def invoke(self, notID, actor = None):
try:
BigWorld.wg_openWebBrowser(self._url)
except (AttributeError, TypeError):
LOG_CURRENT_EXCEPTION()
@ReprInjector.withParent(('_target', 'target'))
class OpenWindow(_Action):
__slots__ = ('_target',)
def __init__(self, name, target):
super(OpenWindow, self).__init__(name)
self._target = target
def validate(self, itemsHolder):
return itemsHolder.getItemByName(self._target) is not None
def getTarget(self):
return self._target
def invoke(self, notID, actor = None):
g_wgncEvents.onItemShowByAction(notID, self._target)
@ReprInjector.withParent(('_text', 'text'))
class ReplaceButtons(_Action):
__slots__ = ('_text',)
def __init__(self, name, text):
super(ReplaceButtons, self).__init__(name)
self._text = text
def getTextToReplace(self):
return self._text
def invoke(self, notID, actor = None):
if not actor:
LOG_ERROR('GUI item is not found', self)
return
if actor.getType() != WGNC_GUI_TYPE.POP_UP:
LOG_WARNING('Hiding buttons is allowed in pup up only', actor, self)
return
actor.hideButtons()
actor.setNote(self._text)
g_wgncEvents.onItemUpdatedByAction(notID, actor)
def _getActions4String(value):
seq = value.split(',')
for name in seq:
yield name.strip()
@ReprInjector.simple(('__actions', 'actions'))
class ActionsHolder(object):
__slots__ = ('__actions',)
def __init__(self, items):
super(ActionsHolder, self).__init__()
self.__actions = {item.getName():item for item in items}
def clear(self):
self.__actions.clear()
def hasAction(self, name):
return name in self.__actions
def hasAllActions(self, names):
for name in _getActions4String(names):
if not self.hasAction(name):
return False
return True
def getAction(self, name):
action = None
if self.hasAction(name):
action = self.__actions[name]
return action
def validate(self, itemsHolder):
exclude = set()
for name, action in self.__actions.iteritems():
if not action.validate(itemsHolder):
LOG_WARNING('Action is invalid', action)
exclude.add(name)
for name in exclude:
self.__actions.pop(name, None)
return
def invoke(self, notID, names, actor = None):
result = False
if not notID:
LOG_ERROR('ID of notification is not defined', notID)
return result
for name in _getActions4String(names):
if self.hasAction(name):
action = self.__actions[name]
action.invoke(notID, actor)
result = True
else:
LOG_ERROR('Action is not found', name)
return result
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\gui\wgnc\actions.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2015.11.18 11:57:06 Střední Evropa (běžný čas)
| [
"info@webium.sk"
] | info@webium.sk |
e5aec4b02d12cbe033e4c663271b013101e6589c | 57c64723003e8228338b4d2314cb12c011c0f169 | /deprecated/levelset.py | 7f6b54b83ce38e09ccd85e165b0b22027acc04d8 | [] | no_license | gmaher/tcl_code | d02fa0cafb9aa491f1d5d6197cd94fd9d7dbd37c | 13c18dcdbe265490b3a47916cb22d904d79da54f | refs/heads/master | 2020-04-03T22:03:36.024349 | 2017-05-12T21:35:58 | 2017-05-12T21:35:58 | 56,552,391 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,287 | py | import SimpleITK as sitk
from utility import *
import plotly as py
###########################
# Set some input parameters
###########################
sliceid = 50
impath = '/home/marsdenlab/Dropbox/vascular_data/OSMSC0006/OSMSC0006-cm.mha'
xstart = 200
ystart = 10
dim = 64
sigma = 0.1
seedx = dim/2
seedy = dim/2
############################
# Load image and get patch
############################
reader = sitk.ImageFileReader()
reader.SetFileName(impath)
img = reader.Execute()
print img.GetSize()
patch = img[xstart:xstart+dim, ystart:ystart+dim,sliceid]
print patch
print type(patch)
np_patch = sitk.GetArrayFromImage(patch)
#heatmap(np_patch, fn='./plots/patch.html', title='image')
##########################
# Compute feature image
##########################
gradMagFilter = sitk.GradientMagnitudeRecursiveGaussianImageFilter()
gradMagFilter.SetSigma(sigma)
filt_patch = gradMagFilter.Execute(patch)
rescaleFilter = sitk.RescaleIntensityImageFilter()
filt_patch = rescaleFilter.Execute(filt_patch, 0, 1)
np_patch = sitk.GetArrayFromImage(filt_patch)
heatmap(np_patch, fn='./plots/blur.html', title='gradmag')
###############################
# Create initialization image
###############################
seed_img = sitk.Image(dim,dim,sitk.sitkUInt8)
seed_img.SetSpacing(patch.GetSpacing())
seed_img.SetOrigin(patch.GetOrigin())
seed_img.SetDirection(patch.GetDirection())
seed_img[seedx,seedy] = 1
distance = sitk.SignedMaurerDistanceMapImageFilter()
distance.InsideIsPositiveOff()
distance.UseImageSpacingOn()
dis_img = distance.Execute(seed_img)
np_patch = sitk.GetArrayFromImage(dis_img)
#heatmap(np_patch, fn='./plots/distance.html')
init_img = sitk.BinaryThreshold(dis_img, -1000, 10)
init_img = sitk.Cast(init_img, filt_patch.GetPixelIDValue())*-1+0.5
np_patch = sitk.GetArrayFromImage(init_img)
heatmap(np_patch, fn='./plots/init.html')
#####################################
# Run GeodesicActiveContour level set
#####################################
gdac = sitk.GeodesicActiveContourLevelSetImageFilter()
gdac_img = gdac.Execute(init_img, filt_patch, 0.002, -2.0, 1.0, 1.0, 1000, False)
print gdac.GetElapsedIterations()
print gdac.GetRMSChange()
gdac_patch = sitk.GetArrayFromImage(gdac_img)
heatmap(gdac_patch, fn='./plots/gdac.html', title='levelset')
| [
"gmaher2@hotmail.com"
] | gmaher2@hotmail.com |
f5b03bd3ee32d9828c0d98b5d4816615fc75d3ec | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_21190.py | 3a5c164eb892a8ba8703cc71a1a8a76d07736d16 | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 175 | py | # Beautiful Soup conversion of Unicode characters to HTML entities
from bs4 import BeautifulSoup
soup = BeautifulSoup(html_doc)
print(soup.prettify(formatter="html"))
| [
"ubuntu@ip-172-31-7-228.us-west-2.compute.internal"
] | ubuntu@ip-172-31-7-228.us-west-2.compute.internal |
59fd1d2c4e96308cb0779dd99f018e612155737c | 94724578994ab1438dcefb51b7ef4d8570da5d4c | /z42/z42/lib/heartbeat.py | 0abfd51795aeb2616810c5976bd73069d5e46a41 | [] | no_license | PegasusWang/collection_python | 6648d83203634abf44fd42c0b37b0bf7cc406d8f | 9ef019a737a0817860d3184924c67a0833bd1252 | refs/heads/master | 2023-09-01T23:15:39.813635 | 2023-08-24T06:46:12 | 2023-08-24T06:46:12 | 43,693,872 | 130 | 90 | null | 2021-04-26T15:12:55 | 2015-10-05T15:28:15 | JavaScript | UTF-8 | Python | false | false | 1,422 | py |
#coding:utf-8
import os
from threading import Timer
import socket
import sys
import requests
from datetime import datetime
def sendmail(to, subject, html):
url = 'https://sendcloud.sohu.com/webapi/mail.send.xml'
params = {
'api_user': 'postmaster@42.sendcloud.org',
'api_key' : 'kMCzqBPv',
'to' : to,
'from' : 'alert@42.sendcloud.org',
'fromname' : '42btc',
'subject' : subject,
'html': html
}
r = requests.post(url, data=params)
if r.text.find('error') != -1:
return r.text
class Heartbeat(object):
def __init__(self, interval=60):
self._quit = None
self._interval = interval
def quit(self, func):
self._quit = func
return func
def _sendmail(self):
title = '%s : %s %s'%(
socket.gethostname(),
' '.join(sys.argv),
datetime.now(),
)
html = """
%s
"""%title
#sendmail('42btc-alert@googlegroups.com', '进程自杀 : %s' % title, html)
def is_alive(self, func):
def _():
if not func():
if self._quit is not None:
self._quit()
self._sendmail()
os.kill(os.getpid(), 9)
else:
Timer(self._interval, _).start()
Timer(self._interval+60, _).start()
return _
heartbeat = Heartbeat(5)
| [
"tianma201211@gmail.com"
] | tianma201211@gmail.com |
b86130502764734456319cc9163ee400ecd16c61 | 99ca151c59afd9c0e7091b6919768448e40f88a2 | /numpy_ex1.py | 88f8860666a2f9c6e91be892b051a4713d8161c4 | [] | no_license | zainabnazari/Python_note | 1b6a454f6e7b3aca998d87a201823a600ec28815 | 3beb52beb3a0ebe17a6ac8c5695670e9dde59269 | refs/heads/main | 2023-02-10T22:32:33.160428 | 2021-01-12T18:36:54 | 2021-01-12T18:36:54 | 304,724,221 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 935 | py | # file name: numpy_ex1.py
# Demo: contrast Python list operators with NumPy element-wise arithmetic.
list1=[1,2,3,4]
list2=[1,2,3,4]
list3=[[1,2,3,4],[1,2,3,4]]
# Lists do not define element-wise multiplication, so this raises TypeError.
#print("list1*list2= ",list1*list2) # this will give error, the operation of multiplication on lists is not defined!
# For lists, + is concatenation, not addition.
print("list1+list2= ",list1+list2)
print("list3+list1= ",list3+list1)
import numpy as np
numpyarray1=np.array([1,2,3,4])
numpyarray2=np.array([1,2,3,4])
numpyarray3=np.array([[1,2,3,4],[1,2,3,4]])
# NumPy arrays apply * and + element-wise; the 2-D array broadcasts
# against the 1-D array row by row.
print("numpyarray1*numpyarray2= ", numpyarray1*numpyarray2)
print("numpyarray1+numpyarray2= ", numpyarray1+numpyarray2)
print("numpyarray3+numpyarray1= ", numpyarray3+numpyarray1)
print("numpyarray3*numpyarray1= ", numpyarray3*numpyarray1)
'''
output:
list1+list2= [1, 2, 3, 4, 1, 2, 3, 4]
list3+list1= [[1, 2, 3, 4], [1, 2, 3, 4], 1, 2, 3, 4]
numpyarray1*numpyarray2= [ 1 4 9 16]
numpyarray1+numpyarray2= [2 4 6 8]
numpyarray3+numpyarray1= [[2 4 6 8]
[2 4 6 8]]
numpyarray3*numpyarray1= [[ 1 4 9 16]
[ 1 4 9 16]]
'''
| [
"nazari.zainab@gmail.com"
] | nazari.zainab@gmail.com |
3bdb764fcca8a052da1946ee71d5ca3a8d849cd5 | eca0530054fcae936bf6b4b9aaf2fa5201d45588 | /final/login.py | a59d31d84d2a2881987fa8bd2c10e8450e96de21 | [] | no_license | benaka-tech/sringeri | d2a0e628485c9c221f753de345c4cb31e03c0f3e | 99b334e8b84c00a6160749dc7964a3741021c10d | refs/heads/main | 2023-03-15T13:57:14.780184 | 2021-03-12T10:52:49 | 2021-03-12T10:52:49 | 347,124,434 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,138 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'login.ui'
#
# Created by: PyQt5 UI code generator 5.15.2
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
import mysql.connector as mc
from main_screen import Ui_MainWindow1
from datetime import datetime
class Ui_MainWindow(object):
    """Login window: Qt-Designer-generated layout plus a MySQL-backed
    credential check that opens the main screen on success."""

    def setupUi(self, MainWindow):
        """Build the static widget tree (generated by pyuic5)."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.setFixedSize(876, 391)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.label = QtWidgets.QLabel(self.centralwidget)
        self.label.setGeometry(QtCore.QRect(210, 10, 311, 111))
        self.label.setText("")
        self.label.setPixmap(QtGui.QPixmap(":/newPrefix/logo_colour.png"))
        self.label.setScaledContents(True)
        self.label.setObjectName("label")
        self.label_3 = QtWidgets.QLabel(self.centralwidget)
        self.label_3.setGeometry(QtCore.QRect(20, -10, 161, 141))
        self.label_3.setText("")
        self.label_3.setPixmap(QtGui.QPixmap(":/newPrefix/QDkO7nK6-removebg-preview.png"))
        self.label_3.setScaledContents(True)
        self.label_3.setObjectName("label_3")
        self.label_4 = QtWidgets.QLabel(self.centralwidget)
        self.label_4.setGeometry(QtCore.QRect(540, 10, 171, 111))
        self.label_4.setText("")
        self.label_4.setPixmap(QtGui.QPixmap(":/newPrefix/download__2_-removebg-preview.png"))
        self.label_4.setScaledContents(True)
        self.label_4.setObjectName("label_4")
        self.label_2 = QtWidgets.QLabel(self.centralwidget)
        self.label_2.setGeometry(QtCore.QRect(730, 10, 121, 121))
        self.label_2.setText("")
        self.label_2.setPixmap(QtGui.QPixmap(":/newPrefix/aic-jitf-logo (1).png"))
        self.label_2.setScaledContents(True)
        self.label_2.setObjectName("label_2")
        self.groupBox = QtWidgets.QGroupBox(self.centralwidget)
        self.groupBox.setGeometry(QtCore.QRect(80, 130, 661, 171))
        font = QtGui.QFont()
        font.setPointSize(10)
        self.groupBox.setFont(font)
        self.groupBox.setObjectName("groupBox")
        self.formLayoutWidget = QtWidgets.QWidget(self.groupBox)
        self.formLayoutWidget.setGeometry(QtCore.QRect(39, 40, 591, 81))
        self.formLayoutWidget.setObjectName("formLayoutWidget")
        self.formLayout = QtWidgets.QFormLayout(self.formLayoutWidget)
        self.formLayout.setContentsMargins(0, 0, 0, 0)
        self.formLayout.setVerticalSpacing(25)
        self.formLayout.setObjectName("formLayout")
        self.label_5 = QtWidgets.QLabel(self.formLayoutWidget)
        self.label_5.setObjectName("label_5")
        self.formLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.label_5)
        self.lineEdit = QtWidgets.QLineEdit(self.formLayoutWidget)
        self.lineEdit.setObjectName("lineEdit")
        self.formLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.lineEdit)
        self.label_6 = QtWidgets.QLabel(self.formLayoutWidget)
        self.label_6.setObjectName("label_6")
        self.formLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.label_6)
        self.lineEdit_2 = QtWidgets.QLineEdit(self.formLayoutWidget)
        self.lineEdit_2.setEchoMode(QtWidgets.QLineEdit.Password)
        self.lineEdit_2.setObjectName("lineEdit_2")
        self.formLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.lineEdit_2)
        self.pushButton = QtWidgets.QPushButton(self.groupBox)
        self.pushButton.setGeometry(QtCore.QRect(300, 130, 75, 23))
        self.pushButton.setObjectName("pushButton")
        self.pushButton.clicked.connect(self.login)
        self.label_7 = QtWidgets.QLabel(self.centralwidget)
        self.label_7.setGeometry(QtCore.QRect(80, 320, 671, 41))
        font = QtGui.QFont()
        font.setPointSize(10)
        self.label_7.setFont(font)
        self.label_7.setText("")
        self.label_7.setObjectName("label_7")
        MainWindow.setCentralWidget(self.centralwidget)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)

        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def login(self):
        """Validate the entered credentials against the `user` table.

        On a match, show the main screen and hide the login window; on a
        mismatch, display an error in label_7.

        Uses a parameterized query so user input cannot inject SQL (the
        previous version concatenated the raw field text into the SQL
        string). NOTE(review): passwords appear to be stored and
        compared in plain text — confirm and consider hashing.
        """
        try:
            username = self.lineEdit.text()
            password = self.lineEdit_2.text()
            mydb = mc.connect(
                host="localhost",
                user="root",
                password="",
                database="project"
            )
            mycursor = mydb.cursor()
            # %s placeholders let the driver escape the values safely.
            mycursor.execute(
                "SELECT username,password from user where username like %s and password like %s",
                (username, password))
            result = mycursor.fetchone()
            if result is None:
                self.label_7.setText("Incorrect Email & Password")
            else:
                self.label_7.setText("You are logged in")
                self.window = QtWidgets.QMainWindow()
                self.ui = Ui_MainWindow1()
                self.ui.setupUi(self.window)
                # Relies on the module-level MainWindow created under
                # the __main__ guard at the bottom of this file.
                MainWindow.hide()
                self.window.show()
        except mc.Error as e:
            print(e)
            # NOTE(review): label_5 is the "USERNAME" caption; label_7
            # (the status line) may have been intended here — confirm.
            self.label_5.setText("Error")

    def retranslateUi(self, MainWindow):
        """Apply the translatable UI strings (generated by pyuic5)."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "Login Screen"))
        self.groupBox.setTitle(_translate("MainWindow", "LOGIN"))
        self.label_5.setText(_translate("MainWindow", "USERNAME"))
        self.label_6.setText(_translate("MainWindow", "PASSWORD"))
        self.pushButton.setText(_translate("MainWindow", "LOGIN"))
import img_rc  # registers the Qt resource bundle (:/newPrefix/... pixmaps) used in setupUi
if __name__ == "__main__":
    import sys
    app = QtWidgets.QApplication(sys.argv)
    # Module-level name: Ui_MainWindow.login() hides this window on success.
    MainWindow = QtWidgets.QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(MainWindow)
    MainWindow.show()
    sys.exit(app.exec_())
| [
"cjayanth35@gmail.com"
] | cjayanth35@gmail.com |
a7e045ed5e51609b50acf24a6e689a58e64dd02e | 635bac115b708864707bbc9a684ce274e88d33a7 | /Tools/Scripts/libraries/webkitscmpy/webkitscmpy/program/canonicalize/__init__.py | 1687704b4b97f78d586f0b29853dc7ff904e5baf | [] | no_license | iglunix/WebKit | 131807b5c24f1644d8a5d2ffece440bf1b1ed707 | 92e63de4a92736360ecfd491a3e0e3b28f753b75 | refs/heads/main | 2023-07-03T08:30:16.089008 | 2021-03-30T17:34:53 | 2021-03-30T17:34:53 | 353,087,887 | 1 | 0 | null | 2021-03-30T17:36:18 | 2021-03-30T17:36:17 | null | UTF-8 | Python | false | false | 6,750 | py | # Copyright (C) 2020, 2021 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import os
import tempfile
import subprocess
import sys
from webkitcorepy import arguments, run, string_utils
from webkitscmpy import log
from ..command import Command
class Canonicalize(Command):
name = 'canonicalize'
help = 'Take the set of commits which have not yet been pushed and edit history to normalize the ' +\
'committers with existing contributor mapping and add identifiers to commit messages'
@classmethod
def parser(cls, parser, loggers=None):
output_args = arguments.LoggingGroup(
parser,
loggers=loggers,
help='{} amount of logging and `git rebase` information displayed'
)
output_args.add_argument(
'--identifier', '--no-identifier',
help='Add in the identifier to commit messages, true by default',
action=arguments.NoAction,
dest='identifier',
default=True,
)
output_args.add_argument(
'--remote',
help='Compare against a different remote',
dest='remote',
default='origin',
)
output_args.add_argument(
'--number', '-n', type=int,
help='Number of commits to be canonicalized, regardless of the state of the remote',
dest='number',
default=None,
)
@classmethod
def main(cls, args, repository, identifier_template=None, **kwargs):
if not repository.path:
sys.stderr.write('Cannot canonicalize commits on a remote repository\n')
return 1
if not repository.is_git:
sys.stderr.write('Commits can only be canonicalized on a Git repository\n')
return 1
branch = repository.branch
if not branch:
sys.stderr.write('Failed to determine current branch\n')
return -1
num_commits_to_canonicalize = args.number
if not num_commits_to_canonicalize:
result = run([
repository.executable(), 'rev-list',
'--count', '--no-merges',
'{remote}/{branch}..{branch}'.format(remote=args.remote, branch=branch),
], capture_output=True, cwd=repository.root_path)
if result.returncode:
sys.stderr.write('Failed to find local commits\n')
return -1
num_commits_to_canonicalize = int(result.stdout.rstrip())
if num_commits_to_canonicalize <= 0:
print('No local commits to be edited')
return 0
log.warning('{} to be editted...'.format(string_utils.pluralize(num_commits_to_canonicalize, 'commit')))
base = repository.find('{}~{}'.format(branch, num_commits_to_canonicalize))
log.info('Base commit is {} (ref {})'.format(base, base.hash))
log.debug('Saving contributors to temp file to be picked up by child processes')
contributors = os.path.join(tempfile.gettempdir(), '{}-contributors.json'.format(os.getpid()))
try:
with open(contributors, 'w') as file:
repository.contributors.save(file)
message_filter = [
'--msg-filter',
"{} {} '{}'".format(
sys.executable,
os.path.join(os.path.dirname(__file__), 'message.py'),
identifier_template or 'Identifier: {}',
),
] if args.identifier else []
with open(os.devnull, 'w') as devnull:
subprocess.check_call([
repository.executable(), 'filter-branch', '-f',
'--env-filter', '''{overwrite_message}
committerOutput=$({python} {committer_py} {contributor_json})
KEY=''
VALUE=''
for word in $committerOutput; do
if [[ $word == GIT_* ]] ; then
if [[ $KEY == GIT_* ]] ; then
{setting_message}
printf -v $KEY "${{VALUE::$((${{#VALUE}} - 1))}}"
KEY=''
VALUE=''
fi
fi
if [[ "$KEY" == "" ]] ; then
KEY="$word"
else
VALUE="$VALUE$word "
fi
done
if [[ $KEY == GIT_* ]] ; then
{setting_message}
printf -v $KEY "${{VALUE::$((${{#VALUE}} - 1))}}"
fi'''.format(
overwrite_message='' if log.level > logging.INFO else 'echo "Overwriting $GIT_COMMIT"',
python=sys.executable,
committer_py=os.path.join(os.path.dirname(__file__), 'committer.py'),
contributor_json=contributors,
setting_message='' if log.level > logging.DEBUG else 'echo " $KEY=$VALUE"',
),
] + message_filter + ['{}...{}'.format(branch, base.hash)],
cwd=repository.root_path,
env={'FILTER_BRANCH_SQUELCH_WARNING': '1', 'PYTHONPATH': ':'.join(sys.path)},
stdout=devnull if log.level > logging.WARNING else None,
stderr=devnull if log.level > logging.WARNING else None,
)
except subprocess.CalledProcessError:
sys.stderr.write('Failed to modify local commit messages\n')
return -1
finally:
os.remove(contributors)
print('{} successfully canonicalized!'.format(string_utils.pluralize(num_commits_to_canonicalize, 'commit')))
return 0
| [
"jbedard@apple.com"
] | jbedard@apple.com |
c7339fef2a47d86a6fbcf65ffa3761ad4a3d38bd | 0e8dd5901b1f98934c44a85b133eb7ca6f44b4b9 | /osr2mp4/ImageProcess/PrepareFrames/RankingScreens/ModIcons.py | c87a50b7cd88872d95e0d5011ce4159e07f419f2 | [] | no_license | Hazuki-san/osr2mp4-core | dbd2f4d44a3d0e90974214c97b434dcbb2eedd18 | 83dc5c47bc73dcb0b4d4b6a5ae1924771c13c623 | refs/heads/master | 2022-11-24T13:41:15.703261 | 2020-07-03T14:00:54 | 2020-07-03T14:00:54 | 279,099,127 | 1 | 0 | null | 2020-07-12T16:02:35 | 2020-07-12T16:02:34 | null | UTF-8 | Python | false | false | 676 | py | from osrparse.enums import Mod
from ...PrepareFrames.YImage import YImage
selectionmod = "selection-mod-"
def prepare_modicons(scale, settings):
    """Load the "selection-mod-<name>" skin icon for every supported mod.

    Returns a dict mapping each osrparse ``Mod`` member to the image of
    its skin element, scaled by ``scale``.
    """
    suffixes = {
        Mod.Perfect: "perfect",
        Mod.Autopilot: "pilot",
        Mod.Relax: "relax",
        Mod.SpunOut: "spunout",
        Mod.Flashlight: "flashlight",
        Mod.Hidden: "hidden",
        Mod.Nightcore: "nightcore",
        Mod.DoubleTime: "doubletime",
        Mod.SuddenDeath: "suddendeath",
        Mod.HardRock: "hardrock",
        Mod.HalfTime: "halftime",
        Mod.NoFail: "nofail",
        Mod.Easy: "easy",
    }
    return {
        mod: YImage(selectionmod + suffix, settings, scale).img
        for mod, suffix in suffixes.items()
    }
return modframes
| [
"snkraishin87@gmail.com"
] | snkraishin87@gmail.com |
1074bb30ddb6ffd71876e31fdc25fe977ac16661 | 1a04e02811c844ecf53cc041b104667e5c987a09 | /vgrabber/model/grade.py | 4e1a3f467bfe686db281ef1013fcefb6f3d90834 | [] | no_license | janjanech/vzdelavanieGui | dff17add6e6946063597d4c1eba5d6d76b6f5374 | b2015f41f7cb1be1ecccf1c4778a91f43f8fba12 | refs/heads/master | 2021-10-24T16:21:24.911817 | 2019-01-15T17:03:49 | 2019-01-15T17:03:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 748 | py | from enum import Enum, auto
from .files import FileList
from .finalexam import FinalExam
class Grade(Enum):
    """Final grade on the standard A–FX scale (FX is the failing grade)."""
    A = auto()
    B = auto()
    C = auto()
    D = auto()
    E = auto()
    FX = auto()
class StudentGrade:
    """A grade one student received on one final exam, with attachments."""
    final_exam: FinalExam
    grade: Grade
    points: float
    files: FileList

    def __init__(self, subject, student, final_exam, grade):
        # NOTE(review): stored but never read inside this class —
        # presumably consumed elsewhere; confirm before removing.
        self.__subject = subject
        self.final_exam = final_exam
        self.grade = grade
        # Raw point score is not known at construction time.
        self.points = None
        self.files = FileList()
        self.student = student

    def __str__(self):
        return "<Grade {0} for final exam at {1}>".format(self.grade.name, self.final_exam.date_time.isoformat())

    def clear_files(self):
        # Drop all attached files (delegates to the project FileList).
        self.files.clear()
| [
"janik@janik.ws"
] | janik@janik.ws |
0a466df321d2357b667e78d7b6f0c6b7799c7321 | 8c57a6e0f607fc5b0a1d601e4fa5d8e621d73dcc | /Sorting_algorithms/benchmark_sorting.py | 6d248cbcf52daef95addfe19a1415d699e8c6193 | [] | no_license | anoubhav/Data-Structures-and-Algorithms | eb3b0edd7df64e809bfadf41a86f3bf177965cae | d99bac42a86601570255bae85590fc2e485960fc | refs/heads/master | 2021-07-15T07:05:42.034648 | 2020-05-27T15:33:43 | 2020-05-27T15:33:43 | 144,583,921 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,182 | py | from selection_sort import selection_sort
from insertion_sort_swapping import insertion_sort_swap
from insertion_sort_assignment import insertion_sort_assignment
from bubble_sort import bubble_sort
from merge_sort import merge_sort
from quicksort3 import quicksort3
from time import clock
import random
def create_array(size = 2000, max_num = 1000):
    """Build a list of `size` random integers drawn uniformly from
    the inclusive range [0, max_num].

    (int, int) -> (list)
    """
    values = []
    for _ in range(size):
        values.append(random.randint(0, max_num))
    return values
def benchmark(n=None):
    """Benchmark the 6 sorting algorithms and print a timing table.

    Args:
        n: list of input sizes to benchmark; defaults to
           [10, 100, 1000, 5000, 10000]. (A None default replaces the
           original mutable default argument, which was shared between
           calls.)
    """
    # time.clock was removed in Python 3.8; perf_counter is the
    # recommended high-resolution replacement.
    from time import perf_counter
    if n is None:
        n = [10, 100, 1000, 5000, 10000]
    # One (key, callable) pair per algorithm replaces six copies of the
    # same generate/time/record boilerplate. quicksort3 takes explicit
    # bounds, so it gets a small adapter.
    algorithms = [
        ('bubble', bubble_sort),
        ('selection', selection_sort),
        ('merge', merge_sort),
        ('insertion_swap', insertion_sort_swap),
        ('insertion_ass', insertion_sort_assignment),
        ('quicksort3', lambda arr: quicksort3(arr, 0, len(arr))),
    ]
    times = {name: [] for name, _ in algorithms}
    for size in n:
        for name, sort_fn in algorithms:
            # Fresh random input per algorithm so each sorts unsorted data.
            a = create_array(size=size, max_num=10 * size)
            t0 = perf_counter()
            sort_fn(a)
            t1 = perf_counter()
            times[name].append(t1 - t0)
    print(98*'_')
    print("n\tBubble\t Insertion(s)\t\tInsertion(a)\t Merge\tQuicksort3\tSelection")
    print(98*'_')
    for i, size in enumerate(n):
        print("%d\t%5.4f\t  %5.4f\t\t   %5.4f\t %5.4f\t  %5.4f\t  %5.4f"%(size, times['bubble'][i], times['insertion_swap'][i], times['insertion_ass'][i], times['merge'][i], times['quicksort3'][i], times['selection'][i]))
benchmark(n = [10, 100])
| [
"anoubhav.agarwaal@gmail.com"
] | anoubhav.agarwaal@gmail.com |
a184a13a43f1725ecba70739affc5a1f2e1640e3 | e58c6f5ae956fe409c475e2745526c4c4451e509 | /TestCode/Spiders/scrapytest/logo/logo/settings.py | d7465747e8870ed7cb1f27e7cb0f825f369d7fee | [] | no_license | pangxie1987/uiautomator2 | 6d67dd3beeaba5ab3efa85bf6b8eabcad70b17b8 | 9a818e3b9a68ba4006ec393d5ec095ee2d10572d | refs/heads/master | 2022-11-22T17:05:00.580781 | 2021-03-31T05:17:06 | 2021-03-31T05:17:06 | 216,848,204 | 2 | 2 | null | 2022-11-22T03:17:30 | 2019-10-22T15:31:04 | Python | UTF-8 | Python | false | false | 3,404 | py | # -*- coding: utf-8 -*-
# Scrapy settings for logo project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
# BOT_NAME = 'logo'
# SPIDER_MODULES = ['logo.spiders']
# NEWSPIDER_MODULE = 'logo.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'logo (+http://www.yourdomain.com)'
# Obey robots.txt rules
# Respect each target site's robots.txt directives when crawling.
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'logo.middlewares.LogoSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'logo.middlewares.LogoDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'logo.pipelines.LogoPipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
import os
# Active project settings (the commented template above is boilerplate).
BOT_NAME = 'logo'

SPIDER_MODULES = ['logo.spiders']
NEWSPIDER_MODULE = 'logo.spiders'
# Lower number = earlier in the pipeline: images are downloaded before
# the JSON export runs.
ITEM_PIPELINES={
    # 'sucai.pipelines.SucaiPipeline':1
    'logo.pipelines.JsonWithEncodingPipeline':2,
    'logo.pipelines.DownloadImagesPipeline':1
}
# Images are stored in a "picture" directory next to this package.
path = os.path.dirname(os.path.dirname(__file__))
IMAGES_STORE = os.path.join(path, 'picture')
"lpb.waln@outlook.com"
] | lpb.waln@outlook.com |
6b086af83c2477052676f8a6f31b94fa6ff34d25 | 5d0b6d45c23337d5d074c62081445e9963b92ba8 | /src/component_parser/ranges.py | c4ea461f747c57c8eb0212e7df4e8c81ae0fb1c3 | [
"MIT"
] | permissive | ghedwards/sublimetext-cfml | f944fb8f8c35b6acca0c4d0fdc8cec4e726442fd | 6b0ef8a325a21f0392b79346a5dd47b7c0d58f30 | refs/heads/master | 2021-08-26T07:06:57.033755 | 2017-11-13T17:49:47 | 2017-11-13T17:49:47 | 111,625,801 | 0 | 0 | null | 2017-11-22T02:21:35 | 2017-11-22T02:21:34 | null | UTF-8 | Python | false | false | 5,671 | py | import re
import collections
# A range type: `start`/`end` are regex sources delimiting the range,
# `child_ranges` names the range types that may open inside it, and
# `pop` controls whether the closing pattern is tried before ('first')
# or after ('last') the child-opening patterns when both could match.
RangeDefinition = collections.namedtuple('RangeDefinition', ['start', 'end', 'child_ranges', 'pop'])

# Range types that may nest inside most script-like contexts.
BASE_RANGES = [
    'comma',
    'semicolon',
    'curly_brackets',
    'line_comment',
    'multiline_comment',
    'parentheses',
    'square_brackets',
    'string_double',
    'string_single',
    'tag_comment'
]

# Subset allowed outside of script blocks (tag/text content).
NON_SCRIPT_RANGES = [
    'line_comment',
    'multiline_comment',
    'string_double',
    'string_single',
    'tag_comment'
]

# Grammar table: every known range type with its delimiters, children
# and pop ordering. Strings use `pop='last'` so escapes/hashes win over
# the closing quote.
RangeDefinitions = {
    'attributes': RangeDefinition(r'(?=.)', r'\{', BASE_RANGES, 'first'),
    'cfscript': RangeDefinition(r'(?=.)', r'\Z', BASE_RANGES, 'first'),
    'comma': RangeDefinition(r',', r'(?=.)', [], 'first'),
    'curly_brackets':RangeDefinition( r'\{', r'\}', BASE_RANGES, 'first'),
    'escaped_double_quote': RangeDefinition(r'""', r'(?=.)', [], 'first'),
    'escaped_hash': RangeDefinition(r'##', r'(?=.)', [], 'first'),
    'escaped_single_quote': RangeDefinition(r"''", r'(?=.)', [], 'first'),
    'hash': RangeDefinition(r'#', r'#', BASE_RANGES, 'first'),
    'line_comment': RangeDefinition(r'//', r'\n', [], 'first'),
    'multiline_comment': RangeDefinition(r'/\*', r'\*/', [], 'first'),
    'non_script': RangeDefinition(r'(?=.)', r'\Z', NON_SCRIPT_RANGES, 'first'),
    'parentheses': RangeDefinition(r'\(', r'\)', BASE_RANGES, 'first'),
    'semicolon': RangeDefinition(r';', r'(?=.)', [], 'first'),
    'square_brackets': RangeDefinition(r'\[', r'\]', BASE_RANGES, 'first'),
    'string_double': RangeDefinition(r'"', r'"', ['escaped_hash', 'hash', 'escaped_double_quote'], 'last'),
    'string_single': RangeDefinition(r"'", r"'", ['escaped_hash', 'hash', 'escaped_single_quote'], 'last'),
    'tag_comment': RangeDefinition(r'<!---', r'--->', [], 'first'),
}
# Pre-compile, for each range type, its opening regex plus a combined
# "end" regex whose named alternatives match either a child range
# opening (group named after the child) or this range's own closing
# pattern (group named 'pop'). re.S makes '.' match newlines.
RangeRegex = {}

for name, rd in RangeDefinitions.items():
    RangeRegex[name] = {
        'start': re.compile(rd.start, re.S)
    }

    patterns = []
    for cr in rd.child_ranges:
        crd = RangeDefinitions[cr]
        patterns.append((cr, crd.start))
    # Alternation order implements the pop policy: regex alternatives
    # are tried left-to-right, so 'first' lets the closer win ties.
    if rd.pop == 'first':
        patterns.insert(0, ('pop', rd.end))
    else:
        patterns.append(('pop', rd.end))
    RangeRegex[name]['end'] = re.compile('|'.join('(?P<{}>{})'.format(*p) for p in patterns), re.S)
class Range():
    """A named text span [start, end] that may contain nested ranges.

    Ranges form a tree via parent/children links; offsets are indices
    into the source text.
    """

    def __init__(self, name, start=None, end=None):
        self.name = name
        self.start = start
        self.end = end
        self.parent = None
        self.children = []

    def add_child(self, child_range):
        """Attach child_range beneath this range and set its parent."""
        child_range.parent = self
        self.children.append(child_range)

    def depth(self):
        """Number of ancestors above this range (root has depth 0)."""
        count = 0
        node = self
        while node.parent:
            node = node.parent
            count += 1
        return count

    def is_in_range(self, pt, names=None):
        """True if pt lies inside a range whose name is in `names`.

        `names` defaults to this range type's allowed children.
        """
        if names is None:
            names = RangeDefinitions[self.name].child_ranges
        if self.name in names and self.start <= pt < self.end:
            return True
        return any(child.is_in_range(pt, names) for child in self.children)

    def range_at_pt(self, pt):
        """The range (self or a descendant) starting exactly at pt, or None."""
        if not (self.start <= pt <= self.end):
            return None
        if pt == self.start:
            return self
        for child in self.children:
            found = child.range_at_pt(pt)
            if found:
                return found
        return None

    def deepest_range(self, pt):
        """The innermost range containing pt, or None if pt is outside."""
        if not (self.start <= pt <= self.end):
            return None
        for child in self.children:
            deeper = child.deepest_range(pt)
            if deeper:
                return deeper
        return self

    def next_child_range(self, pt, names=None):
        """First direct child starting at or after pt (optionally filtered
        to the given names), or None."""
        if not (self.start <= pt <= self.end):
            return None
        for child in self.children:
            if child.start >= pt and (names is None or child.name in names):
                return child
        return None

    def __repr__(self):
        txt = '(' + self.name + ': '
        txt += 'start=' + str(self.start)
        txt += ', end=' + str(self.end)
        if len(self.children) > 0:
            txt += ', children=['
            for c in self.children:
                child_txt = str(c).replace('\n', '\n    ')
                txt += '\n    ' + child_txt
            txt += '\n]'
        txt += ')'
        return txt
class RangeWalker():
    """Scan source text from a position and build a tree of Ranges.

    Uses the pre-compiled RangeRegex tables: each step searches the
    current range's combined "end" regex, which either opens a child
    range (named group) or closes the current one (group 'pop').
    """
    def __init__(self, src_txt, pos=0, name='cfscript'):
        # The text to scan, the offset to start at, and the range type
        # to treat as the root context.
        self.src_txt = src_txt
        self.pos = pos
        self.name = name

    def walk(self):
        """Return the root Range covering src_txt from pos, or None if
        the root range type's opening pattern does not match there."""
        opening_match = RangeRegex[self.name]['start'].match(self.src_txt, self.pos)
        if opening_match is None:
            return None

        range_to_walk = Range(self.name, self.pos)
        pos = opening_match.end()

        # Walk up and down the tree: entering a child pushes, matching
        # 'pop' closes the current range and returns to its parent.
        current_range = range_to_walk
        while current_range:
            next_match = RangeRegex[current_range.name]['end'].search(self.src_txt, pos)

            if next_match is None:
                # Unterminated range: close it and every open ancestor
                # at the end of the text.
                current_range.end = len(self.src_txt)
                while current_range.parent:
                    current_range.parent.end = len(self.src_txt)
                    current_range = current_range.parent
                break

            # lastgroup is the name of the alternative that matched.
            name = next_match.lastgroup
            pos = next_match.end()

            if name == 'pop':
                current_range.end = pos
                current_range = current_range.parent
                continue

            # A child range opened; descend into it. Note the child is
            # created with end pre-set to the opening match's end — it
            # is overwritten when the child's own 'pop' fires.
            child_range = Range(name, next_match.start(), next_match.end())
            current_range.add_child(child_range)
            current_range = child_range

        return range_to_walk
| [
"jcberquist@outlook.com"
] | jcberquist@outlook.com |
cc5dca56154fe17edb6689970d5221ff59f86751 | 7ef5bb39938e669b5571a097f01d96ee53458ad6 | /maximal_rectangle/solution.py | d7dbfe832161fdefa7ae2748e9dfb64f82dc6ddc | [
"BSD-2-Clause"
] | permissive | mahimadubey/leetcode-python | 61cd135515b26644197b4736a92a53bb1a5870a6 | 38acc65fa4315f86acb62874ca488620c5d77e17 | refs/heads/master | 2020-08-29T09:27:45.232412 | 2019-10-28T08:06:52 | 2019-10-28T08:06:52 | 217,993,547 | 0 | 0 | BSD-2-Clause | 2019-10-28T07:55:38 | 2019-10-28T07:55:38 | null | UTF-8 | Python | false | false | 1,258 | py | class Solution:
# @param matrix, a list of lists of 1 length string
# @return an integer
def maximalRectangle(self, matrix):
# Make a list of heights
if not matrix:
return 0
n = len(matrix)
if not matrix[0]:
return 0
m = len(matrix[0])
hist = [[0 for j in range(m)] for i in range(n)]
for i in range(n):
for j in range(m):
if i == 0:
hist[i][j] = int(matrix[i][j])
else:
if matrix[i][j] == '1':
hist[i][j] = 1 + hist[i - 1][j]
res = 0
for row in hist:
res = max(res, self.max_hist_rect(row))
return res
def max_hist_rect(self, heights):
if not heights:
return 0
n = len(heights)
max_area = heights[0]
stack = []
for i in range(n + 1):
while stack and (i == n or heights[stack[-1]] > heights[i]):
h = heights[stack.pop()]
if stack:
w = i - stack[-1] - 1
else:
w = i
max_area = max(max_area, h * w)
stack.append(i)
return max_area
| [
"shichao.an@nyu.edu"
] | shichao.an@nyu.edu |
9c4c802cf858874d37d665db3ace105775e64f83 | 8afb5afd38548c631f6f9536846039ef6cb297b9 | /MY_REPOS/INTERVIEW-PREP-COMPLETE/Leetcode/215.py | 9ea3591e57858d86cf92438005c6cef00ab4ab09 | [
"MIT"
] | permissive | bgoonz/UsefulResourceRepo2.0 | d87588ffd668bb498f7787b896cc7b20d83ce0ad | 2cb4b45dd14a230aa0e800042e893f8dfb23beda | refs/heads/master | 2023-03-17T01:22:05.254751 | 2022-08-11T03:18:22 | 2022-08-11T03:18:22 | 382,628,698 | 10 | 12 | MIT | 2022-10-10T14:13:54 | 2021-07-03T13:58:52 | null | UTF-8 | Python | false | false | 976 | py | import heapq
class Solution:
def findKthLargest(self, nums: List[int], k: int) -> int:
def quickSelect(low, high, k):
i = low
for j in range(low, high):
if nums[j] <= nums[high]:
nums[i], nums[j] = nums[j], nums[i]
i += 1
nums[i], nums[high] = nums[high], nums[i]
count = high - i + 1
if count == k:
return nums[i]
if count > k:
return quickSelect(i + 1, high, k)
else:
return quickSelect(low, i - 1, k - count)
return quickSelect(0, len(nums) - 1, k)
# Alternative heap-based solution below: O(n log n) time to push all n elements.
class Solution:
    """LeetCode 215: find the k-th largest element with a bounded heap."""

    # Annotation is quoted because this module never imports
    # typing.List; evaluating it unquoted raises NameError.
    def findKthLargest(self, nums: "List[int]", k: int) -> int:
        """Return the k-th largest element of nums (1-indexed).

        heapq.nlargest maintains a heap of only k elements — O(n log k)
        time and O(k) extra space — instead of pushing all n elements
        (with unneeded index tie-breakers) onto one big heap.
        """
        return heapq.nlargest(k, nums)[-1]
| [
"bryan.guner@gmail.com"
] | bryan.guner@gmail.com |
91935e9f77a4d8bc3c373d76ca627484057b389c | 53c3462ff265b6273f4a4fa17f6d59688f69def0 | /剑指offer/41_FindContinuousSequence.py | d3586055cdb08c6798a50f5d7375e5ac92d8c85a | [] | no_license | 17764591637/jianzhi_offer | b76e69a3ecb2174676da2c8d8d3372a3fc27b5c4 | 27e420ee302d5ab6512ecfdb8d469b043fb7102d | refs/heads/master | 2023-08-03T01:32:51.588472 | 2019-10-13T07:56:21 | 2019-10-13T07:56:21 | 197,692,548 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 740 | py | '''
他在想究竟有多少种连续的正数序列的和为100(至少包括两个数)。
没多久,他就得到另一组连续正数和为100的序列:18,19,20,21,22。
现在把问题交给你,你能不能也很快的找出所有和为S的连续正数序列?
'''
class Solution:
def FindContinuousSequence(self, tsum):
# write code here
res = []
for i in range(1,int(tsum/2)+1):
for j in range(i,int(tsum/2)+2):
sum_ = (j+i)*(j-i+1)/2
if sum_>tsum:
break
elif sum_ == tsum:
res.append(list(range(i,j+1)))
return res
s = Solution()
res = s.FindContinuousSequence(100)
print(res) | [
"17764591637@163.com"
] | 17764591637@163.com |
4c55379b54e9cc451df5d9f8c31bbba8c65872df | e72265a8f523cd76e75ac3832e3236917746c96a | /dawp2020/hy-data-analysis-with-python-2020/part01-e06_triple_square/src/triple_square.py | 3e16029c04371d878c0a48f86024b73b5e491f6b | [
"MIT"
] | permissive | ored95/data-analysis-course | 9bde67f489a16b94f376427331a24efc330877ed | f61a953769b8e7c502f2bec28158ec1bd344f72a | refs/heads/main | 2023-04-07T05:19:22.044343 | 2021-03-30T10:25:52 | 2021-03-30T10:25:52 | 346,290,289 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 328 | py | #!/usr/bin/env python3
def triple(x):
return 3 * x
def square(x):
return x ** 2
def main():
for x in range(1, 11):
x2 = square(x)
x3 = triple(x)
if x > 3:
break
print("triple({0})=={1} square({0})=={2}".format(x, x3, x2))
if __name__ == "__main__":
main()
| [
"stepup.ored@gmail.com"
] | stepup.ored@gmail.com |
7572a60f3a8fa50ee798286f14595c2f7f470535 | 99d7765da35926279c4a4fd7313d55908786f4b8 | /1/3/13458/13458.py | 6ec1bfd94fbbe36f57727f20730dcf70cbc1c8e3 | [
"MIT"
] | permissive | chr0m3/boj-codes | b8294c5d4d10a5af25b5276427bccd74d0866ef5 | d71d0a22d0a3ae62c225f382442461275f56fe8f | refs/heads/master | 2021-08-16T15:24:57.733088 | 2021-03-22T13:13:10 | 2021-03-22T13:13:10 | 91,523,558 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 337 | py | room = int(input())
people = list(input().split())
a, b = map(int, input().split())
sum = 0
for i in people:
if int(i) - a <= 0:
sum += 1
continue
else:
sum += 1
if (int(i) - a) % b:
sum += int((int(i) - a) / b) + 1
else:
sum += int((int(i) - a) / b)
print(sum)
| [
"chr0m3@users.noreply.github.com"
] | chr0m3@users.noreply.github.com |
bbd400842a93d924ddbd60b272e0bebefb7c0e98 | 12972f4d9e7de2c38e79ae911f2e7b125965cac9 | /virtual/lib/python3.6/site-packages/pip/_internal/commands/show.py | 7d714f74c91ba4f1811ca8d37ca6b73ce58d95b4 | [
"MIT"
] | permissive | Michellemukami/pitch | b33d0de81cc2a0dfe70ddc1e91affc88af63ff2b | aebb7736d18766343a5a295de0782aa175245c35 | refs/heads/master | 2022-10-22T03:55:33.364628 | 2019-08-07T10:15:10 | 2019-08-07T10:15:10 | 200,673,234 | 0 | 0 | null | 2022-09-16T18:07:53 | 2019-08-05T14:38:26 | Python | UTF-8 | Python | false | false | 6,261 | py | from __future__ import absolute_import
import logging
import os
from email.parser import FeedParser
from pip._vendor import pkg_resources
from pip._vendor.packaging.utils import canonicalize_name
from pip._internal.cli.base_command import Command
from pip._internal.cli.status_codes import ERROR, SUCCESS
logger = logging.getLogger(__name__)
class ShowCommand(Command):
    """
    Show information about one or more installed packages.

    The output is in RFC-compliant mail header format.
    """
    name = 'show'
    usage = """
      %prog [options] <package> ..."""
    summary = 'Show information about installed packages.'
    ignore_require_venv = True

    def __init__(self, *args, **kw):
        super(ShowCommand, self).__init__(*args, **kw)
        # Register the single command-specific flag (-f/--files).
        self.cmd_opts.add_option(
            '-f', '--files',
            dest='files',
            action='store_true',
            default=False,
            help='Show the full list of installed files for each package.')

        self.parser.insert_option_group(0, self.cmd_opts)

    def run(self, options, args):
        """Look up each named package and print its metadata.

        Returns SUCCESS when at least one package was found and printed,
        ERROR otherwise (no arguments, or nothing matched).
        """
        if not args:
            logger.warning('ERROR: Please provide a package name or names.')
            return ERROR

        matches = search_packages_info(args)
        printed = print_results(
            matches, list_files=options.files, verbose=options.verbose)
        return SUCCESS if printed else ERROR
def search_packages_info(query):
    """
    Gather details from installed distributions. Print distribution name,
    version, location, and installed files. Installed files requires a
    pip generated 'installed-files.txt' in the distributions '.egg-info'
    directory.

    Yields one dict per queried package that is actually installed; names
    are matched after PEP 503 canonicalization.
    """
    installed = {}
    for p in pkg_resources.working_set:
        installed[canonicalize_name(p.project_name)] = p

    query_names = [canonicalize_name(name) for name in query]

    for dist in [installed[pkg] for pkg in query_names if pkg in installed]:
        package = {
            'name': dist.project_name,
            'version': dist.version,
            'location': dist.location,
            'requires': [dep.project_name for dep in dist.requires()],
        }
        file_list = None
        metadata = None
        if isinstance(dist, pkg_resources.DistInfoDistribution):
            # RECORDs should be part of .dist-info metadatas
            if dist.has_metadata('RECORD'):
                lines = dist.get_metadata_lines('RECORD')
                # First CSV column of each RECORD row is the file path.
                paths = [line.split(',')[0] for line in lines]
                paths = [os.path.join(dist.location, p) for p in paths]
                file_list = [os.path.relpath(p, dist.location) for p in paths]

            if dist.has_metadata('METADATA'):
                metadata = dist.get_metadata('METADATA')
        else:
            # Otherwise use pip's log for .egg-info's
            if dist.has_metadata('installed-files.txt'):
                paths = dist.get_metadata_lines('installed-files.txt')
                paths = [os.path.join(dist.egg_info, p) for p in paths]
                file_list = [os.path.relpath(p, dist.location) for p in paths]

            if dist.has_metadata('PKG-INFO'):
                metadata = dist.get_metadata('PKG-INFO')

        if dist.has_metadata('entry_points.txt'):
            entry_points = dist.get_metadata_lines('entry_points.txt')
            package['entry_points'] = entry_points

        if dist.has_metadata('INSTALLER'):
            for line in dist.get_metadata_lines('INSTALLER'):
                if line.strip():
                    package['installer'] = line.strip()
                    break

        # @todo: Should pkg_resources.Distribution have a
        # `get_pkg_info` method?
        # BUGFIX: `metadata` stays None for a distribution that has neither
        # METADATA nor PKG-INFO; feeding None would raise TypeError.
        feed_parser = FeedParser()
        feed_parser.feed(metadata or '')
        pkg_info_dict = feed_parser.close()
        for key in ('metadata-version', 'summary',
                    'home-page', 'user', 'user-email', 'license'):
            package[key] = pkg_info_dict.get(key)

        # It looks like FeedParser cannot deal with repeated headers
        classifiers = []
        for line in (metadata or '').splitlines():
            if line.startswith('Classifier: '):
                classifiers.append(line[len('Classifier: '):])
        package['classifiers'] = classifiers

        if file_list:
            package['files'] = sorted(file_list)
        yield package
def print_results(distributions, list_files=False, verbose=False):
    """
    Print the information from installed distributions found.

    Returns True when at least one distribution was printed.
    """
    printed_any = False
    for index, dist in enumerate(distributions):
        printed_any = True
        if index > 0:
            # Separator between consecutive package reports.
            logger.info("---")

        name = dist.get('name', '')
        # Installed packages that list this one among their requirements.
        required_by = [
            pkg.project_name for pkg in pkg_resources.working_set
            if any(required.name == name for required in pkg.requires())
        ]

        logger.info("Name: %s", name)
        logger.info("Version: %s", dist.get('version', ''))
        logger.info("Summary: %s", dist.get('summary', ''))
        logger.info("Home-page: %s", dist.get('home-page', ''))
        logger.info("user: %s", dist.get('user', ''))
        logger.info("user-email: %s", dist.get('user-email', ''))
        logger.info("License: %s", dist.get('license', ''))
        logger.info("Location: %s", dist.get('location', ''))
        logger.info("Requires: %s", ', '.join(dist.get('requires', [])))
        logger.info("Required-by: %s", ', '.join(required_by))

        if verbose:
            logger.info("Metadata-Version: %s",
                        dist.get('metadata-version', ''))
            logger.info("Installer: %s", dist.get('installer', ''))
            logger.info("Classifiers:")
            for classifier in dist.get('classifiers', []):
                logger.info("  %s", classifier)
            logger.info("Entry-points:")
            for entry in dist.get('entry_points', []):
                logger.info("  %s", entry.strip())
        if list_files:
            logger.info("Files:")
            for line in dist.get('files', []):
                logger.info("  %s", line.strip())
            if "files" not in dist:
                logger.info("Cannot locate installed-files.txt")
    return printed_any
| [
"you@example.com"
] | you@example.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.