blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4ae1ad21b6595e4c39a070d0a503aa110afb1699 | ef80f50dcd8139754daa8049a7625d5d83f4c9bc | /src/python/dxl/function/qlambda.py | 8f9e3bc610e0d7571da566c4c9329d24d73122eb | [
"Apache-2.0"
] | permissive | Hong-Xiang/dxfunction | 4db2693a4818d3d7019518e242f0d013600c757a | d12b26453959c0b163175e78b8c593ac9b854133 | refs/heads/master | 2020-03-22T11:55:14.895089 | 2018-08-17T08:31:06 | 2018-08-17T08:31:06 | 140,004,555 | 1 | 0 | Apache-2.0 | 2018-07-30T07:22:39 | 2018-07-06T15:57:44 | Python | UTF-8 | Python | false | false | 701 | py | from dxl.data import Function
from .base import identity
__all__ = ['x']
class QuickLambda(Function):
    """Deferred-expression builder behind the exported ``x`` object.

    Every supported operation on a QuickLambda does not evaluate anything;
    it returns a *new* QuickLambda whose call applies the operation to the
    result of calling the wrapped function.  E.g. ``(x + 1)(2) == 3``.

    NOTE(review): assumes the ``Function`` base class (from dxl.data) makes
    instances callable via ``__call__`` — confirm in that package.
    """
    def __eq__(self, v):
        # Deliberately breaks the normal equality contract: instead of a
        # bool, return a deferred comparison ``lambda x: f(x) == v``.
        return QuickLambda(lambda x: self.__call__(x) == v)
    def __getattr__(self, *args, **kwargs):
        # Attribute access is deferred too: ``x.name`` becomes a function
        # that fetches ``.name`` from the eventual call result.
        return QuickLambda(lambda x: getattr(self.__call__(x), *args, **kwargs))
    def __getitem__(self, *args, **kwargs):
        # Deferred subscription: ``x['k']`` / ``x[0]`` on the call result.
        return QuickLambda(lambda x: self.__call__(x).__getitem__(*args, **kwargs))
    def __add__(self, v):
        # Deferred addition with a constant right operand.
        return QuickLambda(lambda x: self.__call__(x) + v)
    def __sub__(self, v):
        # Deferred subtraction with a constant right operand.
        return QuickLambda(lambda x: self.__call__(x) - v)
    def __hash__(self):
        # Identity-based hash is required because __eq__ is overridden and
        # no longer returns a bool; each instance hashes as itself.
        return hash(id(self))
# Public entry point (see __all__): ``x`` wraps the identity function, so an
# expression such as ``x + 1`` or ``x['key']`` builds a deferred callable.
x = QuickLambda(identity)
| [
"hx.hongxiang@gmail.com"
] | hx.hongxiang@gmail.com |
3b6b5fd21fc438a6a6c9b24df404072d992c1855 | 650ac1c6e86d8a07a0536c8066f04e2bdd96fdf6 | /tests/parser/functions/test_default_function.py | b91e6c033661d3875f94819cbe1f45689d180608 | [
"MIT"
] | permissive | griffind/vyper | 71e12426aff3e92de08a47166f7abb6c1f53991e | 1dd7d1f63face0eae1a965fb02dcbfdc9a26e3b9 | refs/heads/master | 2020-03-22T21:10:35.636914 | 2018-07-11T18:00:34 | 2018-07-11T18:00:34 | 140,663,273 | 0 | 0 | MIT | 2018-07-12T05:02:54 | 2018-07-12T05:02:54 | null | UTF-8 | Python | false | false | 1,963 | py |
def test_throw_on_sending(w3, assert_tx_failed, get_contract_with_gas_estimation):
    """A contract with no __default__ function must reject plain ether transfers."""
    code = """
x: public(int128)

@public
def __init__():
    self.x = 123
"""
    c = get_contract_with_gas_estimation(code)
    # Constructor ran and state is readable.
    assert c.x() == 123
    assert w3.eth.getBalance(c.address) == 0
    # Sending value with no matching function must revert...
    assert_tx_failed(lambda: w3.eth.sendTransaction({'to': c.address, 'value': w3.toWei(0.1, 'ether')}))
    # ...and the contract balance stays untouched.
    assert w3.eth.getBalance(c.address) == 0
def test_basic_default(w3, get_logs, get_contract_with_gas_estimation):
    """A @payable __default__ accepts ether and can log the sender."""
    code = """
Sent: event({sender: indexed(address)})

@public
@payable
def __default__():
    log.Sent(msg.sender)
"""
    c = get_contract_with_gas_estimation(code)
    # A plain value transfer triggers __default__ and emits the Sent event.
    logs = get_logs(w3.eth.sendTransaction({'to': c.address, 'value': 10**17}), c, 'Sent')
    assert w3.eth.accounts[0] == logs[0].args.sender
    # The 0.1 ether (10**17 wei) was accepted by the contract.
    assert w3.eth.getBalance(c.address) == w3.toWei(0.1, 'ether')
def test_basic_default_not_payable(w3, assert_tx_failed, get_contract_with_gas_estimation):
    """A __default__ without @payable must reject transactions carrying value."""
    code = """
Sent: event({sender: indexed(address)})

@public
def __default__():
    log.Sent(msg.sender)
"""
    c = get_contract_with_gas_estimation(code)
    # Non-payable default: sending 10**17 wei must revert.
    assert_tx_failed(lambda: w3.eth.sendTransaction({'to': c.address, 'value': 10**17}))
def test_multi_arg_default(assert_compile_failed, get_contract_with_gas_estimation):
    """__default__ may not declare parameters; compilation must fail."""
    code = """
@payable
@public
def __default__(arg1: int128):
    pass
"""
    assert_compile_failed(lambda: get_contract_with_gas_estimation(code))
def test_always_public(assert_compile_failed, get_contract_with_gas_estimation):
    """__default__ cannot be declared @private; compilation must fail."""
    code = """
@private
def __default__():
    pass
"""
    assert_compile_failed(lambda: get_contract_with_gas_estimation(code))
def test_always_public_2(assert_compile_failed, get_contract_with_gas_estimation):
    """__default__ without an explicit @public decorator must not compile."""
    code = """
Sent: event({sender: indexed(address)})

def __default__():
    log.Sent(msg.sender)
"""
    assert_compile_failed(lambda: get_contract_with_gas_estimation(code))
| [
"jacques@dilectum.co.za"
] | jacques@dilectum.co.za |
4883f437c4aee7cab8033d2b08e2385bb2bdd4ca | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/111/usersdata/197/63855/submittedfiles/av2_p3_m2.py | 41d3ec8899e55808a1277ff95ab57889e3544eaa | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,063 | py | # -*- coding: utf-8 -*-
import numpy as np
# Read the matrix order n and then fill an n x n matrix element by element
# from stdin (prompts are in Portuguese).
n=int(input('Digite o numero de linhas e colunas:'))
a=np.zeros((n,n))
for i in range (0,a.shape[0],1):
    for j in range (0,a.shape[1],1):
        # Each entry is read as an int but stored in a float64 array.
        a[i,j]=int(input('Digite o valor de um numero da matriz:'))
def somalinha(b):
    """Return the sum of each row of matrix ``b`` as a list.

    Args:
        b: a 2-D numpy array.

    Returns:
        A list with one total per row, in row order (same values the
        original manual accumulation produced).
    """
    # b.sum(axis=1) computes every row total in a single vectorized pass,
    # replacing the nested loops (which also initialized ``soma`` twice).
    return b.sum(axis=1).tolist()
# List of the row sums of the input matrix.
SOMAlinha=somalinha(a)
def somacoluna(b):
    """Return the sum of each column of matrix ``b`` as a list.

    Args:
        b: a 2-D numpy array.

    Returns:
        A list with one total per column, in column order (same values the
        original manual accumulation produced).
    """
    # b.sum(axis=0) computes every column total in a single vectorized pass,
    # replacing the nested loops (which also initialized ``soma`` twice).
    return b.sum(axis=0).tolist()
# List of the column sums of the input matrix.
SOMAcoluna=somacoluna(a)
def diferente(lista):
    """Return the index of the first element whose value occurs exactly once.

    Args:
        lista: a list of comparable values.

    Returns:
        The index of the first unique value, or None when every value is
        repeated (same behavior as the original's implicit fall-through).
    """
    # list.count replaces the hand-rolled inner counting loop; an element
    # whose count (including itself) is 1 is unique in the list.
    for indice, valor in enumerate(lista):
        if lista.count(valor) == 1:
            return indice
    return None
# Find the row and column whose sums are unique, then print the element at
# their intersection followed by the diagnostic values.
# NOTE(review): if no sum is unique, diferente() returns None and the
# indexing a[l,c] below raises — TODO confirm the inputs guarantee a match.
l=diferente(SOMAlinha)
c=diferente(SOMAcoluna)
print(a[l,c])
print(l)
print(c)
print(SOMAlinha)
print(SOMAcoluna)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
2c24eb31e81cf54c6434ddbcf62d96d6ed95e92b | 70f5f279e051360310f95be895320d8fa6cd8d93 | /extraPackages/matplotlib-3.0.2/examples/axes_grid1/simple_axisline4.py | 91b76cf3e95678587c72ce7e196302182df033f5 | [
"BSD-3-Clause"
] | permissive | spacetime314/python3_ios | 4b16ab3e81c31213b3db1e1eb00230621b0a7dc8 | e149f1bc2e50046c8810f83dae7739a8dea939ee | refs/heads/master | 2020-05-09T20:39:14.980041 | 2019-04-08T15:07:53 | 2019-04-08T15:07:53 | 181,415,024 | 2 | 0 | BSD-3-Clause | 2019-04-15T05:00:14 | 2019-04-15T05:00:12 | null | UTF-8 | Python | false | false | 601 | py | """
================
Simple Axisline4
================
"""
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import host_subplot
import numpy as np

# Host axes holding one full period of sin(x).
ax = host_subplot(111)
xx = np.arange(0, 2*np.pi, 0.01)
ax.plot(xx, np.sin(xx))

ax2 = ax.twin()  # ax2 is responsible for "top" axis and "right" axis
# Label the top axis in multiples of pi using LaTeX fractions.
ax2.set_xticks([0., .5*np.pi, np.pi, 1.5*np.pi, 2*np.pi])
ax2.set_xticklabels(["$0$", r"$\frac{1}{2}\pi$",
                     r"$\pi$", r"$\frac{3}{2}\pi$", r"$2\pi$"])

# Hide the duplicated right-hand labels; show only the pi labels on top.
ax2.axis["right"].major_ticklabels.set_visible(False)
ax2.axis["top"].major_ticklabels.set_visible(True)

plt.show()
| [
"nicolas.holzschuch@inria.fr"
] | nicolas.holzschuch@inria.fr |
a89db7d930c80d0aa3e98453c6e2dbdc6e7958dc | 5ec06dab1409d790496ce082dacb321392b32fe9 | /clients/python-flask/generated/openapi_server/models/com_day_cq_wcm_scripting_impl_bvp_manager_properties.py | 227ecb9ebc7b2c86c97f73b34ad77b6d5a907a79 | [
"Apache-2.0"
] | permissive | shinesolutions/swagger-aem-osgi | e9d2385f44bee70e5bbdc0d577e99a9f2525266f | c2f6e076971d2592c1cbd3f70695c679e807396b | refs/heads/master | 2022-10-29T13:07:40.422092 | 2021-04-09T07:46:03 | 2021-04-09T07:46:03 | 190,217,155 | 3 | 3 | Apache-2.0 | 2022-10-05T03:26:20 | 2019-06-04T14:23:28 | null | UTF-8 | Python | false | false | 2,958 | py | # coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from openapi_server.models.base_model_ import Model
from openapi_server.models.config_node_property_array import ConfigNodePropertyArray # noqa: F401,E501
from openapi_server import util
# NOTE: generated code — prefer regenerating with OpenAPI Generator over
# hand-editing this model.
class ComDayCqWcmScriptingImplBVPManagerProperties(Model):
    """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).

    Do not edit the class manually.
    """

    def __init__(self, com_day_cq_wcm_scripting_bvp_script_engines: ConfigNodePropertyArray=None):  # noqa: E501
        """ComDayCqWcmScriptingImplBVPManagerProperties - a model defined in OpenAPI

        :param com_day_cq_wcm_scripting_bvp_script_engines: The com_day_cq_wcm_scripting_bvp_script_engines of this ComDayCqWcmScriptingImplBVPManagerProperties.  # noqa: E501
        :type com_day_cq_wcm_scripting_bvp_script_engines: ConfigNodePropertyArray
        """
        # Maps attribute name -> declared type, used by the (de)serializer.
        self.openapi_types = {
            'com_day_cq_wcm_scripting_bvp_script_engines': ConfigNodePropertyArray
        }

        # Maps the Python attribute name to its JSON/OSGi property key.
        self.attribute_map = {
            'com_day_cq_wcm_scripting_bvp_script_engines': 'com.day.cq.wcm.scripting.bvp.script.engines'
        }

        self._com_day_cq_wcm_scripting_bvp_script_engines = com_day_cq_wcm_scripting_bvp_script_engines

    @classmethod
    def from_dict(cls, dikt) -> 'ComDayCqWcmScriptingImplBVPManagerProperties':
        """Returns the dict as a model

        :param dikt: A dict.
        :type: dict
        :return: The comDayCqWcmScriptingImplBVPManagerProperties of this ComDayCqWcmScriptingImplBVPManagerProperties.  # noqa: E501
        :rtype: ComDayCqWcmScriptingImplBVPManagerProperties
        """
        return util.deserialize_model(dikt, cls)

    @property
    def com_day_cq_wcm_scripting_bvp_script_engines(self) -> ConfigNodePropertyArray:
        """Gets the com_day_cq_wcm_scripting_bvp_script_engines of this ComDayCqWcmScriptingImplBVPManagerProperties.

        :return: The com_day_cq_wcm_scripting_bvp_script_engines of this ComDayCqWcmScriptingImplBVPManagerProperties.
        :rtype: ConfigNodePropertyArray
        """
        return self._com_day_cq_wcm_scripting_bvp_script_engines

    @com_day_cq_wcm_scripting_bvp_script_engines.setter
    def com_day_cq_wcm_scripting_bvp_script_engines(self, com_day_cq_wcm_scripting_bvp_script_engines: ConfigNodePropertyArray):
        """Sets the com_day_cq_wcm_scripting_bvp_script_engines of this ComDayCqWcmScriptingImplBVPManagerProperties.

        :param com_day_cq_wcm_scripting_bvp_script_engines: The com_day_cq_wcm_scripting_bvp_script_engines of this ComDayCqWcmScriptingImplBVPManagerProperties.
        :type com_day_cq_wcm_scripting_bvp_script_engines: ConfigNodePropertyArray
        """
        self._com_day_cq_wcm_scripting_bvp_script_engines = com_day_cq_wcm_scripting_bvp_script_engines
| [
"cliffano@gmail.com"
] | cliffano@gmail.com |
4579d83d688efb731d165c2fb03eb50f4f93e9b5 | 6970f0edbea36c9ba9f77db1ff5335b4ff1d27a5 | /Question16.py | ce693b825c56616f940fcbe9b790a55be9eb4f55 | [] | no_license | PrakharKopergaonkar/LeetCode-June-Challenge | 2f95eb87f074354cda434a01b4322140046056c7 | 9d9077e2550a46f5e3dc0476145abf74f18804b9 | refs/heads/master | 2022-11-11T20:40:22.385018 | 2020-06-30T11:46:19 | 2020-06-30T11:46:19 | 268,437,574 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,432 | py | #Question 16: Validate IP Address
class Solution:
    """LeetCode 468 — Validate IP Address."""

    # Characters allowed in an IPv6 group (case-insensitive hex).
    _HEX_DIGITS = set("0123456789abcdefABCDEF")

    def validIPAddress(self, IP: str) -> str:
        """Classify ``IP`` as "IPv4", "IPv6" or "Neither".

        IPv4: exactly four dot-separated chunks of ASCII decimal digits,
        value 0-255, no leading zeros.
        IPv6: exactly eight colon-separated chunks of 1-4 hex digits.

        Fixes over the original: ``int()``-based parsing silently accepted
        signs, surrounding whitespace and non-ASCII digits (e.g. "1.+2.3.4"
        or "1.2.3.4 " were classified as IPv4); chunks are now validated
        character by character.
        """
        if IP.count(".") == 3:
            if all(self._valid_ipv4_chunk(chunk) for chunk in IP.split(".")):
                return "IPv4"
            return "Neither"
        if IP.count(":") == 7:
            if all(self._valid_ipv6_chunk(chunk) for chunk in IP.split(":")):
                return "IPv6"
            return "Neither"
        return "Neither"

    @staticmethod
    def _valid_ipv4_chunk(chunk: str) -> bool:
        """True when ``chunk`` is a valid IPv4 octet in decimal notation."""
        # ASCII digits only: rejects "", "+1", "-1", " 1" and unicode digits.
        if not chunk or any(ch not in "0123456789" for ch in chunk):
            return False
        # No leading zeros ("01"), but "0" alone is fine.
        if len(chunk) > 1 and chunk[0] == "0":
            return False
        return int(chunk) <= 255

    @classmethod
    def _valid_ipv6_chunk(cls, chunk: str) -> bool:
        """True when ``chunk`` is 1-4 hexadecimal characters."""
        return 1 <= len(chunk) <= 4 and all(ch in cls._HEX_DIGITS for ch in chunk)
| [
"pkopergaonkar@gmail.com"
] | pkopergaonkar@gmail.com |
d325d9b1edabf3a4b71ac6595f98232455385bd5 | ed79815a57eff597575d60649ac01fcf869ab17b | /src/lightning_app/core/constants.py | 6882598cab223babd7e2b01250ab7751b10e30ae | [
"Apache-2.0"
] | permissive | jeremyjordan/pytorch-lightning | 9a29f6d981de19d31e9a9be2d6ac2f05af849e7c | c4ddc068c90d9913b0de97a01446bf287710526c | refs/heads/master | 2023-01-11T07:47:38.047772 | 2023-01-03T22:07:52 | 2023-01-03T22:07:52 | 228,071,400 | 0 | 0 | null | 2019-12-14T18:40:15 | 2019-12-14T18:40:15 | null | UTF-8 | Python | false | false | 3,761 | py | import os
from pathlib import Path
from typing import Optional
import lightning_cloud.env
from lightning_app.utilities.port import _find_lit_app_port
def get_lightning_cloud_url() -> str:
    """Base URL of the Lightning Cloud service.

    Honours the ``LIGHTNING_CLOUD_URL`` environment variable and falls back
    to the production endpoint when it is unset.
    """
    # DO NOT CHANGE!
    custom_url = os.environ.get("LIGHTNING_CLOUD_URL")
    return custom_url if custom_url is not None else "https://lightning.ai"
# Python scalar types that may appear directly in the serialized app state.
SUPPORTED_PRIMITIVE_TYPES = (type(None), str, int, float, bool)
STATE_UPDATE_TIMEOUT = 0.001
STATE_ACCUMULATE_WAIT = 0.15
# Duration in seconds of a moving average of a full flow execution
# beyond which an exception is raised.
FLOW_DURATION_THRESHOLD = 1.0
# Number of samples for the moving average of the duration of flow execution
FLOW_DURATION_SAMPLES = 5

APP_SERVER_HOST = os.getenv("LIGHTNING_APP_STATE_URL", "http://127.0.0.1")
APP_SERVER_IN_CLOUD = "http://lightningapp" in APP_SERVER_HOST
APP_SERVER_PORT = _find_lit_app_port(7501)
APP_STATE_MAX_SIZE_BYTES = 1024 * 1024  # 1 MB
WARNING_QUEUE_SIZE = 1000
# different flag because queue debug can be very noisy, and almost always not useful unless debugging the queue itself.
QUEUE_DEBUG_ENABLED = bool(int(os.getenv("LIGHTNING_QUEUE_DEBUG_ENABLED", "0")))

# Redis-backed queue connection settings (password is optional).
REDIS_HOST = os.getenv("REDIS_HOST", "localhost")
REDIS_PORT = int(os.getenv("REDIS_PORT", 6379))
REDIS_PASSWORD = os.getenv("REDIS_PASSWORD", None)
REDIS_QUEUES_READ_DEFAULT_TIMEOUT = 0.005

# HTTP-backed queue settings (alternative to Redis).
HTTP_QUEUE_URL = os.getenv("LIGHTNING_HTTP_QUEUE_URL", "http://localhost:9801")
HTTP_QUEUE_REFRESH_INTERVAL = float(os.getenv("LIGHTNING_HTTP_QUEUE_REFRESH_INTERVAL", "1"))
HTTP_QUEUE_TOKEN = os.getenv("LIGHTNING_HTTP_QUEUE_TOKEN", None)

USER_ID = os.getenv("USER_ID", "1234")
FRONTEND_DIR = str(Path(__file__).parent.parent / "ui")
PACKAGE_LIGHTNING = os.getenv("PACKAGE_LIGHTNING", None)
CLOUD_UPLOAD_WARNING = int(os.getenv("CLOUD_UPLOAD_WARNING", "2"))
DISABLE_DEPENDENCY_CACHE = bool(int(os.getenv("DISABLE_DEPENDENCY_CACHE", "0")))
# Project under which the resources need to run in cloud. If this env is not set,
# cloud runner will try to get the default project from the cloud
LIGHTNING_CLOUD_PROJECT_ID = os.getenv("LIGHTNING_CLOUD_PROJECT_ID")
LIGHTNING_DIR = os.getenv("LIGHTNING_DIR", str(Path.home() / ".lightning"))
LIGHTNING_CREDENTIAL_PATH = os.getenv("LIGHTNING_CREDENTIAL_PATH", str(Path(LIGHTNING_DIR) / "credentials.json"))
DOT_IGNORE_FILENAME = ".lightningignore"
LIGHTNING_COMPONENT_PUBLIC_REGISTRY = "https://lightning.ai/v1/components"
LIGHTNING_APPS_PUBLIC_REGISTRY = "https://lightning.ai/v1/apps"

# EXPERIMENTAL: ENV VARIABLES TO ENABLE MULTIPLE WORKS IN THE SAME MACHINE
DEFAULT_NUMBER_OF_EXPOSED_PORTS = int(os.getenv("DEFAULT_NUMBER_OF_EXPOSED_PORTS", "50"))
ENABLE_MULTIPLE_WORKS_IN_NON_DEFAULT_CONTAINER = bool(
    int(os.getenv("ENABLE_MULTIPLE_WORKS_IN_NON_DEFAULT_CONTAINER", "0"))
)  # This isn't used in the cloud yet.

# env var trigger running setup commands in the app
ENABLE_APP_COMMENT_COMMAND_EXECUTION = bool(int(os.getenv("ENABLE_APP_COMMENT_COMMAND_EXECUTION", "0")))

DEBUG: bool = lightning_cloud.env.DEBUG
DEBUG_ENABLED = bool(int(os.getenv("LIGHTNING_DEBUG", "0")))

# State REST endpoints: pushing is only enabled when pulling is enabled too.
ENABLE_PULLING_STATE_ENDPOINT = bool(int(os.getenv("ENABLE_PULLING_STATE_ENDPOINT", "1")))
ENABLE_PUSHING_STATE_ENDPOINT = ENABLE_PULLING_STATE_ENDPOINT and bool(
    int(os.getenv("ENABLE_PUSHING_STATE_ENDPOINT", "1"))
)
ENABLE_STATE_WEBSOCKET = bool(int(os.getenv("ENABLE_STATE_WEBSOCKET", "0")))
ENABLE_UPLOAD_ENDPOINT = bool(int(os.getenv("ENABLE_UPLOAD_ENDPOINT", "1")))
def enable_multiple_works_in_default_container() -> bool:
    """Feature flag: allow several works to share the default container.

    Read lazily from the environment so tests and apps can toggle it at
    runtime; any non-zero integer value enables the feature.
    """
    raw_flag = os.getenv("ENABLE_MULTIPLE_WORKS_IN_DEFAULT_CONTAINER", "0")
    return int(raw_flag) != 0
def get_cloud_queue_type() -> Optional[str]:
    """Queue backend selected for cloud runs, or None when unset.

    Read lazily from ``LIGHTNING_CLOUD_QUEUE_TYPE`` on every call.
    """
    return os.environ.get("LIGHTNING_CLOUD_QUEUE_TYPE")
# Poll interval: number of seconds to wait between filesystem checks when
# waiting for files to appear in remote storage.
REMOTE_STORAGE_WAIT = 0.5
| [
"noreply@github.com"
] | jeremyjordan.noreply@github.com |
0203ab2c728395ab43a8858d7ffce645e9707c25 | b7e52aeabebf7448e31723d406755809cac63099 | /source/grid_CM/SConstruct | 892eff24f21e6e80d598c22455af1f461dcc44ee | [
"BSD-3-Clause"
] | permissive | bucricket/projectMASviirs | df31af86e024499ff87d2c2b707e3b9d24813f7c | 705abc89505122351f0ef78e0edb950b7e3b7f48 | refs/heads/master | 2021-01-01T18:31:16.748864 | 2018-05-30T15:14:07 | 2018-05-30T15:14:07 | 98,354,619 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,177 | #!python
import os
import platform   # NOTE(review): unused in this script — confirm before removing
import subprocess # NOTE(review): unused in this script — confirm before removing

# Register a --prefix command-line option for the installation root.
AddOption('--prefix',
          dest='prefix',
          type='string',
          nargs=1,
          action='store',
          metavar='DIR',
          help='installation prefix')

env = Environment(PREFIX = GetOption('prefix'))

# Derive the source/bin layout from the PREFIX environment variable
# (conda-build convention: the work dir sits next to the prefix).
prefix = os.environ.get('PREFIX')
base1 = os.path.abspath(os.path.join(prefix,os.pardir))
base = os.path.join(base1,'work')
sourcePath = os.path.join(base,'source')  # NOTE(review): computed but unused
binPath = os.path.join(prefix,'bin')

# Build environment using gfortran as the linker.
# NOTE(review): this rebinds env and discards the Environment created above
# (including its PREFIX setting) — confirm that is intended.
env = Environment(LINK='gfortran')

# One executable per Fortran source file.
grid_cloud_day = env.Program(target='grid_cloud_day', source= ['grid_cloud_day.f90'])
grid_cloud_night = env.Program(target='grid_cloud_night', source= ['grid_cloud_night.f90'])
agg_cloud = env.Program(target='agg_cloud', source= ['agg_cloud.f90'])

# 'scons install' copies the three executables into <prefix>/bin.
env.Install(binPath, [grid_cloud_day,grid_cloud_night,agg_cloud])
env.Alias('install', binPath)
| [
"bucricket@gmail.com"
] | bucricket@gmail.com | |
272b83cdcad7c074e2f07da09755ec60b9d52810 | 49e3663fea29ae7fabec581ebae1fda5361effcd | /accounts/urls.py | f12a9c88575ec2d7026f8a383e090da6e8939f7d | [] | no_license | ikonitas/old_causenaffect | 8fdabdc3108efa16046f6bdfa542f71d58a4e6eb | 145f7ae82d02e39bda17aad380dac0f190f1882c | refs/heads/master | 2021-05-28T08:46:49.663299 | 2014-02-17T21:42:36 | 2014-02-17T21:42:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 477 | py | from django.conf.urls.defaults import patterns, include, url
# URL routes for the accounts app.
# NOTE(review): uses the legacy string-view `patterns()` syntax from
# django.conf.urls.defaults (removed in Django 1.6) — migration required
# before upgrading Django.
urlpatterns = patterns('',
    url(r'^login/$','accounts.views.mine_login',{'template_name':'accounts/login.html'},name="login"),
    url(r'^logout/$','accounts.views.logout_view', name="auth_logout"),
    url(r'^register/$','accounts.views.register',name="auth_register"),
    url(r'^profile/$','accounts.views.profile',name="profile"),
    url(r'^forget/$','accounts.views.forget',name="forget_password"),
)
| [
"ikonitas@gmail.com"
] | ikonitas@gmail.com |
c6de19bbba3fcb34a773fb67a7063eddcad1eadd | 786027545626c24486753351d6e19093b261cd7d | /ghidra9.2.1_pyi/ghidra/framework/store/db/PackedDatabase.pyi | 0a36192ecae8f0ad6e5fcb950a72631b28d642a6 | [
"MIT"
] | permissive | kohnakagawa/ghidra_scripts | 51cede1874ef2b1fed901b802316449b4bf25661 | 5afed1234a7266c0624ec445133280993077c376 | refs/heads/main | 2023-03-25T08:25:16.842142 | 2021-03-18T13:31:40 | 2021-03-18T13:31:40 | 338,577,905 | 14 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,632 | pyi | import db
import db.buffers
import generic.jar
import ghidra.framework.store.db
import ghidra.util.task
import java.io
import java.lang
# NOTE: auto-generated Jython (Python 2) type stub for Ghidra's
# PackedDatabase; `unicode`/`long` and the bare `@overload` decorator come
# from the stub generator and mirror the Java API — do not hand-modernize.
class PackedDatabase(db.Database):
    """
    PackedDatabase provides a packed form of Database
    which compresses a single version into a file.

    When opening a packed database, a PackedDBHandle is returned
    after first expanding the file into a temporary Database.
    """

    # Marker file name that flags a packed-database directory as read-only.
    READ_ONLY_DIRECTORY_LOCK_FILE: unicode = u'.dbDirLock'

    @staticmethod
    def cleanupOldTempDatabases() -> None:
        """
        Attempt to remove all old temporary databases.
        Those still open by an existing process should
        not be removed by the operating system.
        """
        ...

    @overload
    def delete(self) -> None:
        """
        Deletes the storage file associated with this packed database.
        This method should not be called while the database is open, if
        it is an attempt will be made to close the handle.
        @throws IOException
        """
        ...

    @overload
    @staticmethod
    def delete(packedDbFile: java.io.File) -> None:
        """
        Deletes the storage file associated with this packed database.
        @throws IOException
        """
        ...

    def dispose(self) -> None:
        """
        Free resources consumed by this object.
        If there is an associated database handle it will be closed.
        """
        ...

    def equals(self, __a0: object) -> bool: ...

    def getClass(self) -> java.lang.Class: ...

    def getContentType(self) -> unicode:
        """
        Returns the user defined content type associated with this database.
        """
        ...

    def getCurrentVersion(self) -> int:
        """
        Returns the version number associated with the latest buffer file version.
        """
        ...

    @overload
    @staticmethod
    def getPackedDatabase(packedDbFile: java.io.File, monitor: ghidra.util.task.TaskMonitor) -> ghidra.framework.store.db.PackedDatabase:
        """
        Get a packed database which whose unpacking will be cached if possible
        @param packedDbFile
        @param monitor
        @return packed database which corresponds to the specified packedDbFile
        @throws IOException
        @throws CancelledException
        """
        ...

    @overload
    @staticmethod
    def getPackedDatabase(packedDbFile: generic.jar.ResourceFile, neverCache: bool, monitor: ghidra.util.task.TaskMonitor) -> ghidra.framework.store.db.PackedDatabase:
        """
        Get a packed database whose unpacking may be cached if possible
        provided doNotCache is false.
        @param packedDbFile
        @param neverCache if true unpacking will never be cache.
        @param monitor
        @return packed database which corresponds to the specified packedDbFile
        @throws IOException
        @throws CancelledException
        """
        ...

    @overload
    @staticmethod
    def getPackedDatabase(packedDbFile: java.io.File, neverCache: bool, monitor: ghidra.util.task.TaskMonitor) -> ghidra.framework.store.db.PackedDatabase:
        """
        Get a packed database whose unpacking may be cached if possible
        provided doNotCache is false.
        @param packedDbFile
        @param neverCache if true unpacking will never be cache.
        @param monitor
        @return packed database which corresponds to the specified packedDbFile
        @throws IOException
        @throws CancelledException
        """
        ...

    def getPackedFile(self) -> generic.jar.ResourceFile:
        """
        Returns the storage file associated with this packed database.
        """
        ...

    def hashCode(self) -> int: ...

    def isReadOnly(self) -> bool: ...

    @staticmethod
    def isReadOnlyPDBDirectory(directory: generic.jar.ResourceFile) -> bool:
        """
        Check for the presence of directory read-only lock
        @param directory
        @return true if read-only lock exists
        """
        ...

    def lastModified(self) -> long:
        """
        Returns the time at which this database was last saved.
        """
        ...

    def length(self) -> long:
        """
        Returns the length of this domain file. This size is the minimum disk space
        used for storing this file, but does not account for additional storage space
        used to tracks changes, etc.
        @return file length
        @throws IOException thrown if IO or access error occurs
        """
        ...

    def notify(self) -> None: ...

    def notifyAll(self) -> None: ...

    def open(self, monitor: ghidra.util.task.TaskMonitor) -> db.DBHandle: ...

    def openForUpdate(self, monitor: ghidra.util.task.TaskMonitor) -> db.DBHandle: ...

    @staticmethod
    def packDatabase(dbh: db.DBHandle, itemName: unicode, contentType: unicode, outputFile: java.io.File, monitor: ghidra.util.task.TaskMonitor) -> None:
        """
        Serialize (i.e., pack) an open database into the specified outputFile.
        @param dbh open database handle
        @param itemName item name to associate with packed content
        @param contentType supported content type
        @param outputFile packed output file to be created
        @param monitor progress monitor
        @throws IOException
        @throws CancelledException if monitor cancels operation
        """
        ...

    def refresh(self) -> None:
        """
        Scan files and update state.
        """
        ...

    def setSynchronizationObject(self, syncObject: object) -> None:
        """
        Set the object to be used for synchronization.
        @param syncObject
        """
        ...

    def toString(self) -> unicode: ...

    @staticmethod
    def unpackDatabase(bfMgr: db.buffers.BufferFileManager, checkinId: long, packedFile: java.io.File, monitor: ghidra.util.task.TaskMonitor) -> None:
        """
        Create a new Database with data provided by an ItemDeserializer.
        @param bfMgr the buffer manager for the database
        @param checkinId the check-in id
        @param packedFile the file to unpack
        @param monitor the task monitor
        @throws CancelledException
        """
        ...

    @overload
    def wait(self) -> None: ...

    @overload
    def wait(self, __a0: long) -> None: ...

    @overload
    def wait(self, __a0: long, __a1: int) -> None: ...

    @property
    def contentType(self) -> unicode: ...

    @property
    def packedFile(self) -> generic.jar.ResourceFile: ...

    @property
    def readOnly(self) -> bool: ...
| [
"tsunekou1019@gmail.com"
] | tsunekou1019@gmail.com |
d9359c471400c6cd070d551f11de720256d788d1 | 203fd0a2a91ee063736d11b069e11026c4184ec5 | /tools/test_files/all_17_or_less/cuccaroMultiplier_1b.py | d685ab8e566f9bf3cb9f2e768a48a6df11b46216 | [
"Apache-2.0"
] | permissive | Astlaan/OpenQL | 19b35200775367e6f11d1756e9495c1883a1c531 | 1b60d5784d439e6c19e662b570114aa1b3045d38 | refs/heads/develop | 2020-09-01T02:56:37.906564 | 2019-11-15T15:43:55 | 2019-11-15T15:43:55 | 181,012,110 | 0 | 0 | NOASSERTION | 2019-04-12T13:18:51 | 2019-04-12T13:18:51 | null | UTF-8 | Python | false | false | 3,349 | py | from openql import openql as ql
import os
import argparse
def circuit(config_file, new_scheduler='yes', scheduler='ASAP', uniform_sched= 'no', sched_commute = 'yes', mapper='base', moves='no', maptiebreak='random', initial_placement='no', output_dir_name='test_output', optimize='no', measurement=True, log_level='LOG_WARNING'):
    """Compile the 'cuccaroMultiplier_1b' circuit with OpenQL.

    Args:
        config_file: OpenQL platform configuration file (relative to this script).
        new_scheduler/scheduler/uniform_sched/sched_commute: scheduler options,
            forwarded verbatim as OpenQL option strings ('yes'/'no', 'ASAP'/'ALAP').
        mapper/moves/maptiebreak/initial_placement: mapping options.
        output_dir_name: folder (relative to this script) for compilation output.
        optimize: OpenQL 'optimize' option ('yes'/'no').
        measurement: when truthy, append a measure gate on every qubit.
        log_level: OpenQL logging level.
    """
    curdir = os.path.dirname(__file__)
    output_dir = os.path.join(curdir, output_dir_name)
    # Configure the OpenQL compiler via its global option store.
    ql.set_option('output_dir', output_dir)
    ql.set_option('optimize', optimize)
    ql.set_option('scheduler', scheduler)
    ql.set_option('scheduler_uniform', uniform_sched)
    ql.set_option('mapper', mapper)
    ql.set_option('initialplace', initial_placement)
    ql.set_option('log_level', log_level)
    ql.set_option('scheduler_post179', new_scheduler)
    ql.set_option('scheduler_commute', sched_commute)
    ql.set_option('mapusemoves', moves)
    ql.set_option('maptiebreak', maptiebreak)

    config_fn = os.path.join(curdir, config_file)
    # platform  = ql.Platform('platform_none', config_fn)
    platform = ql.Platform('starmon', config_fn)
    sweep_points = [1,2]
    num_circuits = 1  # NOTE(review): assigned but never used — confirm before removing
    num_qubits = 7
    p = ql.Program('cuccaroMultiplier_1b', platform, num_qubits)
    p.set_sweep_points(sweep_points)
    k = ql.Kernel('cuccaroMultiplier_1b', platform, num_qubits)

    # Gate sequence of the 1-bit multiplier (qubits 2-5 are prepared in |0>).
    k.gate('prepz',[2])
    k.gate('prepz',[3])
    k.gate('prepz',[4])
    k.gate('prepz',[5])
    k.gate('toffoli',[0,1,3])
    k.gate('cnot',[1,5])
    k.gate('toffoli',[5,3,1])
    k.gate('toffoli',[0,1,4])
    k.gate('toffoli',[5,3,1])
    k.gate('cnot',[1,5])
    k.gate('toffoli',[0,5,3])

    if measurement:
        # Read out every qubit at the end of the kernel.
        for q in range(num_qubits):
            k.gate('measure', [q])

    p.add_kernel(k)
    p.compile()

    # Reset the global mapper option so later compilations start unmapped.
    ql.set_option('mapper', 'no')
if __name__ == '__main__':
    # Command-line driver: every option mirrors a keyword argument of circuit().
    parser = argparse.ArgumentParser(description='OpenQL compilation of a Quantum Algorithm')
    parser.add_argument('config_file', help='Path to the OpenQL configuration file to compile this algorithm')
    parser.add_argument('--new_scheduler', nargs='?', default='yes', help='Scheduler defined by Hans')
    parser.add_argument('--scheduler', nargs='?', default='ASAP', help='Scheduler specification (ASAP (default), ALAP, ...)')
    parser.add_argument('--uniform_sched', nargs='?', default='no', help='Uniform shceduler actication (yes or no)')
    parser.add_argument('--sched_commute', nargs='?', default='yes', help='Permits two-qubit gates to be commutable')
    parser.add_argument('--mapper', nargs='?', default='base', help='Mapper specification (base, minextend, minextendrc)')
    parser.add_argument('--moves', nargs='?', default='no', help='Let the use of moves')
    parser.add_argument('--maptiebreak', nargs='?', default='random', help='')
    parser.add_argument('--initial_placement', nargs='?', default='no', help='Initial placement specification (yes or no)')
    parser.add_argument('--out_dir', nargs='?', default='test_output', help='Folder name to store the compilation')
    parser.add_argument('--measurement', nargs='?', default=True, help='Add measurement to all the qubits in the end of the algorithm')
    args = parser.parse_args()
    # NOTE(review): args.measurement is parsed but never forwarded to circuit(),
    # and catching TypeError also masks ordinary programming mistakes — TODO confirm.
    try:
        circuit(args.config_file, args.new_scheduler, args.scheduler, args.uniform_sched, args.sched_commute, args.mapper, args.moves, args.maptiebreak, args.initial_placement, args.out_dir)
    except TypeError:
        print('\nCompiled, but some gate is not defined in the configuration file. \nThe gate will be invoked like it is.')
raise | [
"diogovalada.7@tecnico.ulisboa.pt"
] | diogovalada.7@tecnico.ulisboa.pt |
5cf10b27b7c61c8f1932bda943bbe58d6874009b | 24d096347f61aafe0b7f9294b6b6cfd66931ee06 | /classy_vision/dataset/classy_kinetics400.py | 579e316e6322e428eb0ec9b8cec9371867b54553 | [
"MIT"
] | permissive | miguelvr/ClassyVision | 259ee08e59761b33400d06eff383432bad93a34a | 38a59270e16fda83e160c5888b96c777cb78757b | refs/heads/master | 2020-12-11T10:32:10.725070 | 2020-01-14T01:39:26 | 2020-01-14T01:40:46 | 233,824,620 | 0 | 0 | MIT | 2020-01-14T11:18:59 | 2020-01-14T11:18:58 | null | UTF-8 | Python | false | false | 6,536 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from typing import Any, Callable, Dict, List, Optional
import torch
from torchvision.datasets.kinetics import Kinetics400
from . import register_dataset
from .classy_video_dataset import ClassyVideoDataset
from .transforms.util_video import build_video_field_transform_default
@register_dataset("kinetics400")
class Kinetics400Dataset(ClassyVideoDataset):
    """`Kinetics-400 <https://deepmind.com/research/open-source/
    open-source-datasets/kinetics/>`_ is an action recognition video dataset,
    and it has 400 classes.
    `Original publication <https://arxiv.org/pdf/1705.06950.pdf>`_

    We assume videos are already trimmed to 10-second clips and are stored in
    a folder.

    It is built on top of the `Kinetics400 <https://github.com/pytorch/vision/blob/
    master/torchvision/datasets/kinetics.py#L7/>`_ dataset class in TorchVision.
    """

    def __init__(
        self,
        split: str,
        batchsize_per_replica: int,
        shuffle: bool,
        transform: Callable,
        num_samples: Optional[int],
        frames_per_clip: int,
        video_width: int,
        video_height: int,
        video_min_dimension: int,
        audio_samples: int,
        audio_channels: int,
        step_between_clips: int,
        frame_rate: Optional[int],
        clips_per_video: int,
        video_dir: str,
        extensions: List[str],
        metadata_filepath: str,
    ):
        """The constructor of Kinetics400Dataset.

        Args:
            split: dataset split which can be either "train" or "test"
            batchsize_per_replica: batch size per model replica
            shuffle: If true, shuffle the dataset
            transform: a dict where transforms video and audio data
            num_samples: if provided, it will subsample dataset
            frames_per_clip: the No. of frames in a video clip
            video_width: rescaled video width. If 0, keep original width
            video_height: rescaled video height. If 0, keep original height
            video_min_dimension: rescale video so that min(height, width) =
                video_min_dimension. If 0, keep original video resolution. Note
                only one of (video_width, video_height) and (video_min_dimension)
                can be set
            audio_samples: desired audio sample rate. If 0, keep original
                audio sample rate
            audio_channels: desired No. of audio channels. If 0, keep original
                audio channels
            step_between_clips: Number of frames between each clip.
            frame_rate: desired video frame rate. If None, keep
                original video frame rate.
            clips_per_video: Number of clips to sample from each video
            video_dir: path to video folder
            extensions: A list of file extensions, such as "avi" and "mp4". Only
                videos matching those file extensions are added to the dataset
            metadata_filepath: path to the dataset meta data
        """
        # Dataset metadata holds the per-video file path, the pts of frames,
        # and info such as video fps, duration and audio sample rate. Callers
        # never build it themselves: if a file exists at ``metadata_filepath``
        # it is loaded, otherwise the (expensive) scan below computes it and
        # it is saved back for the next run.
        metadata = None
        if os.path.exists(metadata_filepath):
            metadata = Kinetics400Dataset.load_metadata(
                metadata_filepath, video_dir=video_dir, update_file_path=True
            )

        dataset = Kinetics400(
            video_dir,
            frames_per_clip,
            step_between_clips=step_between_clips,
            frame_rate=frame_rate,
            _precomputed_metadata=metadata,
            extensions=extensions,
            num_workers=torch.get_num_threads(),
            _video_width=video_width,
            _video_height=video_height,
            _video_min_dimension=video_min_dimension,
            _audio_samples=audio_samples,
            _audio_channels=audio_channels,
        )
        metadata = dataset.metadata
        # Persist freshly computed metadata so subsequent runs skip the scan.
        if metadata and not os.path.exists(metadata_filepath):
            Kinetics400Dataset.save_metadata(metadata, metadata_filepath)

        super().__init__(
            dataset,
            split,
            batchsize_per_replica,
            shuffle,
            transform,
            num_samples,
            clips_per_video,
        )

    @classmethod
    def from_config(cls, config: Dict[str, Any]) -> "Kinetics400Dataset":
        """Instantiates a Kinetics400Dataset from a configuration.

        Args:
            config: A configuration for a Kinetics400Dataset.
                See :func:`__init__` for parameters expected in the config.

        Returns:
            A Kinetics400Dataset instance.
        """
        required_args = ["split", "metadata_file", "video_dir"]
        assert all(
            arg in config for arg in required_args
        ), f"The arguments {required_args} are all required."

        split = config["split"]
        audio_channels = config.get("audio_channels", 0)
        (
            transform_config,
            batchsize_per_replica,
            shuffle,
            num_samples,
            frames_per_clip,
            video_width,
            video_height,
            video_min_dimension,
            audio_samples,
            step_between_clips,
            frame_rate,
            clips_per_video,
        ) = cls.parse_config(config)
        # BUG FIX: the default used to be ("mp4"), which is just the *string*
        # "mp4" (missing trailing comma), not the sequence the signature
        # promises. A real 1-tuple makes the intent explicit.
        extensions = config.get("extensions", ("mp4",))
        transform = build_video_field_transform_default(transform_config, split)
        return cls(
            split,
            batchsize_per_replica,
            shuffle,
            transform,
            num_samples,
            frames_per_clip,
            video_width,
            video_height,
            video_min_dimension,
            audio_samples,
            audio_channels,
            step_between_clips,
            frame_rate,
            clips_per_video,
            config["video_dir"],
            extensions,
            config["metadata_file"],
        )
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
1a537ebe3356dce30f9c7ae18c3f56db7dcad104 | bc441bb06b8948288f110af63feda4e798f30225 | /ucpro_sdk/model/resource_manage/filter_condition_group_pb2.py | 7239f832bbc11b6d43ad8d143ba923245160ff23 | [
"Apache-2.0"
] | permissive | easyopsapis/easyops-api-python | 23204f8846a332c30f5f3ff627bf220940137b6b | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | refs/heads/master | 2020-06-26T23:38:27.308803 | 2020-06-16T07:25:41 | 2020-06-16T07:25:41 | 199,773,131 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | true | 3,242 | py | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: filter_condition_group.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from ucpro_sdk.model.resource_manage import filter_condition_pb2 as ucpro__sdk_dot_model_dot_resource__manage_dot_filter__condition__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='filter_condition_group.proto',
package='resource_manage',
syntax='proto3',
serialized_options=_b('ZIgo.easyops.local/contracts/protorepo-models/easyops/model/resource_manage'),
serialized_pb=_b('\n\x1c\x66ilter_condition_group.proto\x12\x0fresource_manage\x1a\x36ucpro_sdk/model/resource_manage/filter_condition.proto\"]\n\x14\x46ilterConditionGroup\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x37\n\rconditionList\x18\x02 \x03(\x0b\x32 .resource_manage.FilterConditionBKZIgo.easyops.local/contracts/protorepo-models/easyops/model/resource_manageb\x06proto3')
,
dependencies=[ucpro__sdk_dot_model_dot_resource__manage_dot_filter__condition__pb2.DESCRIPTOR,])
_FILTERCONDITIONGROUP = _descriptor.Descriptor(
name='FilterConditionGroup',
full_name='resource_manage.FilterConditionGroup',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='resource_manage.FilterConditionGroup.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='conditionList', full_name='resource_manage.FilterConditionGroup.conditionList', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=105,
serialized_end=198,
)
_FILTERCONDITIONGROUP.fields_by_name['conditionList'].message_type = ucpro__sdk_dot_model_dot_resource__manage_dot_filter__condition__pb2._FILTERCONDITION
DESCRIPTOR.message_types_by_name['FilterConditionGroup'] = _FILTERCONDITIONGROUP
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
FilterConditionGroup = _reflection.GeneratedProtocolMessageType('FilterConditionGroup', (_message.Message,), {
'DESCRIPTOR' : _FILTERCONDITIONGROUP,
'__module__' : 'filter_condition_group_pb2'
# @@protoc_insertion_point(class_scope:resource_manage.FilterConditionGroup)
})
_sym_db.RegisterMessage(FilterConditionGroup)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| [
"service@easyops.cn"
] | service@easyops.cn |
d4203dfd3e932c41f7a84b25c66eb08de195f7d9 | 043cfa8990dafcbcaa354c6ae7eade01ae2aab71 | /Configuration/python/bbgun_BjetToMu_endcap_pos_pythia8_cfi.py | 899d0083eeeb374632ab7687dabf2b02a2a65158 | [] | no_license | l-cadamuro/SampleGeneration | cdacfc793c617c2ec5f886a6295d3ed0fb1dc932 | fd9398577a0ef721a5d9846b27862791149435b1 | refs/heads/master | 2021-10-25T23:47:49.833650 | 2019-04-08T12:15:01 | 2019-04-08T12:15:01 | 124,481,043 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,409 | py | import FWCore.ParameterSet.Config as cms
# Pythia8 particle gun: fires particles with PDG id 5 (b quarks; the matching
# antiparticle is added automatically), flat in 15-500 GeV pT, restricted to
# the positive-eta endcap region 1.0 < eta < 2.7, full phi coverage.
generator = cms.EDFilter("Pythia8PtGun",
    PGunParameters = cms.PSet(
        MaxPt = cms.double(500.0),
        MinPt = cms.double(15.0),
        ParticleID = cms.vint32(5),
        AddAntiParticle = cms.bool(True),
        MaxEta = cms.double(2.7),
        MaxPhi = cms.double(3.14159265359),
        MinEta = cms.double(1.0),
        MinPhi = cms.double(-3.14159265359) ## in radians
        ),
    Verbosity = cms.untracked.int32(0), ## set to 1 (or greater) for printouts
    psethack = cms.string('bb gun pt 15 500'),
    firstRun = cms.untracked.uint32(1),
    PythiaParameters = cms.PSet(parameterSets = cms.vstring())
    )
# Event filter: require a pair of final-state (status 1) particles with id 13
# (muons), each with pT > 2 GeV inside 1.2 < eta < 2.5.
# NOTE(review): ParticleCharge = 0 presumably disables any relative-charge
# requirement — confirm against the MCParticlePairFilter documentation.
MuMuFilter = cms.EDFilter("MCParticlePairFilter",
    Status = cms.untracked.vint32(1, 1),
    MinPt = cms.untracked.vdouble(2., 2.),
    MinP = cms.untracked.vdouble(0., 0.),
    MinEta = cms.untracked.vdouble(1.2, 1.2),
    MaxEta = cms.untracked.vdouble(2.5, 2.5),
    ParticleCharge = cms.untracked.int32(0),
    ParticleID1 = cms.untracked.vint32(13),
    )
# Generation sequence: run the gun, keep only events passing the muon filter.
ProductionFilterSequence = cms.Sequence(generator*MuMuFilter)
| [
"lc.cadamuro@gmail.com"
] | lc.cadamuro@gmail.com |
7ba1dbf8fa85e3dd499cf519e143c1de311c58c3 | b8c4ef9ccab22717ab97ab2fb100614d962a5820 | /src/test/python/com/skalicky/python/interviewpuzzles/test_find_longest_palindromic_substring.py | b8edfd26cea5ff84601d0561cf850307c6c2ae73 | [] | no_license | Sandeep8447/interview_puzzles | 1d6c8e05f106c8d5c4c412a9f304cb118fcc90f4 | a3c1158fe70ed239f8548ace8d1443a431b644c8 | refs/heads/master | 2023-09-02T21:39:32.747747 | 2021-10-30T11:56:57 | 2021-10-30T11:56:57 | 422,867,683 | 0 | 0 | null | 2021-10-30T11:56:58 | 2021-10-30T11:55:17 | null | UTF-8 | Python | false | false | 1,327 | py | from unittest import TestCase
from src.main.python.com.skalicky.python.interviewpuzzles.find_longest_palindromic_substring import Solution
class TestSolution(TestCase):
    """Unit tests for Solution.find_longest_palindromic_substring."""

    def test_find_longest_palindromic_substring__when_input_string_is_tracecars__then_result_is_racecar(self):
        self.assertEqual('racecar', Solution.find_longest_palindromic_substring('tracecars'))

    def test_find_longest_palindromic_substring__when_input_string_is_banana__then_result_is_anana(self):
        self.assertEqual('anana', Solution.find_longest_palindromic_substring('banana'))

    def test_find_longest_palindromic_substring__when_input_string_is_million__then_result_is_illi(self):
        # BUG FIX: this case previously duplicated the 'tracecars' assertion
        # and never exercised 'million' at all.
        self.assertEqual('illi', Solution.find_longest_palindromic_substring('million'))

    def test_find_longest_palindromic_substring__when_input_string_is_mh__then_result_is_m(self):
        self.assertEqual('m', Solution.find_longest_palindromic_substring('mh'))

    def test_find_longest_palindromic_substring__when_input_string_is_empty__then_these_is_no_result(self):
        self.assertIsNone(Solution.find_longest_palindromic_substring(''))

    def test_find_longest_palindromic_substring__when_these_is_no_input_string__then_there_is_no_result(self):
        self.assertIsNone(Solution.find_longest_palindromic_substring(None))
| [
"skalicky.tomas@gmail.com"
] | skalicky.tomas@gmail.com |
d823f6c27a4692da6243b17732c3ae4ceb1d6125 | 3520f9f1b6d804a6d95233493972bf04dca67fb4 | /revisited/trees/symmetric_tree.py | 0d23c95372faee070efd7a74c51ffa4ae9981773 | [] | no_license | Shiv2157k/leet_code | 8691a470148809f0a7077434abdc689f33958f34 | 65cc78b5afa0db064f9fe8f06597e3e120f7363d | refs/heads/master | 2023-06-17T02:59:20.892561 | 2021-07-05T16:42:58 | 2021-07-05T16:42:58 | 266,856,709 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,789 | py | from collections import deque
class TreeNode:
    """A single node of a binary tree."""

    def __init__(self, val: int, left: "TreeNode | None" = None, right: "TreeNode | None" = None):
        # BUG FIX (annotations only): left/right were annotated as ``int``;
        # they are child nodes or None.
        self.val = val      # payload stored at this node
        self.left = left    # left child, or None for a leaf side
        self.right = right  # right child, or None for a leaf side
class BinaryTree:
    """Checks whether a binary tree is symmetric (a mirror image of itself).

    The original class defined ``is_symmetric`` twice; the first (iterative)
    definition was silently shadowed by the second and was also broken — it
    called ``queue.popleft(...)`` with arguments where it meant
    ``queue.append(...)``. The iterative variant is kept here under its own
    name and fixed.
    """

    def is_symmetric_iterative(self, root: "TreeNode") -> bool:
        """BFS check: compare mirrored node pairs via an explicit queue.

        Time complexity: O(N). Space complexity: O(N) for the pair queue
        (the original docstring incorrectly claimed O(1)).
        """
        if not root:
            return True
        queue = deque([(root.left, root.right)])
        while queue:
            node1, node2 = queue.popleft()
            if not node1 and not node2:
                continue  # both sides empty here: still mirrored
            if not node1 or not node2:
                return False  # only one side empty: shape mismatch
            if node1.val != node2.val:
                return False
            # BUG FIX: these two lines used to call queue.popleft(...) —
            # the mirrored child pairs must be *enqueued*.
            queue.append((node1.left, node2.right))
            queue.append((node1.right, node2.left))
        return True

    def depth_first_search(self, node1: "TreeNode", node2: "TreeNode") -> bool:
        """Return True when the subtrees at node1 and node2 mirror each other.

        Time complexity: O(N). Space complexity: O(H) recursion depth, where
        H is the tree height.
        """
        # Base cases: two empty subtrees mirror; one empty side does not.
        if not node1 and not node2:
            return True
        if not node1 or not node2:
            return False
        return (node1.val == node2.val) and \
            self.depth_first_search(node1.left, node2.right) and \
            self.depth_first_search(node1.right, node2.left)

    def is_symmetric(self, root: "TreeNode") -> bool:
        """Return True when the tree rooted at ``root`` is symmetric.

        Recursive approach; an empty tree counts as symmetric.
        """
        if not root:
            return True
        return self.depth_first_search(root.left, root.right)
"shiv2157.k@gmail.com"
] | shiv2157.k@gmail.com |
b9dd31434efc8e2b06e84c2a097fd470a7039907 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02580/s938069625.py | 0db1858a0ef2df75e1167078f1c074c1a4e0a062 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 446 | py | import itertools
# Grid of H rows x W columns with M targets; one bomb destroys everything in
# a chosen cell's entire row and column. Maximize the number destroyed.
H, W, M = map(int, input().split())
# Bh[i] / Bw[j]: number of targets in row i / column j (1-based indexing).
Bh = [0]*(H+1)
Bw = [0]*(W+1)
OD = set()  # exact (row, col) coordinates of every target
for _ in range(M):
    h, w = map(int, input().split())
    Bh[h] += 1
    Bw[w] += 1
    OD.add((h, w))
# M1 / M2: best single-row and single-column target counts.
M1 = max(Bh)
M2 = max(Bw)
l1 = [i for i, x in enumerate(Bh) if x == M1]  # rows achieving M1
l2 = [j for j, x in enumerate(Bw) if x == M2]  # columns achieving M2
# If some best-row/best-column intersection holds no target, placing the bomb
# there destroys M1 + M2 targets; otherwise every such intersection
# double-counts one target, giving M1 + M2 - 1.
for i, j in itertools.product(l1, l2):
    if (i, j) not in OD:
        break
else:
    print(M1+M2-1)
    exit()
print(M1+M2)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
550074286e8d07c9b0bbaa2def65185b632ab6e6 | 15ce00a910f5404f1ab3d6eb59334c26c5708748 | /object-oriented/inherit.py | 8e78555191bc03a4e65c6632fd522bdc7d419f78 | [] | no_license | calazans10/algorithms.py | 3307be25920428b33e784229c2aa727ac4225423 | b8b0495fe34645b45aa5366416c1f80d87d18a3b | refs/heads/master | 2020-05-17T13:27:58.481732 | 2013-07-21T13:31:39 | 2013-07-21T13:31:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,227 | py | # -*- coding: utf-8 -*-
class SchoolMember:
    """Base type for anyone belonging to the school (name + age)."""

    def __init__(self, name, age):
        self.name, self.age = name, age
        banner = '(Iniciando SchoolMember: {0})'.format(self.name)
        print(banner)

    def tell(self):
        """Print this member's details, leaving the cursor on the same line."""
        details = 'Nome: "{0}" Idade: "{1}"'.format(self.name, self.age)
        print(details, end=" ")
class Teacher(SchoolMember):
    """A school member who teaches; adds a salary on top of the base data."""

    def __init__(self, name, age, salary):
        super().__init__(name, age)
        self.salary = salary
        print('(Iniciando Teacher: {0})'.format(self.name))

    def tell(self):
        """Print the base details followed by the salary."""
        super().tell()
        print('Salário: "{0:d}"'.format(self.salary))
class Student(SchoolMember):
    """A school member who studies; adds an exam mark on top of the base data."""

    def __init__(self, name, age, marks):
        super().__init__(name, age)
        self.marks = marks
        print('(Iniciando Student: {0})'.format(self.name))

    def tell(self):
        """Print the base details followed by the mark."""
        super().tell()
        print('Nota: "{0:d}"'.format(self.marks))
# Demo: build one Teacher and one Student (each __init__ prints its banner).
t = Teacher('Mrs. Sasha', 26, 1000000)
s = Student('Derek', 17, 75)
print()

# Polymorphic dispatch: each member's tell() prints the shared details plus
# its own extra field.
members = [t, s]
for member in members:
    member.tell()
| [
"calazans10@gmail.com"
] | calazans10@gmail.com |
426d18ad42e680edb98475f9aff3c79cc1bfd48e | 1058f1d8afd0d02ef75b9adbd9d158439dbc353c | /runner.py | 581b6074e840996376b70498f1c60f8ee066ecae | [] | no_license | himanshu3006/IVTREE_Demo | 0bba36faa56648de80033bf1d8ba8bc18d6bd1c9 | 912622ef5ba2e1dce1aa41439e5706491416b4c9 | refs/heads/master | 2020-03-11T02:31:01.098767 | 2018-04-16T09:52:47 | 2018-04-16T09:52:47 | 129,721,213 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,138 | py | # Copyright 2017-2018 WiZR
#
# Licensed under the License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#Automatic build trigger
from robot import run_cli
from robot import run
from robot.libraries import BuiltIn
import os
from robot import run
import datetime
import xml.etree.ElementTree as ET
import xml.dom.minidom
import pprint
import pdb
import json
import urllib
import urllib2
import telnetlib
import types
import commands
import string
import traceback
import copy
from robot.api import ExecutionResult
import unittest
from testLinkLibrary import *
from robot.api import ExecutionResult, SuiteVisitor
def run_robot(nightly=False, build_id=''):
    """Run the Robot Framework suite, for ad-hoc runs and CI (nightly) builds.

    Args:
        nightly: truthy for a CI run — results go to logs/<build_id> with the
            fixed names report.html/log.html/output.xml that the TestLink
            upload step expects. BUG FIX: the old default was the *string*
            'False', which is truthy, so a bare run_robot() call silently took
            the nightly branch with an empty build_id; the default is now the
            boolean False.
        build_id: CI build identifier, used as the results folder name when
            nightly is truthy.
    """
    # Pick the results folder: CI build id for nightly runs, a per-run
    # timestamped folder otherwise.
    if nightly:
        test_run_folder = build_id
    else:
        test_run_folder = "Robot_Logs" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
    complete_dir_path = os.path.join(os.getcwd(), "logs" + os.sep + test_run_folder)
    # Exported so the suite under test can locate its own results folder.
    os.environ["RESULTS_PATH"] = test_run_folder
    if not os.path.isdir(complete_dir_path):
        os.mkdir(complete_dir_path)

    suite = "resource/HomePage_IVtree.robot"
    if nightly:
        # Fixed file names so downstream tooling can always find them.
        complete_report_path = os.path.join(complete_dir_path, "report.html")
        complete_log_path = os.path.join(complete_dir_path, "log.html")
        complete_xml_path = os.path.join(complete_dir_path, "output.xml")
        run(suite, report=complete_report_path, log=complete_log_path,
            output=complete_xml_path)
    else:
        # One timestamp for both files so the report/log pair always matches
        # (the old code called strftime twice and could straddle a second).
        stamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
        complete_report_path = os.path.join(
            complete_dir_path, "Functional_" + stamp + ".html")
        complete_log_path = os.path.join(
            complete_dir_path, "Functional_log" + stamp + ".html")
        run(suite, report=complete_report_path, log=complete_log_path)
    return
#JustaComment
class PrintTestInfo(SuiteVisitor):
    """Robot Framework result visitor that mirrors each test's PASS/FAIL
    status into TestLink for the given CI build."""
    def __init__(self,jenkins_build=''):
        # jenkins_build: CI build identifier; registered as a TestLink build.
        self.build=jenkins_build
        # NOTE(review): self.result is never read or written again.
        self.result = {}
        # TestLink connection settings come from ConfigInput.xml next to this file.
        tree = ET.parse(os.path.dirname(os.path.abspath(__file__)) + os.sep + 'ConfigInput.xml')
        for node in tree.iter():
            if node.tag == "Parameters":
                # NOTE(review): self.result_dict stays unset (AttributeError
                # below) if no <Parameters> element exists — confirm the schema.
                self.result_dict = node.attrib
        self.tls = getTestLinkObject(
            self.result_dict['testLinkURL'], self.result_dict['testLinkDEVKEY'])
        # Register this CI build under the configured project/test plan.
        addBuildToTestPlan(self.tls, self.result_dict['testLinkTestProjectName'], self.result_dict['testLinkTestPlanName'],
                           self.build, "fROM AUT")
    def visit_test(self, test):
        """Called by Robot for every executed test; pushes its result to TestLink."""
        # Map Robot's status to TestLink's single-letter codes: p=pass, f=fail.
        if test.status == "PASS":
            testCaseStatus = 'p'
        else:
            testCaseStatus = 'f'
        # Leading/trailing ':' characters are stripped from the Robot test
        # name before the TestLink lookup.
        updateResultInTestLink(self.tls, self.result_dict['testLinkTestProjectName'], self.result_dict['testLinkTestPlanName'],
                               self.build, test.name.strip(":"),testCaseStatus, self.result_dict['testLinkPlatform'])
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(description="IVTREE ROBOT FRAMEWORK")
    # BUG FIX: the default was the string "False", which is truthy, so the
    # nightly branch and the TestLink upload in the finally block ran on
    # every invocation. An empty default makes -b a real opt-in flag.
    parser.add_argument("-b", action="store", default="", dest="build_id")
    command_args = parser.parse_args()
    try:
        if command_args.build_id:
            print("running")
            run_robot(True, command_args.build_id)
        else:
            run_robot()
    finally:
        print("finally")
        # Only a nightly run produces logs/<build_id>/output.xml, so results
        # can only be parsed and pushed to TestLink in that case.
        if command_args.build_id:
            complete_dir_path = os.path.join(os.getcwd(), "logs" + os.sep + command_args.build_id)
            complete_xml_path = os.path.join(complete_dir_path, "output.xml")
            print(complete_xml_path)
            result = ExecutionResult(complete_xml_path)
            result.suite.visit(PrintTestInfo(command_args.build_id))
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
c0024ee559ed21dc3009191dae2a7c697e60c2c0 | 0be6be303a683a7448b72a391a80b318cae5e6d0 | /myapp/migrations/0028_auto_20200107_1128.py | 950aa3a6e609ffb1bcfee363e0e9f5062e12085a | [] | no_license | baljindersaini06/login-social | 19513ee4f830a28ddd56f23b1704b400393e8204 | 35f232797f77814999b9d4eb2666ee21f404d367 | refs/heads/master | 2022-12-17T14:56:14.511665 | 2020-03-11T12:32:47 | 2020-03-11T12:32:47 | 226,084,576 | 1 | 0 | null | 2020-02-07T11:44:19 | 2019-12-05T11:18:01 | JavaScript | UTF-8 | Python | false | false | 403 | py | # Generated by Django 2.2.6 on 2020-01-07 11:28
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: redeclare Location.company_id as a CharField."""
    dependencies = [
        ('myapp', '0027_auto_20200107_1125'),
    ]
    operations = [
        # company_id becomes CharField(max_length=100) with an empty default.
        migrations.AlterField(
            model_name='location',
            name='company_id',
            field=models.CharField(default='', max_length=100),
        ),
    ]
| [
"nishtha0995@gmail.com"
] | nishtha0995@gmail.com |
9b5a2cf580aa52c8586080958104672518d40cb4 | db053c220094368ecb784fbe62375378c97457c2 | /18.4sum.py | a7036e5b23f5052b138291e443703c914581eac2 | [] | no_license | thegamingcoder/leetcode | 8c16e7ac9bda3e34ba15955671a91ad072e87d94 | 131facec0a0c70d319982e78e772ed1cb94bc461 | refs/heads/master | 2020-03-22T14:51:45.246495 | 2018-07-09T00:00:06 | 2018-07-09T00:00:06 | 140,211,147 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 849 | py | #
# [18] 4Sum
#
# https://leetcode.com/problems/4sum/description/
#
# algorithms
# Medium (27.92%)
# Total Accepted: 165.5K
# Total Submissions: 592.9K
# Testcase Example: '[1,0,-1,0,-2,2]\n0'
#
# Given an array nums of n integers and an integer target, are there elements
# a, b, c, and d in nums such that a + b + c + d = target? Find all unique
# quadruplets in the array which gives the sum of target.
#
# Note:
#
# The solution set must not contain duplicate quadruplets.
#
# Example:
#
#
# Given array nums = [1, 0, -1, 0, -2, 2], and target = 0.
#
# A solution set is:
# [
# [-1, 0, 0, 1],
# [-2, -1, 1, 2],
# [-2, 0, 0, 2]
# ]
#
#
#
class Solution(object):
    def fourSum(self, nums, target):
        """Return all unique quadruplets in ``nums`` summing to ``target``.

        Classic sort + two-pointer approach: fix the two smallest elements
        with nested loops, then scan the remaining range from both ends.
        Duplicates are skipped at every level so each quadruplet appears once.
        Runs in O(n^3) time and O(1) extra space (beyond the output).

        :type nums: List[int]
        :type target: int
        :rtype: List[List[int]]
        """
        nums = sorted(nums)
        n = len(nums)
        result = []
        for i in range(n - 3):
            if i > 0 and nums[i] == nums[i - 1]:
                continue  # skip duplicate first elements
            for j in range(i + 1, n - 2):
                if j > i + 1 and nums[j] == nums[j - 1]:
                    continue  # skip duplicate second elements
                lo, hi = j + 1, n - 1
                while lo < hi:
                    s = nums[i] + nums[j] + nums[lo] + nums[hi]
                    if s < target:
                        lo += 1
                    elif s > target:
                        hi -= 1
                    else:
                        result.append([nums[i], nums[j], nums[lo], nums[hi]])
                        # advance past duplicates of both inner elements
                        while lo < hi and nums[lo] == nums[lo + 1]:
                            lo += 1
                        while lo < hi and nums[hi] == nums[hi - 1]:
                            hi -= 1
                        lo += 1
                        hi -= 1
        return result
| [
"sharanbale@yahoo-inc.com"
] | sharanbale@yahoo-inc.com |
20c3b36779ace815dd0b3609d41b4e92282b1c15 | 0a1e855a1aec7f85f2eedd3a2a6770e9c4729bdb | /nmt/loader/__init__.py | 3fa6d6e5854e8292e89825936c7f716a8b0d84e5 | [] | no_license | bkj/attn2d_fork | c6a5520e56d708375a17c80ba2b72a8754dbf740 | 4f22a92d03f718cf5a449605c473bc03131016fc | refs/heads/master | 2020-03-27T17:11:14.640884 | 2018-08-29T00:07:19 | 2018-08-29T00:07:19 | 146,833,893 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,643 | py | from .dataloader import textDataLoader
from .split_dataloader import SplitLoader
def ReadData(params, job_name):
    """Build the (source, target) textDataLoader pair described by ``params``.

    ``params`` must provide 'dir', 'src', 'trg', 'batch_size',
    'max_src_length' and 'max_trg_length'.
    """
    base, source, target = params['dir'], params['src'], params['trg']

    def loader_args(corpus, length_key):
        # One argument dict per side: h5 payload, vocab infos, batching limits.
        return {
            'h5': "%s/%s.%s" % (base, corpus, "h5"),
            'infos': "%s/%s.%s" % (base, corpus, "infos"),
            'batch_size': params['batch_size'],
            'max_length': params[length_key],
        }

    src_loader = textDataLoader(loader_args(source, 'max_src_length'),
                                job_name=job_name)
    trg_loader = textDataLoader(loader_args(target, 'max_trg_length'),
                                job_name=job_name)
    return src_loader, trg_loader
def ReadDataSplit(params, split, job_name):
    """Build the (source, target) SplitLoader pair for one dataset ``split``.

    The h5 payload is split-specific (``<corpus>_<split>.h5``) while the
    infos file is shared across splits.
    """
    base, source, target = params['dir'], params['src'], params['trg']

    def loader_args(corpus, length_key):
        return {
            'h5': "%s/%s_%s.%s" % (base, corpus, split, "h5"),
            'infos': "%s/%s.%s" % (base, corpus, "infos"),
            'batch_size': params['batch_size'],
            'max_length': params[length_key],
        }

    src_loader = SplitLoader(loader_args(source, 'max_src_length'),
                             job_name=job_name)
    trg_loader = SplitLoader(loader_args(target, 'max_trg_length'),
                             job_name=job_name)
    return src_loader, trg_loader
| [
"maha.elbayad@gmail.com"
] | maha.elbayad@gmail.com |
a259298c4f1703ea8c23a835b807fd6637d88242 | a857d1911a118b8aa62ffeaa8f154c8325cdc939 | /toontown/coghq/DistributedHealBarrelAI.py | 2b1c01a5eab98c8124fafbc194f62954b6a814a9 | [
"MIT"
] | permissive | DioExtreme/TT-CL-Edition | 761d3463c829ec51f6bd2818a28b667c670c44b6 | 6b85ca8352a57e11f89337e1c381754d45af02ea | refs/heads/main | 2023-06-01T16:37:49.924935 | 2021-06-24T02:25:22 | 2021-06-24T02:25:22 | 379,310,849 | 0 | 0 | MIT | 2021-06-22T15:07:31 | 2021-06-22T15:07:30 | null | UTF-8 | Python | false | false | 568 | py | import DistributedBarrelBaseAI
from direct.directnotify import DirectNotifyGlobal
from direct.task import Task
class DistributedHealBarrelAI(DistributedBarrelBaseAI.DistributedBarrelBaseAI):
    """AI-side heal barrel: grants a toon-up to the avatar that grabs it."""

    def __init__(self, level, entId):
        # The original body also assigned x = y = z = h = 0; the locals were
        # never used, so the dead assignments have been removed.
        DistributedBarrelBaseAI.DistributedBarrelBaseAI.__init__(self, level, entId)

    def d_setGrab(self, avId):
        """Send the setGrab update and heal the grabbing avatar.

        Args:
            avId: doId of the grabbing avatar; looked up in the air
                repository and silently ignored if not present.
        """
        self.notify.debug('d_setGrab %s' % avId)
        self.sendUpdate('setGrab', [avId])
        av = self.air.doId2do.get(avId)
        if av:
            av.toonUp(self.getRewardPerGrab())
| [
"devinhall4@gmail.com"
] | devinhall4@gmail.com |
20e50ce3aa4fb3fe4fd8bcecc0b39cc8376380f8 | ca66a4283c5137f835377c3ed9a37128fcaed037 | /Lib/site-packages/pandas/tests/indexing/multiindex/test_multiindex.py | 2887ed5275740095ce1de82ad4fa39a554230cf9 | [] | no_license | NamithaKonda09/majorProject | f377f7a77d40939a659a3e59f5f1b771d88889ad | 4eff4ff18fa828c6278b00244ff2e66522e0cd51 | refs/heads/master | 2023-06-04T20:25:38.450271 | 2021-06-24T19:03:46 | 2021-06-24T19:03:46 | 370,240,034 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,031 | py |
import numpy as np
import pytest
import pandas._libs.index as _index
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series
from pandas.util import testing as tm
class TestMultiIndexBasic(object):
    """Assorted MultiIndex indexing behaviours: performance warnings,
    membership after drop, homogeneity, hashtable cutoff and NaN handling."""
    def test_multiindex_perf_warn(self):
        # Indexing this unsorted MultiIndex must raise a PerformanceWarning.
        df = DataFrame({'jim': [0, 0, 1, 1],
                        'joe': ['x', 'x', 'z', 'y'],
                        'jolie': np.random.rand(4)}).set_index(['jim', 'joe'])

        with tm.assert_produces_warning(PerformanceWarning,
                                        clear=[pd.core.index]):
            df.loc[(1, 'z')]

        # Shuffling the rows de-sorts the index, so the warning fires again.
        df = df.iloc[[2, 1, 3, 0]]
        with tm.assert_produces_warning(PerformanceWarning):
            df.loc[(0, )]

    def test_multiindex_contains_dropped(self):
        # GH 19027
        # test that dropped MultiIndex levels are not in the MultiIndex
        # despite continuing to be in the MultiIndex's levels
        idx = MultiIndex.from_product([[1, 2], [3, 4]])
        assert 2 in idx
        idx = idx.drop(2)

        # drop implementation keeps 2 in the levels
        assert 2 in idx.levels[0]
        # but it should no longer be in the index itself
        assert 2 not in idx

        # also applies to strings
        idx = MultiIndex.from_product([['a', 'b'], ['c', 'd']])
        assert 'a' in idx
        idx = idx.drop('a')
        assert 'a' in idx.levels[0]
        assert 'a' not in idx

    @pytest.mark.parametrize("data, expected", [
        (MultiIndex.from_product([(), ()]), True),
        (MultiIndex.from_product([(1, 2), (3, 4)]), True),
        (MultiIndex.from_product([('a', 'b'), (1, 2)]), False),
    ])
    def test_multiindex_is_homogeneous_type(self, data, expected):
        # _is_homogeneous_type is True only when all levels share one dtype.
        assert data._is_homogeneous_type is expected

    def test_indexing_over_hashtable_size_cutoff(self):
        n = 10000

        # Temporarily raise the module-level _SIZE_CUTOFF above n.
        # NOTE(review): presumably this steers the engine onto a specific
        # lookup path relative to the cutoff — confirm against pandas'
        # _libs.index internals; the constant is restored at the end.
        old_cutoff = _index._SIZE_CUTOFF
        _index._SIZE_CUTOFF = 20000

        s = Series(np.arange(n),
                   MultiIndex.from_arrays((["a"] * n, np.arange(n))))

        # hai it works!
        assert s[("a", 5)] == 5
        assert s[("a", 6)] == 6
        assert s[("a", 7)] == 7

        _index._SIZE_CUTOFF = old_cutoff

    def test_multi_nan_indexing(self):
        # GH 3588
        # set_index(drop=False) must round-trip NaN values into the MultiIndex.
        df = DataFrame({"a": ['R1', 'R2', np.nan, 'R4'],
                        'b': ["C1", "C2", "C3", "C4"],
                        "c": [10, 15, np.nan, 20]})
        result = df.set_index(['a', 'b'], drop=False)
        expected = DataFrame({"a": ['R1', 'R2', np.nan, 'R4'],
                              'b': ["C1", "C2", "C3", "C4"],
                              "c": [10, 15, np.nan, 20]},
                             index=[Index(['R1', 'R2', np.nan, 'R4'],
                                          name='a'),
                                    Index(['C1', 'C2', 'C3', 'C4'], name='b')])
        tm.assert_frame_equal(result, expected)
"namithakonda09@gmail.com"
] | namithakonda09@gmail.com |
f5f6c26fb232ac3a3844b104e870e39ffc21f5c6 | 4c187f0f9d244e89facdddc1581bcef33e092a93 | /benchmarks/RevLib/4gt4-v0_80.py | 176d543a2d3493c9d36bac182920d5743b9a6430 | [] | no_license | Gonaco/Super-Qool-Benchmarks | 419dea5306bcec7e502034527acffe371a4e8004 | a630f3dd6f22bebd4ce7601a772fd3a8cd3dd08c | refs/heads/master | 2021-01-25T13:40:57.523633 | 2018-04-03T09:31:56 | 2018-04-03T09:31:56 | 123,600,859 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,599 | py | from openql import openql as ql
import os
import numpy as np
curdir = os.path.dirname(__file__)
output_dir = os.path.join(curdir, 'test_output')
ql.set_output_dir(output_dir)
config_fn = os.path.join(curdir, '/home/daniel/Master/Quantum_Computing_and_Quantum_Information/OpenQL/tests/hardware_config_cc_light.json')
platform = ql.Platform('platform_none', config_fn)
sweep_points = [1,2]
num_circuits = 1
num_qubits = 6
p = ql.Program('4gt4-v0_80', num_qubits, platform)
p.set_sweep_points(sweep_points, num_circuits)
k = ql.Kernel('4gt4-v0_80', platform)
k.gate('h',0)
k.gate('t',2)
k.gate('t',1)
k.gate('t',0)
k.gate('cnot',1,2)
k.gate('cnot',0,1)
k.gate('cnot',2,0)
k.gate('tdag',1)
k.gate('cnot',2,1)
k.gate('tdag',2)
k.gate('tdag',1)
k.gate('t',0)
k.gate('cnot',0,1)
k.gate('cnot',2,0)
k.gate('cnot',1,2)
k.gate('h',0)
k.gate('x',2)
k.gate('cnot',4,0)
k.gate('h',5)
k.gate('t',3)
k.gate('t',2)
k.gate('t',5)
k.gate('cnot',2,3)
k.gate('cnot',5,2)
k.gate('cnot',3,5)
k.gate('tdag',2)
k.gate('cnot',3,2)
k.gate('tdag',3)
k.gate('tdag',2)
k.gate('t',5)
k.gate('cnot',5,2)
k.gate('cnot',3,5)
k.gate('cnot',2,3)
k.gate('h',5)
k.gate('h',4)
k.gate('t',5)
k.gate('t',3)
k.gate('t',4)
k.gate('cnot',3,5)
k.gate('cnot',4,3)
k.gate('cnot',5,4)
k.gate('tdag',3)
k.gate('cnot',5,3)
k.gate('tdag',5)
k.gate('tdag',3)
k.gate('t',4)
k.gate('cnot',4,3)
k.gate('cnot',5,4)
k.gate('cnot',3,5)
k.gate('h',4)
k.gate('h',3)
k.gate('t',1)
k.gate('t',0)
k.gate('t',3)
k.gate('cnot',0,1)
k.gate('cnot',3,0)
k.gate('cnot',1,3)
k.gate('tdag',0)
k.gate('cnot',1,0)
k.gate('tdag',1)
k.gate('tdag',0)
k.gate('t',3)
k.gate('cnot',3,0)
k.gate('cnot',1,3)
k.gate('cnot',0,1)
k.gate('h',3)
k.gate('h',4)
k.gate('t',5)
k.gate('t',3)
k.gate('t',4)
k.gate('cnot',3,5)
k.gate('cnot',4,3)
k.gate('cnot',5,4)
k.gate('tdag',3)
k.gate('cnot',5,3)
k.gate('tdag',5)
k.gate('tdag',3)
k.gate('t',4)
k.gate('cnot',4,3)
k.gate('cnot',5,4)
k.gate('cnot',3,5)
k.gate('h',4)
k.gate('h',3)
k.gate('t',1)
k.gate('t',0)
k.gate('t',3)
k.gate('cnot',0,1)
k.gate('cnot',3,0)
k.gate('cnot',1,3)
k.gate('tdag',0)
k.gate('cnot',1,0)
k.gate('tdag',1)
k.gate('tdag',0)
k.gate('t',3)
k.gate('cnot',3,0)
k.gate('cnot',1,3)
k.gate('cnot',0,1)
k.gate('h',3)
k.gate('h',5)
k.gate('t',3)
k.gate('t',2)
k.gate('t',5)
k.gate('cnot',2,3)
k.gate('cnot',5,2)
k.gate('cnot',3,5)
k.gate('tdag',2)
k.gate('cnot',3,2)
k.gate('tdag',3)
k.gate('tdag',2)
k.gate('t',5)
k.gate('cnot',5,2)
k.gate('cnot',3,5)
k.gate('cnot',2,3)
k.gate('h',5)
k.gate('h',4)
k.gate('t',5)
k.gate('t',3)
k.gate('t',4)
k.gate('cnot',3,5)
k.gate('cnot',4,3)
k.gate('cnot',5,4)
k.gate('tdag',3)
k.gate('cnot',5,3)
k.gate('tdag',5)
k.gate('tdag',3)
k.gate('t',4)
k.gate('cnot',4,3)
k.gate('cnot',5,4)
k.gate('cnot',3,5)
k.gate('h',4)
k.gate('h',3)
k.gate('t',1)
k.gate('t',0)
k.gate('t',3)
k.gate('cnot',0,1)
k.gate('cnot',3,0)
k.gate('cnot',1,3)
k.gate('tdag',0)
k.gate('cnot',1,0)
k.gate('tdag',1)
k.gate('tdag',0)
k.gate('t',3)
k.gate('cnot',3,0)
k.gate('cnot',1,3)
k.gate('cnot',0,1)
k.gate('h',3)
k.gate('h',4)
k.gate('t',5)
k.gate('t',3)
k.gate('t',4)
k.gate('cnot',3,5)
k.gate('cnot',4,3)
k.gate('cnot',5,4)
k.gate('tdag',3)
k.gate('cnot',5,3)
k.gate('tdag',5)
k.gate('tdag',3)
k.gate('t',4)
k.gate('cnot',4,3)
k.gate('cnot',5,4)
k.gate('cnot',3,5)
k.gate('h',4)
k.gate('h',3)
k.gate('t',1)
k.gate('t',0)
k.gate('t',3)
k.gate('cnot',0,1)
k.gate('cnot',3,0)
k.gate('cnot',1,3)
k.gate('tdag',0)
k.gate('cnot',1,0)
k.gate('tdag',1)
k.gate('tdag',0)
k.gate('t',3)
k.gate('cnot',3,0)
k.gate('cnot',1,3)
k.gate('cnot',0,1)
k.gate('h',3)
k.gate('cnot',0,4)
# Register the kernel's gate sequence and compile without optimization passes.
p.add_kernel(k)
p.compile(optimize=False)
| [
"danielmoremanza@gmail.com"
] | danielmoremanza@gmail.com |
977a606e80beed44b40a02e5cf61d426b4f9fb0a | 9e0099592b617fb9bdad4d0cb3c2ea454c21a712 | /web/impl/lxml_components.py | 7e92d031ed97fbbdf5987d36f10c7113673584dd | [
"MIT"
] | permissive | gabrielcnr/enaml-web | 0aafd3118beab8cd49bf1a8535792ef21ca52527 | ad741d546cc5971540647a05bf53826a8d2887c1 | refs/heads/master | 2021-06-24T03:14:31.554078 | 2017-09-04T05:44:50 | 2017-09-04T05:44:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 813 | py | '''
Copyright (c) 2017, Jairus Martin.
Distributed under the terms of the MIT License.
The full license is in the file COPYING.txt, distributed with this software.
Created on Apr 12, 2017
@author: jrm
'''
import inspect
from web.components import html
def generic_factory():
    """Return the generic ``WebComponent`` class used to render any plain
    HTML widget (imported lazily here)."""
    from .lxml_toolkit_object import WebComponent
    return WebComponent
def code_factory():
    """Return the component class used for ``Code`` widgets (lazy import)."""
    from .lxml_code import CodeComponent
    return CodeComponent
def markdown_factory():
    """Return the component class used for ``Markdown`` widgets (lazy import)."""
    from .lxml_md import MarkdownComponent
    return MarkdownComponent
#: Map every class exported by ``web.components.html`` to the generic
#: WebComponent factory.
FACTORIES = {
    name: generic_factory for name, obj in inspect.getmembers(html) if inspect.isclass(obj)
}
#: Special-cased widgets that need dedicated component implementations.
FACTORIES.update({
    'Code': code_factory,
    'Markdown': markdown_factory,
})
| [
"frmdstryr@gmail.com"
] | frmdstryr@gmail.com |
8bb64c4cb3d29bfca57ad52e072d0b32d6eeee0f | 6e0666ac38280b5409c4917d3bbc6a101365f164 | /students_base/migrations/0022_job_expiry_date.py | 95d91799cb0366d322312a53af1b37bb54d9b222 | [] | no_license | wolf553055/practice | e6ab89a27c2ef26f42f732513cff1d23ff08cf88 | ed0ee9c0cfd04b17fe0379a7c581aad0e910af9a | refs/heads/master | 2022-04-10T12:55:19.149399 | 2020-03-13T21:25:47 | 2020-03-13T21:25:47 | 257,196,828 | 1 | 0 | null | 2020-04-20T06:54:24 | 2020-04-20T06:54:24 | null | UTF-8 | Python | false | false | 404 | py | # Generated by Django 3.0.2 on 2020-02-20 14:09
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: adds a nullable, optional
    # ``expiry_date`` DateTimeField to the ``job`` model.

    dependencies = [
        ('students_base', '0021_auto_20200220_1337'),
    ]

    operations = [
        migrations.AddField(
            model_name='job',
            name='expiry_date',
            field=models.DateTimeField(blank=True, null=True),
        ),
    ]
| [
"ski20173@yandex.ru"
] | ski20173@yandex.ru |
ebab8fd73010dddd9b8a8c5ee3477887af9a9785 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_slaloming.py | fc70a9f6793d6a5ff31d2307f098e301a9ecaa68 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 226 | py |
#class header
class _SLALOMING():
def __init__(self,):
self.name = "SLALOMING"
self.definitions = slalom
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['slalom']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
58c6dd77eaee72629489793af1e4621301029f11 | 6a0591333c8f3f82c21409547144b7b75bb44c8b | /Python Scripts/2016_06_21_1800_Gauss_sin.py | 72de6690cc1821da3c98af33eacd30b116b95f6c | [] | no_license | LyingCortex/HFSS | b6bf3092828966f89d37df553b664d3f9924d8d7 | bf32721106679216d26676062073c9c82360da56 | refs/heads/master | 2020-12-24T08:00:08.014218 | 2019-03-20T08:27:36 | 2019-03-20T08:27:36 | 55,970,647 | 10 | 2 | null | null | null | null | UTF-8 | Python | false | false | 579 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Jun 21 17:57:14 2016
@author: LY
"""
import math  # sin/pi are used below; the original script never imported math

# Horn-profile geometry parameters (units as in the source HFSS model).
Ra = 9.1   # radius at the junction z = L1
L1 = 34.3  # length of the sine-profiled throat section
LL = 93.2  # total length of the structure
# Number of segments along the hyperbolic flare.  (A dead ``N1 = 35``
# assignment that was immediately overwritten has been removed.)
N1 = round(LL - L1)
rL = 14.9  # aperture radius at z = LL
# Hyperbolic profile r(z) = Ra*sqrt(1 + ((z - L1)/q)^2), with q chosen so
# that r(LL) == rL.
q = (LL - L1) / ((rL / Ra) ** 2 - 1) ** 0.5
z2 = [L1 + (LL - L1) / N1 * (i + 1) for i in range(N1)]
r2 = [Ra * (1 + ((zi - L1) / q) ** 2) ** 0.5 for zi in z2]
pos = []
for i in range(N1):
    pos.append((0, -r2[i], z2[i]))

# Sine-power profile from the throat radius ri up to Ra over z in [0, L1].
ri = 12 / 2
L1 = 34.3
Ra = 9.1
N1 = 35
pp = 0.75  # exponent of the sine profile
z1 = [34.3 / N1 * i for i in range(N1 + 1)]
r1 = [ri + (Ra - ri) * math.sin(math.pi * zi / 2 / L1) ** pp for zi in z1]
pos1 = []
for i in range(N1 + 1):
    # z offset by 0.4 to start just past the port plane
    pos1.append((0, -r1[i], z1[i] + 0.4))
"liuyangxtp@gmail.com"
] | liuyangxtp@gmail.com |
f0e2f32f79e59d7cd29ab9dd1279b7ac6825d104 | a659f96022132fda0e036755b4659a118cba1df3 | /libs/decay.py | a0927a3a81855370495030566303d2614295d316 | [] | no_license | ekimekim/pylibs | 29d7172c8bf05ac0d8582118dff86dd04bf56fec | f4c180dc2e814e67064e4d03eafdb92f307e5748 | refs/heads/master | 2022-02-18T18:41:46.694266 | 2020-12-12T01:23:51 | 2020-12-12T01:23:51 | 17,832,278 | 10 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,893 | py |
"""Implements a counter whose value decays over time
DecayCounter() objects store a float, which decays over time with the given decay rate.
Decay rate is expressed as a half-life, ie. after half_life seconds, the value is halved.
The decay is automatically accounted for on a get() operation, no background thread or other out-of-band
timekeeping is used.
Note that while a set() operation is provided, it is not safe to use in a read-modify-write pattern,
as you would lose the decay that should have occurred between the read and the write.
You should instead use modify() for such operations, which takes a callable that should perform the operation
and return the result.
If one of the following libraries is installed, monotonic time will be used by default:
Monotime
Monoclock
monotonic
If you would like to enforce this as a requirement, use the monotonic=True flag.
Conversely, if you would like to force the use of wall clock time.time() even when monotonic is available,
use monotonic=False. Note this is probably a bad idea
(for example, your values will jump up wildly if the system time is changed backwards).
"""
import math
import time
from importlib import import_module
# lib name, function that gets value given module (in order of preference)
_monotonic_libs = [
    ('monotonic', lambda module: module.monotonic()),
    ('monotime', lambda module: module.monotonic()),
    ('monoclock', lambda module: module.nano_count() / 1e9),
]
# Probe for the first importable monotonic-time provider.  On success,
# ``monotonic_time`` is bound to a zero-argument callable returning seconds.
for _lib, _fn in _monotonic_libs:
    try:
        _monotonic_module = import_module(_lib)
    except (ImportError, RuntimeError): # "monotonic" will raise RuntimeError if no implementation for platform
        continue
    has_monotonic = True
    monotonic_time = lambda: _fn(_monotonic_module)
    break
else:
    # for/else: no library could be imported; callers fall back to time.time().
    has_monotonic = False
class DecayCounter(object):
    """A float value that halves every ``halflife`` seconds.

    The decay is folded in lazily whenever the value is read; no timers,
    threads or background bookkeeping are involved.
    """

    def __init__(self, halflife, initial=0, monotonic=None):
        """``halflife`` is expressed in seconds.

        ``monotonic=True`` forces monotonic time (raising ValueError when the
        system lacks it), ``monotonic=False`` forces wall-clock ``time.time()``
        and ``None`` (the default) picks monotonic time when available.
        """
        if monotonic and not has_monotonic:
            raise ValueError("System does not support monotonic time")
        if monotonic is None:
            monotonic = has_monotonic
        self._halflife = halflife
        self._monotonic = monotonic
        self._update(initial, self._get_time())

    @property
    def halflife(self):
        return self._halflife

    @halflife.setter
    def halflife(self, halflife):
        # Bank the decay accrued under the old half-life before switching.
        current, now = self._get()
        self._update(current, now)
        self._halflife = halflife

    def get(self):
        """Return the current value, with all decay since the last write applied."""
        return self._get()[0]

    def modify(self, func):
        """Read-modify-write safely: ``func`` receives the decayed value and
        returns the replacement, so no decay is lost between read and write.

        Example::

            counter.modify(lambda value: value * 2)
        """
        current, now = self._get()
        self._update(func(current), now)

    def copy(self):
        """Return a fresh DecayCounter with the same half-life, clock choice
        and current value."""
        return DecayCounter(self.halflife, self.get(), monotonic=self._monotonic)

    def set(self, value):
        """Overwrite the value with a constant.  Not safe for read-modify-write
        patterns -- use modify() for those."""
        # set() is just modify() ignoring the old value.
        self.modify(lambda _current: value)

    def add(self, amount):
        """Add ``amount`` (possibly negative) to the decayed value."""
        self.modify(lambda current: current + amount)

    def _get_time(self):
        """Current time under the chosen clock; subclasses may override to
        supply alternate timekeeping."""
        if self._monotonic:
            return monotonic_time()
        return time.time()

    def _get(self):
        """Return ``(value, timestamp)`` with exponential decay applied.

        The stored sample is left untouched to avoid accumulating floating
        point error across rapid successive reads.
        """
        rate = -math.log(2) / self.halflife
        now = self._get_time()
        decayed = self._value * math.exp(rate * (now - self._time))
        return decayed, now

    def _update(self, value, timestamp):
        # Record a fresh (value, time) sample.
        self._value = value
        self._time = timestamp
| [
"mikelang3000@gmail.com"
] | mikelang3000@gmail.com |
2ef0fc6b16f988d02c738f6ee9e2890d721c1d95 | d7b628c387070e62a7a559c86770c9e71f4c13ae | /qgis_plutil/utils/legend.py | 2d260ca7c539ccd7c3711e624ae607d03e6f9e6c | [
"MIT"
] | permissive | pyqgis/plutil | 4cd3881e2415da1811a4a10ecc76ed9d42759148 | 79df2596e4e0340f3765ccb5bdfd4cc1d01fcb7d | refs/heads/master | 2020-09-11T11:48:06.781470 | 2019-11-22T11:48:30 | 2019-11-22T11:48:30 | 222,054,624 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,102 | py | # -*- coding: utf-8 -*-
"""
"""
from __future__ import unicode_literals
from __future__ import print_function
import logging
from qgis.core import (
QgsProject, QgsLayerTreeGroup, QgsLayerTreeLayer, QgsVectorLayer,
QgsVectorLayerJoinInfo
)
from qgis_plutil.utils.geometry import geometry_flat_name
logger = logging.getLogger('plutil.legend')
def get_path(path, parent=None, prj=None):
    """
    Finds or creates the group at `path`, creating intermediate groups.

    Arguments:
        path:
            The group path: a "/"-separated string, a list/tuple of names,
            or None to return `parent` unchanged.
        parent:
            The top level group to search.
        prj:
            The project whose layer tree root is used when parent is None.
    """
    if prj is None:
        prj = QgsProject.instance()
    if parent is None:
        parent = prj.layerTreeRoot()
    if path is None:
        return parent
    if not isinstance(path, (list, tuple)):
        path = path.split("/")
    for part in path:
        # Skip empty segments (e.g. "a//b" or a trailing "/").  The original
        # tested ``len(path) > 0`` here, which is always true inside the loop
        # and would have created groups with empty names.
        if len(part) > 0:
            parent = get_group(part, parent)
    return parent
def get_group(name, parent):
    """
    Return the child group called `name`, creating it when absent.

    Arguments:
        name:
            The name of the group.
        parent:
            The object searched and, on a miss, asked to create the group.
    """
    existing = parent.findGroup(name)
    if existing is not None:
        return existing
    return parent.addGroup(name)
def all_groups(parent=None):
    """
    Return an iterator over every group under `parent` (the whole legend
    when parent is None), depth first.

    The original version discarded the recursive generator call and never
    returned the generator itself, so it always returned None; this version
    actually yields every nested group.
    """
    if parent is None:
        parent = QgsProject.instance().layerTreeRoot()
    def do_a_group(grp, level=0):
        for child in grp.children():
            if isinstance(child, QgsLayerTreeGroup):
                yield child
                # Delegate to the recursive generator (py2-compatible form
                # of ``yield from``).
                for sub in do_a_group(child, level=level + 1):
                    yield sub
    return do_a_group(parent)
def all_layers(parent=None):
    """
    Return a list of every layer under `parent` (the whole legend when
    parent is None), collected depth first.
    """
    if parent is None:
        parent = QgsProject.instance().layerTreeRoot()
    found = []
    def walk(node, level=0):
        for child in node.children():
            if isinstance(child, QgsLayerTreeGroup):
                walk(child, level=level + 1)
            elif isinstance(child, QgsLayerTreeLayer):
                found.append(child)
    walk(parent)
    return found
def all_layers_with_name(name, parent=None):
    """
    Return a list of every layer called `name` under `parent` (the whole
    legend when parent is None), collected depth first.
    """
    if parent is None:
        parent = QgsProject.instance().layerTreeRoot()
    matches = []
    def walk(node, level=0):
        for child in node.children():
            if isinstance(child, QgsLayerTreeGroup):
                walk(child, level=level + 1)
            elif isinstance(child, QgsLayerTreeLayer):
                if child.name() == name:
                    matches.append(child)
    walk(parent)
    return matches
def locate_own_layer(name, group):
    """ Return the direct child layer of `group` called `name`, else None."""
    matching = (child for child in group.children()
                if isinstance(child, QgsLayerTreeLayer) and child.name() == name)
    return next(matching, None)
def all_tree_items(parent=None):
    """
    Return an iterator over every group and layer under `parent`
    (the whole legend when parent is None), depth first.

    The original version discarded the recursive generator call and never
    returned the generator itself, so it always returned None; this version
    actually yields every nested item.
    """
    if parent is None:
        parent = QgsProject.instance().layerTreeRoot()
    def do_a_group(grp, level=0):
        for child in grp.children():
            if isinstance(child, QgsLayerTreeGroup):
                yield child
                # py2-compatible form of ``yield from``.
                for sub in do_a_group(child, level=level + 1):
                    yield sub
            elif isinstance(child, QgsLayerTreeLayer):
                yield child
    return do_a_group(parent)
def add_layer_to_legend(layer, group=None, prj=None):
    """ Adds a layer to a project in specified group. """
    if prj is None:
        prj = QgsProject.instance()
    if isinstance(group, str):
        # A path string was given; resolve (or create) the actual group.
        group = get_path(path=group, prj=prj)
    # Register the layer with the project but keep QGIS from auto-placing it
    # at the legend root; we attach it to the chosen group ourselves.
    # NOTE(review): assumes `group` is not None at this point -- a None group
    # raises AttributeError on the next line; confirm against callers.
    QgsProject.instance().addMapLayer(layer, addToLegend=False)
    result = group.addLayer(layer)
    # Inherit the project CRS so the new layer lines up with the canvas.
    layer.setCrs(prj.crs())
    # QgsCoordinateReferenceSystem
    logger.debug("layer crs: %r", prj.crs().description())
    layer.updateExtents()
    return result
def get_layer(legend_name, iface, geom_type, default_group=None):
    """
    Finds or creates a layer.

    If the name is empty it will attempt to locate the current layer in
    the legend. If there is none it will error out with ValueError.

    If the name is provided:
    - and contains path separators (/) the specific location is searched
    - and no separators are present it is searched in the entire legend.

    If the layer is nowhere to be found a new memory layer is created.

    Arguments:
        legend_name (str):
            The name of the layer to get or create.
        iface (QgsInterface):
            QGis interface.
        geom_type (QgsWkbTypes, str):
            The type of geometry for a new layer. If it is a numeric wkb
            code it will be converted to a name.
        default_group(str, None):
            Where to place a new layer. By default it is placed at the
            top level.

    Returns:
        QgsLayerTreeLayer, QgsVectorLayer, bool (True when newly created)
    """
    is_new = False
    assert legend_name is not None
    if len(legend_name) == 0:
        # Empty name: fall back to whatever is selected in the legend.
        logger.debug("No layer name provided; locating current layer...")
        map_layer = iface.layerTreeView().currentLayer()
        if map_layer is None:
            raise ValueError("No layer was provided and there "
                             "is no current layer")
        layer = map_layer.layer()
        logger.debug("current layer is %s", layer.name())
    else:
        logger.debug("Requesting to locate layer %s", legend_name)
        # Split a "grp/subgrp/layer" path, dropping empty segments.
        path = list(part for part in legend_name.split('/') if len(part) > 0)
        if len(path) > 1:
            # Explicit location: only look among the group's direct children.
            logger.debug("The path has %d components", len(path))
            legend_name = path[-1]
            group = get_path(path[:-1])
            map_layer = locate_own_layer(name=legend_name, group=group)
            if map_layer:
                layers = [map_layer]
            else:
                layers = []
        else:
            # Bare name: search the whole legend, then the project registry.
            logger.debug("The path has a single name; searching entire tree")
            layers = all_layers_with_name(legend_name)
            if len(layers) == 0:
                map_layers = QgsProject.instance().mapLayersByName(legend_name)
                layers = [QgsProject.instance().layerTreeRoot().findLayer(ly)
                          for ly in map_layers]
            group = None
        logger.debug("found %d layers", len(layers))
        if len(layers) == 0:
            # Nothing found anywhere: create a new in-memory vector layer.
            logger.debug("no layer has been located and default group is %r",
                         default_group)
            if group is None and default_group:
                logger.debug("locating default group")
                group = get_path(default_group)
            if isinstance(geom_type, int):
                # Numeric wkb type: convert to the name QgsVectorLayer expects.
                geom_type = geometry_flat_name(geom_type)
                logger.debug("geometry type parsed to %r", geom_type)
            else:
                logger.debug("geometry type is %r", geom_type)
            layer = QgsVectorLayer(geom_type, legend_name, "memory")
            map_layer = add_layer_to_legend(layer, group=group)
            is_new = True
            logger.debug("layer was created and added to legend")
        else:
            # One or more matches: arbitrarily use the first one.
            logger.debug("selecting first layer among %d", len(layers))
            map_layer = layers[0]
            layer = map_layer.layer()
    return map_layer, layer, is_new
def current_layer(iface):
    """ Return the layer currently selected in the legend tree view
    (kept as a reminder of the API call). """
    return iface.layerTreeView().currentLayer()
| [
"nicu.tofan@gmail.com"
] | nicu.tofan@gmail.com |
66690261699a3b99d21fa2dbe477740cf4f0083f | b81a7b79f84a6460d258ed12a829f2c4e5bcedc3 | /dataset/dataset.py | 0fbe2b49834b2a7ef9df5801defa912a106fc27f | [
"Apache-2.0"
] | permissive | LearningHealthcare/dataset | cb52df3e4c719546b4bceef33175b34f9218f53a | 49f31c7ade0c2d6983eeaecc4a94f33a809fad6e | refs/heads/master | 2020-04-02T16:18:47.280132 | 2018-11-04T01:12:16 | 2018-11-04T01:12:16 | 154,601,479 | 0 | 0 | null | 2018-10-25T02:59:02 | 2018-10-25T02:59:02 | null | UTF-8 | Python | false | false | 2,724 | py | """ Dataset """
import numpy as np
from .base import Baseset
from .batch import Batch
from .dsindex import DatasetIndex
from .pipeline import Pipeline
class Dataset(Baseset):
    """ Dataset

    Attributes
    ----------
    index
    indices
    is_split
    """
    def __init__(self, index, batch_class=Batch, preloaded=None, *args, **kwargs):
        super().__init__(index, *args, **kwargs)
        self.batch_class = batch_class
        self.preloaded = preloaded

    @classmethod
    def from_dataset(cls, dataset, index, batch_class=None):
        """ Create Dataset from another dataset with new index
        (usually a subset of the source dataset index)
        """
        batch_class_matches = batch_class is None or batch_class == dataset.batch_class
        if batch_class_matches and cls._is_same_index(index, dataset.index):
            # Nothing would change -- reuse the source dataset as-is.
            return dataset
        chosen_class = dataset.batch_class if batch_class is None else batch_class
        return cls(index, batch_class=chosen_class, preloaded=dataset.preloaded)

    @staticmethod
    def build_index(index):
        """ Create an index """
        return index if isinstance(index, DatasetIndex) else DatasetIndex(index)

    @staticmethod
    def _is_same_index(index1, index2):
        # Related index classes, equal shapes and elementwise-equal values.
        related_types = (isinstance(index1, type(index2))
                         or isinstance(index2, type(index1)))
        return (related_types
                and index1.indices.shape == index2.indices.shape
                and np.all(index1.indices == index2.indices))

    def create_subset(self, index):
        """ Create a dataset based on the given subset of indices """
        return type(self).from_dataset(self, index)

    def create_batch(self, batch_indices, pos=False, *args, **kwargs):
        """ Create a batch from given indices.
        if `pos` is `False`, then `batch_indices` should contain the indices
        that should be included in the batch
        otherwise `batch_indices` should contain their positions in the current index
        """
        if not isinstance(batch_indices, DatasetIndex):
            # Raw indices / positions: let the index build a proper batch index.
            batch_indices = self.index.create_batch(batch_indices, pos, *args, **kwargs)
        return self.batch_class(batch_indices, preloaded=self.preloaded, **kwargs)

    def pipeline(self, config=None):
        """ Start a new data processing workflow """
        return Pipeline(self, config=config)

    @property
    def p(self):
        """:class:`dataset.Pipeline` : a short alias for `pipeline()` """
        return self.pipeline()

    def __rshift__(self, other):
        if not isinstance(other, Pipeline):
            raise TypeError("Pipeline is expected, but got %s. Use as dataset >> pipeline" % type(other))
        bound_pipeline = other.from_pipeline(other)
        bound_pipeline.dataset = self
        return bound_pipeline
| [
"rhudor@gmail.com"
] | rhudor@gmail.com |
b6089bc7227de2cb91899aa1b83fc1a25c05ecf0 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_1/640.py | 332708be890fae3760c6a0435ddcadcf15e7f424 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,329 | py | #!/usr/bin/python
from pprint import pprint
import string
#----------------------------------------------------------------------
# Input
#----------------------------------------------------------------------
FILE = 'A-small'
class Reader:
    """Sequential line reader for the Code Jam input file ``<filename>.in``."""

    def __init__(self, filename):
        self.c = -1  # index of the last line handed out
        # Read the whole file up front and close the handle (the original
        # left it open).
        with open(filename + '.in') as stream:
            self.lines = stream.read().split('\n')

    def get(self):
        """Return the next line."""
        self.c += 1
        return self.lines[self.c]

    def get_number(self):
        """Return the next line parsed as an int.

        ``int`` replaces the deprecated, Python-2-only ``string.atoi``.
        """
        return int(self.get())
# Parse the input file: N test cases, each with S engines then Q queries.
r = Reader(FILE)
tasks = []
n = r.get_number()  # number of test cases
for i in range(n):
    s = r.get_number()  # number of search engines
    engines = []
    for j in range(s):
        engines.append(r.get())
    q = r.get_number()  # number of queries
    queries = []
    for j in range(q):
        queries.append(r.get())
    tasks.append({'engines': engines, 'queries': queries})
#----------------------------------------------------------------------
# Business Logic
#----------------------------------------------------------------------
def count_jumps(engines, queries):
    """For every starting query position, return the longest run of queries
    a single engine choice can serve from there (the run ends where the
    engine's own name appears in the query stream)."""
    total = len(queries)
    jumps = []
    for start in range(total):
        longest = 0
        for engine in engines:
            stop = start
            for stop in range(start, total):
                if queries[stop] == engine:
                    break
            span = stop - start + 1
            if span > longest:
                longest = span
        jumps.append(longest)
    return jumps
class Tracker:
    """Depth-first search for the minimum number of engine switches.

    ``jumps[i]`` is the longest stretch of queries servable without a switch
    starting at query i.  ``best`` holds the shortest jump sequence found so
    far; ``get_result`` converts it into a switch count.
    """

    def __init__(self, jumps):
        self.jumps = jumps
        self.length = len(jumps)
        self.best = [1] * self.length  # upper bound: one query per hop
        self.visited = []

    def track(self, index = 0, path = None):
        """Explore jump sequences from `index`; `path` is the hops so far."""
        # Avoid the shared mutable-default-argument footgun of the original
        # ``path = []``; behavior is unchanged because the default list was
        # never mutated (``path + [c]`` always builds a new list).
        if path is None:
            path = []
        if index in self.visited: return
        self.visited.append(index)
        if sum(path) >= self.length:
            self.check_best(path)
        else:
            # Prune: this path can no longer beat the current best.
            if len(path) >= len(self.best) - 1: return
            c = self.jumps[index]
            for i in range(c, 0, -1):
                self.track(index + i, path + [c])

    def check_best(self, path):
        # Keep the shortest complete jump sequence seen so far.
        if len(path) < len(self.best):
            self.best = path

    def get_result(self):
        """Number of switches = hops - 1 (0 when no hops were needed)."""
        if self.best == []: return 0
        return len(self.best) - 1
#----------------------------------------------------------------------
# Output
#----------------------------------------------------------------------
# Solve each task and write "Case #i: <answer>" lines to <FILE>.out.
f = open(FILE + '.out', 'w')
i = 0
for task in tasks:
    i += 1
    # Longest servable run per position, then the minimum-switch search.
    jumps = count_jumps(task['engines'], task['queries'])
    tracker = Tracker(jumps)
    tracker.track()
    f.write('Case #' + str(i) + ': ')
    f.write(str(tracker.get_result()))
    f.write('\n')
f.close()
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
0ad24802dbc3987906af866664153b0b54e912f5 | 99d7a6448a15e7770e3b6f3859da043300097136 | /src/stats/core.py | 50dd565dc1fe91846ef80fa306eef8c1e3ec2540 | [] | no_license | softtrainee/arlab | 125c5943f83b37bc7431ae985ac7b936e08a8fe4 | b691b6be8214dcb56921c55daed4d009b0b62027 | refs/heads/master | 2020-12-31T07:54:48.447800 | 2013-05-06T02:49:12 | 2013-05-06T02:49:12 | 53,566,313 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,357 | py | #===============================================================================
# Copyright 2012 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
#============= enthought library imports =======================
#============= standard library imports ========================
from numpy import asarray
from scipy.stats import chi2
#============= local library imports ==========================
def calculate_mswd(x, errs, k=1):
    """Return the weighted MSWD of `x` given 1-sigma errors `errs`.

    `k` is the number of fitted parameters (degrees of freedom = n - k).
    Returns 0 when fewer than two values are supplied.
    """
    mswd_w = 0
    n = len(x)
    if n >= 2:
        x = asarray(x)
        errs = asarray(errs)
        xmean_w, _err = calculate_weighted_mean(x, errs)
        ssw = (x - xmean_w) ** 2 / errs ** 2
        mswd_w = ssw.sum() / float(n - k)
    return mswd_w


def calculate_weighted_mean(x, errs, error=0):
    """Return ``(weighted mean, error of the mean)`` weighting by 1/err**2.

    error=0 -> error of the weighted mean (wtot ** -0.5)
    error=1 -> unit error
    """
    x = asarray(x, dtype=float)
    errs = asarray(errs, dtype=float)
    # Vectorized weights.  The original built them with asarray(map(...)),
    # which breaks on Python 3 (map returns an iterator, yielding a 0-d
    # object array) and silently did integer division for int errors on
    # Python 2.
    weights = 1.0 / errs ** 2
    wtot = weights.sum()
    wmean = (weights * x).sum() / wtot
    if error == 0:
        werr = wtot ** -0.5
    elif error == 1:
        werr = 1
    else:
        # Previously an unrecognized flag fell through to UnboundLocalError;
        # fail loudly instead.
        raise ValueError('error must be 0 or 1, got %r' % (error,))
    return wmean, werr
def validate_mswd(mswd, n, k=1):
    """
    Check whether `mswd` is acceptable following Mahon 1996: does it fall in
    the 95% confidence interval of the reduced chi-square distribution with
    n - k degrees of freedom (reduced chi2 = chi2 / dof)?

    Returns None when n < 2 (too few points to judge).

    http://en.wikipedia.org/wiki/Goodness_of_fit
    http://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.chi2.html#scipy.stats.chi2
    """
    if n < 2:
        return
    dof = n - k
    # Scaling chi2 by 1/dof yields the reduced chi-square distribution.
    reduced_chi2 = chi2(dof, scale=1 / float(dof))
    lower, upper = reduced_chi2.interval(0.95)
    return lower <= mswd <= upper
#============= EOF =============================================
# if 1 <= dof <= 25:
# table = {
# 1:(0.001, 5.020),
# 2:(0.025, 3.690),
# 3:(0.072, 3.117),
# 4:(0.121, 2.775),
# 5:(0.166, 2.560),
# 6:(0.207, 2.400),
# 7:(0.241, 2.286),
# 8:(0.273, 2.188),
# 9:(0.300, 2.111),
# 10:(0.325, 2.050),
# 11:(0.347, 1.991),
# 12:(0.367, 1.942),
# 13:(0.385, 1.900),
# 14:(0.402, 1.864),
# 15:(0.417, 1.833),
# 16:(0.432, 1.800),
# 17:(0.445, 1.776),
# 18:(0.457, 1.750),
# 19:(0.469, 1.732),
# 20:(0.480, 1.710),
# 21:(0.490, 1.690),
# 22:(0.500, 1.673),
# 23:(0.509, 1.657),
# 24:(0.517, 1.642),
# 25:(0.524, 1.624),
# }
# low, high = table[n]
# else:
# low, high = (0.524, 1.624)
| [
"jirhiker@localhost"
] | jirhiker@localhost |
6127b04a22d38f0bf204ac9cd6ba34e80952b3c3 | 2fbfe3d922452743214509d1fdce5dafdc268250 | /yield/print_commands_heppy.py | 4ee09b53a252f2fae19eb15fd504afdca8251d11 | [
"BSD-3-Clause"
] | permissive | alphatwirl/concur-anon-comp | 284f94fdbbdb175318010d9a321b8f5b42afbf46 | 7fba43ae05d2856ed4a00a8f30b759ad4f4ba7ec | refs/heads/master | 2021-07-09T19:42:42.349845 | 2017-10-08T10:01:59 | 2017-10-08T10:01:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,355 | py | #!/usr/bin/env python
# Tai Sakuma <tai.sakuma@cern.ch>
import os, sys
import itertools
import collections
##__________________________________________________________________||
scripts_subdir = os.path.dirname(__file__)
scripts_dir = os.path.dirname(scripts_subdir)
##__________________________________________________________________||
sys.path.insert(1, scripts_dir)
from command_composer import *
from command_composer_local import *
##__________________________________________________________________||
# Command-line options shared by every twirl invocation below.
twirl_option_common = ' '.join([
    '--parallel-mode htcondor',
    '--logging-level INFO'
])
##__________________________________________________________________||
# Input (heppy event trees) and output (summary-table) top directories.
heppy_topdir = os.path.join(os.path.sep, 'hdfs', 'SUSY', 'RA1')
# heppy_topdir = os.path.join(os.path.sep, 'Users' ,'sakuma', 'work', 'cms', 'c150130_RA1_data')
##__________________________________________________________________||
tbl_topdir = os.path.join('.', 'tbl_20170710_01')
##__________________________________________________________________||
lumi = 35900 # integrated luminosity [pb-1] used to scale MC yields
##__________________________________________________________________||
# Parsed once at import time; ``args`` is read by main() below.
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('jobs', nargs = argparse.REMAINDER, help = "names of jobs to be run")
parser.add_argument('--print-jobs', action = 'store_true', default = False, help = 'print the names of all jobs and exit')
args = parser.parse_args()
##__________________________________________________________________||
def main():
    """Resolve the requested job names and print their shell commands.

    With --print-jobs, just list every known job name and exit.
    """
    job_dict = build_jobs()
    all_job_names = job_dict.keys()
    job_names = args.jobs if args.jobs else all_job_names
    if args.print_jobs:
        print '\n'.join(all_job_names)
        return
    jobs = [job_dict[j] for j in job_names]
    # Each job is a callable returning a list of command strings; flatten them.
    commands = list(itertools.chain(*[j() for j in jobs]))
    for command in commands:
        print command
##__________________________________________________________________||
def build_jobs():
    """Assemble the full name -> job mapping, preserving insertion order."""
    jobs = collections.OrderedDict()
    for build in (build_jobs_twirl_heppy_SM, build_jobs_tbl_process_heppy_SM):
        jobs.update(build())
    return jobs
##__________________________________________________________________||
def build_jobs_twirl_heppy_SM():
    """Build the tree-summarizing job for the Standard Model heppy sample."""
    ret = collections.OrderedDict()
    name = 'SM'
    tbl_dir = os.path.join(tbl_topdir, name)
    heppy_dir = os.path.join(heppy_topdir, '80X', 'MC', '20161021_B03', 'ROC_MC_SM')
    heppy_components = [ ]  # empty: process all components in heppy_dir
    # MC-specific twirl options on top of the common ones.
    twirl_option = "{common} --max-events-per-process 500000 --mc".format(common = twirl_option_common)
    jobs = build_jobs_twirl_heppy_template(
        name = name,
        tbl_dir = tbl_dir,
        heppy_dir = heppy_dir,
        heppy_components = heppy_components,
        twirl_option = twirl_option
    )
    ret.update(jobs)
    return ret
##__________________________________________________________________||
def build_jobs_twirl_heppy_template(name, tbl_dir, heppy_dir,
                                    heppy_components, twirl_option):
    """Return a one-entry OrderedDict mapping 'summarize_trees_<name>' to an
    EchoCommands job: make the table dir, run the twirl summarizer, then
    produce the DAS-URL table and its HTML rendering."""
    ret = collections.OrderedDict()
    job = EchoCommands(
        commands = [
            'mkdir -p {name}'.format(name = tbl_dir),
            '{script} --components {components} -i {heppy_dir} -o {tbl_dir} {options}'.format(
                script = os.path.join(scripts_subdir, 'twirl_mktbl_heppy.py'),
                components = ' '.join(heppy_components),
                heppy_dir = heppy_dir,
                tbl_dir = tbl_dir,
                options = twirl_option,
            ),
            '{script} {tbl_dir}'.format(
                script = os.path.join(scripts_dir, 'scripts', 'tbl_dataset_dasurl.py'),
                tbl_dir = tbl_dir
            ),
            '{script} {tbl_dir}'.format(
                script = os.path.join(scripts_dir, 'scripts', 'create_dataset_dasurl_html.py'),
                tbl_dir = tbl_dir
            )
        ]
    )
    ret['summarize_trees_{}'.format(name)] = job
    return ret
##__________________________________________________________________||
def build_jobs_tbl_process_heppy_SM():
    """Build the two table-processing jobs for the SM sample: combining MC
    components into processes, then aggregating away kinematic categories."""
    ret = collections.OrderedDict()
    name = 'SM'
    tbl_dir = os.path.join(tbl_topdir, name)
    tbl_cfg_process = os.path.join(scripts_subdir, 'tbl', 'tbl_cfg_component_phasespace_process.txt')
    # Job 1: copy the process config and combine MC components, scaled to lumi.
    job = EchoCommands(
        commands = [
            'mkdir -p {name}'.format(name = tbl_dir),
            'rsync -u -t {src} {dest}'.format(
                src = tbl_cfg_process,
                dest = os.path.join(tbl_dir, 'tbl_cfg_component_phasespace_process.txt')
            ),
            '{script} --lumi {lumi} {tbl_dir}'.format(
                script = os.path.join(scripts_dir, 'scripts', 'tbl_n_combine_mc_components.py'),
                lumi = lumi,
                tbl_dir = tbl_dir
            )
        ]
    )
    ret['combine_tables_into_process_{}'.format(name)] = job
    # NOTE(review): the infile prefix uses "\." inside a non-raw string -- the
    # downstream script presumably treats it as a regex; confirm before editing.
    command_format = '{script} --infile-name-prefix tbl_n\.process --outfile-name-prefix tbl_n.process --infile-categories {infile_categories} --categories {categories} --dir {tbl_dir}'
    # Job 2: sum the process tables over various category combinations.
    job = EchoCommands(
        commands = [
            command_format.format(
                script = os.path.join(scripts_dir, 'scripts', 'tbl_n_sum_over_categories.py'),
                infile_categories = 'htbin njetbin mhtbin',
                categories = 'mhtbin',
                tbl_dir = tbl_dir
            ),
            command_format.format(
                script = os.path.join(scripts_dir, 'scripts', 'tbl_n_sum_over_categories.py'),
                infile_categories = 'htbin njetbin mhtbin',
                categories = 'njetbin',
                tbl_dir = tbl_dir
            ),
            command_format.format(
                script = os.path.join(scripts_dir, 'scripts', 'tbl_n_sum_over_categories.py'),
                infile_categories = 'htbin njetbin',
                categories = 'htbin',
                tbl_dir = tbl_dir
            ),
            command_format.format(
                script = os.path.join(scripts_dir, 'scripts', 'tbl_n_sum_over_categories.py'),
                infile_categories = 'htbin njetbin',
                categories = 'njetbin',
                tbl_dir = tbl_dir
            )
        ]
    )
    ret['aggregate_categories_{}'.format(name)] = job
    return ret
##__________________________________________________________________||
if __name__ == '__main__':
    main()  # script entry point
| [
"tai.sakuma@gmail.com"
] | tai.sakuma@gmail.com |
a06988916b2525148eddb0dba44cfddb16d86774 | 40dd8330e5f78c4348bbddc2c5acfd59d793dd51 | /configs/deeplabv3/deeplabv3_r18-d8_4xb2-80k_cityscapes-769x769.py | 021c98c3762fac98ee7d6513c1eea5ce7e5124e1 | [
"Apache-2.0"
] | permissive | open-mmlab/mmsegmentation | 0d12092312e2c465ede1fd7dd9847b6f2b37049c | 30a3f94f3e2916e27fa38c67cc3b8c69c1893fe8 | refs/heads/main | 2023-09-04T10:54:52.299711 | 2023-07-24T07:28:21 | 2023-07-24T07:28:21 | 272,133,018 | 6,534 | 2,375 | Apache-2.0 | 2023-09-14T01:22:32 | 2020-06-14T04:32:33 | Python | UTF-8 | Python | false | false | 279 | py | _base_ = './deeplabv3_r50-d8_4xb2-80k_cityscapes-769x769.py'
model = dict(
pretrained='open-mmlab://resnet18_v1c',
backbone=dict(depth=18),
decode_head=dict(
in_channels=512,
channels=128,
),
auxiliary_head=dict(in_channels=256, channels=64))
| [
"noreply@github.com"
] | open-mmlab.noreply@github.com |
219452d03c3922660e1f4ea683adb86faf50a25b | 1ffc17893d9e15fd939628bbc41c3d2633713ebd | /skl2onnx/operator_converters/sequence.py | 77b9370162a8bbbc4f66d9fa50e006be7914a19f | [
"Apache-2.0"
] | permissive | xadupre/sklearn-onnx | 646e8a158cdded725064964494f0f8a760630aa8 | b05e4864cedbf4f2a9e6c003781d1db8b53264ac | refs/heads/master | 2023-09-01T15:58:38.112315 | 2022-12-21T01:59:45 | 2022-12-21T01:59:45 | 382,323,831 | 0 | 2 | Apache-2.0 | 2023-01-04T13:41:33 | 2021-07-02T11:22:00 | Python | UTF-8 | Python | false | false | 1,267 | py | # SPDX-License-Identifier: Apache-2.0
from ..proto import onnx_proto
from ..common._registration import register_converter
from ..common._topology import Scope, Operator
from ..common._container import ModelComponentContainer
def convert_sklearn_sequence_at(scope: Scope, operator: Operator,
container: ModelComponentContainer):
i_index = operator.index
index_name = scope.get_unique_variable_name("seq_at%d" % i_index)
container.add_initializer(
index_name, onnx_proto.TensorProto.INT64, [], [i_index])
container.add_node(
'SequenceAt', [operator.inputs[0].full_name, index_name],
operator.outputs[0].full_name,
name=scope.get_unique_operator_name('SequenceAt%d' % i_index))
def convert_sklearn_sequence_construct(scope: Scope, operator: Operator,
container: ModelComponentContainer):
container.add_node(
'SequenceConstruct', [i.full_name for i in operator.inputs],
operator.outputs[0].full_name,
name=scope.get_unique_operator_name('SequenceConstruct'))
register_converter('SklearnSequenceAt', convert_sklearn_sequence_at)
register_converter(
'SklearnSequenceConstruct', convert_sklearn_sequence_construct)
| [
"noreply@github.com"
] | xadupre.noreply@github.com |
cc826a4fb0f79712348dd784a78433792d2b323e | 127fa3dd454434b4c7526afe161177af2e10226e | /leetcode/15. 3Sum.py | 2478149ce975e230ee321b088381e188d6a0d337 | [] | no_license | lunar-r/sword-to-offer-python | 966c46a8ddcff8ce5c95697638c988d83da3beab | fab4c341486e872fb2926d1b6d50499d55e76a4a | refs/heads/master | 2023-04-18T18:57:12.126441 | 2020-11-29T09:51:23 | 2020-11-29T09:51:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,458 | py | # -*- coding: utf-8 -*-
"""
File Name: 15. 3Sum
Description :
Author : simon
date: 19-4-8
"""
class Solution(object): # 此法也超时
def threeSum(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
def twoSum(nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
lookup = {}
for num in nums:
if target - num in lookup:
if (-target ,target - num, num) not in res:
res.append((-target ,target - num, num))
lookup[num] = target - num
n = len(nums)
nums.sort()
res = []
for i in range(n):
twoSum(nums[i+1:], 0-nums[i])
return [list(i) for i in res]
"""
利用有序性去重...
如何保证所有的数字都不是重复的?
确保你的解由左到右 有序
思路是考虑解的第一个元素的所有可能情况
将第一个元素做成target
在第一个元素的后面开始找2sum 为什么不考虑前面的元素? 因为去重 需要保证当前[a,b,c] a是最小的 所以不能往回找 只需要在后面的元素查找
"""
class Solution_(object):
def threeSum(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
n, res = len(nums), []
nums.sort()
for i in range(n):
if i > 0 and nums[i] == nums[i-1]: # 因为i=0这个元素会直接往下执行
continue
l, r = i+1, n-1
while l < r:
tmp = nums[i] + nums[l] + nums[r]
if tmp == 0:
res.append([nums[i], nums[l], nums[r]])
l += 1
r -= 1
while l < r and nums[l] == nums[l-1]:
l += 1
while l < r and nums[r] == nums[r+1]:
r -= 1
elif tmp > 0:
r -= 1
else:
l += 1
return res
"""
双指针 sweep 2sum
"""
class Solution__:
def threeSum(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
nums.sort()
res = []
for i in range(len(nums)):
if i > 0 and nums[i] == nums[i-1]: continue # 这是一种重复情况 [a,b,c] a 相同
left, right = i+1, len(nums)-1 # 标准的双指针sweep查找2sum
while left < right:
curSum = nums[i] + nums[left] + nums[right]
if curSum == 0:
res.append([nums[i], nums[left], nums[right]])
right -= 1
left += 1
while left < right and nums[right] == nums[right+1]: right -= 1 # 防止重复解 [-2, 0, 0, 2, 2]
while left < right and nums[left] == nums[left-1]: left += 1 # 得确保下一个位置下的2sum不能和当前解重复 一定要移动到不一样的地方
# while nums[i] + nums[left] + nums[right] > 0: # 优化 每一步只改变一下 就不用重复计算
# right -= 1
# while nums[i] + nums[left] + nums[right] < 0:
# left += 1
elif curSum > 0:
right -= 1
else:
left += 1
return res
"""
找到所有不重复的解
"""
def Sum2Sweep(self, nums, target):
nums.sort()
left, right = 0, len(nums) - 1
res = []
while left < right:
cur = nums[left] + nums[right]
if cur == target:
res.append([nums[left], nums[right]]) # 收集了解之后一定要注意后面的处理
left += 1
right -= 1
while left < right and nums[right] == nums[right + 1]: right -= 1 # 防止重复解 [-2, 0, 0, 2, 2]
while left < right and nums[left] == nums[left - 1]: left += 1 # 得确保下一个位置下的2sum不能和当前解重复 一定要移动到不一样的地方
elif cur > target:
right -= 1
else:
left += 1
return res
if __name__ == '__main__':
test = [1,1,2,2,3,3]
print(Solution__().Sum2Sweep(test, 4))
| [
"2711772037@qq.com"
] | 2711772037@qq.com |
803a984ccf5f9c2459a060afbd9d7fc10595d1f0 | f5f7a1ae04a999f3f193cca647397b29806edf73 | /modeling/dynamics/builtin/random_tst.py | 869c5cbb58a908cdd11d8219406f61db301494e0 | [
"MIT"
] | permissive | kazuki0824/wrs | bf88d1568f591c61870332436bfcd079d78b87d7 | 03c9e59779a30e2f6dedf2732ad8a46e6ac3c9f0 | refs/heads/main | 2023-07-24T05:20:02.054592 | 2021-05-31T14:38:18 | 2021-05-31T14:38:18 | 368,829,423 | 1 | 0 | MIT | 2021-05-19T10:25:48 | 2021-05-19T10:25:47 | null | UTF-8 | Python | false | false | 132 | py | # conclusions:
# The builtin physics is very simple. It does not support joints and has little flexibility for different collisions. | [
"wanweiwei07@gmail.com"
] | wanweiwei07@gmail.com |
8dc1bf090c3c94c61255f6f0d2389b8d9794ba73 | 071f2a58fcee58845c039b4f7fc277b80d2907df | /tensorflow_federated/python/tests/perf_regression_test.py | 8baadde770ee35b8d5ae9bacfe0e9118e3064781 | [
"Apache-2.0"
] | permissive | hartmanwilliam/federated | 71be99e58b2f33880f0e482d5b578454d65af89b | 55e5fcbfde13ac4788f084e4c3c4714130cd19ec | refs/heads/master | 2022-07-04T20:33:51.082890 | 2020-05-22T17:45:01 | 2020-05-22T17:45:01 | 266,160,582 | 0 | 0 | Apache-2.0 | 2020-05-22T16:43:35 | 2020-05-22T16:43:34 | null | UTF-8 | Python | false | false | 1,670 | py | # Lint as: python3
# Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests to guard against serious asymptotic performance regressions."""
import time
from absl.testing import absltest
import tensorflow as tf
import tensorflow_federated as tff
tf.compat.v1.enable_v2_behavior()
class PerfRegressionTest(absltest.TestCase):
def test_federated_collect_large_numbers_of_parameters(self):
num_clients = 10
model_size = 10**6
client_models = [tf.ones([model_size]) for _ in range(num_clients)]
client_data_type = tff.FederatedType((tf.float32, [model_size]),
tff.CLIENTS)
@tff.federated_computation(client_data_type)
def comp(client_data):
return tff.federated_collect(client_data)
start_time_seconds = time.time()
result = comp(client_models)
end_time_seconds = time.time()
runtime = end_time_seconds - start_time_seconds
if runtime > 10:
raise RuntimeError('comp should take much less than a second, but took ' +
str(runtime))
del result
if __name__ == '__main__':
absltest.main()
| [
"tensorflow.copybara@gmail.com"
] | tensorflow.copybara@gmail.com |
c6335c940a7b4c9321731f01cc8889dae9d174b1 | 3b2940c38412e5216527e35093396470060cca2f | /top/api/rest/TmallItemIncrementUpdateSchemaGetRequest.py | c51feed80e3c9e820280232cccc5e62653b01364 | [] | no_license | akingthink/goods | 842eb09daddc2611868b01ebd6e330e5dd7d50be | ffdb5868a8df5c2935fc6142edcdf4c661c84dca | refs/heads/master | 2021-01-10T14:22:54.061570 | 2016-03-04T09:48:24 | 2016-03-04T09:48:24 | 45,093,302 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 366 | py | '''
Created by auto_sdk on 2015-01-20 12:44:31
'''
from top.api.base import RestApi
class TmallItemIncrementUpdateSchemaGetRequest(RestApi):
def __init__(self,domain='gw.api.taobao.com',port=80):
RestApi.__init__(self,domain, port)
self.item_id = None
self.xml_data = None
def getapiname(self):
return 'tmall.item.increment.update.schema.get'
| [
"yangwenjin@T4F-MBP-17.local"
] | yangwenjin@T4F-MBP-17.local |
50f38c6d4ef76737b9e40616aec9091e6047da55 | 9d1e48983e4cc2a4c37d00c528e235923c03c949 | /driver/driver.py | c4430f8036a42898ae7c16607b8a8fcc925a0426 | [] | no_license | gitmengzh/python_selenium_mail | 0e1efc2908fdf91026ac37b3ac79a2eaf17e4c03 | f3b7198a08e7773a5a5fd741846592da1178159a | refs/heads/master | 2021-07-12T01:28:36.058979 | 2017-10-09T05:09:41 | 2017-10-09T05:09:41 | 105,284,902 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 744 | py | #coding:utf-8
#@time : 2017/9/29 20:25
#@Author : mengzh
#@file :{name}.py
# @Site :
# @File : driver.py
# @Software: PyCharm Community Edition
from selenium.webdriver import Remote
from selenium import webdriver
#启动浏览器驱动
def browser():
driver = webdriver.Chrome()
return driver
'''
#可以启动到远程主机中,运行自动化测试
host = '127.0.0.1:4444' #运行主机:端口号(本机默认:127.0.0.1:4444)
dc = {'browserName': 'chrome'} #指定浏览器
driver = Remote(command_execute='http://' + host + '/wd/hub',
desired_capabilities=dc)
#用于测试该脚本是否有效
if __name__ == '__main__':
dr = browser()
''' | [
"mengzh1618@gmail.com"
] | mengzh1618@gmail.com |
fca91227317f55d87e519efba9c184639f108e94 | 971e0efcc68b8f7cfb1040c38008426f7bcf9d2e | /tests/artificial/transf_Anscombe/trend_MovingAverage/cycle_30/ar_12/test_artificial_128_Anscombe_MovingAverage_30_12_100.py | 94a37184b05fb45fcf9fae9e8cfe29784205206f | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | antoinecarme/pyaf | a105d172c2e7544f8d580d75f28b751351dd83b6 | b12db77cb3fa9292e774b2b33db8ce732647c35e | refs/heads/master | 2023-09-01T09:30:59.967219 | 2023-07-28T20:15:53 | 2023-07-28T20:15:53 | 70,790,978 | 457 | 77 | BSD-3-Clause | 2023-03-08T21:45:40 | 2016-10-13T09:30:30 | Python | UTF-8 | Python | false | false | 270 | py | import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "MovingAverage", cycle_length = 30, transform = "Anscombe", sigma = 0.0, exog_count = 100, ar_order = 12); | [
"antoine.carme@laposte.net"
] | antoine.carme@laposte.net |
c412f1eae21a27f4a3b0f3a34450efaba32e7eb2 | b5479a025524c1387d3ba14f4fba7173f1d2df31 | /task/day04/讲解.py | 9875824b2c11f2e69e85f2a2e10098e30b85e707 | [] | no_license | liousAlready/NewDream_learning | 063f5e225d62eec8285e69bb5ba3bff850096ca3 | 7b790f675419224bfdbe1542eddc5a638982e68a | refs/heads/master | 2023-06-30T08:28:34.949464 | 2021-08-01T00:42:17 | 2021-08-01T00:42:17 | 391,489,851 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,382 | py | #!/usr/bin/env python
# encoding: utf-8
'''
@contact: 1650503480@qq.com
@file: 讲解.py
@time: 2021/4/18 08:50
'''
# # 4、使用循环语句计算从1到100,一共有多少个尾数为7或者7的倍数这样的数,请输出这样的数。
# num = [] # 采用一个空列表
# for i in range(1, 101, 1): # 循环列表
# if i % 7 == 0:
# num.append(i)
# elif str(i)[-1] == '7': # 取字符串最后一位字符来进行判断
# num.append(i)
#
# print(num)
#
# # 讲解
# count = 0
# for i in range(1, 101, 1):
# if i % 10 == 7 or i % 7 == 0:
# print("i为:", i)
# count = count + 1
# else:
# continue
# else:
# print("总共是:", count)
# 5、模拟支付宝的蚂蚁森林通过日常的走步--20g,生活缴费--50g,线下支付--100g,网络购票--80g,共享单车--200g等低碳,环保行为可以积攒能量,当能量达到一定数量后,可以种一棵真正的树--500g。
# 5.1由用户输入环保行为,来积累能量;查询能量请输入能量来源!退出程序请输入0;
# e = [{"xingwei": "日常的走步", "nengliang": 20, "jilei": 0},
# {"xingwei": "生活缴费", "nengliang": 50, "jilei": 1},
# {"xingwei": "线下支付", "nengliang": 100, "jilei": 2},
# {"xingwei": "网络购物", "nengliang": 80, "jilei": 3},
# {"xingwei": "共享单车", "nengliang": 200, "jilei": 0}, ]
#
# print("欢迎你来到支付宝的蚂蚁森林,可以进行一下环保行为来积累能量:")
# print("日常的走步---20g\n 生活缴费---50g\n线下支付--100g\n网络购票--80g\n共享单车:200g")
# while True:
# print("请选择你要进行的操作:1--积累能量,2--查询,0--退出程序")
# x = int(input())
# if x == 1:
# print("请输入你的环保行为:")
# x1 = input()
# for e1 in e:
# if x1 == e1.get("xingwei"):
# e1["jilei"] = e1["jilei"] + e1["nengliang"]
#
# elif x == 2:
# sum_e = 0
# for e1 in e:
# sum_e = sum_e + e1.get("jilei")
# if sum_e >= 500:
# print("总能量为:%d , 恭喜您可以种树了" % sum_e)
# else:
# print("总能量为:%d,请继续您的环保行为" % sum_e)
#
# print("请输入你要查询的环保行为")
# s = input()
# for e1 in e:
# if s == e1.get("xingwei"):
# print(e1.get("jilei"))
#
# elif x == 0:
# break
# else:
# print("输入有误,请重新输入")
'''
7、购物车
功能要求:
要求用户输入总资产,例如: 2000
显示商品列表,让用户根据序号选择商品,加入购物车
购买,如果商品总额大于总资产,提示账户余额不足,否则,购买成功。
goods=[
{"name":"电脑","price":1999},
{"name":"鼠标","price":10},
{"name":"游艇","price":20},
{"name":"美女","price":998}
]
分程度来进行代码编写:
1、简单版:用户只能输入一次商品的序号,购买一个或者多个,就进行结账;
2、进阶版:用户可以多次输入自己想买的商品序号,同一个商品可以购买多个,最后再进行结账
3、高阶版:把显示商品和加入购物车,结算,三个功能定义成三个函数,进行调用
'''
money = input("请输入您的总资产:")
print("==========欢迎来到P9班购物中心==========")
print("======本中心有以下商品功能选购,请按照商品序号进行添加到购物车======")
goods = [
{"name": "电脑", "price": 1999},
{"name": "鼠标", "price": 10},
{"name": "游艇", "price": 20},
{"name": "美女", "price": 998},
]
print("商品\t名称\t价格")
for g in goods: # 遍历列表 取出字典2
print(goods.index(g) + 1, end="\t")
for k in g.keys(): # 取出字典中的 key value
print(g.get(k), end="\t")
print()
# i = input("请选择你需要购买的商品序号:")
# car = []
# dict1 =[]
# for g in goods:
# if i == goods.index(g) + 1:
# for k in g.keys():
# dict1[k] = 1
# car.append(dict1)
# print(car)
i = input("请选择你需要购买的商品序号:")
all_price = 0
for g in goods:
if i == goods.index(g) + 1:
for k in g.keys():
pass
| [
"m15574933885@163.com"
] | m15574933885@163.com |
3b1e8e45800b4899e289907539e9a369b1354124 | 8afb5afd38548c631f6f9536846039ef6cb297b9 | /MY_REPOS/Lambda-Resource-Static-Assets/2-resources/_External-learning-resources/02-pyth/algorithms-master/algorithms/streaming/misra_gries.py | b36c9276a48759167c8676a6fd6fc6c0e8d4a070 | [
"MIT"
] | permissive | bgoonz/UsefulResourceRepo2.0 | d87588ffd668bb498f7787b896cc7b20d83ce0ad | 2cb4b45dd14a230aa0e800042e893f8dfb23beda | refs/heads/master | 2023-03-17T01:22:05.254751 | 2022-08-11T03:18:22 | 2022-08-11T03:18:22 | 382,628,698 | 10 | 12 | MIT | 2022-10-10T14:13:54 | 2021-07-03T13:58:52 | null | UTF-8 | Python | false | false | 1,634 | py | """
Implementation of the Misra-Gries algorithm.
Given a list of items and a value k, it returns the every item in the list that appears at least n/k times, where n is the length of the array
By default, k is set to 2, solving the majority problem.
For the majority problem, this algorithm only guarantees that if there is an element that appears more than n/2 times, it will be outputed. If there
is no such element, any arbitrary element is returned by the algorithm. Therefore, we need to iterate through again at the end. But since we have filtred
out the suspects, the memory complexity is significantly lower than it would be to create counter for every element in the list.
For example:
Input misras_gries([1,4,4,4,5,4,4])
Output {'4':5}
Input misras_gries([0,0,0,1,1,1,1])
Output {'1':4}
Input misras_gries([0,0,0,0,1,1,1,2,2],3)
Output {'0':4,'1':3}
Input misras_gries([0,0,0,1,1,1]
Output None
"""
def misras_gries(array, k=2):
keys = {}
for i in range(len(array)):
val = str(array[i])
if val in keys:
keys[val] = keys[val] + 1
elif len(keys) < k - 1:
keys[val] = 1
else:
for key in list(keys):
keys[key] = keys[key] - 1
if keys[key] == 0:
del keys[key]
suspects = keys.keys()
frequencies = {}
for suspect in suspects:
freq = _count_frequency(array, int(suspect))
if freq >= len(array) / k:
frequencies[suspect] = freq
return frequencies if len(frequencies) > 0 else None
def _count_frequency(array, element):
return array.count(element)
| [
"bryan.guner@gmail.com"
] | bryan.guner@gmail.com |
62b89ae3b905a6cdda0c7773ec9545b72a6fd51f | 5b052d14cfb7b8a5fb49a22a60f9806fb9e085e7 | /libc.py | 7ba9c30b4b899d8b0f425ce79be35a150fc73bd2 | [] | no_license | Torres-x86-64/x86_64-linux-cheatsheats | 82bd6ba4888c14368886ec293b90e91ce33e7971 | be6cd4fd3b06a0e7be1dc6ef71dc49245aa5b348 | refs/heads/master | 2023-07-19T07:05:48.459601 | 2021-09-09T08:49:42 | 2021-09-09T08:49:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,066 | py | #!/usr/bin/env python3
import os
import subprocess
import sys
import tempfile
import shlex
import cmd
from collections import defaultdict
C_HEADER = """
#define _GNU_SOURCE
extern "C" {
#include <elf.h>
#include <asm/mman.h>
#include <ctype.h>
#include <dirent.h>
#include <dlfcn.h>
#include <errno.h>
#include <execinfo.h>
#include <fcntl.h>
#include <glob.h>
#include <grp.h>
#include <ifaddrs.h>
#include <langinfo.h>
#include <limits.h>
#include <linux/falloc.h>
#include <linux/fs.h>
#include <linux/if.h>
#include <linux/input.h>
#include <linux/magic.h>
#include <linux/netlink.h>
#include <linux/quota.h>
#include <linux/kvm.h>
#include <linux/reboot.h>
#include <locale.h>
#include <malloc.h>
#include <mqueue.h>
#include <netdb.h>
#include <net/ethernet.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>
#include <netpacket/packet.h>
#include <poll.h>
#include <pthread.h>
#include <pty.h>
#include <pwd.h>
#include <resolv.h>
#include <sched.h>
#include <semaphore.h>
#include <shadow.h>
#include <signal.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/epoll.h>
#include <sys/eventfd.h>
#include <sys/file.h>
#include <sys/ioctl.h>
#include <sys/io.h>
#include <sys/ipc.h>
#include <syslog.h>
#include <sys/mman.h>
#include <sys/msg.h>
#include <sys/personality.h>
#include <sys/prctl.h>
#include <sys/ptrace.h>
#include <sys/quota.h>
#include <sys/reboot.h>
#include <sys/reg.h>
#include <sys/resource.h>
#include <sys/sem.h>
#include <sys/sendfile.h>
#include <sys/shm.h>
#include <sys/signalfd.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/statvfs.h>
#include <sys/swap.h>
#include <sys/syscall.h>
#include <sys/sysinfo.h>
#include <sys/time.h>
#include <sys/timerfd.h>
#include <sys/times.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <sys/un.h>
#include <sys/user.h>
#include <sys/utsname.h>
#include <sys/vfs.h>
#include <sys/wait.h>
#include <sys/xattr.h>
#include <termios.h>
#include <time.h>
#include <ucontext.h>
#include <unistd.h>
#include <utime.h>
#include <utmpx.h>
#include <wchar.h>
}
"""
CXX_HEADER = C_HEADER + """
#include <iostream>
#include <type_traits>
#include <typeinfo>
#ifndef _MSC_VER
# include <cxxabi.h>
#endif
#include <memory>
#include <string>
#include <cstdlib>
template <class T>
std::string
type_name()
{
typedef typename std::remove_reference<T>::type TR;
std::unique_ptr<char, void(*)(void*)> own
(
#ifndef _MSC_VER
abi::__cxa_demangle(typeid(TR).name(), nullptr,
nullptr, nullptr),
#else
nullptr,
#endif
std::free
);
std::string r = own != nullptr ? own.get() : typeid(TR).name();
if (std::is_const<TR>::value)
r += " const";
if (std::is_volatile<TR>::value)
r += " volatile";
if (std::is_lvalue_reference<T>::value)
r += "&";
else if (std::is_rvalue_reference<T>::value)
r += "&&";
return r;
}
"""
def usage():
print(
"USAGE: %s repl|size|print|all|symbols|offset|members" % sys.argv[0],
file=sys.stderr)
return 0
def execute(f):
try:
exe = tempfile.NamedTemporaryFile(delete=True)
exe.close()
subprocess.check_call(
["c++", "-std=c++11", "-w", "-o", exe.name, "-x", "c++", f.name])
subprocess.check_call([exe.name])
except subprocess.CalledProcessError:
pass
finally:
try:
os.unlink(exe.name)
except FileNotFoundError:
pass
def main(args):
if len(args) < 2 or args[1] == "-h" or args[1] == "--help":
usage()
return 1
s = Shell()
s.args = sys.argv[2:]
s.onecmd(sys.argv[1])
def parse(arg):
return tuple(shlex.split(arg))
def gdb(arg, command):
f = tempfile.NamedTemporaryFile(mode="w+")
f.write(C_HEADER + """int main() {
%s a;
printf("%%p", &a);
}""" % arg)
f.flush()
subprocess.check_call(
["c++", "-std=c++11", "-g", "-o", "/tmp/main", "-w", "-x", "c++", f.name])
return subprocess.check_output(["gdb", "--nh", "-batch", "-ex", command, "/tmp/main"]).decode("utf-8")
class Shell(cmd.Cmd):
intro = 'Type help or ? to list commands.\n'
prompt = '> '
def precmd(self, line):
self.args = parse(line)
if len(self.args) > 1:
self.args = self.args[1:]
return line
def do_print(self, _):
"""
print expression
"""
if len(self.args) < 1:
print("USAGE: %s print expr" % sys.argv[0], file=sys.stderr)
return 1
f = tempfile.NamedTemporaryFile(mode="w+")
f.write(
CXX_HEADER +
"int main(int argc, char** argv) { std::cout << (%s) << std::endl; }"
% self.args[0])
f.flush()
execute(f)
do_p = do_print
default = do_print
def do_size(self, _):
"""
print sizeof(expression)
"""
if len(self.args) < 1:
print("USAGE: %s print-size expr" % sys.argv[0], file=sys.stderr)
return 1
f = tempfile.NamedTemporaryFile(mode="w+")
f.write(CXX_HEADER +
"int main() { std::cout << \"0x\" << std::hex << sizeof(%s) << std::endl; }" %
(self.args[0]))
f.flush()
execute(f)
do_s = do_size
def do_type(self, _):
"""
print type of expression
"""
if len(self.args) < 1:
print("USAGE: %s type" % sys.argv[0], file=sys.stderr)
return 1
f = tempfile.NamedTemporaryFile(mode="w+")
f.write(
CXX_HEADER +
"int main() { std::cout << type_name<decltype(%s)>() << std::endl; }"
% (self.args[0]))
f.flush()
execute(f)
do_t = do_type
def do_offset(self, _):
"""
print offsetof(struct, member)
"""
if len(self.args) < 2:
print(
"USAGE: %s offset struct member" % sys.argv[0],
file=sys.stderr)
return 1
f = tempfile.NamedTemporaryFile(mode="w+")
f.write(CXX_HEADER +
"int main() { std::cout << \"0x\" << std::hex << offsetof(%s, %s) << std::endl; }" %
(self.args[0], self.args[1]))
f.flush()
execute(f)
do_o = do_offset
def do_symbols(self, _):
"""
Print symbols
"""
f = tempfile.NamedTemporaryFile(mode="w+")
f.write(C_HEADER + "int main() {}")
f.flush()
subprocess.check_call(["cpp", f.name])
def do_all(self, _):
"""
Print libc header with all macros/definitions
"""
f = tempfile.NamedTemporaryFile(mode="w+")
f.write(C_HEADER + "int main() {}")
f.flush()
if subprocess.check_output(["cpp", "--version"]).startswith(b"clang"):
subprocess.check_call(["cpp", "-frewrite-includes", f.name])
else:
subprocess.check_call(["cpp", "-fdirectives-only", f.name])
def do_members(self, _):
"""
Print type information using gdb. For structs print members
"""
out = gdb(self.args[0], "ptype %s" % self.args[0])
print(out.replace("type = ", ""))
def do_gdboffset(self, _):
"""
Print offset calculated by gdb
"""
if len(self.args) < 2:
print(
"USAGE: %s offset struct member" % sys.argv[0],
file=sys.stderr)
return 1
print(gdb(self.args[0], f"print &(({self.args[0]} *) 0)->{self.args[1]}"))
def do_exit(self, _):
"""
Exit
"""
return True
def do_quit(self, _):
"""
Exit
"""
return True
if len(sys.argv) < 2:
usage()
sys.exit(0)
elif sys.argv[1] == "repl":
Shell().cmdloop()
else:
sys.exit(main(sys.argv))
| [
"joerg@thalheim.io"
] | joerg@thalheim.io |
dc82e1f840126fc715fbeadb46f6fde3429f2d39 | 118984fdbacf5eb71159eb511ccd055987498886 | /CH02/EX2.11.py | bd48a2f7c6a031fa25f8178f8c91a60ed0d5ef46 | [] | no_license | 6igsm0ke/Introduction-to-Programming-Using-Python-Liang-1st-edtion | 321c6256be6ff78adbc8e3ddc73f2f43a51a75ab | 159489f3af296f87469ddddf3a1cb232917506b0 | refs/heads/master | 2023-06-05T20:03:17.951911 | 2021-06-18T18:04:42 | 2021-06-18T18:04:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 835 | py | # (Financial application: investment amount) Suppose you want to deposit a
# certain amount of money into a savings account with a fixed annual interest rate.
# What amount do you need to deposit in order to have $5,000 in the account after
# three years? The initial deposit amount can be obtained using the following
# formula:
# Write a program that prompts the user to enter final account value, annual interest
# rate in percent, and the number of years, and displays the initial deposit amount.
finAccVal = eval(input("Enter final account value: "))
monthInterRatePerc = eval(input("Enter annual interest rate in percent: ")) / (100 * 12)
numOfMonths = eval(input("Enter number of years: ")) * 12
initialDepositAmount = finAccVal / (1 + monthInterRatePerc) ** numOfMonths
print("Initial deposit value is", initialDepositAmount)
| [
"47993441+OmarAlmighty@users.noreply.github.com"
] | 47993441+OmarAlmighty@users.noreply.github.com |
1ffce055ba6651fb3b62b2257eb1b97d0184681d | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_playmate.py | a2a2cbdd32c52b30414fc86e33f08980172af6d0 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 350 | py |
#calss header
class _PLAYMATE():
def __init__(self,):
self.name = "PLAYMATE"
self.definitions = [u'a friend, especially another child, who a child often plays with: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
b7b7589a8edfbd5a4ac78f7af700a1a51fee4730 | 54982f9506789cacd236a8890cd67c48d0e8df0e | /laser_scanner_copy/build/laser_scanner/uts_sensor_box/sensor_drivers/rosserial/rosserial_windows/catkin_generated/pkg.installspace.context.pc.py | 934825c013717a5efa0da78c6b7458a4656b52a0 | [] | no_license | bassie8881/ROS_stuff | 53e0d750c35f653bf9f93cf28ee7d7b604a69af6 | 627b114c6dd469a4e81085a894676e60aeb23691 | refs/heads/master | 2021-01-17T17:03:25.219628 | 2016-07-14T03:12:04 | 2016-07-14T03:12:04 | 63,243,187 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 390 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "rosserial_windows"
PROJECT_SPACE_DIR = "/home/odroid/laser_scanner_copy/install"
PROJECT_VERSION = "0.6.3"
| [
"bas_bosscher@hotmail.com"
] | bas_bosscher@hotmail.com |
3a30f9e8d50edf6c153ca01b26747bcc8ac92ba9 | 538cee184d5ba586b2dfddd58e243febfbde81e6 | /splikes/neurons/backup__init__.py | ef276042e25a3339af9a2bd7b29521fe8c049d71 | [
"MIT"
] | permissive | bblais/Plasticnet | 05ea7763cc7a188dfb88611cb12dfc4edd015955 | 5fc8a4fc715d4877bfa255feacd37b4c23201777 | refs/heads/master | 2023-07-10T06:15:06.286575 | 2023-06-21T10:38:11 | 2023-06-21T10:38:11 | 34,905,565 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 396 | py | from IntegrateAndFire import IntegrateAndFire
from poisson_julijana import poisson_julijana
from poisson_pattern import poisson_pattern
from poisson_pattern import poisson_plasticnet
from poisson_pattern import isi_pattern
from poisson_pattern import isi_plasticnet
from spike_pattern import spike_pattern
from srm0 import srm0
from srm0 import srm0_debug
from srm0 import srm0_isi
import process | [
"bblais@gmail.com"
] | bblais@gmail.com |
29cfdd3a27d07741e098f88164c64711908eb33c | 9dee94907e6456a4af9855d358693923c17b4e0d | /0575_Distribute_Candies.py | 633c5b32143611a4b89358487332a3c63c22615a | [] | no_license | chien-wei/LeetCode | e215915a8103e56f182040dacc9fb0d6996c86ec | 0d6f414e7610fedb2ec4818ecf88d51aa69e1355 | refs/heads/master | 2021-05-13T14:48:22.891100 | 2019-08-20T05:52:59 | 2019-08-20T05:52:59 | 116,749,327 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 262 | py | class Solution:
def distributeCandies(self, candies):
"""
:type candies: List[int]
:rtype: int
"""
count = collections.Counter(candies)
return len(count) if len(count) <= len(candies)/2 else int(len(candies)/2) | [
"chien-wei@outlook.com"
] | chien-wei@outlook.com |
36a7d56d6d6bf426de9e275006e0087ccb97cc4e | 3c000380cbb7e8deb6abf9c6f3e29e8e89784830 | /venv/Lib/site-packages/cobra/modelimpl/eqpt/mem.py | 3d50535f47e2edf19cb09a8c1c709ee5f6e093e8 | [] | no_license | bkhoward/aciDOM | 91b0406f00da7aac413a81c8db2129b4bfc5497b | f2674456ecb19cf7299ef0c5a0887560b8b315d0 | refs/heads/master | 2023-03-27T23:37:02.836904 | 2021-03-26T22:07:54 | 2021-03-26T22:07:54 | 351,855,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,769 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class Mem(Mo):
meta = ClassMeta("cobra.model.eqpt.Mem")
meta.isAbstract = True
meta.moClassName = "eqptMem"
meta.moClassName = "eqptMem"
meta.rnFormat = ""
meta.category = MoCategory.REGULAR
meta.label = "Memory"
meta.writeAccessMask = 0x80080000000001
meta.readAccessMask = 0x80080000000001
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = False
meta.superClasses.add("cobra.model.eqpt.Item")
meta.superClasses.add("cobra.model.nw.Item")
meta.superClasses.add("cobra.model.eqpt.Comp")
meta.concreteSubClasses.add("cobra.model.eqpt.SpromLc")
meta.concreteSubClasses.add("cobra.model.eqpt.SpromPsu")
meta.concreteSubClasses.add("cobra.model.eqpt.SpromFan")
meta.concreteSubClasses.add("cobra.model.eqpt.SpromSup")
meta.concreteSubClasses.add("cobra.model.eqpt.Dimm")
meta.concreteSubClasses.add("cobra.model.eqpt.Flash")
meta.concreteSubClasses.add("cobra.model.eqpt.SpromBP")
meta.rnPrefixes = [
]
prop = PropMeta("str", "acc", "acc", 3340, PropCategory.REGULAR)
prop.label = "Access"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("read-only", "read-only", 0)
prop._addConstant("read-write", "write-only", 1)
meta.props.add("acc", prop)
prop = PropMeta("str", "cap", "cap", 3339, PropCategory.REGULAR)
prop.label = "Capacity"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("unspecified", "unspecified", 0)
meta.props.add("cap", prop)
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "cimcVersion", "cimcVersion", 56701, PropCategory.REGULAR)
prop.label = "CIMC version"
prop.isImplicit = True
prop.isAdmin = True
prop.range = [(0, 512)]
meta.props.add("cimcVersion", prop)
prop = PropMeta("str", "descr", "descr", 5597, PropCategory.REGULAR)
prop.label = "Description"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 128)]
prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
meta.props.add("descr", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "id", "id", 3505, PropCategory.REGULAR)
prop.label = "ID"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("id", prop)
prop = PropMeta("str", "mfgTm", "mfgTm", 5596, PropCategory.REGULAR)
prop.label = "Manufacturing Time"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "not-applicable"
prop._addConstant("not-applicable", "n/a", 0)
meta.props.add("mfgTm", prop)
prop = PropMeta("str", "model", "model", 5592, PropCategory.REGULAR)
prop.label = "Model"
prop.isImplicit = True
prop.isAdmin = True
prop.range = [(0, 512)]
meta.props.add("model", prop)
prop = PropMeta("str", "operSt", "operSt", 3341, PropCategory.REGULAR)
prop.label = "Operational State"
prop.isOper = True
prop.defaultValue = 0
prop.defaultValueStr = "unknown"
prop._addConstant("absent", "absent", 3)
prop._addConstant("fail", "fail", 2)
prop._addConstant("mismatch", "mismatch", 5)
prop._addConstant("ok", "ok", 1)
prop._addConstant("shut", "shut", 4)
prop._addConstant("unknown", "unknown", 0)
meta.props.add("operSt", prop)
prop = PropMeta("str", "rev", "rev", 5593, PropCategory.REGULAR)
prop.label = "Revision"
prop.isImplicit = True
prop.isAdmin = True
prop.range = [(0, 512)]
prop.defaultValue = "0"
prop.defaultValueStr = "0"
meta.props.add("rev", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "ser", "ser", 5594, PropCategory.REGULAR)
prop.label = "Serial Number"
prop.isImplicit = True
prop.isAdmin = True
prop.range = [(0, 16)]
prop.regex = ['[a-zA-Z0-9_.:-]+']
meta.props.add("ser", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
prop = PropMeta("str", "type", "type", 3338, PropCategory.REGULAR)
prop.label = "Type"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("dimm", "dimm", 1)
prop._addConstant("flash", "flash", 2)
prop._addConstant("sprom", "sprom", 3)
prop._addConstant("unk", "unknown", 0)
meta.props.add("type", prop)
prop = PropMeta("str", "vendor", "vendor", 5595, PropCategory.REGULAR)
prop.label = "Vendor"
prop.isImplicit = True
prop.isAdmin = True
prop.range = [(0, 512)]
prop.defaultValue = "Cisco Systems, Inc"
prop.defaultValueStr = "Cisco Systems, Inc"
meta.props.add("vendor", prop)
def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
namingVals = []
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"bkhoward@live.com"
] | bkhoward@live.com |
307b791acfb30d21d918c0b74267d482103d9343 | 3b4c109af79a01e1d5d0a9cc45a2c02d84f760da | /app/api/sge_api.py | 0b23cf9e1866b69708df28f2a9dd10936dc1126f | [] | no_license | huozhihui/sge_deploy | a07dcfe632a0b93988696ea5b543a3cfbb218104 | 129e32542311ad4939124eac9838a86085bb85df | refs/heads/master | 2020-03-12T00:07:25.479411 | 2018-05-20T02:22:48 | 2018-05-20T02:22:48 | 130,341,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 728 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
from flask_restful import Resource
import argument
import base
from app.sge.sge_centos7 import SgeMaster, SgeClient
class SgeAuthApi(Resource):
def get(self):
return {"token": "true"}
class SgeMasterApi(Resource):
def post(self):
# args = argument.sge_master_parser.parse_args(strict=True)
args = argument.sge_master_parser.parse_args()
instance = SgeMaster(**args)
base.generate_thread(instance, **args)
return base.execute_success()
class SgeClientApi(Resource):
def post(self):
args = argument.sge_client_parser.parse_args()
instance = SgeClient(**args)
base.generate_thread(instance, **args)
| [
"240516816@qq.com"
] | 240516816@qq.com |
be858c8f665aa32394b9537f1d88bb26cd458735 | 64bf39b96a014b5d3f69b3311430185c64a7ff0e | /intro-ansible/venv3/lib/python3.8/site-packages/ansible_collections/community/vmware/plugins/doc_fragments/vmware_rest_client.py | 6b66d304ee41a505c9bd657775f528804c42474a | [
"MIT",
"GPL-3.0-only",
"LicenseRef-scancode-unknown-license-reference",
"GPL-3.0-or-later"
] | permissive | SimonFangCisco/dne-dna-code | 7072eba7da0389e37507b7a2aa5f7d0c0735a220 | 2ea7d4f00212f502bc684ac257371ada73da1ca9 | refs/heads/master | 2023-03-10T23:10:31.392558 | 2021-02-25T15:04:36 | 2021-02-25T15:04:36 | 342,274,373 | 0 | 0 | MIT | 2021-02-25T14:39:22 | 2021-02-25T14:39:22 | null | UTF-8 | Python | false | false | 1,809 | py | # -*- coding: utf-8 -*-
# Copyright: (c) 2018, Ansible Project
# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
class ModuleDocFragment(object):
# Parameters for VMware REST Client based modules
DOCUMENTATION = r'''
options:
hostname:
description:
- The hostname or IP address of the vSphere vCenter server.
- If the value is not specified in the task, the value of environment variable C(VMWARE_HOST) will be used instead.
type: str
username:
description:
- The username of the vSphere vCenter server.
- If the value is not specified in the task, the value of environment variable C(VMWARE_USER) will be used instead.
type: str
aliases: [ admin, user ]
password:
description:
- The password of the vSphere vCenter server.
- If the value is not specified in the task, the value of environment variable C(VMWARE_PASSWORD) will be used instead.
type: str
aliases: [ pass, pwd ]
validate_certs:
description:
- Allows connection when SSL certificates are not valid.
- Set to C(False) when certificates are not trusted.
- If the value is not specified in the task, the value of environment variable C(VMWARE_VALIDATE_CERTS) will be used instead.
type: bool
default: true
port:
description:
- The port number of the vSphere vCenter.
- If the value is not specified in the task, the value of environment variable C(VMWARE_PORT) will be used instead.
type: int
default: 443
protocol:
description:
- The connection to protocol.
type: str
choices: [ http, https ]
default: https
'''
| [
"sifang@cisco.com"
] | sifang@cisco.com |
0e2a0c7e769f2b1a1ce9065afb702e271edcf657 | 971e0efcc68b8f7cfb1040c38008426f7bcf9d2e | /tests/artificial/transf_Difference/trend_MovingAverage/cycle_12/ar_12/test_artificial_128_Difference_MovingAverage_12_12_20.py | d9f4a16840cfc05c99914bb60a1ff13682cda68e | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | antoinecarme/pyaf | a105d172c2e7544f8d580d75f28b751351dd83b6 | b12db77cb3fa9292e774b2b33db8ce732647c35e | refs/heads/master | 2023-09-01T09:30:59.967219 | 2023-07-28T20:15:53 | 2023-07-28T20:15:53 | 70,790,978 | 457 | 77 | BSD-3-Clause | 2023-03-08T21:45:40 | 2016-10-13T09:30:30 | Python | UTF-8 | Python | false | false | 271 | py | import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "MovingAverage", cycle_length = 12, transform = "Difference", sigma = 0.0, exog_count = 20, ar_order = 12); | [
"antoine.carme@laposte.net"
] | antoine.carme@laposte.net |
9749e7ffb75827cf90e8764277b3fa1be3196504 | e5f1befb7c7ca0072747b33086fc6569a6befd01 | /old/docstrings/001.py | 7f6a07a575df40c1c412300d12dbaf39ae528871 | [] | no_license | nepomnyashchii/TestGit | ae08d8bb1b7d2ab9389a309fd1dc9e24729b019c | c7abf4ab08ee3c2f3ea1fb09a1938bff7a3e0e5c | refs/heads/master | 2020-04-28T23:41:51.053547 | 2020-01-24T12:22:40 | 2020-01-24T12:22:40 | 175,666,093 | 0 | 1 | null | 2019-03-15T13:44:03 | 2019-03-14T17:08:58 | null | UTF-8 | Python | false | false | 142 | py | def my_function():
"""Do nothing, but document it.
No, really, it doesn't do anything.
"""
pass
print (my_function.__doc__)
| [
"nepomnyashchii@gmail.com"
] | nepomnyashchii@gmail.com |
2b7f019559e32d32d77c4d539f7fa04d207440bc | 14cb64c98b6532e134dfe1399cc74dbd3a0cdeba | /LightweightDjango/urls.py | 5993f636fb6fd1abf100488d26a9ca68d8bd1dfa | [] | no_license | agz1990/lightweight_django | 3ed98479fa0a7e0ac6e3d5bae237690761582a30 | 0daf9884843fc9d8dee1120bc5ba2a4a71ecb55c | refs/heads/master | 2016-08-12T07:35:14.456918 | 2015-11-30T16:30:58 | 2015-11-30T16:30:58 | 47,054,107 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,100 | py | """LightweightDjango URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
# from placeholder import views
from sitebuider import views
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
# url(r'^image/(?P<width>[0-9]+)x(?P<height>[0-9]+)/$', views.flaceholder, name='placeholder'),
# url(r'^(?P<slug>[\w./-]+)', views.page, name='page'),
url(r'^board/', include('board.urls', namespace='board')),
url(r'^$', views.page, name='homepage'),
]
| [
"522360568@qq.com"
] | 522360568@qq.com |
19b15d5c5d51b57c95a7c8c07417e79611d4e2ce | 381d034368acbc29a8fbe57fa93ade6ae9026d1d | /venv/Scripts/pip3.7-script.py | 2178deb9ac099dcc82ab0299a0e67e01560dfbc8 | [] | no_license | mayhem215/issues | 7a9f7207aebd927f283061a6ffc176b2dcf7b4b3 | cccfaf80c31b29521f40fa3aa81cb79aec6e6579 | refs/heads/master | 2020-04-08T10:58:46.061311 | 2018-11-27T06:42:09 | 2018-11-27T06:42:09 | 159,289,128 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 403 | py | #!C:\DjangoProjects\issues\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3.7'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip3.7')()
)
| [
"mayhem15@mail.ru"
] | mayhem15@mail.ru |
e1fd7185282c13b02be9eecae27a0abbd4da1c46 | 87277cddfc489dd7d3837ffccda2f11bb4ad43cc | /py/Task659.py | 90643ad9965f0161929d5b3ab7ca9321a2a5ba47 | [] | no_license | rain-zhao/leetcode | 22c01d1803af7dd66164a204e6dc718e6bab6f0e | 8d47147f1c78896d7021aede767b5c659cd47035 | refs/heads/master | 2022-05-29T10:54:14.709070 | 2022-05-14T09:38:05 | 2022-05-14T09:38:05 | 242,631,842 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,435 | py | from typing import List
from collections import defaultdict
import heapq
class Solution:
# hash + min-heap
def isPossible(self, nums: List[int]) -> bool:
map = defaultdict(list)
for num in nums:
prevLength = 0
if map[num-1]:
# pop min val from from heap
prevLength = heapq.heappop(map[num-1])
heapq.heappush(map[num], prevLength+1)
return not any(item and item[0] < 3 for item in map.values())
def isPossible2(self, nums: List[int]) -> bool:
pre1, pre2, pre3 = 0, 0, 0
n = len(nums)
i = 0
pre, cur = None, nums[0]-1
while i < n:
pre, cur = cur, nums[i]
# calculate cnt
cnt = 0
while i < n and nums[i] == cur:
cnt += 1
i += 1
# 当前数字跟前一位数字不连续
if cur - pre > 1:
if pre1 or pre2:
return False
pre1, pre2, pre3 = cnt, 0, 0
continue
# 不满足子序列要大于3要求
if cnt < pre1 + pre2:
return False
remain = cnt - pre1 - pre2
pre1, pre2, pre3 = max(0, remain - pre3),\
pre1, pre2 + min(pre3, remain)
return not pre1 and not pre2
nums = [1, 2, 3, 3, 4, 5]
obj = Solution()
print(obj.isPossible(nums))
| [
"rangeree@foxmail.com"
] | rangeree@foxmail.com |
93157527a57fa1551e01415c2bc9dbf613049c7e | f042b28a1aaa65586a3082546a2a66f7b8da9560 | /private/scripts/recheck-invalid-handles.py | ab4054d837b64d6cdc4bc55d34e29e751e8dc8d5 | [
"MIT"
] | permissive | bansal-shubham/stopstalk-deployment | 96fca9640fe829e1129a116a3e0b4895360807f1 | 6392eace490311be103292fdaff9ae215e4db7e6 | refs/heads/master | 2020-07-21T09:24:22.027051 | 2019-10-09T20:34:27 | 2019-10-09T20:34:27 | 206,814,820 | 0 | 0 | MIT | 2019-09-11T04:42:13 | 2019-09-06T14:46:32 | null | UTF-8 | Python | false | false | 4,427 | py | """
Copyright (c) 2015-2019 Raj Patel(raj454raj@gmail.com), StopStalk
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import requests, bs4
import sites
# Constants to be used in case of request failures
SERVER_FAILURE = "SERVER_FAILURE"
NOT_FOUND = "NOT_FOUND"
OTHER_FAILURE = "OTHER_FAILURE"
REQUEST_FAILURES = (SERVER_FAILURE, NOT_FOUND, OTHER_FAILURE)
def get_invalid_handle_method(site):
site_class = getattr(sites, site.lower())
invalid_handle_method = getattr(site_class.Profile, "is_invalid_handle")
return invalid_handle_method
if __name__ == "__main__":
ihtable = db.invalid_handle
atable = db.auth_user
cftable = db.custom_friend
stable = db.submission
nrtable = db.next_retrieval
mapping = {}
handle_to_row = {}
for site in current.SITES:
mapping[site] = get_invalid_handle_method(site)
handle_to_row[site] = {}
impossiblehandle = "thisreallycantbeahandle308"
assert(all(map(lambda site: get_invalid_handle_method(site)(impossiblehandle), current.SITES.keys())))
def populate_handle_to_row(table):
for row in db(table).select():
for site in current.SITES:
site_handle = row[site.lower() + "_handle"]
if site_handle:
if handle_to_row[site].has_key(site_handle):
handle_to_row[site][site_handle].append(row)
else:
handle_to_row[site][site_handle] = [row]
populate_handle_to_row(atable)
populate_handle_to_row(cftable)
# for site in current.SITES:
# print site
# for site_handle in handle_to_row[site]:
# print "\t", site_handle
# for row in handle_to_row[site][site_handle]:
# print "\t\t", row.first_name, row.last_name, row.stopstalk_handle
update_dict = {"stopstalk_rating": 0,
"stopstalk_prev_rating": 0,
"per_day": 0.0,
"per_day_change": "0.0",
"authentic": False}
final_delete_query = False
cnt = 0
for row in db(ihtable).iterselect():
# If not an invalid handle anymore
if handle_to_row[row.site].has_key(row.handle) and mapping[row.site](row.handle) is False:
cnt += 1
print row.site, row.handle, "deleted"
for row_obj in handle_to_row[row.site][row.handle]:
print "\t", row_obj.stopstalk_handle, "updated"
update_dict[row.site.lower() + "_lr"] = current.INITIAL_DATE
row_obj.update_record(**update_dict)
if "user_id" in row_obj:
# Custom user
db(nrtable.custom_user_id == row_obj.id).update(**{row.site.lower() + "_delay": 0})
else:
db(nrtable.user_id == row_obj.id).update(**{row.site.lower() + "_delay": 0})
final_delete_query |= ((stable.site == row.site) & \
(stable.stopstalk_handle == row_obj.stopstalk_handle))
del update_dict[row.site.lower() + "_lr"]
row.delete_record()
if cnt >= 10:
if final_delete_query:
db(final_delete_query).delete()
cnt = 0
final_delete_query = False
if final_delete_query:
db(final_delete_query).delete()
| [
"raj454raj@gmail.com"
] | raj454raj@gmail.com |
02a9bcf7bba04245c6c9f6fb625931350afcc98a | 622cc29ddd62d76ba251ec4635aef66a59a44143 | /projeto/estoque/migrations/0001_initial.py | fe4a7bf28b2992ed624a17f515701c8d42c73e70 | [] | no_license | Aleleonel/estoque | c98e907919f8525cdb69eebf4b856de032d9733a | d38c60ca99d70116dfdfe01e01068cfe72a14727 | refs/heads/master | 2021-09-24T05:09:15.979126 | 2020-01-16T14:43:17 | 2020-01-16T14:43:17 | 229,847,448 | 1 | 0 | null | 2021-09-22T18:17:49 | 2019-12-24T01:22:00 | Python | UTF-8 | Python | false | false | 2,708 | py | # Generated by Django 3.0.1 on 2019-12-23 20:55
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('produto', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Estoque',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='criado em')),
('modified', models.DateTimeField(auto_now=True, verbose_name='modificado em')),
('nf', models.PositiveIntegerField(blank=True, null=True, verbose_name='nota fiscal')),
('movimento', models.CharField(blank=True, choices=[('e', 'entrada'), ('s', 'saida')], max_length=1)),
('funcionario', models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('-created',),
},
),
migrations.CreateModel(
name='EstoqueItens',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('quantidade', models.PositiveIntegerField()),
('saldo', models.PositiveIntegerField(blank=True)),
('estoque', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='estoques', to='estoque.Estoque')),
('produto', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='produto.Produto')),
],
options={
'ordering': ('pk',),
},
),
migrations.CreateModel(
name='EstoqueEntrada',
fields=[
],
options={
'verbose_name': 'estoque entrada',
'verbose_name_plural': 'estoque entrada',
'proxy': True,
'indexes': [],
'constraints': [],
},
bases=('estoque.estoque',),
),
migrations.CreateModel(
name='EstoqueSaida',
fields=[
],
options={
'verbose_name': 'estoque saída',
'verbose_name_plural': 'estoque saída',
'proxy': True,
'indexes': [],
'constraints': [],
},
bases=('estoque.estoque',),
),
]
| [
"aleleonel@gmail.com"
] | aleleonel@gmail.com |
1f11240c9dc378f86e18d5a8dc29148d8faedf08 | 894b290b4f4f47b5eb523c23efd7bd6110d91b2f | /148_m_dianping/m_dianping/m_dianping/settings.py | fb36fe94e4be675a15979d7ab4a58bd94361b8f4 | [] | no_license | wliustc/SpiderS | 6650c00616d11239de8c045828bafdc5a299b1ce | 441f309c50d28c1a3917bed19321cd5cbe7c2861 | refs/heads/master | 2020-03-27T06:15:39.495785 | 2018-06-14T07:55:44 | 2018-06-14T07:55:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,972 | py | # -*- coding: utf-8 -*-
BOT_NAME='m_dianping'
SPIDER_MODULES=['m_dianping.spiders']
NEWSPIDER_MODULE='m_dianping.spiders'
DOWNLOAD_HANDLERS={'s3': None}
USER_AGENT="Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.87 Safari/537.36"
MAIL_FROM="spider_man_warn@126.com"
ITEM_PIPELINES= {
'm_dianping.pipelines_global.WriteFilePipeline': 300,
}
LOG_LEVEL='WARNING'
REDIS_HOST="10.15.1.11"
REDIS_PORT="6379"
CORE_METRICS_INTERVAL=5
DEFAULT_REQUEST_HEADERS={'Connection':'keep-alive','Cache-Control':'max-age=0','Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8','Upgrade-Insecure-Requests':'1','Accept-Encoding':'gzip, deflate, sdch','Accept-Language':'zh-CN,zh;q=0.8',"user-agent": USER_AGENT}
MAIL_USER="spider_man_warn@126.com"
MAX_FILESIZE='500'
MAIL_PASS="dev123"
HDFS_MODULE="hdfs"
EXTENSIONS= {
'm_dianping.stats_collector_global.PrintCoreMetrics': 500,
'm_dianping.stats_mail_global.StatsMailer': 505,
}
SAVE_PATH='/home/work/backup/spiders_platform/data'
MAIL_HOST="smtp.126.com"
MAIL_PORT="25"
HDFS_IP="10.15.1.11"
USER_AGENTS=[
"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; AcooBrowser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; Acoo Browser; SLCC1; .NET CLR 2.0.50727; Media Center PC 5.0; .NET CLR 3.0.04506)",
"Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.5; AOLBuild 4337.35; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
"Mozilla/5.0 (Windows; U; MSIE 9.0; Windows NT 9.0; en-US)",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)",
"Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)",
"Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 3.0.04506.30)",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 (Change: 287 c9dfb30)",
"Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0",
"Mozilla/5.0 (X11; Linux i686; U;) Gecko/20070322 Kazehakase/0.4.5",
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Fedora/1.9.0.8-1.fc10 Kazehakase/0.5.6",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.20 (KHTML, like Gecko) Chrome/19.0.1036.7 Safari/535.20",
"Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; fr) Presto/2.9.168 Version/11.52",
] | [
"luoshao23@gmail.com"
] | luoshao23@gmail.com |
239e6c075bec21dff439ef851b1d0bd654eab243 | 78c0dc213cb0a34d211cb1536ab6403e2c72bb5e | /backend/bot/channel_reaction/commands.py | 025ac55ecb9169eb0585053b722994cd048d01f8 | [] | no_license | git-hub-lab/reactor | 45191bf4ca0eef657467c445d0f94cb06d788899 | 4a1eb27388506d6783df01f7cb9790827971c89d | refs/heads/master | 2023-03-29T01:32:46.984776 | 2020-07-29T21:31:51 | 2020-07-29T21:31:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,092 | py | import logging
from telegram import Message as TGMessage, Update, User as TGUser
from telegram.ext import CallbackContext, Filters
from bot import redis, filters
from bot.redis import State
from bot.wrapper import command
from core.models import Message
logger = logging.getLogger(__name__)
@command('start', filters=Filters.private & filters.has_arguments, pass_args=True)
def command_start(update: Update, context: CallbackContext):
"""Initiate reaction."""
user: TGUser = update.effective_user
msg: TGMessage = update.effective_message
message_id = context.args[0]
try:
Message.objects.get(id=message_id)
except Message.DoesNotExist:
logger.debug(f"Message {message_id} doesn't exist.")
msg.reply_text(
"Message you want to react to is invalid "
"(either too old or magically disappeared from DB)."
)
return
msg.reply_text('Now send me your reaction. It can be a single emoji or a sticker.')
redis.set_state(user.id, State.reaction)
redis.set_key(user.id, 'message_id', message_id)
| [
"bachynin.i@gmail.com"
] | bachynin.i@gmail.com |
1952081d6abd867637e7d2fbda94c72fc3f2e7a9 | 59f64b5cf799e31c97b11828dba4787afb8f3f17 | /batch/batch/worker/jvm_entryway_protocol.py | fd09b55cce765faf638cbc885b622eb873739e7f | [
"MIT"
] | permissive | hail-is/hail | 2089e6f3b38548f13fa5c2a8ab67f5cfdd67b4f1 | 07a483ae0f46c66f3ed6fd265b48f48c06298f98 | refs/heads/main | 2023-09-01T15:03:01.450365 | 2023-09-01T02:46:35 | 2023-09-01T02:46:35 | 45,069,467 | 913 | 262 | MIT | 2023-09-14T21:53:32 | 2015-10-27T20:55:42 | Python | UTF-8 | Python | false | false | 1,582 | py | import asyncio
import logging
import struct
log = logging.getLogger('jvm_entryway_protocol')
def write_int(writer: asyncio.StreamWriter, v: int):
writer.write(struct.pack('>i', v))
def write_long(writer: asyncio.StreamWriter, v: int):
writer.write(struct.pack('>q', v))
def write_bytes(writer: asyncio.StreamWriter, b: bytes):
n = len(b)
write_int(writer, n)
writer.write(b)
def write_str(writer: asyncio.StreamWriter, s: str):
write_bytes(writer, s.encode('utf-8'))
class EndOfStream(Exception):
pass
async def read(reader: asyncio.StreamReader, n: int) -> bytes:
b = bytearray()
left = n
while left > 0:
t = await reader.read(left)
if not t:
log.warning(f'unexpected EOS, Java violated protocol ({b})')
raise EndOfStream()
left -= len(t)
b.extend(t)
return b
async def read_byte(reader: asyncio.StreamReader) -> int:
b = await read(reader, 1)
return b[0]
async def read_bool(reader: asyncio.StreamReader) -> bool:
return await read_byte(reader) != 0
async def read_int(reader: asyncio.StreamReader) -> int:
b = await read(reader, 4)
return struct.unpack('>i', b)[0]
async def read_long(reader: asyncio.StreamReader) -> int:
b = await read(reader, 8)
return struct.unpack('>q', b)[0]
async def read_bytes(reader: asyncio.StreamReader) -> bytes:
n = await read_int(reader)
return await read(reader, n)
async def read_str(reader: asyncio.StreamReader) -> str:
b = await read_bytes(reader)
return b.decode('utf-8')
| [
"noreply@github.com"
] | hail-is.noreply@github.com |
d00f6d3c0887565fa9e0ba4997dc401bc35d40c3 | 926621c29eb55046f9f59750db09bdb24ed3078e | /lib/surface/compute/rolling_updates/list_instance_updates.py | 104a98256a3414700d5ad05430dad25b958b98b9 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | bopopescu/SDK | 525d9b29fb2e901aa79697c9dcdf5ddd852859ab | e6d9aaee2456f706d1d86e8ec2a41d146e33550d | refs/heads/master | 2022-11-22T18:24:13.464605 | 2016-05-18T16:53:30 | 2016-05-18T16:53:30 | 282,322,505 | 0 | 0 | NOASSERTION | 2020-07-24T21:52:25 | 2020-07-24T21:52:24 | null | UTF-8 | Python | false | false | 2,645 | py | # Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""rolling-updates list-instance-updates command."""
from googlecloudsdk.api_lib.compute import rolling_updates_util as updater_util
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.third_party.apitools.base.py import exceptions as apitools_exceptions
from googlecloudsdk.third_party.apitools.base.py import list_pager
class ListInstanceUpdates(base.ListCommand):
"""Lists all instance updates for a given update."""
@staticmethod
def Args(parser):
"""Args is called by calliope to gather arguments for this command.
Args:
parser: An argparse parser that you can use to add arguments that go
on the command line after this command. Positional arguments are
allowed.
"""
parser.add_argument('update', help='Update id.')
def Collection(self):
return 'replicapoolupdater.rollingUpdates.instanceUpdates'
def Run(self, args):
"""Run 'rolling-updates list-instance-updates'.
Args:
args: argparse.Namespace, The arguments that this command was invoked
with.
Returns:
List of all the instance updates.
Raises:
HttpException: An http error response was received while executing api
request.
ToolException: An error other than http error occured while executing
the command.
"""
client = self.context['updater_api']
messages = self.context['updater_messages']
resources = self.context['updater_resources']
ref = resources.Parse(
args.update,
collection='replicapoolupdater.rollingUpdates')
request = (
messages.ReplicapoolupdaterRollingUpdatesListInstanceUpdatesRequest(
project=ref.project,
zone=ref.zone,
rollingUpdate=ref.rollingUpdate))
try:
return list_pager.YieldFromList(
client.rollingUpdates, request, method='ListInstanceUpdates')
except apitools_exceptions.HttpError as error:
raise exceptions.HttpException(updater_util.GetError(error))
| [
"richarddewalhalla@gmail.com"
] | richarddewalhalla@gmail.com |
5917e6c21f7e72b0ff6dbfd5b802092f2ededf69 | 4ad809420a3cd82199b31fcb6033ad6b28c5ac60 | /rustici_engine/models/xapi_score.py | 094b8c8dafee57946fdfe4de881e9623daa1f08f | [] | no_license | Myagi/python-rustici-engine-api | 2e4eb21f01b156551a1f4d747aea466dec22f30c | 20684845817cb9790b3bfc9be3db515f7ad5b0ee | refs/heads/master | 2022-03-30T12:26:44.825580 | 2020-02-03T06:34:12 | 2020-02-03T06:34:12 | 237,883,063 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,614 | py | # coding: utf-8
"""
Rustici Engine API
Rustici Engine API # noqa: E501
OpenAPI spec version: 2.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class XapiScore(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'scaled': 'float',
'raw': 'float',
'min': 'float',
'max': 'float'
}
attribute_map = {
'scaled': 'scaled',
'raw': 'raw',
'min': 'min',
'max': 'max'
}
def __init__(self, scaled=None, raw=None, min=None, max=None): # noqa: E501
"""XapiScore - a model defined in Swagger""" # noqa: E501
self._scaled = None
self._raw = None
self._min = None
self._max = None
self.discriminator = None
if scaled is not None:
self.scaled = scaled
if raw is not None:
self.raw = raw
if min is not None:
self.min = min
if max is not None:
self.max = max
@property
def scaled(self):
"""Gets the scaled of this XapiScore. # noqa: E501
:return: The scaled of this XapiScore. # noqa: E501
:rtype: float
"""
return self._scaled
@scaled.setter
def scaled(self, scaled):
"""Sets the scaled of this XapiScore.
:param scaled: The scaled of this XapiScore. # noqa: E501
:type: float
"""
self._scaled = scaled
@property
def raw(self):
"""Gets the raw of this XapiScore. # noqa: E501
:return: The raw of this XapiScore. # noqa: E501
:rtype: float
"""
return self._raw
@raw.setter
def raw(self, raw):
"""Sets the raw of this XapiScore.
:param raw: The raw of this XapiScore. # noqa: E501
:type: float
"""
self._raw = raw
@property
def min(self):
"""Gets the min of this XapiScore. # noqa: E501
:return: The min of this XapiScore. # noqa: E501
:rtype: float
"""
return self._min
@min.setter
def min(self, min):
"""Sets the min of this XapiScore.
:param min: The min of this XapiScore. # noqa: E501
:type: float
"""
self._min = min
@property
def max(self):
"""Gets the max of this XapiScore. # noqa: E501
:return: The max of this XapiScore. # noqa: E501
:rtype: float
"""
return self._max
@max.setter
def max(self, max):
"""Sets the max of this XapiScore.
:param max: The max of this XapiScore. # noqa: E501
:type: float
"""
self._max = max
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(XapiScore, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, XapiScore):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"s.kitchell@live.com"
] | s.kitchell@live.com |
536601f80903439af1ce166f344de9f8bb69171f | 487dedd0be97b18d5e1ee85b6a3572bc896e8fcf | /otscrape/core/extractor/text/etree.py | 64fe59da7a6d1abe8ad6d6a1b57200ac89019c56 | [
"MIT"
] | permissive | SSripilaipong/otscrape | d4502d1ad3c3287681807a2ea8f27d3f1981f1bf | 73ad2ea3d20841cf5d81b37180a1f21c48e87480 | refs/heads/master | 2023-01-23T06:13:32.849937 | 2020-12-03T13:45:16 | 2020-12-03T13:45:16 | 210,801,600 | 0 | 0 | MIT | 2020-11-30T18:04:04 | 2019-09-25T09:04:32 | Python | UTF-8 | Python | false | false | 324 | py | from lxml import etree
from otscrape.core.base.extractor import Extractor
def parse(text):
tree = etree.fromstring(text, etree.HTMLParser())
return tree
class ETree(Extractor):
def extract(self, page, cache):
x = page[self.target]
assert isinstance(x, (str, bytes))
return parse(x)
| [
"santhapon.s@siametrics.com"
] | santhapon.s@siametrics.com |
f22a66523552e8ee89555571d973b24102d7123e | 7e31d945d7d17e3f486fc985ae7f4d775607b72a | /api/migrations/0001_initial.py | a730b89c0133931a770466efea7b186a81a7120e | [] | no_license | SimonielMusyoki/BookAPI | 55f9b959f3fc76b0fa3a1d111b1965fbd1943a11 | c27db0abca5f05f811331df268ca49e81d362d92 | refs/heads/master | 2022-12-17T01:08:09.949684 | 2020-09-11T18:41:47 | 2020-09-11T18:41:47 | 294,776,074 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 889 | py | # Generated by Django 3.1.1 on 2020-09-03 15:17
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Book',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=10)),
('description', models.TextField(blank=True, max_length=256)),
('price', models.DecimalField(decimal_places=2, default=0, max_digits=3)),
('published', models.DateField(blank=True, default=None, null=True)),
('is_published', models.BooleanField(default=False)),
('cover', models.ImageField(blank=True, upload_to='covers/')),
],
),
]
| [
"musyoki.mtk3@gmail.com"
] | musyoki.mtk3@gmail.com |
0d8a6fcf59eb820a2dbc3b27d5554d23712b8403 | d2915ef6ee9c1ea01f47d3468bba8e320a8f5914 | /generators/yield.py | 652f7bd30d951f5463192ce6c4636527de6a7e30 | [] | no_license | asing177/python_basics | a269adbaf166fb760d2692874601528ef230bbbd | 48ce7d5d6356edbd9bc21f8ebb55ec95787d4340 | refs/heads/main | 2023-01-11T12:11:44.155102 | 2020-11-13T07:24:54 | 2020-11-13T07:24:54 | 300,806,395 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 309 | py | def simpleGeneratorFun():
yield 1
yield 2
yield 3
for value in simpleGeneratorFun():
print(value)
def nextSquare():
i = 1;
while True:
yield i*i
i += 1
for num in nextSquare():
if num > 100:
break
print(num) | [
"adityasingh27@hotmail.com"
] | adityasingh27@hotmail.com |
73610a73af4b4716db90756be59b8087c358c855 | 5c69c7daab733ec95b7293937ffce5b2b6875306 | /Hashtable/1. Two Sum.py | da387d0d358a7f77934e783c6923b5002def265d | [
"MIT"
] | permissive | viewv/leetcode | 9de9820f251d949b7442651087172ae10620a8bd | b31e643846bb38978746342e3e3a94991178565a | refs/heads/master | 2021-07-17T11:42:08.070390 | 2020-04-12T13:41:45 | 2020-04-12T13:41:45 | 122,901,543 | 2 | 2 | MIT | 2018-12-24T07:01:06 | 2018-02-26T02:31:16 | Python | UTF-8 | Python | false | false | 267 | py | class Solution:
def twoSum(self, nums: List[int], target: int) -> List[int]:
waitset = dict()
for i, c in enumerate(nums):
if c in waitset:
return [waitset[c], i]
else:
waitset[target-c] = i
| [
"="
] | = |
52a950088da5054e02f6b47e6d7f0333980714e8 | b25182d0034468e5e545c6c72e5a2cdd3c43a484 | /.PyCharm2017.2/system/python_stubs/-223353804/lxml/etree/DTDError.py | d8d11fac2154fda014e7a50d1ea00378452f540a | [] | no_license | lovewula/config | f9ac16b30082c04be7733969d5359ee6c7258db6 | c0720e5bfd49f579a52f83de36de40c76996ebf6 | refs/heads/master | 2021-08-19T19:31:44.088218 | 2017-11-27T08:04:06 | 2017-11-27T08:04:06 | 111,974,690 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 495 | py | # encoding: utf-8
# module lxml.etree
# from D:\Python\Python27\lib\site-packages\lxml\etree.pyd
# by generator 1.145
""" The ``lxml.etree`` module implements the extended ElementTree API for XML. """
# imports
import __builtin__ as __builtins__ # <module '__builtin__' (built-in)>
from LxmlError import LxmlError
class DTDError(LxmlError):
""" Base class for DTD errors. """
def __init__(self, *args, **kwargs): # real signature unknown
pass
__qualname__ = 'DTDError'
| [
"lovewula1314@gmail.com"
] | lovewula1314@gmail.com |
0cb53bb790cfa14623388e02d96422c9d6c6e19d | 0b95798325817ea198ec57951dcce1d6d553d8ee | /Old Stuff/googlefinance.py | 4b11930c33ffa995def4e0410fa8794b5d17d6c7 | [] | no_license | actuarial-tools/PythonStuff | 3ef503a204f21165fee25b7f28c57100ad944c06 | 21b290cf548e82ca91b761a6a7bd876e136e429b | refs/heads/master | 2023-01-12T23:27:20.739987 | 2020-08-31T23:51:17 | 2020-08-31T23:51:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 468 | py | from googlefinance.client import get_price_data, get_prices_data, get_prices_time_data
# Dow Jones
param = {
'q': "AAPL", # Stock symbol (ex: "AAPL")
'i': "86400", # Interval size in seconds ("86400" = 1 day intervals)
'x': "NASDAQ", # Stock exchange symbol on which stock is traded (ex: "NASD")
'p': "1M" # Period (Ex: "1Y" = 1 year)
}
# get price data (return pandas dataframe)
df = get_price_data(param)
length=(df.shape[0])-1
print(df.ix[length])
| [
"34726618+tyco333@users.noreply.github.com"
] | 34726618+tyco333@users.noreply.github.com |
9e247fcd3792985b1e1c0c795a01afdf5daf3a32 | f4ad721b7158ff2605be6f7e4bde4af6e0e11364 | /vt_manager_kvm/src/python/vt_manager_kvm/communication/geni/v3/tests/configurators/vtam/testdriverconfiguration.py | f489ff6e5663afe89875763e4c01dc6d350e51f9 | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | ict-felix/stack | 3fb4222a0538c0dbbe351ccc3da1bafa9ca37057 | 583ccacf067b9ae6fc1387e53eaf066b4f3c0ade | refs/heads/master | 2021-01-10T10:16:29.851916 | 2016-06-22T15:11:11 | 2016-06-22T15:11:11 | 51,439,714 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 952 | py | import sys
sys.path.append("/opt/ofelia/core_kvm/lib/am/")
sys.path.append("/opt/ofelia/core_kvm/lib/")
sys.path.append("/opt/ofelia/vt_manager_kvm/src/python/")
import os
import sys
from os.path import dirname, join
# This is needed because wsgi disallows using stdout
sys.stdout = sys.stderr
os.environ['DJANGO_SETTINGS_MODULE'] = 'vt_manager_kvm.settings.settingsLoader'
from vt_manager_kvm.communication.geni.v3.configurators.handlerconfigurator import HandlerConfigurator
from vt_manager_kvm.communication.geni.v3.drivers.vtam import VTAMDriver
import unittest
class TestDriverConfigurator(unittest.TestCase):
def setUp(self):
self.configurator = HandlerConfigurator
self.configured_driver = self.configurator.get_vt_am_driver()
def test_should_get_vt_am_driver_instance(self):
self.assertTrue(isinstance(self.configured_driver, VTAMDriver))
if __name__ == "__main__":
unittest.main()
| [
"jenkins@localhost"
] | jenkins@localhost |
3e005d69f1b690be7bd975b76f34196ba6b556d8 | ce4d77f4aa4d615ef3736ddef61ff204631959d9 | /studies/migrations/0002_study_state.py | 9bb5c6d1dd3cfdab0e0e0e1836e01361a084ff91 | [
"MIT"
] | permissive | joaoalves2010/lookit-api | 391b40915fc843ffacd957d100b200990da19245 | ee0ded40bae1c20d93a883dcd55561ffd69a932f | refs/heads/master | 2021-01-20T10:29:34.037473 | 2017-05-16T18:16:28 | 2017-05-16T18:16:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 671 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-04-24 20:51
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('studies', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='study',
name='state',
field=models.CharField(choices=[('created', 'Created'), ('submitted', 'Submitted'), ('rejected', 'Rejected'), ('retracted', 'Retracted'), ('approved', 'Approved'), ('active', 'Active'), ('paused', 'Paused'), ('deactivated', 'Deactivated')], default='created', max_length=25),
),
]
| [
"cwisecarver@cos.io"
] | cwisecarver@cos.io |
41ce402e18afba510e2dbe34d9f61334a1de3042 | f5d7f59e3811919145f3601b2da9e8ac3635fa8b | /ICAPS-16/utils.py | 591cac62d3b047d4a930e31811f20512b80f9087 | [] | no_license | AI-Planning/publicity-chair-tools | cc7709585479dd8a62d69dd6ee592fa165354f50 | d7fd543424ae71fa5a5a4fa76c06c17bca0129bf | refs/heads/master | 2021-05-23T13:39:39.205642 | 2017-05-16T03:11:19 | 2017-05-16T03:11:19 | 253,316,546 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 252 | py |
import os, time, shutil, glob
def html_files():
return glob.glob('../*.html')
def backup():
fid = str(int(time.time()))
os.mkdir(os.path.join('backup', fid))
for f in html_files():
shutil.copy(f, os.path.join('backup', fid))
| [
"christian.muise@gmail.com"
] | christian.muise@gmail.com |
93882d577bd2aa4d20226bdd2fe605279e1d0cde | 6146d080087b21e36347408eea76598f4691ed67 | /code/greedy/5203.py | e26d214d34d0881d81d689bee2e97321026ab6d2 | [] | no_license | banggeut01/algorithm | 682c4c6e90179b8100f0272bf559dbeb1bea5a1d | 503b727134909f46e518c65f9a9aa58479a927e9 | refs/heads/master | 2020-06-27T14:07:51.927565 | 2019-12-19T03:48:30 | 2019-12-19T03:48:30 | 199,800,363 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 824 | py | # 5203.py 베이비진 게임
def is_babygin(i, c):
if c[i] == 3: # run
return True
# triplet
if -1 < i - 1 and c[i - 1]:
if -1 < i - 2 and c[i - 2]:
return True # i-2,i-1,i
elif i + 1 < 10 and c[i + 1]:
return True # i-1,i,i+1
if i < 8 and c[i + 1] and c[i + 2]:
return True # i,i+1,i+2
return False
t = int(input())
for tc in range(1, t + 1):
card = list(map(int, input().split()))
c1, c2 = [0] * 10, [0] * 10
for i in range(6):
c1[card[i * 2]] += 1
if is_babygin(card[i * 2], c1):
print('#{} 1'.format(tc))
break
c2[card[i * 2 + 1]] += 1
if is_babygin(card[i * 2 + 1], c2):
print('#{} 2'.format(tc))
break
else:
print('#{} 0'.format(tc)) | [
"genie121110@gmail.com"
] | genie121110@gmail.com |
dd11adff413d18d91d56bee3e060982b713e652c | 200abee8ebb5fa255e594c8d901c8c68eb9c1a9c | /venv/01_Stepik/Python_Programmirovanie/2.5_2.py | 3b751f4923be2be48f18566154558b2a212d2509 | [] | no_license | Vestenar/PythonProjects | f083cbc07df57ea7a560c6b18efed2bb0dc42efb | f8fdf9faff013165f8d835b0ccb807f8bef6dac4 | refs/heads/master | 2021-07-20T14:14:15.739074 | 2019-03-12T18:05:38 | 2019-03-12T18:05:38 | 163,770,129 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 842 | py | '''
Напишите программу, на вход которой подаётся список чисел одной строкой.
Программа должна для каждого элемента этого списка вывести сумму двух его соседей.
Для элементов списка, являющихся крайними, одним из соседей считается элемент,
находящий на противоположном конце этого списка.
'''
a = [int(i) for i in input().split()]
b = []
cc = ''
if len(a) < 2:
b = a
elif len(a) == 2:
b.append(a[0]+a[1])
b.append(a[1]+a[0])
else:
b.append(a[1]+a[-1])
for j in range(1, len(a)-1):
b.append(a[j-1]+a[j+1])
b.append(a[0]+a[-2])
for bb in b:
cc += str(bb) + ' '
print(cc) | [
"vestenar@gmail.com"
] | vestenar@gmail.com |
d56a0ca77428beff3c986780889c46bc2f71f360 | f4c91a4b1112ec54d87964fe1cc3d7c772bc0d5f | /backend/modules/camera/serializers.py | 401598fe2acf95b8b281c7742c175248d5bd5110 | [] | no_license | crowdbotics-apps/my-new-app-31789 | 7d41bf3b7f23536095ffc0ec62bbb56f81b097f0 | c5513ad2df9e73707871e1c10c6768a93690f9a7 | refs/heads/master | 2023-08-25T23:02:37.774572 | 2021-11-01T15:53:29 | 2021-11-01T15:53:29 | 423,511,641 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 509 | py | from .models import Image
from rest_framework import serializers
class ImageSerializer(serializers.HyperlinkedModelSerializer):
image = serializers.SerializerMethodField()
def get_image(self, obj):
return obj.image.url
class Meta:
model = Image
fields = (
"id",
"image",
)
class ImageUploadSerializer(serializers.ModelSerializer):
image = serializers.ImageField()
class Meta:
model = Image
fields = ("image",)
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
f7ce03e38ad7d5aa9a21b0fac115503aad0be1c0 | 50e3fcca6e2a9a73ed52d231a739f70c28ed108f | /String/reverseString.py | 9eb938bc314f6f2ff75e464670796110f7324311 | [] | no_license | thomasyu929/Leetcode | efa99deaa2f6473325de516d280da6911c2cc4ab | 780271875c5b50177653fd7fe175d96dd10e84e2 | refs/heads/master | 2022-03-29T00:11:01.554523 | 2020-01-03T00:28:22 | 2020-01-03T00:28:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 315 | py | class Solution:
def reverseString(self, s: List[str]) -> None:
"""
Do not return anything, modify s in-place instead.
"""
left, right = 0, len(s)-1
while right > left:
s[left], s[right] = s[right], s[left]
left += 1
right -= 1
| [
"yby4301955@gmail.com"
] | yby4301955@gmail.com |
87aa2ccdfa27f434815a78bd12020e6fe19e4f04 | b333dc607a2f1556f6a8adb6d16dc88fa8a30c8b | /portal/apps/actstream/exceptions.py | a9e423369b62ba4f9e9389cc63c6b24a52dfc1b2 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | hernan0216/utopia-cms | 6558f8f600620c042dd79c7d2edf18fb77caebb8 | 48b48ef9acf8e3d0eb7d52601a122a01da82075c | refs/heads/main | 2023-02-06T10:31:35.525180 | 2020-12-15T17:43:28 | 2020-12-15T17:43:28 | 321,775,279 | 1 | 0 | BSD-3-Clause | 2020-12-15T19:59:17 | 2020-12-15T19:59:16 | null | UTF-8 | Python | false | false | 1,324 | py | from django.db.models.base import ModelBase
from django.core.exceptions import ImproperlyConfigured
from actstream.settings import get_models
class ModelNotActionable(ImproperlyConfigured):
"""
Raised when a Model not in ``ACTSTREAM_ACTION_MODELS`` setting is used in
an Action.
"""
def __str__(self):
model = self.args[0]
if not is_model(model):
return 'Object %r must be a Django Model not %s' % (model,
type(model))
opts = model._meta
return 'Model %s not recognized, add "%s.%s" to the ACTSTREAM_SETTINGS["MODELS"] settings' % (
model.__name__, opts.app_label, opts.module_name)
class BadQuerySet(ValueError):
"""
Action stream must return a QuerySet of Action items.
"""
def is_model(obj):
"""
Returns True if the obj is a Django model
"""
if not hasattr(obj, '_meta'):
return False
if not hasattr(obj._meta, 'db_table'):
return False
return True
def check_actionable_model(model):
"""
If the model is not defined in the ``MODELS`` setting this check raises the
``ModelNotActionable`` exception.
"""
model = model if hasattr(model, 'objects') else model.__class__
if not model in get_models().values():
raise ModelNotActionable(model)
| [
"apacheco@ladiaria.com.uy"
] | apacheco@ladiaria.com.uy |
4e6526852876cd354c2a489dcd5a811b53acfb85 | fe19d2fac4580d463132e61509bd6e3cc2cf958d | /otp/distributed/TelemetryLimiter.py | 4ab97f057c5473430e442d1bc44faaafd8a94a00 | [] | no_license | t00nt0wn1dk/c0d3 | 3e6db6dd42c3aa36ad77709cf9016176a3f3a44f | 7de105d7f3de0f8704b020e32fd063ee2fad8d0d | refs/heads/master | 2021-01-01T16:00:15.367822 | 2015-03-21T21:25:52 | 2015-03-21T21:25:55 | 32,647,654 | 3 | 5 | null | null | null | null | UTF-8 | Python | false | false | 3,548 | py | # 2013.08.22 22:15:20 Pacific Daylight Time
# Embedded file name: otp.distributed.TelemetryLimiter
from direct.showbase.DirectObject import DirectObject
from otp.avatar.DistributedPlayer import DistributedPlayer
from direct.task.Task import Task
class TelemetryLimiter(DirectObject):
__module__ = __name__
TaskName = 'TelemetryLimiterEnforce'
LeakDetectEventName = 'telemetryLimiter'
def __init__(self):
self._objs = {}
self._task = taskMgr.add(self._enforceLimits, self.TaskName, priority=40)
def destroy(self):
taskMgr.remove(self._task)
del self._objs
def getNumObjs(self):
return len(self._objs)
def addObj(self, obj):
id = obj.getTelemetryLimiterId()
self._objs[id] = obj
self.accept(self._getDummyEventName(obj), self._dummyEventHandler)
def _getDummyEventName(self, obj):
return '%s-%s-%s-%s' % (self.LeakDetectEventName,
obj.getTelemetryLimiterId(),
id(obj),
obj.__class__.__name__)
def _dummyEventHandler(self, *args, **kargs):
pass
def removeObj(self, obj):
id = obj.getTelemetryLimiterId()
self._objs.pop(id)
self.ignore(self._getDummyEventName(obj))
def _enforceLimits(self, task = None):
for obj in self._objs.itervalues():
obj.enforceTelemetryLimits()
return Task.cont
class TelemetryLimit():
__module__ = __name__
def __call__(self, obj):
pass
class RotationLimitToH(TelemetryLimit):
__module__ = __name__
def __init__(self, pConst = 0.0, rConst = 0.0):
self._pConst = pConst
self._rConst = rConst
def __call__(self, obj):
obj.setHpr(obj.getH(), self._pConst, self._rConst)
class TLNull():
__module__ = __name__
def __init__(self, *limits):
pass
def destroy(self):
pass
class TLGatherAllAvs(DirectObject):
__module__ = __name__
def __init__(self, name, *limits):
self._name = name
self._avs = {}
self._limits = makeList(limits)
self._avId2limits = {}
avs = base.cr.doFindAllInstances(DistributedPlayer)
for av in avs:
self._handlePlayerArrive(av)
self.accept(DistributedPlayer.GetPlayerGenerateEvent(), self._handlePlayerArrive)
self.accept(DistributedPlayer.GetPlayerNetworkDeleteEvent(), self._handlePlayerLeave)
def _handlePlayerArrive(self, av):
if av is not localAvatar:
self._avs[av.doId] = av
limitList = []
for limit in self._limits:
l = limit()
limitList.append(l)
av.addTelemetryLimit(l)
self._avId2limits[av.doId] = limitList
base.cr.telemetryLimiter.addObj(av)
def _handlePlayerLeave(self, av):
if av is not localAvatar:
base.cr.telemetryLimiter.removeObj(av)
for limit in self._avId2limits[av.doId]:
av.removeTelemetryLimit(limit)
del self._avId2limits[av.doId]
del self._avs[av.doId]
def destroy(self):
self.ignoreAll()
while len(self._avs):
self._handlePlayerLeave(self._avs.values()[0])
del self._avs
del self._limits
del self._avId2limits
# okay decompyling C:\Users\Maverick\Documents\Visual Studio 2010\Projects\Unfreezer\py2\otp\distributed\TelemetryLimiter.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2013.08.22 22:15:21 Pacific Daylight Time
| [
"anonymoustoontown@gmail.com"
] | anonymoustoontown@gmail.com |
c24358f43d7141913b10d3ef9b1b265a7c0fae6b | ec0b8bfe19b03e9c3bb13d9cfa9bd328fb9ca3f1 | /res/packages/scripts/scripts/client/web_client_api/commands/clan_management.py | 65a1fad870de4dcf63e6067d0af4748bb2b7479d | [] | no_license | webiumsk/WOT-0.9.20.0 | de3d7441c5d442f085c47a89fa58a83f1cd783f2 | 811cb4e1bca271372a1d837a268b6e0e915368bc | refs/heads/master | 2021-01-20T22:11:45.505844 | 2017-08-29T20:11:38 | 2017-08-29T20:11:38 | 101,803,045 | 0 | 1 | null | null | null | null | WINDOWS-1250 | Python | false | false | 1,168 | py | # 2017.08.29 21:52:10 Střední Evropa (letní čas)
# Embedded file name: scripts/client/web_client_api/commands/clan_management.py
from collections import namedtuple
from command import SchemeValidator, CommandHandler, instantiateObject
_ClanManagementCommand = namedtuple('_ClanManagementCommand', ('action', 'custom_parameters'))
_ClanManagementCommand.__new__.__defaults__ = (None, {})
_ClanManagementCommandScheme = {'required': (('action', basestring),)}
class ClanManagementCommand(_ClanManagementCommand, SchemeValidator):
"""
Represents web command for clan management.
"""
def __init__(self, *args, **kwargs):
super(ClanManagementCommand, self).__init__(_ClanManagementCommandScheme)
def createClanManagementHandler(handlerFunc):
data = {'name': 'clan_management',
'cls': ClanManagementCommand,
'handler': handlerFunc}
return instantiateObject(CommandHandler, data)
# okay decompyling c:\Users\PC\wotmods\files\originals\res\packages\scripts\scripts\client\web_client_api\commands\clan_management.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.08.29 21:52:10 Střední Evropa (letní čas)
| [
"info@webium.sk"
] | info@webium.sk |
65ea134875ba2ab9fcd78dbd165786366276074b | f82757475ea13965581c2147ff57123b361c5d62 | /gi-stubs/repository/Gio/SettingsSchema.py | 133a7559436e836df6baa87635606f86f050fc0a | [] | no_license | ttys3/pygobject-stubs | 9b15d1b473db06f47e5ffba5ad0a31d6d1becb57 | d0e6e93399212aada4386d2ce80344eb9a31db48 | refs/heads/master | 2022-09-23T12:58:44.526554 | 2020-06-06T04:15:00 | 2020-06-06T04:15:00 | 269,693,287 | 8 | 2 | null | 2020-06-05T15:57:54 | 2020-06-05T15:57:54 | null | UTF-8 | Python | false | false | 5,652 | py | # encoding: utf-8
# module gi.repository.Gio
# from /usr/lib64/girepository-1.0/Gio-2.0.typelib
# by generator 1.147
# no doc
# imports
import gi as __gi
import gi.overrides as __gi_overrides
import gi.overrides.Gio as __gi_overrides_Gio
import gi.overrides.GObject as __gi_overrides_GObject
import gi.repository.GObject as __gi_repository_GObject
import gobject as __gobject
class SettingsSchema(__gi.Boxed):
# no doc
def copy(self, *args, **kwargs): # real signature unknown
pass
def get_id(self): # real signature unknown; restored from __doc__
""" get_id(self) -> str """
return ""
def get_key(self, name): # real signature unknown; restored from __doc__
""" get_key(self, name:str) -> Gio.SettingsSchemaKey """
pass
def get_path(self): # real signature unknown; restored from __doc__
""" get_path(self) -> str """
return ""
def has_key(self, name): # real signature unknown; restored from __doc__
""" has_key(self, name:str) -> bool """
return False
def list_children(self): # real signature unknown; restored from __doc__
""" list_children(self) -> list """
return []
def list_keys(self): # real signature unknown; restored from __doc__
""" list_keys(self) -> list """
return []
def ref(self): # real signature unknown; restored from __doc__
""" ref(self) -> Gio.SettingsSchema """
pass
def unref(self): # real signature unknown; restored from __doc__
""" unref(self) """
pass
def _clear_boxed(self, *args, **kwargs): # real signature unknown
pass
def __delattr__(self, *args, **kwargs): # real signature unknown
""" Implement delattr(self, name). """
pass
def __dir__(self, *args, **kwargs): # real signature unknown
""" Default dir() implementation. """
pass
def __eq__(self, *args, **kwargs): # real signature unknown
""" Return self==value. """
pass
def __format__(self, *args, **kwargs): # real signature unknown
""" Default object formatter. """
pass
def __getattribute__(self, *args, **kwargs): # real signature unknown
""" Return getattr(self, name). """
pass
def __ge__(self, *args, **kwargs): # real signature unknown
""" Return self>=value. """
pass
def __gt__(self, *args, **kwargs): # real signature unknown
""" Return self>value. """
pass
def __hash__(self, *args, **kwargs): # real signature unknown
""" Return hash(self). """
pass
def __init_subclass__(self, *args, **kwargs): # real signature unknown
"""
This method is called when a class is subclassed.
The default implementation does nothing. It may be
overridden to extend subclasses.
"""
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
def __le__(self, *args, **kwargs): # real signature unknown
""" Return self<=value. """
pass
def __lt__(self, *args, **kwargs): # real signature unknown
""" Return self<value. """
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
def __ne__(self, *args, **kwargs): # real signature unknown
""" Return self!=value. """
pass
def __reduce_ex__(self, *args, **kwargs): # real signature unknown
""" Helper for pickle. """
pass
def __reduce__(self, *args, **kwargs): # real signature unknown
""" Helper for pickle. """
pass
def __repr__(self, *args, **kwargs): # real signature unknown
""" Return repr(self). """
pass
def __setattr__(self, *args, **kwargs): # real signature unknown
""" Implement setattr(self, name, value). """
pass
def __sizeof__(self, *args, **kwargs): # real signature unknown
""" Size of object in memory, in bytes. """
pass
def __str__(self, *args, **kwargs): # real signature unknown
""" Return str(self). """
pass
def __subclasshook__(self, *args, **kwargs): # real signature unknown
"""
Abstract classes can override this to customize issubclass().
This is invoked early on by abc.ABCMeta.__subclasscheck__().
It should return True, False or NotImplemented. If it returns
NotImplemented, the normal algorithm is used. Otherwise, it
overrides the normal algorithm (and the outcome is cached).
"""
pass
def __weakref__(self, *args, **kwargs): # real signature unknown
pass
__class__ = None # (!) real value is "<class 'gi.types.StructMeta'>"
__dict__ = None # (!) real value is "mappingproxy({'__info__': StructInfo(SettingsSchema), '__module__': 'gi.repository.Gio', '__gtype__': <GType GSettingsSchema (94269257323904)>, '__dict__': <attribute '__dict__' of 'SettingsSchema' objects>, '__weakref__': <attribute '__weakref__' of 'SettingsSchema' objects>, '__doc__': None, 'get_id': gi.FunctionInfo(get_id), 'get_key': gi.FunctionInfo(get_key), 'get_path': gi.FunctionInfo(get_path), 'has_key': gi.FunctionInfo(has_key), 'list_children': gi.FunctionInfo(list_children), 'list_keys': gi.FunctionInfo(list_keys), 'ref': gi.FunctionInfo(ref), 'unref': gi.FunctionInfo(unref)})"
__gtype__ = None # (!) real value is '<GType GSettingsSchema (94269257323904)>'
__info__ = StructInfo(SettingsSchema)
| [
"ttys3@outlook.com"
] | ttys3@outlook.com |
9a8f4257b92a9fa34be0a539801cfcf273634ec4 | f91ba67b3fbe988f2c8a17d5973aa576006d2d05 | /phantom_00010.py | c45dd9cfc39466f9162cbc7d9bebc5a24ca01b82 | [] | no_license | decarlof/tomobanktools | affb5bb8ecceac0311907f2cee21452c12a82abe | c7a7df95445f2cf2edeb2e3f354e5030dd762721 | refs/heads/master | 2020-06-13T17:28:55.358342 | 2018-04-04T17:27:16 | 2018-04-04T17:27:16 | 75,575,835 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,402 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Dec 3 15:35:30 2016
@author: decarlo
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from xdesign import *
import os
import time
import pytz
import datetime
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import dxfile.dxtomo as dx
import dxchange
def iso_time():
# set the experiment date
now = datetime.datetime.today()
# set iso format time
central = pytz.timezone('US/Central')
local_time = central.localize(now)
local_time_iso = local_time.isoformat()
return local_time_iso
if __name__ == '__main__':
# Set tomobank id
tomobank_id = 'phantom_00010'
# Set path to the micro-CT data to convert.
fname = '/local/decarlo/conda/tomobank/phantoms/' + tomobank_id + '/' + tomobank_id + '.h5'
# Set meta-data
experimenter_affiliation="Argonne National Laboratory"
experimenter_email="tomobank@anl.gov"
instrument_name="XDesign VERSION:0.2.0.dev0+1d67599b8f104ebded86bac98100dbf15e251a66 FUNCTION: SlantedSquares(count=16, angle=5/360*2*np.pi, gap=0.01), prop='mass_atten'"
sample_name = tomobank_id
# Phantom generation start time
start_date = iso_time()
np.random.seed(0) # random seed for repeatability
phantom = Foam(size_range=[0.1, 0.01], gap=0, porosity=0.5)
ccd_x = 256
ccd_y = 256
n_proj = 512
step = 1. / ccd_x
prb = Probe(Point([step / 2., -10]), Point([step / 2., 10]), step)
#plt.imshow(np.reshape(sino, (n_proj, ccd_x)), cmap='gray', interpolation='nearest')
#plt.show(block=True)
n_dark = 1
n_white = 1
dark = np.zeros((n_dark, ccd_y, ccd_x)) # Array filled with zeros
flat = np.ones((n_white, ccd_y, ccd_x)) # Array filled with ones
sino = sinogram(n_proj, ccd_x, phantom)
proj = np.expand_dims(sino, 1)
# Theta
theta_step = np.pi / n_proj
theta_step_deg = theta_step * 180./np.pi
theta = np.arange(0, 180., 180. / n_proj)
# Set data collection angles as equally spaced between 0-180 degrees.
start_angle = 0
start_angle_unit = 'deg'
end_angle = 180
end_angle_unit = 'deg'
angular_step_unit = 'deg'
# Phantom generation end time
end_date = iso_time()
# Write ground_truth
ground_truth = discrete_phantom(phantom, ccd_x, prop='mass_atten')
fname_gt='/local/decarlo/conda/tomobank/phantoms/' + tomobank_id + '/' + tomobank_id + '_ground_truth'
dxchange.write_tiff(ground_truth, fname=fname_gt, dtype='float32')
#plt.imshow(ground_truth, interpolation='none', cmap=plt.cm.inferno)
#plt.show()
# Save into a data-exchange file.
if os.path.isfile(fname):
print ("Data Exchange file already exists: ", fname)
else:
# Create new folder.
dirPath = os.path.dirname(fname)
if not os.path.exists(dirPath):
os.makedirs(dirPath)
# Open DataExchange file
f = dx.File(fname, mode='w')
# Write the Data Exchange HDF5 file.
f.add_entry(dx.Entry.experimenter(affiliation={'value': experimenter_affiliation}))
f.add_entry(dx.Entry.experimenter(email={'value': experimenter_email}))
f.add_entry(dx.Entry.instrument(name={'value': instrument_name}))
f.add_entry(dx.Entry.sample(name={'value': sample_name}))
f.add_entry(dx.Entry.data(data={'value': proj, 'units':'counts'}))
f.add_entry(dx.Entry.data(data_white={'value': flat, 'units':'counts'}))
f.add_entry(dx.Entry.data(data_dark={'value': dark, 'units':'counts'}))
f.add_entry(dx.Entry.data(theta={'value': theta, 'units':'degrees'}))
f.add_entry(dx.Entry.data(ground_truth={'value': ground_truth, 'units':'counts'}))
f.add_entry(dx.Entry.acquisition(start_date={'value': start_date}))
f.add_entry(dx.Entry.acquisition(end_date={'value': end_date}))
f.add_entry(dx.Entry.acquisition_setup(rotation_start_angle={'value': start_angle, 'unit': start_angle_unit}))
f.add_entry(dx.Entry.acquisition_setup(rotation_end_angle={'value': end_angle, 'unit': end_angle_unit}))
f.add_entry(dx.Entry.acquisition_setup(angular_step={'value': theta_step_deg, 'unit': angular_step_unit}))
f.close()
| [
"decarlof@gmail.com"
] | decarlof@gmail.com |
6ff60baecdd37afb05f52e9cf4f338deb432a2da | cd552f4a5b4be789fc08b394ade54048ed1659cf | /backend/task/migrations/0001_initial.py | 50b0122f5b290e1f9f224480de1e8f5bab33624d | [] | no_license | crowdbotics-apps/quizzer-21916 | b7277bf476d3aec8cd4b6b733175c47861f64a35 | 5dd6058ef091d35c28fd8e8c6861a7fb9c2e5793 | refs/heads/master | 2022-12-31T11:24:44.129262 | 2020-10-24T19:28:57 | 2020-10-24T19:28:57 | 306,953,808 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,121 | py | # Generated by Django 2.2.16 on 2020-10-24 19:27
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('task_profile', '0001_initial'),
('location', '0001_initial'),
('task_category', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Task',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('details', models.TextField()),
('frequency', models.CharField(max_length=7)),
('size', models.CharField(max_length=6)),
('is_confirmed', models.BooleanField()),
('status', models.CharField(max_length=10)),
('timestamp_created', models.DateTimeField(auto_now_add=True)),
('timestamp_confirmed', models.DateTimeField(blank=True, null=True)),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='task_category', to='task_category.Category')),
('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='task_customer', to='task_profile.CustomerProfile')),
('location', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='task_location', to='location.TaskLocation')),
('subcategory', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='task_subcategory', to='task_category.Subcategory')),
('tasker', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='task_tasker', to='task_profile.TaskerProfile')),
],
),
migrations.CreateModel(
name='TaskTransaction',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('status', models.CharField(max_length=10)),
('timestamp_completed', models.DateTimeField(blank=True, null=True)),
('date', models.DateField(blank=True, null=True)),
('timestamp_started', models.DateTimeField(blank=True, null=True)),
('task', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='tasktransaction_task', to='task.Task')),
],
),
migrations.CreateModel(
name='Rating',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('rating', models.FloatField()),
('timestamp_created', models.DateTimeField(auto_now_add=True)),
('review', models.TextField(blank=True, null=True)),
('customer', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='rating_customer', to='task_profile.CustomerProfile')),
('tasker', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='rating_tasker', to='task_profile.TaskerProfile')),
],
),
migrations.CreateModel(
name='Message',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('message', models.TextField()),
('timestamp_created', models.DateTimeField(auto_now_add=True)),
('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='message_customer', to='task_profile.CustomerProfile')),
('task', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='message_task', to='task.Task')),
('tasker', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='message_tasker', to='task_profile.TaskerProfile')),
],
),
]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
81fe6ddd5951313145b6075839e0fc6ef7842fa1 | 44064ed79f173ddca96174913910c1610992b7cb | /Second_Processing_app/temboo/Library/KhanAcademy/Topics/GetTopicExercises.py | fdc1bef745733007b4361f180088f6bd5a2a2b5c | [] | no_license | dattasaurabh82/Final_thesis | 440fb5e29ebc28dd64fe59ecd87f01494ed6d4e5 | 8edaea62f5987db026adfffb6b52b59b119f6375 | refs/heads/master | 2021-01-20T22:25:48.999100 | 2014-10-14T18:58:00 | 2014-10-14T18:58:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,361 | py | # -*- coding: utf-8 -*-
###############################################################################
#
# GetTopicExercises
# Retrieves a list of all exercises for a given topic.
#
# Python version 2.6
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class GetTopicExercises(Choreography):
    """Temboo Choreo wrapper that retrieves all exercises for a Khan Academy topic.

    This file follows the standard Temboo generated-code layout: a Choreography
    subclass plus matching InputSet / ResultSet / ChoreographyExecution classes.
    """

    def __init__(self, temboo_session):
        """
        Create a new instance of the GetTopicExercises Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        # The path string identifies this Choreo in the Temboo library.
        Choreography.__init__(self, temboo_session, '/Library/KhanAcademy/Topics/GetTopicExercises')

    def new_input_set(self):
        # Factory for the matching input-set type (called by the base class).
        return GetTopicExercisesInputSet()

    def _make_result_set(self, result, path):
        # Factory for the matching result-set type (called by the base class).
        return GetTopicExercisesResultSet(result, path)

    def _make_execution(self, session, exec_id, path):
        # Factory for the matching execution handle (called by the base class).
        return GetTopicExercisesChoreographyExecution(session, exec_id, path)

class GetTopicExercisesInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the GetTopicExercises
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
    """
    def set_TopicID(self, value):
        """
        Set the value of the TopicID input for this Choreo. ((required, string) The ID of the topic.)
        """
        InputSet._set_input(self, 'TopicID', value)

class GetTopicExercisesResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the GetTopicExercises Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """
    # NOTE(review): the parameter below shadows the builtin `str`; harmless here
    # but generated code elsewhere may rely on positional calls — left as-is.
    def getJSONFromString(self, str):
        return json.loads(str)

    def get_Response(self):
        """
        Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from Khan Academy.)
        """
        return self._output.get('Response', None)

class GetTopicExercisesChoreographyExecution(ChoreographyExecution):
    """Execution handle returned for asynchronous runs of this Choreo."""
    def _make_result_set(self, response, path):
        return GetTopicExercisesResultSet(response, path)
| [
"dattasaurabh82@gmail.com"
] | dattasaurabh82@gmail.com |
b224e8424fe2321fab3bdd4428ddc6c31456e0d2 | f1614f3531701a29a33d90c31ab9dd6211c60c6b | /menu_sun_integration/shared/loggers/logger.py | f08a1781b80144f3a4a19ceb7b4a6ad4331b5f52 | [] | no_license | pfpacheco/menu-sun-api | 8a1e11543b65db91d606b2f3098847e3cc5f2092 | 9bf2885f219b8f75d39e26fd61bebcaddcd2528b | refs/heads/master | 2022-12-29T13:59:11.644409 | 2020-10-16T03:41:54 | 2020-10-16T03:41:54 | 304,511,679 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,836 | py | """
Singleton Design Pattern
Intent: Lets you ensure that a class has only one instance, while providing a
global access point to this instance.
"""
import logging
import json
from threading import Lock
from typing import Optional
# Module-level root logger shared by the bare info() helper below and by the
# Logger singleton class defined later in this module.
logger = logging.getLogger()
logger.setLevel(logging.INFO)

def info(msg):
    """Log *msg* at INFO level on the module's root logger."""
    logger.info(msg)
class LoggerMeta(type):
    """
    Thread-safe Singleton metaclass.

    The first instantiation of a class that uses this metaclass creates the
    instance; every subsequent call returns that same instance.  A Lock
    serializes the creation step so that concurrent first calls cannot race.
    """

    _instance = None
    _lock: Lock = Lock()
    """
    A lock object used to synchronize threads during first access to the
    Singleton.
    """

    def __call__(cls, *args, **kwargs):
        # Fast path: once the singleton exists, return it without touching the
        # lock at all.  `is None` (rather than truthiness) guarantees a falsy
        # instance is never accidentally re-created.
        if cls._instance is None:
            with cls._lock:
                # Double-checked locking: another thread may have created the
                # instance while we were waiting to acquire the lock, so the
                # condition must be re-tested under the lock before creating.
                if cls._instance is None:
                    cls._instance = super().__call__(*args, **kwargs)
        return cls._instance
class Logger(metaclass=LoggerMeta):
    """
    Process-wide structured logger (singleton via LoggerMeta).

    Context fields set through setup() (entity, integration_type, seller ids)
    are merged into every emitted log record by __format_msg.
    """

    def __init__(self) -> None:
        # Context fields; all start unset until setup() is called.
        self.entity = None
        self.integration_type = None
        self.seller_code = None
        self.seller_id = None
        self.entity_id = None

    def setup(self, entity: str = None, integration_type: str = None, seller_id: int = None,
              seller_code: str = None, entity_id: str = None):
        """Install the logging context and return self for chaining."""
        self.entity = entity
        self.integration_type = integration_type
        self.seller_code = seller_code
        self.seller_id = seller_id
        self.entity_id = entity_id
        return self

    def update_entity(self, entity: str):
        """Replace the entity name used as the dynamic key in log records."""
        self.entity = entity

    def update_entity_id(self, entity_id):
        """Replace the entity id reported in log records."""
        self.entity_id = entity_id

    # def set_context(self, entity: str, entity_id=None):
    #     self.entity = entity
    #     self.entity = entity_id if entity_id else self.entity_id if self.entity_id else ""

    def __format_msg(self, key: str, payload: str, description: str, integration: str = None, entity_id=None):
        """Build the structured dict handed to the stdlib logger.

        Explicit `integration` / `entity_id` arguments win over the values
        stored by setup(); falsy values fall back to "" rather than None.
        """
        msg = {}
        integration_value = integration if integration else self.integration_type if self.integration_type else ""
        entity_id_value = entity_id if entity_id else self.entity_id if self.entity_id else ""
        if integration_value:
            msg = {"integration": integration_value}
        # NOTE(review): if setup() was never called, self.entity is None and the
        # record gains a literal None key — presumably setup() always runs
        # first; confirm against callers.
        msg.update({"seller_id": self.seller_id, "seller_code": self.seller_code,
                    self.entity: entity_id_value, "key": key, "description": description, "payload": str(payload)})
        return msg

    @staticmethod
    def dumps(payload=None):
        """JSON-serialize *payload* (convenience passthrough to json.dumps)."""
        return json.dumps(payload)

    def info(self, key: str, description: str, payload, entity_id=None):
        """Emit a structured INFO record."""
        logger.info(self.__format_msg(key=key, description=description, entity_id=entity_id, payload=payload))

    def error(self, key: str, description: str, payload, entity_id=None):
        """Emit a structured ERROR record."""
        logger.error(self.__format_msg(key=key, description=description, entity_id=entity_id, payload=payload))

    def warn(self, key: str, description: str, payload, entity_id=None):
        """Emit a structured WARNING record."""
        logger.warning(self.__format_msg(key=key, description=description, entity_id=entity_id, payload=payload))
| [
"pfpacheco@gmail.com"
] | pfpacheco@gmail.com |
64ab75ba0d4d2d2b104627452a85f0bcb91b6f7a | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03331/s890555095.py | d9eb7cc5b2495ad8d73cd65a7ad507a778046eff | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 176 | py | import math
# Competitive-programming solution (AtCoder, presumably problem p03331):
# minimise the digit sum of A + B where A + B = n and A, B >= 1.
n = int(input())
# For an exact power of ten (log10 is an integer) the best split is e.g.
# 10 -> 1 + 9, giving a combined digit sum of 10.
if math.log10(n) %1 == 0:
    print(10)
    exit()
else:
    # Otherwise the answer is simply the digit sum of n itself
    # (split n digit-wise with no carries).
    ans = 0
    n = str(n)
    for i in range(len(n)):
        ans += int(n[i])
    print(ans)
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
c8422048fff1caffcc3b6757dfa93a48ba25411b | 4b87a0de0f43de2bde41f2590faac970c18fe482 | /core/migrations/0004_bank_employee_ifs_level.py | 3cd9da0d374df2edc8785d34d3b5047e9d77c7ab | [] | no_license | krishSona/testbackend | d0bc325776537d9814b9022b3538b5e8a840e6a4 | d87e050d02542c58876d4f81c2ea99815ab4160e | refs/heads/master | 2023-04-08T01:26:42.070058 | 2021-04-03T06:08:54 | 2021-04-03T06:08:54 | 354,214,243 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,954 | py | # Generated by Django 3.0.5 on 2020-10-10 06:43
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration (core app, 0004).

    Adds the Bank, Level, Ifs and Employee tables.  Generated by
    `makemigrations`; do not hand-edit the operations order.
    """

    dependencies = [
        ('core', '0003_auto_20201009_1250'),
    ]

    operations = [
        # Lookup table of banks.
        migrations.CreateModel(
            name='Bank',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
            ],
        ),
        # Lookup table of employee levels.
        migrations.CreateModel(
            name='Level',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=50)),
            ],
        ),
        # IFS (bank branch) codes, each tied to a Bank.
        migrations.CreateModel(
            name='Ifs',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('code', models.CharField(max_length=11)),
                ('bank_id', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='core.Bank')),
            ],
        ),
        # Employee master record with banking and HR details.
        migrations.CreateModel(
            name='Employee',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
                ('employee_id', models.CharField(blank=True, max_length=50, null=True)),
                ('phone', models.CharField(max_length=10)),
                ('email', models.EmailField(blank=True, max_length=254, null=True)),
                ('joining_date', models.DateField(blank=True, null=True)),
                ('pan', models.CharField(blank=True, max_length=10, null=True)),
                ('aadhaar_number', models.CharField(blank=True, max_length=16, null=True)),
                ('permanent_address', models.TextField(blank=True, null=True)),
                ('current_address', models.TextField(blank=True, null=True)),
                ('net_monthly_salary', models.IntegerField()),
                ('service_status', models.IntegerField(blank=True, null=True)),
                ('credit_limit', models.FloatField(blank=True, null=True)),
                ('bank_account_number', models.CharField(max_length=18)),
                ('department_id', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='core.Department')),
                ('designation_id', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='core.Designation')),
                ('ifs_id', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='core.Ifs')),
                ('level_id', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='core.Level')),
            ],
        ),
    ]
| [
"kali@dailysalary.in"
] | kali@dailysalary.in |
5c72cdf180de5fffecfa4ddc9d16bb2a8e2340a4 | 1e4b37d3bfa9ecba22517548c0577dee76704d8f | /temp_ws/build/control_msgs/control_msgs/catkin_generated/pkg.develspace.context.pc.py | 3d5a5127cfec63ac7ab72bd83035f5504ad2da31 | [] | no_license | mch5048/catkin_ws_4rl | 0aa19dc46effd2ae7941cc1e0b6d824f595d8574 | 2201d3b353da3b380dc7330ae5651e9640cd3408 | refs/heads/master | 2020-04-19T01:42:56.013573 | 2019-02-21T01:09:13 | 2019-02-21T01:09:13 | 167,879,885 | 0 | 0 | null | 2019-02-21T01:09:14 | 2019-01-28T01:27:31 | Makefile | UTF-8 | Python | false | false | 515 | py | # generated from catkin/cmake/template/pkg.context.pc.in
# Auto-generated catkin devel-space package context for control_msgs.
# The values below are substituted by CMake at configure time from
# pkg.context.pc.in — do not edit by hand.
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/irobot/catkin_ws/devel/include".split(';') if "/home/irobot/catkin_ws/devel/include" != "" else []
PROJECT_CATKIN_DEPENDS = "actionlib_msgs;geometry_msgs;message_runtime;std_msgs;trajectory_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "control_msgs"
PROJECT_SPACE_DIR = "/home/irobot/catkin_ws/devel"
PROJECT_VERSION = "1.4.0"
| [
"mch5048@korea.ac.kr"
] | mch5048@korea.ac.kr |
488174fcd8a4f51478362366ed22f4dd0259a5f4 | 3c11ee5d423d86bcb72744c5f7024745afc9a581 | /q1/q1.py | 914f0d85955fb8ddcd9723e0dbac54a0e1eb2ab6 | [] | no_license | Insper/20_sub_rob | 5b167082a033a19fc8e6aaeb5a93c4f10c266025 | 7b73300cf8df191ed6ae7d5a9ec602142f391507 | refs/heads/master | 2022-12-20T03:37:54.982812 | 2020-10-01T18:40:52 | 2020-10-01T18:40:52 | 274,880,025 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,285 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Este NÃO é um programa ROS
from __future__ import print_function, division
import cv2
import os,sys, os.path
import numpy as np
print("Rodando Python versão ", sys.version)
print("OpenCV versão: ", cv2.__version__)
print("Diretório de trabalho: ", os.getcwd())

# Required input file (exam scaffold video).
video = "triangulos.mp4"

if __name__ == "__main__":
    # Open the video file for frame-by-frame capture.
    cap = cv2.VideoCapture(video)

    print("Se a janela com a imagem não aparecer em primeiro plano dê Alt-Tab")

    while(True):
        # Capture frame-by-frame
        ret, frame = cap.read()

        if ret == False:
            #print("Codigo de retorno FALSO - problema para capturar o frame")
            # Rewind to frame 0 so the clip loops forever instead of exiting.
            cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
            continue
            #sys.exit(0)

        # Our operations on the frame come here
        # NOTE(review): rgb and gray are computed but unused — placeholders
        # for the student's processing code.
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # NOTE: in tests, OpenCV 4.0 required BGR frames for cv2.imshow.
        cv2.imshow('imagem', frame)
        # Quit on 'q'.
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # When everything done, release the capture
    cap.release()
    cv2.destroyAllWindows()
| [
"fmiranda@gmail.com"
] | fmiranda@gmail.com |
54ab16d754a94742288670b53b1662e8646f9038 | 4becbe7ecf450e16102282711fb5e9dc336e3065 | /old/feature_selection_thresholds.py | 927ee9f961402ad984830b700369be4d883b24ea | [
"Apache-2.0"
] | permissive | timodonnell/mhcpred | c173cfbd42f4d9cab5f7ade5835d902762835334 | 71f082d94bbde9616ab31f0e70e7076240690163 | refs/heads/master | 2021-01-23T02:34:58.759833 | 2014-11-19T22:19:42 | 2014-11-19T22:19:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,159 | py | import numpy as np
def find_best_threshold_accuracy(x, y):
    """Scan every distinct value of *x* as a split point and return the pair
    ``(best_accuracy, best_threshold)`` against the boolean labels *y*.

    For each candidate threshold t, both polarities of the split ``x <= t``
    are scored; ties keep the earliest (smallest) threshold.  Returns
    ``(0, None)`` when *x* is empty.
    """
    best_accuracy = 0
    best_threshold = None
    for threshold in np.unique(x):
        below = x <= threshold
        # Score both label polarities for this split point.
        score = max(np.mean(below == y), np.mean(~below == y))
        if score > best_accuracy:
            best_accuracy, best_threshold = score, threshold
    return best_accuracy, best_threshold
def find_threshold_pairs(x1, x2, y):
    """Exhaustively search threshold pairs over two features.

    Every combination ``(t1, t2)`` of distinct values from *x1* and *x2*
    partitions samples into four quadrants via ``x1 <= t1`` and ``x2 <= t2``;
    each quadrant indicator is scored against *y* and the best accuracy wins.
    Returns ``(best_accuracy, (t1, t2))``, or ``(0, None)`` for empty input.
    """
    best_accuracy = 0
    best_threshold = None
    for t1 in np.unique(x1):
        mask1 = x1 <= t1
        for t2 in np.unique(x2):
            mask2 = x2 <= t2
            quadrants = (
                mask1 & mask2,
                mask1 & ~mask2,
                ~mask1 & mask2,
                ~mask1 & ~mask2,
            )
            accuracy = max(np.mean(q == y) for q in quadrants)
            if accuracy > best_accuracy:
                best_accuracy = accuracy
                best_threshold = (t1, t2)
    return best_accuracy, best_threshold
"alex.rubinsteyn@gmail.com"
] | alex.rubinsteyn@gmail.com |
965dafa5ccbd266991cbb533b7a53bcaf27119bb | 97181bd72f65cb348a5331858d0e5e6708d1ffd9 | /pipeline/alleleFinder/mergeAlleleTable.py | 2276726474c32ddbda8e5c4062f7c3d664c27164 | [] | no_license | wangyibin/TDGP | 965adeb1d747b949e4e21080dd99408cdb84ba82 | 0b137d7b5fe4d26dac89e91f43583c69b943b1f8 | refs/heads/master | 2022-10-04T05:40:06.089546 | 2022-09-05T07:20:35 | 2022-09-05T07:20:35 | 187,630,451 | 4 | 5 | null | 2021-04-16T07:13:39 | 2019-05-20T11:49:48 | Python | UTF-8 | Python | false | false | 4,926 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
merge allele table from synteny and gmap, and filter by blast.
"""
import argparse
from logging import warn
import os
import os.path
import sys
import logging
import warnings
import pandas as pd
import numpy as np
import multiprocessing as mp
from pandarallel import pandarallel
from joblib import Parallel, delayed
from collections import OrderedDict, Counter
from utils import AllFrame2alleleTable, import_allele_table, remove_dup_from_allele_table
from utils import import_blast, alleleTable2AllFrame
warnings.filterwarnings("ignore")
def create_empty_allele_all_df(df):
    """Return an empty DataFrame sharing the column layout of *df*."""
    return pd.DataFrame(columns=df.columns)
def filter_by_blast(row, allele_table_df, gene_headers):
    """Reconcile one BLAST hit pair (row.qseqid / row.sseqid) against the
    allele table.

    Returns a one-entry dict {index: row_values} describing the (possibly
    merged) allele-table row that should represent the gene pair.

    NOTE(review): mutates slices of allele_table_df via .iloc/.loc assignment
    on a filtered view — pandas chained-assignment semantics apply; confirm
    the writes are intended to hit the copy only.
    """
    gene1, gene2 = row.qseqid, row.sseqid
    # All allele-table rows that mention either gene in any column.
    tmp_df = allele_table_df[allele_table_df.isin([gene1, gene2]).any(1)]
    # Columns other than the canonical gene headers hold duplicate genes.
    dup_gene_df = tmp_df.drop(gene_headers, axis=1)
    if len(tmp_df) < 2:
        if tmp_df.empty:
            # Neither gene known yet: emit a fresh row padded with NaN.
            return {0: [gene1, gene2] + [np.nan] * (len(dup_gene_df.columns) - 2) \
                    + [np.nan] * len(gene_headers)}
        else:
            # Exactly one matching row: append whichever gene is missing
            # into the first free duplicate-gene slot.
            idx, = tmp_df.index.to_list()
            dup_gene_nums = len(dup_gene_df.dropna())
            tmp_df_gene_list = set(tmp_df.loc[idx].to_list() + [idx])
            if gene1 not in tmp_df_gene_list:
                tmp_df.iloc[:, dup_gene_nums] = gene1
            if gene2 not in tmp_df_gene_list:
                tmp_df.iloc[:, dup_gene_nums] = gene2
            return {idx: tmp_df.loc[idx].to_list()}
    else:
        # Multiple matching rows: keep the fullest row (most non-NaN cells)
        # and fold every gene from the other rows into its duplicate slots.
        dup_gene_headers = dup_gene_df.columns
        idx_series = tmp_df.apply(lambda x: len(
            x.dropna()), 1).sort_values(ascending=False)
        idx_series = idx_series.index.to_list()
        idxmax = idx_series[0]
        idxmin = idx_series[1:]
        new_dup_genes = []
        for l in tmp_df.loc[idxmin].values.tolist():
            # Drop NaN cells (the only floats in an otherwise string table).
            new_dup_genes += list(filter(lambda x: not isinstance(x, float), l))
        old_dup_genes = dup_gene_df.loc[idxmax].dropna().to_list()
        new_dup_genes = set(new_dup_genes) | set(old_dup_genes)
        new_dup_genes = sorted(new_dup_genes)
        tmp_df.loc[idxmax, dup_gene_headers] = dict(
            zip(dup_gene_headers, new_dup_genes))
        # tmp_df = tmp_df.loc[idxmax]
        idx = idxmax
    res_list = tmp_df.loc[idx].to_list()
    res_db = {idx: res_list}
    return res_db
def mergeAlleleTable(args):
    """
    %(prog)s 
    """
    # Command-line entry point: concatenates the synteny- and gmap-derived
    # allele tables and writes the merged table.  The BLAST-based filtering
    # path is currently disabled (see commented code below).
    p = p = argparse.ArgumentParser(prog=mergeAlleleTable.__name__,
                        description=mergeAlleleTable.__doc__,
                        formatter_class=argparse.RawTextHelpFormatter,
                        conflict_handler='resolve')
    pReq = p.add_argument_group('Required arguments')
    pOpt = p.add_argument_group('Optional arguments')
    pReq.add_argument('alleletable1', help='allele table from synteny')
    pReq.add_argument('alleletable2', help='allele table from gmap')
    # pReq.add_argument('blast', 
    #         help='blast file after filter')
    pOpt.add_argument('--gene_headers', nargs="+", 
            default=['geneA', 'geneB', 'geneC', 'geneD'],
            help='gene headers of table [default: %(default)s]')
    pOpt.add_argument('-t', '--threads', type=int, default=4,
            help='threads numnber of program [default: %(default)s]')
    pOpt.add_argument('-o', '--output', type=argparse.FileType('w'),
            default=sys.stdout, help='output file [default: stdout]')
    pOpt.add_argument('-h', '--help', action='help',
            help='show help message and exit.')
    
    args = p.parse_args(args)

    # Load both source tables, stack them, and renumber rows.
    synteny_allele_table = import_allele_table(args.alleletable1)
    gmap_allele_table = import_allele_table(args.alleletable2)
    #blast_df = import_blast(args.blast, onlyid=True)
    merge_df = pd.concat(
        [synteny_allele_table, gmap_allele_table], axis=0, ignore_index=True,)
    merge_df.reset_index(drop=True, inplace=True)
    # Expand into the "all frame" representation used downstream.
    merge_all_df = alleleTable2AllFrame(merge_df)
    #rmdup_df = remove_dup_from_allele_table(merge_all_df)
    # Disabled: parallel BLAST-based filtering/merging of duplicate rows.
    # pandarallel.initialize(nb_workers=args.threads, verbose=0)
    # filter_df = blast_df.parallel_apply(filter_by_blast, axis=1, 
    #                     args=(rmdup_df, args.gene_headers))
    # res_df = pd.concat(filter_df.to_frame()[0].map(
    #     lambda x: pd.DataFrame.from_dict(x)).to_list(), axis=1).T.dropna(how='all', axis=0)
    # res_df.columns = rmdup_df.columns
    # res_df = res_df.drop_duplicates()
    # res_df.reset_index(inplace=True, drop=True)
    # rmdup_res_df = remove_dup_from_allele_table(res_df)
    res_df = AllFrame2alleleTable(merge_all_df)
    # '.' marks missing alleles in the output table.
    res_df.to_csv(args.output, sep='\t', header=True, 
                  index=None, na_rep='.')

if __name__ == "__main__":
    mergeAlleleTable(sys.argv[1:])
| [
"738339463@qq.com"
] | 738339463@qq.com |
de3cc18d1c894c0e1ba9fd5c9f1d0c803416aa80 | f7ddd86fa6657e4832bc07cb6b2ba393f5ec1291 | /trainModel/chuangyu/ChuangYu.py | 0ff95dfbd02009cca20536cb8f4ccf92d1c0bdfc | [] | no_license | czasg/NumCharCaptchaCode | b7438d5c06c0fb2c2f4fc5ebf54053f991cf5f3a | e8a7819d4c8f10de1665f9366c944e9285f8b624 | refs/heads/master | 2022-07-05T19:11:04.509427 | 2020-05-21T03:00:14 | 2020-05-21T03:00:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,013 | py | # coding: utf-8
import base64
from model.cnn.CNN import CNN
from trainModel.utils import catchErrorAndRetunDefault
class ChuangYu(CNN):
    """CNN captcha model for the ChuangYu source (6-char, 139x60 images)."""
    width = 139  # image width in pixels
    height = 60  # image height in pixels
    labelLen = 6  # number of characters per captcha
    rotate = [-3, 3, -6, 6]  # augmentation rotation angles (degrees)

    def __init__(self):
        super(ChuangYu, self).__init__()
        self.gatherManager = None
        # Derive model/data paths from this file's location (base-class hook).
        self.initPathParams(__file__)

    def getBatchX(self, imageArray):
        # Flatten the image into a 1-D feature vector; "+ 0" forces a copy.
        return imageArray.flatten() + 0

    @catchErrorAndRetunDefault
    def nextCaptcha(self) -> dict:
        """Fetch the next captcha image, predict its code, and return an
        API-style payload with the base64-encoded image.

        NOTE(review): `Manager` is not imported in this file as shown —
        presumably provided elsewhere; verify before running.
        """
        if not self.gatherManager:
            self.gatherManager = Manager()
        body = self.gatherManager.nextCaptcha()
        code = self.predict(body)
        return {
            "status": 1,
            "msg": "获取成功",
            "data": {
                "code": code,
                "image": f"data:image/png;base64,{base64.b64encode(body).decode()}"
            }
        }

if __name__ == '__main__':
    # ChuangYu().fastTrain()
    ChuangYu().keepTrain()
| [
"czasg0.0"
] | czasg0.0 |
c2b5279f960b5ea83198c8c71c1c0aa798f866eb | 78ef0d7736075ee33ac4230f47c078bbf2b0e014 | /joboffers/migrations/0002_auto_20211102_1725.py | 5e6e8abf0e5445bd9141fc828a8e49aeec986808 | [
"Apache-2.0"
] | permissive | PyAr/pyarweb | e22e9350bf107329e5a79c2368fb182958a134d2 | 5f88d1ea0cea9bd67547b70dc2c8bbaa3b8b9d03 | refs/heads/master | 2023-08-31T10:24:53.220031 | 2023-08-29T16:21:57 | 2023-08-29T16:21:57 | 17,032,696 | 64 | 108 | Apache-2.0 | 2023-09-07T04:02:53 | 2014-02-20T19:28:31 | Python | UTF-8 | Python | false | false | 1,427 | py | # Generated by Django 3.0.14 on 2021-11-02 20:25
from django.db import migrations, models
import taggit_autosuggest.managers
class Migration(migrations.Migration):
    """Auto-generated Django migration (joboffers app, 0002).

    Adjusts JobOffer contact fields and Spanish verbose names; generated by
    `makemigrations` — do not hand-edit the operations order.
    """

    dependencies = [
        ('taggit', '0003_taggeditem_add_unique_index'),
        ('joboffers', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='joboffer',
            name='contact_mail',
            field=models.CharField(blank=True, max_length=255, null=True, verbose_name='Correo electrónico'),
        ),
        migrations.AlterField(
            model_name='joboffer',
            name='contact_phone',
            field=models.CharField(blank=True, max_length=255, null=True, verbose_name='Teléfono'),
        ),
        migrations.AlterField(
            model_name='joboffer',
            name='contact_url',
            field=models.CharField(blank=True, max_length=255, null=True, verbose_name='Sitio web'),
        ),
        migrations.AlterField(
            model_name='joboffer',
            name='description',
            field=models.TextField(verbose_name='Descripción'),
        ),
        migrations.AlterField(
            model_name='joboffer',
            name='tags',
            field=taggit_autosuggest.managers.TaggableManager(blank=True, help_text='A comma-separated list of tags.', through='taggit.TaggedItem', to='taggit.Tag', verbose_name='Etiquetas'),
        ),
    ]
| [
"noreply@github.com"
] | PyAr.noreply@github.com |
8ff4091acd023959002085d9fb24e56e3c9a9a57 | 21e724b3d1fd4a5b388eb65226e08849bf3acb82 | /tmb/migrations/0004_auto_20200906_1638.py | 45c444b2f4682809e31fbce2da187ec223c739a9 | [] | no_license | rodrigues87/alimentostaco | 671f55ec168199d0c287059b932584941f22642e | a3737f0d565fd8cf6e47b2e02bd4a6e84491570f | refs/heads/master | 2022-12-14T21:09:26.549994 | 2020-09-12T06:55:07 | 2020-09-12T06:55:07 | 276,987,036 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 524 | py | # Generated by Django 3.0.8 on 2020-09-06 19:38
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration (tmb app, 0004): repoints the
    Tmb.atividade foreign key at atividades.Atividade with CASCADE delete."""

    dependencies = [
        ('atividades', '0002_auto_20200906_1632'),
        ('tmb', '0003_auto_20200906_1631'),
    ]

    operations = [
        migrations.AlterField(
            model_name='tmb',
            name='atividade',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='atividades.Atividade'),
        ),
    ]
| [
"pedroh.mix@gmail.com"
] | pedroh.mix@gmail.com |
54def98eca301b938e8e42ac66354c331b517118 | 7f66c66eb82b480e8a23ecbfb8613aae02cb50f7 | /tests/core/web3-module/test_conversions.py | fd69882d554390c5af6349c753eed52982fc4150 | [
"MIT"
] | permissive | y19818/web3.py | 03ddedcfdbd4dde2c1a458b31f5e796509b3c7c6 | 32a85a287ab63220d1e0c06d77be74de595ff02f | refs/heads/master | 2021-06-25T00:30:50.312173 | 2019-12-02T08:21:40 | 2019-12-02T08:21:40 | 225,276,093 | 0 | 0 | MIT | 2019-12-02T03:20:47 | 2019-12-02T03:20:47 | null | UTF-8 | Python | false | false | 6,491 | py | # coding=utf-8
import pytest
from hexbytes import (
HexBytes,
)
from web3 import Web3
from web3.datastructures import (
AttributeDict,
)
# Web3.toBytes with a positional value: bytes pass through, ints become
# minimal big-endian bytes, booleans coerce via their int value.
@pytest.mark.parametrize(
    'val, expected',
    (
        (b'\x01', b'\x01'),
        (b'\xff', b'\xff'),
        (b'\x00', b'\x00'),
        (0x1, b'\x01'),
        (0x0001, b'\x01'),
        (0xFF, b'\xff'),
        (0, b'\x00'),
        (256, b'\x01\x00'),
        (True, b'\x01'),
        (False, b'\x00'),
    ),
)
def test_to_bytes_primitive(val, expected):
    assert Web3.toBytes(val) == expected


# Web3.toBytes(hexstr=...): '0x' prefix optional; odd nibble counts are
# left-padded; '0x' alone yields empty bytes.
@pytest.mark.parametrize(
    'val, expected',
    (
        ('0x', b''),
        ('0x0', b'\x00'),
        ('0x1', b'\x01'),
        ('0', b'\x00'),
        ('1', b'\x01'),
        ('0xFF', b'\xff'),
        ('0x100', b'\x01\x00'),
        ('0x0000', b'\x00\x00'),
        ('0000', b'\x00\x00'),
    ),
)
def test_to_bytes_hexstr(val, expected):
    assert Web3.toBytes(hexstr=val) == expected


# Web3.toBytes(text=...): text is UTF-8 encoded.
@pytest.mark.parametrize(
    'val, expected',
    (
        ('cowmö', b'cowm\xc3\xb6'),
        ('', b''),
    ),
)
def test_to_bytes_text(val, expected):
    assert Web3.toBytes(text=val) == expected


def test_to_text_identity():
    # text= input is already text, so it passes through unchanged.
    assert Web3.toText(text='pass-through') == 'pass-through'


# Web3.toText positional: bytes, hex strings, and ints all decode as UTF-8.
@pytest.mark.parametrize(
    'val, expected',
    (
        (b'', ''),
        ('0x', ''),
        (b'cowm\xc3\xb6', 'cowmö'),
        ('0x636f776dc3b6', 'cowmö'),
        (0x636f776dc3b6, 'cowmö'),
        ('0xa', '\n'),
    ),
)
def test_to_text(val, expected):
    assert Web3.toText(val) == expected


# Web3.toText(hexstr=...): '0x' prefix optional.
@pytest.mark.parametrize(
    'val, expected',
    (
        ('0x', ''),
        ('0xa', '\n'),
        ('0x636f776dc3b6', 'cowmö'),
        ('636f776dc3b6', 'cowmö'),
    ),
)
def test_to_text_hexstr(val, expected):
    assert Web3.toText(hexstr=val) == expected
# Web3.toInt positional: bytes are big-endian; booleans coerce; strings are
# rejected (the expected TypeError is asserted via pytest.raises below).
@pytest.mark.parametrize(
    'val, expected',
    (
        (b'\x00', 0),
        (b'\x01', 1),
        (b'\x00\x01', 1),
        (b'\x01\x00', 256),
        (True, 1),
        (False, 0),
        ('255', TypeError),
        ('-1', TypeError),
        ('0x0', TypeError),
        ('0x1', TypeError),
    ),
)
def test_to_int(val, expected):
    # An exception class in `expected` marks an error case.
    if isinstance(expected, type):
        with pytest.raises(expected):
            Web3.toInt(val)
    else:
        assert Web3.toInt(val) == expected


# Web3.toInt(text=...): decimal strings only; hex/float forms raise.
@pytest.mark.parametrize(
    'val, expected',
    (
        ('0', 0),
        ('-1', -1),
        ('255', 255),
        ('0x0', ValueError),
        ('0x1', ValueError),
        ('1.1', ValueError),
        ('a', ValueError),
    ),
)
def test_to_int_text(val, expected):
    if isinstance(expected, type):
        with pytest.raises(expected):
            Web3.toInt(text=val)
    else:
        assert Web3.toInt(text=val) == expected


# Web3.toInt(hexstr=...): parsed base-16, '0x' prefix optional.
@pytest.mark.parametrize(
    'val, expected',
    (
        ('0x0', 0),
        ('0x1', 1),
        ('0x01', 1),
        ('0x10', 16),
        ('0', 0),
        ('1', 1),
        ('01', 1),
        ('10', 16),
    ),
)
def test_to_int_hexstr(val, expected):
    assert Web3.toInt(hexstr=val) == expected
# Web3.toHex positional: bytes keep their full width ('0x00'), while ints
# are rendered minimally ('0x0'); booleans coerce via int.
@pytest.mark.parametrize(
    'val, expected',
    (
        (b'\x00', '0x00'),
        (b'\x01', '0x01'),
        (b'\x10', '0x10'),
        (b'\x01\x00', '0x0100'),
        (b'\x00\x0F', '0x000f'),
        (b'', '0x'),
        (0, '0x0'),
        (1, '0x1'),
        (16, '0x10'),
        (256, '0x100'),
        (0x0, '0x0'),
        (0x0F, '0xf'),
        (False, '0x0'),
        (True, '0x1'),
    ),
)
def test_to_hex(val, expected):
    assert Web3.toHex(val) == expected


# Web3.toHex(text=...): UTF-8 encode, then hex-encode.
@pytest.mark.parametrize(
    'val, expected',
    (
        ('', '0x'),
        ('cowmö', '0x636f776dc3b6'),
    ),
)
def test_to_hex_text(val, expected):
    assert Web3.toHex(text=val) == expected


# Web3.toHex(hexstr=...): normalizes case and prefix but preserves width.
@pytest.mark.parametrize(
    'val, expected',
    (
        ('0x0', '0x0'),
        ('0x1', '0x1'),
        ('0x0001', '0x0001'),
        ('0x10', '0x10'),
        ('0xF', '0xf'),
        ('F', '0xf'),
    ),
)
def test_to_hex_cleanup_only(val, expected):
    assert Web3.toHex(hexstr=val) == expected
# Web3.toJSON: AttributeDicts serialize like plain dicts and HexBytes
# values serialize as 0x-prefixed hex strings.
@pytest.mark.parametrize(
    'val, expected',
    (
        (AttributeDict({'one': HexBytes('0x1')}), '{"one": "0x01"}'),
        (AttributeDict({'two': HexBytes(2)}), '{"two": "0x02"}'),
        (AttributeDict({
            'three': AttributeDict({
                'four': 4
            })
        }), '{"three": {"four": 4}}'),
        ({'three': 3}, '{"three": 3}'),
    ),
)
def test_to_json(val, expected):
    assert Web3.toJSON(val) == expected


# End-to-end check on a realistic transaction AttributeDict: every HexBytes
# field round-trips to its hex form, scalars stay as JSON numbers/strings.
@pytest.mark.parametrize(
    'tx, expected',
    (
        (
            AttributeDict({
                'blockHash': HexBytes(
                    '0x849044202a39ae36888481f90d62c3826bca8269c2716d7a38696b4f45e61d83'
                ),
                'blockNumber': 6928809,
                'from': '0xDEA141eF43A2fdF4e795adA55958DAf8ef5FA619',
                'gas': 21000,
                'gasPrice': 19110000000,
                'hash': HexBytes(
                    '0x1ccddd19830e998d7cf4d921b19fafd5021c9d4c4ba29680b66fb535624940fc'
                ),
                'input': '0x',
                'nonce': 5522,
                'r': HexBytes('0x71ef3eed6242230a219d9dc7737cb5a3a16059708ee322e96b8c5774105b9b00'),
                's': HexBytes('0x48a076afe10b4e1ae82ef82b747e9be64e0bbb1cc90e173db8d53e7baba8ac46'),
                'to': '0x3a84E09D30476305Eda6b2DA2a4e199E2Dd1bf79',
                'transactionIndex': 8,
                'v': 27,
                'value': 2907000000000000
            }),
            '{"blockHash": "0x849044202a39ae36888481f90d62c3826bca8269c2716d7a38696b4f45e61d83", "blockNumber": 6928809, "from": "0xDEA141eF43A2fdF4e795adA55958DAf8ef5FA619", "gas": 21000, "gasPrice": 19110000000, "hash": "0x1ccddd19830e998d7cf4d921b19fafd5021c9d4c4ba29680b66fb535624940fc", "input": "0x", "nonce": 5522, "r": "0x71ef3eed6242230a219d9dc7737cb5a3a16059708ee322e96b8c5774105b9b00", "s": "0x48a076afe10b4e1ae82ef82b747e9be64e0bbb1cc90e173db8d53e7baba8ac46", "to": "0x3a84E09D30476305Eda6b2DA2a4e199E2Dd1bf79", "transactionIndex": 8, "v": 27, "value": 2907000000000000}'  # noqa: E501
        ),
    ),
)
def test_to_json_with_transaction(tx, expected):
    assert Web3.toJSON(tx) == expected
| [
"y19818@gmail.com"
] | y19818@gmail.com |
68bf89d2e8d1b965f3e84c1955eb27a602aea8e1 | aee3816964945dce66298dce07ed68fa666d7e8b | /py_gnome/tests/unit_tests/test_utilities/test_weathering_algorithms.py | 8eed7a2b71181f966a074ad8f3a97e345d24beef | [] | no_license | liuy0813/PyGnome | c0f06b74b921332a69847c4fa51f83b6cb2412e5 | 9d4a3f0eb00c9dbe18af1b76c5f326501654c180 | refs/heads/master | 2021-01-16T17:44:19.628127 | 2016-02-12T17:12:04 | 2016-02-12T17:12:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,783 | py | #!/usr/bin/env python
import numpy as np
from gnome.utilities.weathering import (LeeHuibers,
Riazi,
Stokes,
PiersonMoskowitz,
DelvigneSweeney,
DingFarmer,
)
# Regression tests pinning the numeric outputs of the gnome weathering
# correlations.  Expected values are reference results — presumably computed
# against the published correlations; confirm before changing tolerances.
def test_lee_huibers():
    # Partition coefficient from molecular weight and density.
    assert np.isclose(LeeHuibers.partition_coeff(92.1, 866.0), 1000)


def test_riazi():
    # Riazi correlations as a function of boiling point (K).
    assert np.isclose(Riazi.mol_wt(300.0), 67.44, atol=0.001)
    assert np.isclose(Riazi.density(300.0), 669.43, atol=0.001)
    assert np.isclose(Riazi.molar_volume(300.0), 0.1, atol=0.001)

    assert np.isclose(Riazi.mol_wt(400.0), 113.756, atol=0.001)
    assert np.isclose(Riazi.density(400.0), 736.8, atol=0.001)
    assert np.isclose(Riazi.molar_volume(400.0), 0.154, atol=0.001)


def test_stokes():
    # Rise velocity scales linearly with the density difference here.
    assert np.isclose(Stokes.water_phase_xfer_velocity(0.1, 400.0 / 1e6),
                      0.00872)
    assert np.isclose(Stokes.water_phase_xfer_velocity(0.2, 400.0 / 1e6),
                      0.01744)
    assert np.isclose(Stokes.water_phase_xfer_velocity(0.3, 400.0 / 1e6),
                      0.02616)


def test_pierson_moskowitz():
    # Fully-developed sea state at 10 m/s wind.
    assert np.isclose(PiersonMoskowitz.significant_wave_height(10.0), 2.24337)
    assert np.isclose(PiersonMoskowitz.peak_wave_period(10.0), 7.5)


def test_delvigne_sweeney():
    wind_speed = 10.0
    T_w = PiersonMoskowitz.peak_wave_period(wind_speed)

    assert np.isclose(DelvigneSweeney.breaking_waves_frac(wind_speed, 10.0),
                      0.016)
    assert np.isclose(DelvigneSweeney.breaking_waves_frac(wind_speed, T_w),
                      0.0213333)


def test_ding_farmer():
    wind_speed = 10.0  # m/s
    rdelta = 0.2  # oil/water density difference
    droplet_diameter = 0.0002  # 200 microns

    # Derive the wave state and droplet dynamics inputs from the other
    # correlations, then check the Ding & Farmer residence quantities.
    wave_height = PiersonMoskowitz.significant_wave_height(wind_speed)
    wave_period = PiersonMoskowitz.peak_wave_period(wind_speed)

    f_bw = DelvigneSweeney.breaking_waves_frac(wind_speed, wave_period)
    k_w = Stokes.water_phase_xfer_velocity(rdelta, droplet_diameter)

    assert np.isclose(DingFarmer.calm_between_wave_breaks(0.5, 10),
                      15.0)
    assert np.isclose(DingFarmer.calm_between_wave_breaks(f_bw, wave_period),
                      347.8125)

    assert np.isclose(DingFarmer.refloat_time(wave_height, k_w),
                      385.90177)

    assert np.isclose(DingFarmer.water_column_time_fraction(f_bw,
                                                            wave_period,
                                                            wave_height,
                                                            k_w),
                      1.1095)
"james.makela@noaa.gov"
] | james.makela@noaa.gov |
0db805146c6008d8160d3608f70456fbac618d08 | 5557ec6b8eebff21c22620e78554edf881cb44d3 | /ssseg/cfgs/ccnet/cfgs_cityscapes_resnet101os16.py | cdb6203821f4bb576b8203f3649b1b1e0e055e2c | [
"MIT"
] | permissive | dongzhang89/sssegmentation | 06a515f789e796f6fe5b28a8d78ebd921b08e960 | 65b547c2d2b72a51896ca0f1bcf56ea78ed0b89f | refs/heads/main | 2023-02-09T02:01:47.865163 | 2021-01-05T13:47:59 | 2021-01-05T13:47:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,351 | py | '''define the config file for cityscapes and resnet101os16'''
from .base_cfg import *
# Config overrides for CCNet / ResNet-101 (output stride 16) on Cityscapes.
# Each *_CFG below is star-imported from base_cfg and copied before mutation
# so the base dicts stay untouched; the update order is intentional.
# modify dataset config
DATASET_CFG = DATASET_CFG.copy()
DATASET_CFG['train'].update(
    {
        'type': 'cityscapes',
        'rootdir': 'data/CityScapes',
        'aug_opts': [('Resize', {'output_size': (2048, 1024), 'keep_ratio': True, 'scale_range': (0.5, 2.0)}),
                     ('RandomCrop', {'crop_size': (512, 1024), 'one_category_max_ratio': 0.75}),
                     ('RandomFlip', {'flip_prob': 0.5}),
                     ('PhotoMetricDistortion', {}),
                     ('Normalize', {'mean': [123.675, 116.28, 103.53], 'std': [58.395, 57.12, 57.375]}),
                     ('ToTensor', {}),
                     ('Padding', {'output_size': (512, 1024), 'data_type': 'tensor'}),]
    }
)
DATASET_CFG['test'].update(
    {
        'type': 'cityscapes',
        'rootdir': 'data/CityScapes',
        'aug_opts': [('Resize', {'output_size': (2048, 1024), 'keep_ratio': True, 'scale_range': None}),
                     ('Normalize', {'mean': [123.675, 116.28, 103.53], 'std': [58.395, 57.12, 57.375]}),
                     ('ToTensor', {}),],
    }
)
# modify dataloader config
DATALOADER_CFG = DATALOADER_CFG.copy()
DATALOADER_CFG['train'].update(
    {
        'batch_size': 8,
    }
)
# modify optimizer config
OPTIMIZER_CFG = OPTIMIZER_CFG.copy()
OPTIMIZER_CFG.update(
    {
        'max_epochs': 220
    }
)
# modify losses config
LOSSES_CFG = LOSSES_CFG.copy()
# modify model config
MODEL_CFG = MODEL_CFG.copy()
MODEL_CFG.update(
    {
        'num_classes': 19,
        'backbone': {
            'type': 'resnet101',
            'series': 'resnet',
            'pretrained': True,
            'outstride': 16,
            'use_stem': True
        },
    }
)
# modify inference config
INFERENCE_CFG = INFERENCE_CFG.copy()
# modify common config
COMMON_CFG = COMMON_CFG.copy()
COMMON_CFG['train'].update(
    {
        'backupdir': 'ccnet_resnet101os16_cityscapes_train',
        'logfilepath': 'ccnet_resnet101os16_cityscapes_train/train.log',
    }
)
COMMON_CFG['test'].update(
    {
        'backupdir': 'ccnet_resnet101os16_cityscapes_test',
        'logfilepath': 'ccnet_resnet101os16_cityscapes_test/test.log',
        'resultsavepath': 'ccnet_resnet101os16_cityscapes_test/ccnet_resnet101os16_cityscapes_results.pkl'
    }
)
"1159254961@qq.com"
] | 1159254961@qq.com |
380fb77f10f06f5c69266682378e83a5ec0e8306 | 7d326c33aa4bfe39962e802b981556369b893e8d | /sports/migrations/0001_initial.py | af928980efc5a9995845ea7e72fd78178bc84914 | [
"Apache-2.0"
] | permissive | Yojanpardo/scout_sport | 2d6d1406e1e4687a51b25c204b47a53f8aa1dc8d | 28b031bcb5444e4e3a3e5a8baa04c7ecfcaeab70 | refs/heads/master | 2020-03-28T08:25:07.279438 | 2018-09-09T21:46:53 | 2018-09-09T21:46:53 | 147,963,890 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 551 | py | # Generated by Django 2.1.1 on 2018-09-08 22:17
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial Django migration (sports app): creates the
    Sport table."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Sport',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
                ('description', models.TextField(max_length=255)),
            ],
        ),
    ]
| [
"yojan.pardo@gmail.com"
] | yojan.pardo@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.