blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
09a981fcec8dd6271c17d600235d3a7be9caba06 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_325/ch42_2020_03_26_19_44_19_862731.py | 030854db3859bd46deaf4ce4de9d234c5a682e3a | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 194 | py | palavra = input("Digite sua palavra: ")
# Collect letters from words read in a loop until the user types "fim" (end).
lista = []
i = 0
# NOTE(review): `i` grows once per word, so `palavra[i]` inspects a different
# position of each successive word and will raise IndexError as soon as a word
# is shorter than i+1 characters; the terminating word "fim" is also indexed
# before the loop condition is re-checked. Presumably `palavra[0]` (or counting
# all "a"s) was intended — TODO confirm against the exercise statement.
while palavra != "fim":
    palavra = input("Digite sua palavra: ")
    if palavra[i] == "a":
        lista.append(palavra[i])
    i += 1
"you@example.com"
] | you@example.com |
2cc97c38877aae8391444fa04b9a9e8252833132 | 2a1b8a671aceda6bc446f8ce26400aa84fa444a6 | /Packs/CommonScripts/Scripts/ExtractHTMLTables/ExtractHTMLTables.py | 992e12ef7d173e3f43d201634fe78157ba9c1345 | [
"MIT"
] | permissive | demisto/content | 6d4722d46f0ff0beea2748e9f7de585bf91a78b4 | 890def5a0e0ae8d6eaa538148249ddbc851dbb6b | refs/heads/master | 2023-09-04T00:02:25.618032 | 2023-09-03T21:56:22 | 2023-09-03T21:56:22 | 60,525,392 | 1,023 | 1,921 | MIT | 2023-09-14T20:55:24 | 2016-06-06T12:17:02 | Python | UTF-8 | Python | false | false | 2,217 | py | import demistomock as demisto # noqa: F401
from bs4 import BeautifulSoup
from CommonServerPython import * # noqa: F401
def extract_html_table(html, indexes):
    """Extract ``<table>`` elements from an HTML string into Python structures.

    Args:
        html: the HTML document (or fragment) to parse.
        indexes: iterable of table positions (ints or numeric strings) to keep;
            an empty iterable keeps every table.

    Returns:
        A Demisto entry dict (with ``HTMLTables`` in ``EntryContext``) when at
        least one table was extracted, otherwise the plain string
        ``'Did not find tables in HTML.'``.

    Each table becomes, depending on its shape:
    - a list of strings (single-column tables),
    - a dict of key/value pairs (two columns, no ``<th>`` headers), or
    - a list of row dicts keyed by header text or ``cellN``.
    """
    soup = BeautifulSoup(html, 'html.parser')
    tables = []
    for index, tab in enumerate(soup.find_all('table')):
        # Skip tables the caller did not ask for; indexes may hold ints or strings.
        if len(indexes) > 0 and index not in indexes and str(index) not in indexes:
            continue
        table = []
        headers = []
        # Collect header cells (from anywhere in the table) to use as row keys.
        for th in tab.find_all('th'):
            headers.append(th.text)
        for tr in tab.find_all('tr'):
            tds = tr.find_all('td')
            # Rows without <td> cells are header rows — nothing to extract.
            if len(tds) > 0:
                # Single value in a table - just create an array of strings ignoring header
                if len(tds) == 1:
                    table.append(tds[0].text)
                # If there are 2 columns and no headers, treat as key-value (might override values if same key in first column)
                elif len(tds) == 2 and len(headers) == 0:
                    # Switch the accumulator from list to dict on first use; any
                    # single-value rows appended earlier are discarded (pre-existing behavior).
                    if isinstance(table, list):
                        table = {}  # type: ignore
                    table[tds[0].text] = tds[1].text
                else:
                    row = {}
                    if len(headers) > 0:
                        # NOTE(review): assumes every row has at most len(headers)
                        # cells; extra cells would raise IndexError — confirm inputs.
                        for i, td in enumerate(tds):
                            row[headers[i]] = td.text
                    else:
                        for i, td in enumerate(tds):
                            row['cell' + str(i)] = td.text
                    table.append(row)
        if len(table) > 0:
            tables.append(table)
    if len(tables) > 0:
        return {
            'Type': entryTypes['note'],
            'Contents': 'Found {} tables in HTML.'.format(len(tables)),
            'ContentsFormat': formats['text'],
            'EntryContext': {'HTMLTables': tables if len(tables) > 1 else tables[0]}
        }
    else:
        return 'Did not find tables in HTML.'
def main():
    """Script entry point: pull args from the Demisto context and post results."""
    html = demisto.getArg('html')
    # argToList (from CommonServerPython) normalizes a CSV string or a list.
    indexes = argToList(demisto.getArg('indexes'))
    demisto.results(extract_html_table(html, indexes))
# NOTE(review): 'builtin'/'builtins' presumably cover execution inside the
# Demisto script runner, which execs scripts under those module names.
if __name__ in ['__main__', 'builtin', 'builtins']:
    main()
| [
"noreply@github.com"
] | demisto.noreply@github.com |
d6309c0542fa1789c852ee9441c523c99edc7a90 | bf57ccabb586e624ec33f0aa2cdce298c99459e1 | /python/lsst/meas/extensions/simpleShape/__init__.py | 185b138198bdc79b230f2ed8bc985197b1c91488 | [] | no_license | jonathansick-shadow/meas_extensions_simpleShape | 3ac60daef4f5f878cd13bb341630af1ea3d2934f | 556670ae1ca6d4c6fbbef52c4837fafb245c00c3 | refs/heads/master | 2020-04-05T23:09:11.347445 | 2014-06-20T22:43:32 | 2014-06-20T22:43:32 | 54,810,102 | 0 | 0 | null | 2016-03-27T03:00:37 | 2016-03-27T03:00:36 | null | UTF-8 | Python | false | false | 214 | py | from .simpleShapeLib import *
from .version import * # generated by sconsUtils
from lsst.meas.algorithms.algorithmRegistry import AlgorithmRegistry
# Register the SimpleShapeControl algorithm under the name "shape.simple" so
# measurement tasks can enable/configure it by that name.
AlgorithmRegistry.register("shape.simple", SimpleShapeControl)
| [
"jbosch@astro.princeton.edu"
] | jbosch@astro.princeton.edu |
b2f419d19da9a9fcdc4e997e3782e947b7dfb813 | 46fb9eea28f92c637273b2065cb8c38abe5a2007 | /tbjh/jupyterhub_config.py | 746ca8fb6be519a7ce3ad49cc49e9b5d278a5c76 | [
"BSD-3-Clause"
] | permissive | yuvipanda/the-batchiest-jupyterhub | 0337838e2f094bcee21c6a8a0a1ed261c4e3970d | c3d34f55f844f307850e879ce8e464e7f367d1e3 | refs/heads/master | 2022-10-02T20:23:07.252736 | 2020-06-04T13:04:21 | 2020-06-04T13:04:21 | 264,124,457 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,237 | py | import json
import pwd
import os
import pathlib
import asyncio
import subprocess
from glob import glob
from jupyterhub_traefik_proxy import TraefikTomlProxy
from tbjh import constants
# Don't kill servers when JupyterHub restarts
c.JupyterHub.cleanup_servers = False
# Traefik should be started by systemd
c.JupyterHub.proxy_class = TraefikTomlProxy
c.TraefikTomlProxy.should_start = False
# Load the traefik API credentials; the JSON file must carry a 'version'
# marker equal to 'v1' plus 'username' and 'password' keys.
with open(constants.TRAEFIK_CREDS_PATH) as f:
    creds = json.load(f)
if 'version' not in creds or creds['version'] != 'v1':
    # FIXME: Better error message
    raise ValueError("Invalid traefik-creds.json file")
c.TraefikTomlProxy.traefik_api_username = creds['username']
c.TraefikTomlProxy.traefik_api_password = creds['password']
async def check_call_process(cmd):
    """
    Run *cmd* as a subprocess and raise CalledProcessError on non-zero exit.

    Both stdout and stderr are captured and attached to the raised error.
    """
    process = await asyncio.create_subprocess_exec(
        *cmd,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
    )
    captured_out, captured_err = await process.communicate()
    if process.returncode == 0:
        return
    raise subprocess.CalledProcessError(
        returncode=process.returncode,
        cmd=cmd,
        output=captured_out,
        stderr=captured_err,
    )
# Make sure there's a conda install
async def pre_spawn_hook(spawner):
    """Ensure the spawning user has a per-user conda setup before their server starts."""
    user_home = pathlib.Path(pwd.getpwnam(spawner.user.name).pw_dir)
    conda_dir = user_home / 'conda'
    # An existing 'conda' directory is treated as a completed install;
    # more sophisticated checks could be added later.
    if conda_dir.exists():
        return
    # Bootstrap miniforge into the user's home directory.
    # FIXME: Show this as progress in spawn call
    await check_call_process([
        '/bin/sh',
        str(constants.MINIFORGE_INSTALLER_PATH),
        '-b', '-p', str(conda_dir),
    ])
    # Create the notebook environment from the shipped environment.yml.
    await check_call_process([
        str(user_home / 'conda/bin/conda'),
        'env', 'create',
        '-f', str(constants.NOTEBOOK_ENVIRONMENT_YML),
    ])
# Install the per-spawn conda bootstrap hook defined above.
c.Spawner.pre_spawn_hook = pre_spawn_hook
# Load arbitrary .py config files if they exist.
# This is our escape hatch
# NOTE(review): load_subconfig is not defined in this file — it is presumably
# injected into the namespace by the traitlets config loader; confirm.
extra_configs = sorted(glob(os.path.join(constants.JUPYTERHUB_CONFIG_D_DIR, '*.py')))
for ec in extra_configs:
    load_subconfig(ec)
"yuvipanda@gmail.com"
] | yuvipanda@gmail.com |
f2149e2231b00c5ed68eeabea58a9727811fe6b8 | a0eb6744e6f7f509b96d21f0bc8b3f8387f6861c | /notebook/os_stat.py | 2ac10452705a12d3368f8ecb6e8cc6cfde8f5ca3 | [
"MIT"
] | permissive | nkmk/python-snippets | a6c66bdf999502e52f4795a3074ced63bf440817 | f9dd286a9cf93f474e20371f8fffc4732cb3c4d5 | refs/heads/master | 2023-08-03T04:20:05.606293 | 2023-07-26T13:21:11 | 2023-07-26T13:21:11 | 98,900,570 | 253 | 77 | MIT | 2020-10-25T01:12:53 | 2017-07-31T14:54:47 | Jupyter Notebook | UTF-8 | Python | false | false | 2,890 | py | import os
import pathlib
import datetime
import time
import platform
# Demo: create a file, wait, rewrite it, then inspect its timestamps through
# pathlib.Path.stat(), os.stat(), and the os.path.get*time() helpers.
# The inline `#` comments show sample outputs from one run.
p = pathlib.Path('data/temp/test.txt')
p.write_text('test')
time.sleep(10)
p.write_text('update')
# 6
print(p.stat())
# os.stat_result(st_mode=33188, st_ino=8728494137, st_dev=16777220, st_nlink=1, st_uid=501, st_gid=20, st_size=6, st_atime=1549094615, st_mtime=1549094615, st_ctime=1549094615)
print(type(p.stat()))
# <class 'os.stat_result'>
print(os.stat('data/temp/test.txt'))
# os.stat_result(st_mode=33188, st_ino=8728494137, st_dev=16777220, st_nlink=1, st_uid=501, st_gid=20, st_size=6, st_atime=1549094615, st_mtime=1549094615, st_ctime=1549094615)
print(type(os.stat('data/temp/test.txt')))
# <class 'os.stat_result'>
print(os.stat(p))
# os.stat_result(st_mode=33188, st_ino=8728494137, st_dev=16777220, st_nlink=1, st_uid=501, st_gid=20, st_size=6, st_atime=1549094615, st_mtime=1549094615, st_ctime=1549094615)
print(type(os.stat(p)))
# <class 'os.stat_result'>
# All three spellings return the same stat result.
print(p.stat() == os.stat('data/temp/test.txt') == os.stat(p))
# True
st = p.stat()
# Access/modify/change times as float seconds since the epoch.
print(st.st_atime)
# 1549094615.972488
print(st.st_mtime)
# 1549094615.9723485
print(st.st_ctime)
# 1549094615.9723485
# st_birthtime is not available on all platforms — TODO confirm on Linux.
print(st.st_birthtime)
# 1549094605.9650702
print(type(st.st_ctime))
# <class 'float'>
# *_ns variants give integer nanoseconds.
print(st.st_ctime_ns)
# 1549094615972348510
print(type(st.st_ctime_ns))
# <class 'int'>
print(os.path.getatime('data/temp/test.txt'))
# 1549094615.972488
print(os.path.getmtime('data/temp/test.txt'))
# 1549094615.9723485
print(os.path.getctime('data/temp/test.txt'))
# 1549094615.9723485
print(os.path.getctime(p))
# 1549094615.9723485
print(os.path.getctime(p) == p.stat().st_ctime)
# True
# Convert a timestamp to datetime for formatting.
dt = datetime.datetime.fromtimestamp(p.stat().st_ctime)
print(dt)
# 2019-02-02 17:03:35.972348
print(type(dt))
# <class 'datetime.datetime'>
print(dt.strftime('%Y年%m月%d日 %H:%M:%S'))
# 2019年02月02日 17:03:35
print(dt.isoformat())
# 2019-02-02T17:03:35.972348
print(os.path.getmtime('data/temp/test.txt'))
# 1549094615.9723485
print(p.stat().st_mtime)
# 1549094615.9723485
print(datetime.datetime.fromtimestamp(p.stat().st_mtime))
# 2019-02-02 17:03:35.972348
def creation_date(path_to_file):
    """
    Return the file's creation time as float seconds since the epoch,
    falling back to the last-modified time where creation time is unknown.
    See http://stackoverflow.com/a/39501288/1709587 for explanation.
    """
    if platform.system() == 'Windows':
        return os.path.getctime(path_to_file)
    # On macOS the stat result exposes st_birthtime (true creation time);
    # Linux stat results have no such field, so use the modification time.
    info = os.stat(path_to_file)
    return getattr(info, 'st_birthtime', info.st_mtime)
# Demo: creation time of the file created earlier, raw and as a datetime.
print(creation_date(p))
# 1549094605.9650702
print(datetime.datetime.fromtimestamp(creation_date(p)))
# 2019-02-02 17:03:25.965070
| [
"nkmk.on@gmail.com"
] | nkmk.on@gmail.com |
858748ae013e2904d796045042e9433ef8f91d9c | b47a619f6ccd0f76ccce989e62d0c963a1c14ab4 | /Python/String general/Remove All Adjacent Duplicates In String.py | f29cbb7022b54f28f8b3c440b9a95d36bd92b889 | [] | no_license | GreatTwang/lccc_solution | 0799d19097549ef3c9beeebf6dc9960db9f9eb54 | e75899634f45b0d60f8b3cb854ab9e503d676a57 | refs/heads/master | 2020-07-07T02:45:18.984502 | 2019-10-09T04:53:35 | 2019-10-09T04:53:35 | 203,219,848 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 340 | py | # O(N) O(N)
# if c==stack top one then pop, else push into stack
class Solution:
    def removeDuplicates(self, S: str) -> str:
        """Repeatedly delete adjacent equal pairs; O(len(S)) time and space.

        A stack holds the processed prefix: each character either cancels the
        stack top (forming an adjacent duplicate pair) or is pushed onto it.
        Fixes a SyntaxError in the original: the return line had an
        unbalanced closing parenthesis.
        """
        stack = []
        for c in S:
            if stack and c == stack[-1]:
                stack.pop()
            else:
                stack.append(c)
        return ''.join(stack)
| [
"tianwang@bu.edu"
] | tianwang@bu.edu |
c8b5bad84514b74417ff2eb13f76d4404db322ca | fe096ed06c34ae3adf958760886dd5f2fc64fa90 | /Heap (Priority Queue)/kClosestPointsToOrigin.py | 8b49044cf7e3c6b3a888bdad5e1abcf7473f227d | [] | no_license | harshmalviya7/LeetCode_Coding_Questions | c9d8a93f4a5664dcf57098cd58f3f1d95667b0c0 | 47edb51e55e390861ed539972d8bf66b41b4cdd7 | refs/heads/master | 2023-08-23T01:09:40.110710 | 2021-10-21T12:53:36 | 2021-10-21T12:53:36 | 373,072,675 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 543 | py | # 973. K Closest Points to Origin
# https://leetcode.com/problems/k-closest-points-to-origin/
import heapq
class Solution:
    def kClosest(self, points: List[List[int]], k: int) -> List[List[int]]:
        """Return the k points closest to the origin (in any order).

        Maintains a max-heap (keyed by negated squared distance) of size at
        most k, giving O(n log k) time and O(k) extra space — better than
        sorting all n points when k << n. Removed an unused local (`out`)
        and dead commented-out code from the original.
        """
        heap = []
        for point in points:
            dist = point[0] * point[0] + point[1] * point[1]
            # Push the candidate, then evict the farthest entry when we
            # exceed k items (the most negative key pops first).
            heapq.heappush(heap, (-dist, point))
            if len(heap) > k:
                heapq.heappop(heap)
        return [point for _, point in heap]
| [
"harsh.malviya.9869@gmail.com"
] | harsh.malviya.9869@gmail.com |
834321d61bdd025df9f0b9f1bf249d10cdfcb5b4 | c28ac3e0dd887e25d40e019dde062e73cb4a433c | /scripts/TargetTaxaGenes/TargetTaxaGenes.py | 4c96216dec48f4715b883e77e28fb2b29d58b027 | [] | no_license | ZhikunWu/Bioinformatic-resources | 16abc66f19d95dd14c11b2a453f7b3df0ed1fa16 | 2695dd0e249a49b948ac56cd71574b84c24cbf8a | refs/heads/master | 2021-06-01T22:29:40.731595 | 2020-10-10T01:41:07 | 2020-10-10T01:41:07 | 134,114,964 | 10 | 10 | null | null | null | null | UTF-8 | Python | false | false | 1,679 | py | #!/usr/bin/env python
import collections
import argparse
#usage: python TargetTaxaGenes.py --gene /home/wzk/Project/C128/NCyc/representive.faa.annotation.xls --taxonomy /home/wzk/Project/C128/NR/representive.faa.diamond_taxonomy_species.txt --out representive.faa.diamond_taxonomy_species_NCyc.txt
def get_NCyc_gene(gene_file):
    """Parse a tab-separated annotation table into a {gene: target} dict.

    The first line is treated as a header and skipped; for each remaining
    line only the first two columns (gene id, target gene) are used.
    Improvements over the original: the file handle is closed via a context
    manager even on error, and blank/malformed lines are skipped instead of
    raising IndexError.
    """
    genes = {}
    with open(gene_file, "r") as in_h:
        in_h.readline()  # discard header line
        for line in in_h:
            lines = line.strip().split("\t")
            if len(lines) < 2:
                continue  # skip blank or malformed lines
            genes[lines[0]] = lines[1]
    return genes
def taxonomy_gene(gene_file, taxonomy_file, out_file):
    """Write, per taxon, the sorted '|'-joined set of target genes.

    Args:
        gene_file: tab-separated gene annotation table (see get_NCyc_gene).
        taxonomy_file: tab-separated lines whose first column is a gene id
            and whose last column is the taxon name.
        out_file: output path; one "taxon<TAB>gene1|gene2|..." line per taxon,
            in first-seen taxon order.

    Improvement over the original: both file handles are managed with
    context managers so they are closed even when an exception occurs.
    """
    genes = get_NCyc_gene(gene_file)
    taxa_genes = collections.defaultdict(set)
    with open(taxonomy_file, "r") as in_h:
        for line in in_h:
            lines = line.strip().split("\t")
            gene = lines[0]
            taxa = lines[-1]
            # Only genes present in the annotation table contribute.
            if gene in genes:
                taxa_genes[taxa].add(genes[gene])
    with open(out_file, "w") as out_h:
        for taxon, gene_set in taxa_genes.items():
            out_h.write("%s\t%s\n" % (taxon, "|".join(sorted(gene_set))))
def main():
    """Command-line entry point: parse arguments and write the taxon→genes table."""
    parser = argparse.ArgumentParser(description="Get the genes of the taxonomy.")
    parser.add_argument("-g", "--gene", help="The file contain genes.")
    parser.add_argument("-t", "--taxonomy", help="The file contain gene and taxonomy.")
    parser.add_argument("-o","--out", help="The output file.")
    args = parser.parse_args()
    taxonomy_gene(args.gene, args.taxonomy, args.out)
if __name__ == "__main__":
    main()
| [
"598466208@qq.com"
] | 598466208@qq.com |
39b8024b1f674cfea0f2b7a50c585c89ddc5546a | 13a70bdc3ac997d0d6c839fe633deed3ca7fc5ab | /ch07-Linear_regrs/Ridge_regre.py | bbec67ae57bb14c87fff6654ab05b19258ddceb5 | [] | no_license | Y1ran/Machine-Learning-in-Action-Python3 | 5546a777d78aee6445da1621b2deaddb099ae6ef | 5aca5f9b865be449793e50ce32cba7c9b1ef286b | refs/heads/master | 2022-10-09T06:30:08.122526 | 2022-09-30T10:04:53 | 2022-09-30T10:04:53 | 139,533,418 | 403 | 233 | null | null | null | null | UTF-8 | Python | false | false | 1,279 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Jul 14 15:04:10 2018
@author: Administrator
"""
def ridgeRegres(xMat,yMat,lam=0.2):
    '''
    Ridge regression: solve w = (X^T X + lam*I)^-1 X^T y.
    @xMat: sample feature matrix (numpy matrix, rows = observations)
    @yMat: column matrix of target values
    @lam: penalty coefficient lambda, default 0.2
    Returns the weight column matrix, or None if the system is singular.
    '''
    # Gram matrix of the features.
    gram = xMat.T * xMat
    # Add the ridge penalty so the (possibly singular) Gram matrix becomes invertible.
    penalized = gram + eye(shape(xMat)[1]) * lam
    # A zero determinant means the matrix is still singular (e.g. lam == 0).
    if linalg.det(penalized) == 0.0:
        print('This matrix is singular,cannot do inverse')
        return
    # Closed-form ridge solution.
    return penalized.I * (xMat.T * yMat)
# Features must be standardized first so every feature carries equal weight.
def ridgeTest(xArr,yArr):
    '''
    Evaluate ridge regression weights over 30 exponentially spaced lambdas.
    @xArr: samples (rows = observations)
    @yArr: target values
    Returns a (30, n_features) array; row i holds the weights for lam = exp(i-10).
    '''
    xMat = mat(xArr)
    yMat = mat(yArr).T
    # Center the targets.
    yMat = yMat - mean(yMat, 0)
    # Standardize each feature: (x - mean) / variance.
    xMat = (xMat - mean(xMat, 0)) / var(xMat, 0)
    numTestpts = 30
    wMat = zeros((numTestpts, shape(xMat)[1]))
    # Lambda sweeps exp(-10) .. exp(19), one weight row per value.
    for i in range(numTestpts):
        wMat[i, :] = ridgeRegres(xMat, yMat, exp(i - 10)).T
    return wMat
"deanyuton@gmail.com"
] | deanyuton@gmail.com |
13a9675c88f2c319982ff7d6f346121a7255f7ed | 80b7f2a10506f70477d8720e229d7530da2eff5d | /ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/groupbucketdescstatlearnedinformation_c5c1fdcf0cd8750ead47c9919177d367.py | 6b312374bb89b93e31c3fff29a61dbf52b7037cc | [
"MIT"
] | permissive | OpenIxia/ixnetwork_restpy | 00fdc305901aa7e4b26e4000b133655e2d0e346a | c8ecc779421bffbc27c906c1ea51af3756d83398 | refs/heads/master | 2023-08-10T02:21:38.207252 | 2023-07-19T14:14:57 | 2023-07-19T14:14:57 | 174,170,555 | 26 | 16 | MIT | 2023-02-02T07:02:43 | 2019-03-06T15:27:20 | Python | UTF-8 | Python | false | false | 8,164 | py | # MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sys
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
if sys.version_info >= (3, 5):
from typing import List, Any, Union
class GroupBucketDescStatLearnedInformation(Base):
    """NOT DEFINED
    The GroupBucketDescStatLearnedInformation class encapsulates a list of groupBucketDescStatLearnedInformation resources that are managed by the system.
    A list of resources can be retrieved from the server using the GroupBucketDescStatLearnedInformation.find() method.
    """

    __slots__ = ()
    _SDM_NAME = "groupBucketDescStatLearnedInformation"
    # Generated accessor class: this map ties each Python property name below
    # to its REST API attribute name; keep it in sync with the properties.
    _SDM_ATT_MAP = {
        "ActionCount": "actionCount",
        "DataPathId": "dataPathId",
        "DataPathIdAsHex": "dataPathIdAsHex",
        "GroupId": "groupId",
        "LocalIp": "localIp",
        "RemoteIp": "remoteIp",
        "WatchGroup": "watchGroup",
        "WatchPort": "watchPort",
        "Weight": "weight",
    }
    # No attribute of this resource is enum-valued.
    _SDM_ENUM_MAP = {}

    def __init__(self, parent, list_op=False):
        super(GroupBucketDescStatLearnedInformation, self).__init__(parent, list_op)

    @property
    def ActionCount(self):
        # type: () -> int
        """
        Returns
        -------
        - number: NOT DEFINED
        """
        return self._get_attribute(self._SDM_ATT_MAP["ActionCount"])

    @property
    def DataPathId(self):
        # type: () -> str
        """
        Returns
        -------
        - str: The Data Path ID of the OpenFlow switch.
        """
        return self._get_attribute(self._SDM_ATT_MAP["DataPathId"])

    @property
    def DataPathIdAsHex(self):
        # type: () -> str
        """
        Returns
        -------
        - str: The Data Path ID of the OpenFlow switch in hexadecimal format.
        """
        return self._get_attribute(self._SDM_ATT_MAP["DataPathIdAsHex"])

    @property
    def GroupId(self):
        # type: () -> int
        """
        Returns
        -------
        - number: A 32-bit integer uniquely identifying the group.
        """
        return self._get_attribute(self._SDM_ATT_MAP["GroupId"])

    @property
    def LocalIp(self):
        # type: () -> str
        """
        Returns
        -------
        - str: The Data Path ID of the OpenFlow switch.
        """
        return self._get_attribute(self._SDM_ATT_MAP["LocalIp"])

    @property
    def RemoteIp(self):
        # type: () -> str
        """
        Returns
        -------
        - str: The Remote IP address of the selected interface.
        """
        return self._get_attribute(self._SDM_ATT_MAP["RemoteIp"])

    @property
    def WatchGroup(self):
        # type: () -> int
        """
        Returns
        -------
        - number: A group whose state determines whether this bucket is live or not. Default value OFPG_ANY(4,294,967,295) indicates that Watch Group is not specified in ofp_group_mod packets.
        """
        return self._get_attribute(self._SDM_ATT_MAP["WatchGroup"])

    @property
    def WatchPort(self):
        # type: () -> int
        """
        Returns
        -------
        - number: A Port whose state determines whether this bucket is live or not. Default value OFPP_ANY(4,294,967,295) indicates that Watch Port is not specified in ofp_group_mod packets.
        """
        return self._get_attribute(self._SDM_ATT_MAP["WatchPort"])

    @property
    def Weight(self):
        # type: () -> int
        """
        Returns
        -------
        - number: Specify the weight of buckets. The range allowed is 0-65535.
        """
        return self._get_attribute(self._SDM_ATT_MAP["Weight"])

    def add(self):
        """Adds a new groupBucketDescStatLearnedInformation resource on the json, only valid with batch add utility

        Returns
        -------
        - self: This instance with all currently retrieved groupBucketDescStatLearnedInformation resources using find and the newly added groupBucketDescStatLearnedInformation resources available through an iterator or index

        Raises
        ------
        - Exception: if this function is not being used with config assistance
        """
        return self._add_xpath(self._map_locals(self._SDM_ATT_MAP, locals()))

    def find(
        self,
        ActionCount=None,
        DataPathId=None,
        DataPathIdAsHex=None,
        GroupId=None,
        LocalIp=None,
        RemoteIp=None,
        WatchGroup=None,
        WatchPort=None,
        Weight=None,
    ):
        # type: (int, str, str, int, str, str, int, int, int) -> GroupBucketDescStatLearnedInformation
        """Finds and retrieves groupBucketDescStatLearnedInformation resources from the server.

        All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve groupBucketDescStatLearnedInformation resources from the server.
        To retrieve an exact match ensure the parameter value starts with ^ and ends with $
        By default the find method takes no parameters and will retrieve all groupBucketDescStatLearnedInformation resources from the server.

        Args
        ----
        - ActionCount (number): NOT DEFINED
        - DataPathId (str): The Data Path ID of the OpenFlow switch.
        - DataPathIdAsHex (str): The Data Path ID of the OpenFlow switch in hexadecimal format.
        - GroupId (number): A 32-bit integer uniquely identifying the group.
        - LocalIp (str): The Data Path ID of the OpenFlow switch.
        - RemoteIp (str): The Remote IP address of the selected interface.
        - WatchGroup (number): A group whose state determines whether this bucket is live or not. Default value OFPG_ANY(4,294,967,295) indicates that Watch Group is not specified in ofp_group_mod packets.
        - WatchPort (number): A Port whose state determines whether this bucket is live or not. Default value OFPP_ANY(4,294,967,295) indicates that Watch Port is not specified in ofp_group_mod packets.
        - Weight (number): Specify the weight of buckets. The range allowed is 0-65535.

        Returns
        -------
        - self: This instance with matching groupBucketDescStatLearnedInformation resources retrieved from the server available through an iterator or index

        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))

    def read(self, href):
        """Retrieves a single instance of groupBucketDescStatLearnedInformation data from the server.

        Args
        ----
        - href (str): An href to the instance to be retrieved

        Returns
        -------
        - self: This instance with the groupBucketDescStatLearnedInformation resources from the server available through an iterator or index

        Raises
        ------
        - NotFoundError: The requested resource does not exist on the server
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._read(href)
| [
"andy.balogh@keysight.com"
] | andy.balogh@keysight.com |
b360b1b3eb8df1b4bdb56e27bd41976012314685 | 4dde2454c42df46eb519743ddb7a7db05697b4a6 | /alexa/remindMeOfXWhenZone/remindMeOfXWhenZoneIntent.py | 3f8dddf1c2722ae1034c0d7c95721f162f99f5ef | [
"MIT"
] | permissive | pippyn/appdaemon-scripts | 7e4231e9c28c8f906f97e8bb7d353d4297453426 | 615cdfeaaf039ffbe1be041eb07c35a2494f008d | refs/heads/master | 2020-04-20T22:14:07.608237 | 2019-02-26T10:16:21 | 2019-02-26T10:16:21 | 169,133,019 | 0 | 0 | MIT | 2019-02-19T14:49:54 | 2019-02-04T19:11:20 | Python | UTF-8 | Python | false | false | 2,881 | py | import appdaemon.plugins.hass.hassapi as hass
import datetime
import globals
# Slot values for the zoneAction intent, in German: "kommen" = entering the
# zone, "verlassen" = leaving it.
__ZONE_ACTION_ENTER__ = "kommen"
__ZONE_ACTION_LEAVE__ = "verlassen"
class RemindMeOfXWhenZoneIntent(hass.Hass):
    """AppDaemon app: Alexa intent handler that registers a one-shot-style
    reminder, delivered when the tracked device enters or leaves a zone."""
    def initialize(self):
        # Handles are collected so terminate() can cancel them on app reload.
        self.timer_handle_list = []
        self.listen_state_handle_list = []
        self.device_tracker = globals.get_arg(self.args,"device_tracker")
        self.notify_name = globals.get_arg(self.args,"notify_name")
        self.remindMessageSkeleton = globals.get_arg(self.args,"remindMessageSkeleton")
        self.notifier = self.get_app('Notifier')
        return
    def getIntentResponse(self, slots, devicename):
        ############################################
        # an Intent to give back the state from a light.
        # but it also can be any other kind of entity
        ############################################
        # Registers a state listener for the requested zone and returns the
        # spoken response text; on any failure a random error phrase is used.
        try:
            # get zone_name for friendly name used when talking to alexa
            zone_name = None
            for key, value in self.args["zoneMapping"].items():
                if key == slots["zone"].lower():
                    zone_name = value
            # listen to a state change of the zone
            # NOTE(review): zone_name is only used to validate that a mapping
            # exists; the raw slot value is what the callback compares against
            # — confirm that is intended.
            if zone_name == None:
                raise Exception("Could not find zonemapping for: {}".format(slots["zone"].lower()))
            else:
                self.listen_state_handle_list.append(self.listen_state(self.remind_callback, self.device_tracker, zone=slots["zone"], zoneAction=slots["zoneAction"], reminder=slots["reminder"]))
            # set correct zoneAction response
            if slots["zoneAction"] == __ZONE_ACTION_ENTER__:
                text = self.args["textLine"] + self.args["textEnter"]
            else:
                text = self.args["textLine"] + self.args["textLeave"]
        except Exception as e:
            self.log("Exception: {}".format(e))
            self.log("slots: {}".format(slots))
            text = self.random_arg(self.args["Error"])
        return text
    def remind_callback(self, entity, attribute, old, new, kwargs):
        """State-change callback: send the reminder when the tracker transitions
        into (kommen) or out of (verlassen) the watched zone."""
        if kwargs["zoneAction"] == __ZONE_ACTION_ENTER__:
            if new != old and new == kwargs["zone"]:
                self.log("Notifying")
                self.notifier.notify(self.notify_name, self.remindMessageSkeleton + kwargs["reminder"], useAlexa=False)
        elif kwargs["zoneAction"] == __ZONE_ACTION_LEAVE__:
            if new != old and old == kwargs["zone"]:
                self.log("Notifying")
                self.notifier.notify(self.notify_name, self.remindMessageSkeleton + kwargs["reminder"], useAlexa=False)
    def terminate(self):
        """Cancel all outstanding timer and state-listener handles on shutdown."""
        for timer_handle in self.timer_handle_list:
            self.cancel_timer(timer_handle)
        for listen_state_handle in self.listen_state_handle_list:
            self.cancel_listen_state(listen_state_handle)
"k.eifinger@googlemail.com"
] | k.eifinger@googlemail.com |
8c3da2bbe7f5f87820bcb9005c4e3ef16da39be2 | 13f78c34e80a52442d72e0aa609666163233e7e0 | /Other/Kattis/Waterloo 2004-06-12/workreduction.py | c80361090eb8f0fba8bb708dfdf2b4d4c124c2a4 | [] | no_license | Giantpizzahead/comp-programming | 0d16babe49064aee525d78a70641ca154927af20 | 232a19fdd06ecef7be845c92db38772240a33e41 | refs/heads/master | 2023-08-17T20:23:28.693280 | 2023-08-11T22:18:26 | 2023-08-11T22:18:26 | 252,904,746 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 691 | py | # https://open.kattis.com/problems/reduction
import re
# Number of test cases.
C = int(input())
for c in range(C):
    print('Case', c+1)
    # N workers now, M required at the end, L agencies to evaluate.
    N, M, L = map(int, input().split())
    A = []
    for i in range(L):
        # Each agency line is "name:a,b": a = cost of releasing one worker,
        # b = cost of halving the workforce (integer division).
        name, a, b = re.compile('(.*):(.*),(.*)').match(input()).groups()
        a = int(a)
        b = int(b)
        # print(name, a, b)
        # Baseline plan: release workers one at a time, no halvings.
        best_c = a * (N-M)
        curr_n = N
        curr_c = 0
        # Try every feasible number of halvings (never dropping below M),
        # finishing the remainder with single releases; keep the cheapest.
        while curr_n // 2 >= M:
            curr_n //= 2
            curr_c += b
            best_c = min(curr_c + a * (curr_n-M), best_c)
        A.append({'name': name, 'cost': best_c})
    # Output agencies ordered by cost, ties broken alphabetically by name.
    A = sorted(A, key=lambda x: (x['cost'], x['name']))
    for x in A:
        print(x['name'], x['cost'])
| [
"43867185+Giantpizzahead@users.noreply.github.com"
] | 43867185+Giantpizzahead@users.noreply.github.com |
1c94a0c295b12369a8682ebb5c180c3eae6f1936 | 7a09af404f29389504742a3d5f1727bfbe562750 | /TrekBot2_WS/build/razor_imu_9dof/catkin_generated/generate_cached_setup.py | 1190411b71f9bdcde84c39269e3159441d673eab | [
"MIT"
] | permissive | Rafcin/TrekBot | 4baa2ed93b90920b36adba0b72384ac320d2de01 | d3dc63e6c16a040b16170f143556ef358018b7da | refs/heads/master | 2020-03-30T02:15:35.361254 | 2018-12-14T03:30:25 | 2018-12-14T03:30:25 | 150,622,252 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,367 | py | # -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import os
import stat
import sys
# find the import for catkin's python package - either from source space or from an installed underlay
# NOTE: appears to be auto-generated by the catkin build (it lives under
# catkin_generated); the absolute paths are machine-specific and baked in.
if os.path.exists(os.path.join('/opt/ros/melodic/share/catkin/cmake', 'catkinConfig.cmake.in')):
    sys.path.insert(0, os.path.join('/opt/ros/melodic/share/catkin/cmake', '..', 'python'))
try:
    from catkin.environment_cache import generate_environment_script
except ImportError:
    # search for catkin package in all workspaces and prepend to path
    for workspace in "/xavier_ssd/TrekBot/TrekBot2_WS/devel;/opt/ros/melodic".split(';'):
        python_path = os.path.join(workspace, 'lib/python3/dist-packages')
        if os.path.isdir(os.path.join(python_path, 'catkin')):
            sys.path.insert(0, python_path)
            break
    from catkin.environment_cache import generate_environment_script
# Render the cached environment for this package and write it out as a shell script.
code = generate_environment_script('/xavier_ssd/TrekBot/TrekBot2_WS/devel/.private/razor_imu_9dof/env.sh')
output_filename = '/xavier_ssd/TrekBot/TrekBot2_WS/build/razor_imu_9dof/catkin_generated/setup_cached.sh'
with open(output_filename, 'w') as f:
    #print('Generate script for cached setup "%s"' % output_filename)
    f.write('\n'.join(code))
# Mark the generated script as executable for the owner.
mode = os.stat(output_filename).st_mode
os.chmod(output_filename, mode | stat.S_IXUSR)
| [
"Rafcin.s@gmail.com"
] | Rafcin.s@gmail.com |
e390b936805ed8e4841c5543cf4ce0d3c7ddee54 | ef0e9cf79347ecde056d947fe157da95c39b1bac | /mundo_3/exercicios/ex115/115.py | 83de4f5643ad7dca477b9d3aa4baee5f3c681b33 | [] | no_license | Iuri-Almeida/CeV-Python | 0c78a1b365bdbd3345ea894ddd6c01b4c81761e7 | cfc3ff1853fdc998a9ea2301d86165263d0f216d | refs/heads/master | 2023-04-20T17:07:39.454125 | 2021-05-11T21:04:29 | 2021-05-11T21:04:29 | 329,114,163 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 271 | py | from lib import menu, opc_0, opc_1, opc_2, opc_3
print(f'{" Exercício 115 ":=^31}')
# Programa Principal: keep showing the menu until option 3 (exit) is chosen.
handlers = {'1': opc_1, '2': opc_2}
while True:
    menu()
    choice = opc_0()
    if choice == '3':
        opc_3()
        break
    handler = handlers.get(choice)
    if handler is not None:
        handler()
| [
"iurilopesalmeida.gmail.com"
] | iurilopesalmeida.gmail.com |
aeeff36ea48d96733e9ec151a94c9382d4596cc6 | f72fe33d1a181f89d2464cc07744dbd275a7d071 | /CNNectome/networks/custom_ops.py | 989abdd29eae6ad7b3e40f2b0ca1ec7239ace8f0 | [
"BSD-2-Clause"
] | permissive | saalfeldlab/CNNectome | 6c8d44d8cc2e161a91b10abb7b4a425d7fc64d1b | c043e3111ff5ec6707a68edffae54eb902a1652d | refs/heads/master | 2023-04-03T15:11:36.586030 | 2022-06-15T14:12:17 | 2022-06-15T14:12:17 | 124,144,317 | 8 | 10 | BSD-2-Clause | 2023-03-24T22:16:04 | 2018-03-06T22:04:16 | Python | UTF-8 | Python | false | false | 1,672 | py | import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_nn_ops import *
import tensorflow as tf
def ignore(x, binary_tensor, name=None):
    """Zero out entries of `x` where `binary_tensor` is 0, rescaling the rest.

    Mirrors the inverted-dropout trick: kept entries are divided by the
    fraction of ones in `binary_tensor`, preserving the expected magnitude.

    Args:
        x: input tensor (anything convertible via `ops.convert_to_tensor`).
        binary_tensor: float tensor of 0s and 1s; assumed same shape as `x`
            (broadcasting otherwise — TODO confirm callers).
        name: optional name scope.

    Returns:
        A tensor with the same static shape as `x`.
    """
    with ops.name_scope(name, "ignore", [x]) as name:
        x = ops.convert_to_tensor(x, name="x")
        # Fraction of elements kept: sum(mask) / total element count.
        keep_ratio = math_ops.divide(
            math_ops.reduce_sum(binary_tensor),
            math_ops.reduce_prod(
                array_ops.shape(binary_tensor, out_type=dtypes.float32)
            ),
        )
        keep_ratio.get_shape().assert_is_compatible_with(tensor_shape.scalar())
        # Removed a leftover debug block that opened a tf.Session here just to
        # print keep_ratio: creating a session during graph construction is
        # expensive and fails for graphs fed through placeholders.
        ret = math_ops.div(x, keep_ratio) * binary_tensor
        ret.set_shape(x.get_shape())
        return ret
def tf_var_summary(var):
    """Register TensorBoard summaries (mean, stddev, min, max, histogram) for `var`.

    All summaries are keyed by `var.name`, so each variable gets its own
    group of scalar plots plus a histogram in TensorBoard.
    """
    # compute mean of variable
    mean = tf.reduce_mean(var)
    tf.summary.scalar("mean_" + var.name, mean)
    # compute std of variable (root of the mean squared deviation)
    stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
    tf.summary.scalar("stddev_" + var.name, stddev)
    tf.summary.scalar("max_" + var.name, tf.reduce_max(var))
    tf.summary.scalar("min_" + var.name, tf.reduce_min(var))
    tf.summary.histogram("histogram_" + var.name, var)
| [
"heinrichl@janelia.hhmi.org"
] | heinrichl@janelia.hhmi.org |
73551d725e67569eda29757bfcb488d0f9880e01 | a74418a8adacfdf67ecf172310d3c13905466f08 | /pg/tests/test_pg.py | 087ad38046799ae1b1b18a481cd1e28922484e7a | [
"BSD-3-Clause"
] | permissive | eduardolujan/sqlalchemist | ac98cf67c78d367df2f3627f068500b4bc908431 | 7d0d04051ad11fe046fbb6295d9ecec72912d3a4 | refs/heads/master | 2022-07-19T13:53:45.326090 | 2020-05-22T11:12:24 | 2020-05-22T11:12:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 360 | py | import pytest
import psycopg2
from ..config import get_config
from ..models import Session
@pytest.fixture
def config():
    """Pytest fixture: the application configuration loaded via get_config()."""
    return get_config()
def test_configuration(config):
    """The loaded configuration must expose a database connection string."""
    assert "database" in config
    assert "conn_str" in config.database
def test_connection(config):
    """A connection to the configured PostgreSQL database can be established."""
    # Use the injected `config` fixture directly; the previous extra
    # `config = get_config()` call shadowed the fixture redundantly.
    psycopg2.connect(config.database.conn_str)
| [
"pmav99@gmail.com"
] | pmav99@gmail.com |
96ef7908b5993d5104612be601d4c170f6ea3135 | a234ecbf8a71075ba90c84f19443d2235c0f4234 | /test_numpy/test_sklearn/lightgbm/lightgbm_cv.py | 4b9befec8d5a724d9b9d2a1dffd05f1b79465ee7 | [] | no_license | hkxIron/hkx_tf_practice | b9b58f7c52b07d4f10709804efc964cf17e5e3ff | 76d0c12750d66b17e71a7102263e1d1fc637611a | refs/heads/master | 2023-08-04T16:51:30.852650 | 2023-08-04T13:05:35 | 2023-08-04T13:05:35 | 118,223,247 | 10 | 13 | null | 2022-12-07T23:32:58 | 2018-01-20T08:14:06 | Jupyter Notebook | UTF-8 | Python | false | false | 5,940 | py | import time
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import StratifiedKFold, RepeatedStratifiedKFold, KFold, cross_val_predict, cross_validate
from sklearn.metrics import roc_auc_score
from lightgbm import LGBMClassifier
import os
import socket
class LGBMClassifierCV(object):
    """Out-of-fold (OOF) cross-validation wrapper around LGBMClassifier.

    Trains one model per stratified fold, collecting OOF predictions for the
    training rows and per-fold (then averaged) predictions for the test rows.
    """
    def __init__(self, params=None, cv=5, random_state=None, n_repeats=None):
        # params: optional dict of LGBMClassifier hyper-parameters.
        # cv: number of stratified folds; n_repeats repeats the whole scheme.
        self.clf = LGBMClassifier()
        if params:
            self.clf.set_params(**params)
        if n_repeats:
            self._kf = RepeatedStratifiedKFold(cv, True, random_state)  # repeat the CV scheme N times
            self._num_preds = cv * n_repeats
        else:
            self._kf = StratifiedKFold(cv, True, random_state)
            self._num_preds = cv
    def fit(self, X, y, X_test=None,
            feval=roc_auc_score,
            sample_weight=None,
            init_score=None,
            eval_metric='auc',
            early_stopping_rounds=100,
            verbose=100,
            feature_name='auto',
            categorical_feature='auto',
            callbacks=None):
        """Fit one model per fold on array inputs.

        X, y: training arrays; X_test: optional test array to score per fold.
        feval: metric applied to the final OOF train predictions; its score is
        stored on self.oof_train_score and returned.
        """
        if X_test is None:
            X_test = X[:1]  # fall back to the first training row as a dummy test set
        self.oof_train = np.zeros(len(X))
        self.oof_test = np.zeros((len(X_test), self._num_preds))  # one column of test predictions per fold
        for n_fold, (train_index, valid_index) in enumerate(self._kf.split(X, y)):
            if verbose:
                print("\033[94mFold %s started at %s\033[0m" % (n_fold + 1, time.ctime()))
            X_train, y_train = X[train_index], y[train_index]
            X_valid, y_valid = X[valid_index], y[valid_index]
            eval_set = [(X_train, y_train), (X_valid, y_valid)]  # evaluate on both the train and valid splits
            ########################################################################
            self.clf.fit(X_train,
                         y_train,
                         sample_weight,
                         init_score,
                         eval_set,
                         eval_names=('Train', 'Valid'),
                         eval_sample_weight=None,
                         eval_class_weight=None,
                         eval_init_score=None,
                         eval_metric=eval_metric,
                         early_stopping_rounds=early_stopping_rounds,
                         verbose=verbose,
                         feature_name=feature_name,
                         categorical_feature=categorical_feature,
                         callbacks=callbacks)
            # Positive-class probabilities: OOF slots for train, one column for test.
            self.oof_train[valid_index] = self.clf.predict_proba(X_valid)[:, 1]
            self.oof_test[:, n_fold] = self.clf.predict_proba(X_test)[:, 1]
            ########################################################################
        # Test-set out-of-fold outputs: rank-average and plain mean across folds.
        self.oof_test_rank = (pd.DataFrame(self.oof_test).rank().mean(axis=1)/len(self.oof_test)).values
        self.oof_test = self.oof_test.mean(axis=1)  # average the per-fold test predictions
        assert len(X) == len(self.oof_train)
        assert len(X_test) == len(self.oof_test)
        # Score the training-set OOF predictions with the supplied metric.
        if feval:
            self.oof_train_score = feval(y, self.oof_train)
            print(f"\n\033[94mtrain CV Score: {self.oof_train_score} ended at {time.ctime()}\033[0m")
            return self.oof_train_score
    def oof_submit(self, ids, pred_ranking=False, file=None, preds=None):
        """Write a submission CSV: one row per id with its predicted score.

        NOTE(review): pred_ranking=True selects the raw mean (oof_test) while
        False selects the rank-averaged scores — this looks inverted; confirm.
        """
        if file is None:
            file = f'submit_{self.oof_train_score}.csv'
        print(f'Save {file} ...')
        if preds is None:
            preds = self.oof_test if pred_ranking else self.oof_test_rank
        if not isinstance(ids, pd.DataFrame):
            ids = pd.DataFrame(ids)
        ids.assign(preds=preds).to_csv(file, index=False, header=False)
    @property
    def oof_train_and_test(self):
        # Concatenated OOF predictions: train rows first, then test rows.
        return np.r_[self.oof_train, self.oof_test]
    def oof_save(self, file='./oof_train_and_test.csv'):
        # Persist the concatenated OOF predictions as a single-column CSV.
        pd.DataFrame(self.oof_train_and_test, columns=['oof_train_and_test']).to_csv(file, index=False)
    def plot_feature_importances(self, feature_names=None, topk=20, figsize=(10, 6), pic_name=None):
        """Bar-plot the top-k feature importances of the last fitted fold model."""
        columns = ['Importances', 'Features']
        importances = self.clf.feature_importances_.tolist()
        if feature_names is None:
            # Synthesize F_0, F_1, ... when no feature names are provided.
            feature_names = list(map(lambda x: f'F_{x}', range(len(importances))))
        _ = list(zip(importances, feature_names))
        df = pd.DataFrame(_, columns=columns).sort_values('Importances', 0, False)
        plt.figure(figsize=figsize)
        sns.barplot(*columns, data=df[:topk])
        plt.title('Features Importances\n')
        plt.tight_layout()
        if pic_name is None:
            plt.savefig(f'importances_{self.oof_train_score}.png')
if __name__ == "__main__":
    from sklearn.datasets import make_classification
    # Smoke test: random binary-classification data for train and "test" sets.
    X, y = make_classification()
    X_test, _ = make_classification()
    clf = LGBMClassifierCV()
    clf.fit(X, y, X_test)
    clf.plot_feature_importances()
"""
一组lightgbmcv参数:
params = {
'class_weight':'balanced',
'metric': 'auc',
'boosting_type': 'gbdt',
'objective': 'binary',
'max_depth': -1,
'num_leaves': 16,
'learning_rate': 0.005,
'min_split_gain': 0.884,
'min_child_weight': 0.01,
'min_child_samples': 31,
'subsample': 0.788,
'subsample_freq': 8,
'colsample_bytree': 0.617,
'reg_alpha': 0.631,
'reg_lambda': 0.81,
'scale_pos_weight': 1,
'random_state': 666,
'verbosity': -1,
'n_jobs': -1,
'n_estimators': 30000} # 300分数好像很高
oof8 = LGBMClassifierCV(params, 8, 999)
oof8.fit(X, y, X_test, early_stopping_rounds=300)
""" | [
"hukexin0000@126.com"
] | hukexin0000@126.com |
fa4a6b6fafa904dd6b8f73b059e98f06cc017a3d | cc2029f40a12e82712072275fc76a07ac59b5940 | /levelup/practice/python/introduction/05_python_division.py | 2a18c52f2b3f31ea8c46490d14b69dcfef6dc1e2 | [
"MIT"
] | permissive | heitorchang/learn-code | d3fb8e45d539d302372126fe28e85032590b5707 | 5e6e56f7257de1910830619c01d470e892d7f9d8 | refs/heads/master | 2023-08-09T13:46:18.623772 | 2023-07-21T16:57:11 | 2023-07-21T16:57:11 | 147,522,837 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 144 | py | def solution(a, b):
print(a // b)
print(a / b)
if __name__ == "__main__":
a = int(input())
b = int(input())
solution(a, b)
| [
"heitorchang@gmail.com"
] | heitorchang@gmail.com |
56bfe735a21870f3ff4b0bfa624cdd7cda45126a | 9f1039075cc611198a988034429afed6ec6d7408 | /tensorflow-stubs/contrib/grid_rnn/python/ops/grid_rnn_cell.pyi | 67a5259cffa6ae62b64e32131f4ff72733496071 | [] | no_license | matangover/tensorflow-stubs | 9422fbb1cb3a3638958d621461291c315f9c6ec2 | 664bd995ef24f05ba2b3867d979d23ee845cb652 | refs/heads/master | 2020-05-23T12:03:40.996675 | 2019-05-15T06:21:43 | 2019-05-15T06:21:43 | 186,748,093 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,905 | pyi | # Stubs for tensorflow.contrib.grid_rnn.python.ops.grid_rnn_cell (Python 3)
#
# NOTE: This dynamically typed stub was automatically generated by stubgen.
from collections import namedtuple as namedtuple
from tensorflow.contrib import layers as layers, rnn as rnn
from tensorflow.python.ops import array_ops as array_ops, math_ops as math_ops, nn as nn
from typing import Any as Any, Optional as Optional
class GridRNNCell(rnn.RNNCell):
def __init__(self, num_units: Any, num_dims: int = ..., input_dims: Optional[Any] = ..., output_dims: Optional[Any] = ..., priority_dims: Optional[Any] = ..., non_recurrent_dims: Optional[Any] = ..., tied: bool = ..., cell_fn: Optional[Any] = ..., non_recurrent_fn: Optional[Any] = ..., state_is_tuple: bool = ..., output_is_tuple: bool = ...) -> None: ...
@property
def output_size(self): ...
@property
def state_size(self): ...
def __call__(self, inputs: Any, state: Any, scope: Optional[Any] = ...): ...
class Grid1BasicRNNCell(GridRNNCell):
def __init__(self, num_units: Any, state_is_tuple: bool = ..., output_is_tuple: bool = ...) -> None: ...
class Grid2BasicRNNCell(GridRNNCell):
def __init__(self, num_units: Any, tied: bool = ..., non_recurrent_fn: Optional[Any] = ..., state_is_tuple: bool = ..., output_is_tuple: bool = ...) -> None: ...
class Grid1BasicLSTMCell(GridRNNCell):
def __init__(self, num_units: Any, forget_bias: int = ..., state_is_tuple: bool = ..., output_is_tuple: bool = ...) -> None: ...
class Grid2BasicLSTMCell(GridRNNCell):
def __init__(self, num_units: Any, tied: bool = ..., non_recurrent_fn: Optional[Any] = ..., forget_bias: int = ..., state_is_tuple: bool = ..., output_is_tuple: bool = ...) -> None: ...
class Grid1LSTMCell(GridRNNCell):
def __init__(self, num_units: Any, use_peepholes: bool = ..., forget_bias: float = ..., state_is_tuple: bool = ..., output_is_tuple: bool = ...) -> None: ...
class Grid2LSTMCell(GridRNNCell):
def __init__(self, num_units: Any, tied: bool = ..., non_recurrent_fn: Optional[Any] = ..., use_peepholes: bool = ..., forget_bias: float = ..., state_is_tuple: bool = ..., output_is_tuple: bool = ...) -> None: ...
class Grid3LSTMCell(GridRNNCell):
def __init__(self, num_units: Any, tied: bool = ..., non_recurrent_fn: Optional[Any] = ..., use_peepholes: bool = ..., forget_bias: float = ..., state_is_tuple: bool = ..., output_is_tuple: bool = ...) -> None: ...
class Grid2GRUCell(GridRNNCell):
def __init__(self, num_units: Any, tied: bool = ..., non_recurrent_fn: Optional[Any] = ..., state_is_tuple: bool = ..., output_is_tuple: bool = ...) -> None: ...
_GridRNNDimension = namedtuple('_GridRNNDimension', ['idx', 'is_input', 'is_output', 'is_priority', 'non_recurrent_fn'])
_GridRNNConfig = namedtuple('_GridRNNConfig', ['num_dims', 'dims', 'inputs', 'outputs', 'recurrents', 'priority', 'non_priority', 'tied', 'num_units'])
| [
"matangover@gmail.com"
] | matangover@gmail.com |
8ce9a4c443487d77f258782e07d82452b044ae5a | 3fc029440f5d6c2ae2e9f2402ce8d92906d4a789 | /betomax_shop/celery.py | ea647618bddb84d4ab230bdc41232a6285a011e6 | [] | no_license | NickVazovsky/betomax-app | 8d832986202165483d222de9fe53f88bc4476c8e | 4a99a990f5e162948592e18bb595aa6b05375c80 | refs/heads/master | 2020-05-22T08:22:25.083415 | 2019-05-12T16:02:25 | 2019-05-12T16:02:25 | 186,273,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 275 | py | import os
from celery import Celery
from django.conf import settings
# Make the Django settings module importable before Celery configures itself.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'betomax_shop.settings')
app = Celery('betomax_shop')
app.config_from_object('django.conf:settings')
# Discover tasks.py modules in every installed app; the lambda defers reading
# INSTALLED_APPS until after Django settings are fully configured.
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
| [
"nvazovsky@gmail.com"
] | nvazovsky@gmail.com |
c42df29bc0afa54406c074c4736e6481149e8f18 | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-5/19496b5b7395085ac068dfd071ec6773b471d1fa-<main>-fix.py | ba56216710ec092e27b73c3fe73ccd79f93bfc99 | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,521 | py | def main():
module = AnsibleModule(argument_spec=dict(name=dict(required=True, type='str', aliases=['unit', 'service']), state=dict(choices=['started', 'stopped', 'restarted', 'reloaded'], type='str'), enabled=dict(type='bool'), masked=dict(type='bool'), daemon_reload=dict(type='bool', default=False, aliases=['daemon-reload']), user=dict(type='bool', default=False), no_block=dict(type='bool', default=False)), supports_check_mode=True, required_one_of=[['state', 'enabled', 'masked', 'daemon_reload']])
systemctl = module.get_bin_path('systemctl')
if module.params['user']:
systemctl = (systemctl + ' --user')
if module.params['no_block']:
systemctl = (systemctl + ' --no-block')
unit = module.params['name']
rc = 0
out = err = ''
result = {
'name': unit,
'changed': False,
'status': {
},
'warnings': [],
}
if module.params['daemon_reload']:
(rc, out, err) = module.run_command(('%s daemon-reload' % systemctl))
if (rc != 0):
module.fail_json(msg=('failure %d during daemon-reload: %s' % (rc, err)))
found = False
is_initd = sysv_exists(unit)
is_systemd = False
(rc, out, err) = module.run_command(("%s show '%s'" % (systemctl, unit)))
if (rc == 0):
multival = []
if out:
k = None
for line in to_native(out).split('\n'):
if line.strip():
if (k is None):
if ('=' in line):
(k, v) = line.split('=', 1)
if v.lstrip().startswith('{'):
if (not v.rstrip().endswith('}')):
multival.append(line)
continue
result['status'][k] = v.strip()
k = None
elif line.rstrip().endswith('}'):
result['status'][k] = '\n'.join(multival).strip()
multival = []
k = None
else:
multival.append(line)
is_systemd = (('LoadState' in result['status']) and (result['status']['LoadState'] != 'not-found'))
if (is_systemd and ('LoadError' in result['status'])):
module.fail_json(msg=("Error loading unit file '%s': %s" % (unit, result['status']['LoadError'])))
found = (is_systemd or is_initd)
if (is_initd and (not is_systemd)):
result['warnings'].append(('The service (%s) is actually an init script but the system is managed by systemd' % unit))
if (module.params['masked'] is not None):
masked = (('LoadState' in result['status']) and (result['status']['LoadState'] == 'masked'))
if (masked != module.params['masked']):
result['changed'] = True
if module.params['masked']:
action = 'mask'
else:
action = 'unmask'
if (not module.check_mode):
(rc, out, err) = module.run_command(("%s %s '%s'" % (systemctl, action, unit)))
if (rc != 0):
fail_if_missing(module, found, unit, msg='host')
if (module.params['enabled'] is not None):
if module.params['enabled']:
action = 'enable'
else:
action = 'disable'
fail_if_missing(module, found, unit, msg='host')
enabled = False
(rc, out, err) = module.run_command(("%s is-enabled '%s'" % (systemctl, unit)))
if (rc == 0):
enabled = True
elif (rc == 1):
if ((not module.params['user']) and is_initd and ((not out.strip().endswith('disabled')) or sysv_is_enabled(unit))):
enabled = True
result['enabled'] = enabled
if (enabled != module.params['enabled']):
result['changed'] = True
if (not module.check_mode):
(rc, out, err) = module.run_command(("%s %s '%s'" % (systemctl, action, unit)))
if (rc != 0):
module.fail_json(msg=('Unable to %s service %s: %s' % (action, unit, (out + err))))
result['enabled'] = (not enabled)
if (module.params['state'] is not None):
fail_if_missing(module, found, unit, msg='host')
result['state'] = module.params['state']
if ('ActiveState' in result['status']):
action = None
if (module.params['state'] == 'started'):
if (not is_running_service(result['status'])):
action = 'start'
elif (module.params['state'] == 'stopped'):
if is_running_service(result['status']):
action = 'stop'
else:
if (not is_running_service(result['status'])):
action = 'start'
else:
action = module.params['state'][:(- 2)]
result['state'] = 'started'
if action:
result['changed'] = True
if (not module.check_mode):
(rc, out, err) = module.run_command(("%s %s '%s'" % (systemctl, action, unit)))
if (rc != 0):
module.fail_json(msg=('Unable to %s service %s: %s' % (action, unit, err)))
else:
module.fail_json(msg='Service is in unknown state', status=result['status'])
module.exit_json(**result) | [
"dg1732004@smail.nju.edu.cn"
] | dg1732004@smail.nju.edu.cn |
e882ef3aee4b0683d0a470824ba429a2bde7b12e | 39206c42c70818066839a6a6edbd63057a9636cf | /tfx/examples/chicago_taxi_pipeline/taxi_pipeline_simple_test.py | 59c22ea10e876ac11d2589725371affb12020288 | [
"Apache-2.0"
] | permissive | azrin-dev/tfx | 8e55d6ddad5490f39eacbbef85eb56ea71c78954 | 88d999b1ab767940aef96805e29bc7784652e8f0 | refs/heads/master | 2020-08-30T09:30:52.528197 | 2019-10-29T15:46:51 | 2019-10-29T15:47:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,960 | py | # Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.examples.chicago_taxi_pipeline.taxi_pipeline_simple."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import os
from airflow import models
import tensorflow as tf
from tfx.examples.chicago_taxi_pipeline import taxi_pipeline_simple
from tfx.orchestration.airflow.airflow_dag_runner import AirflowDagRunner
class TaxiPipelineSimpleTest(tf.test.TestCase):
  """Smoke test for the simple Chicago-taxi pipeline's Airflow DAG construction."""
  def setUp(self):
    super(TaxiPipelineSimpleTest, self).setUp()
    # Per-test scratch directory; honours Bazel's TEST_UNDECLARED_OUTPUTS_DIR
    # when set, otherwise falls back to a temp dir.
    self._test_dir = os.path.join(
        os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.get_temp_dir()),
        self._testMethodName)
  def testTaxiPipelineCheckDagConstruction(self):
    """The logical pipeline has 9 components and compiles to an Airflow DAG."""
    airflow_config = {
        'schedule_interval': None,
        'start_date': datetime.datetime(2019, 1, 1),
    }
    # All path-like arguments point at the scratch dir; nothing is executed.
    logical_pipeline = taxi_pipeline_simple._create_pipeline(
        pipeline_name='Test',
        pipeline_root=self._test_dir,
        data_root=self._test_dir,
        module_file=self._test_dir,
        serving_model_dir=self._test_dir,
        metadata_path=self._test_dir,
        direct_num_workers=1)
    self.assertEqual(9, len(logical_pipeline.components))
    pipeline = AirflowDagRunner(airflow_config).run(logical_pipeline)
    self.assertIsInstance(pipeline, models.DAG)
if __name__ == '__main__':
  tf.test.main()  # delegate to the TensorFlow test runner when run as a script
| [
"tensorflow-extended-team@google.com"
] | tensorflow-extended-team@google.com |
5a6fda791aaf455984c75d0b4a48e62137ce6558 | d8fe3b5243bec2b61fd7907c4ff799b24bb617e5 | /Bloomberg_codecon/2015_Finals/conference_room_scheduler.py | 4bb1db33982bbc78e65bad6fc5afe88eb073136e | [
"Unlicense"
] | permissive | SelvorWhim/competitive | b89ed252512d88d9346d168dc6b48e0a42a6142d | 1c73a5c7b2d0dc1b6c4f3f06ace69cdf5c6a34c0 | refs/heads/master | 2023-04-13T01:02:52.083519 | 2023-04-11T10:14:38 | 2023-04-11T10:14:38 | 96,573,533 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,358 | py | ### INSTRUCTIONS ###
'''
Bloomberg needs a new system to schedule conference rooms! To keep things simple, the system operates in 15 minutes blocks, and only during an 8 hour workday, so that there are 32 available time slots per day for each room.
Users will submit one of two different commands: Create Booking, and Query Available.
When a user attempts to create a booking, they will submit a Room Id, a starting Timeslot, and a length (in timeslots). Each conference room can only be occupied by one user, and is booked in increments of timeslots (a minimum booking is length 1, maximum is length 32). Any user can book any room for as many slots as possible, so long as their booking does not interfere with an already occupied room. If the booking overlaps with any other bookings for that conference room (even if it's only for one slot of many), the entire booking command is rejected (i.e., the room schedule remains unchanged).
A user can also query availability to ask which rooms are available during a certain time block (a starting timeslot + a length, in timeslots). The system should report to the user which rooms are available for the entire length of their requested time. If a room is unavailable for any amount of time during the requested window, it is not returned from the Query.
> Input Specifications
Input will be the number of rooms N on the first line (1<=N<=100), followed by any number of the following request types:
Booking: RoomId-TimeSlot-#OfSlots
Query: TimeSlot-#OfSlots
You can assume that no more than 100 requests will be made. Also, RoomIds and TimeSlots are indexed starting at 1, not 0.
> Output Specifications
Output as many lines as necessary to answer each request in the order they were received.
Booking: You will output Y if the booking is possible and N otherwise.
Query: You will output a list of space-delimited room ids in order. There should be no trailing or preceding spaces. If there are no rooms available that match the Query, print None
'''
### MY SOLUTION (accepted) ###
#Problem : Finals Spring 2015 - Conference Room Scheduler
#Language : Python 3
#Compiled Using : py_compile
#Version : Python 3.4.3
#Input for your program will be provided from STDIN
#Print out all output from your program to STDOUT
import sys
# room and slot numbers indexed from 1
def areSlotsEmpty(mat, room, slot1, slots):
    """Return True when `room` is free for `slots` consecutive 15-minute
    slots starting at `slot1` (both room and slot are 1-indexed)."""
    # A booking may never run past the last of the 32 daily slots.
    if slot1 + slots - 1 > 32:
        return False
    # Entries are 0 (free) or 1 (taken): free iff nothing in the window is set.
    window = mat[room - 1][slot1 - 1 : slot1 + slots - 1]
    return not any(window)
# Read the whole request stream: first line is the room count N, every later
# line is either "RoomId-TimeSlot-#Slots" (booking) or "TimeSlot-#Slots" (query).
data = sys.stdin.read().splitlines()
N=int(data[0])
queries = [[int(n) for n in line.split('-')] for line in data[1:]]
slot_mat = [[0 for x in range(32)] for y in range(N)] # table of rooms and slots. Will be 1 for taken - no identity need be saved
for q in queries:
    if len(q) == 3: # Booking
        # Book only when every requested slot is free; reject atomically.
        if(areSlotsEmpty(slot_mat,q[0],q[1],q[2])):
            for i in range(q[1]-1,q[2]+q[1]-1):
                slot_mat[q[0]-1][i] = 1
            print('Y')
        else:
            print('N')
    else: # assumed len(q)==2 -> Query
        # Collect (in ascending id order) every room free for the whole window.
        free_rooms=''
        for room in range(1,N+1):
            if(areSlotsEmpty(slot_mat,room,q[0],q[1])):
                free_rooms += str(room) + ' '
        if free_rooms=='':
            print('None')
        else:
            print(free_rooms.rstrip())
"Carmeverre@gmail.com"
] | Carmeverre@gmail.com |
0b0a70e9f2d420767ef23ff86c91c8e79597d405 | 3ec9d3aa7e59475683dba30a87ca68242a7ec181 | /cn/study/days100/days09/10扑克游戏.py | 04ac207e67cdf30f300135ee8f51d0247c5e8d5b | [
"Apache-2.0"
] | permissive | Jasonandy/Python-X | 58bf36499572cdfb7d7bf80c6a3cd0c818f62c1e | 2f02b9a17bd5495dd1f8746b191f11ec2d7bccbe | refs/heads/master | 2021-06-16T17:07:29.277404 | 2021-03-07T14:17:05 | 2021-03-07T14:17:05 | 175,353,402 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,739 | py | """
扑克游戏
"""
import random
class Card(object):
    """A single playing card: a suite symbol plus a face value (1-13)."""
    def __init__(self, suite, face):
        self._suite = suite
        self._face = face
    @property
    def face(self):
        return self._face
    @property
    def suite(self):
        return self._suite
    def __str__(self):
        # Faces 1/11/12/13 print as letters; everything else as its number.
        face_names = {1: 'A', 11: 'J', 12: 'Q', 13: 'K'}
        face_str = face_names.get(self._face, str(self._face))
        return '%s%s' % (self._suite, face_str)
    def __repr__(self):
        return self.__str__()
class Poker(object):
    """A full 52-card deck with a cursor over the dealing order."""
    def __init__(self):
        # Build the deck in fixed order; shuffle() randomizes it later.
        self._cards = [Card(suite, face)
                       # 4 suites x faces 1-13 (A through K)
                       for suite in '♠♥♣♦'
                       for face in range(1, 14)]
        self._current = 0
    @property
    def cards(self):
        return self._cards
    def shuffle(self):
        """Shuffle the deck in place and rewind the dealing cursor."""
        self._current = 0
        # random.shuffle reorders all elements of the list uniformly at random
        random.shuffle(self._cards)
    @property
    def next(self):
        """Deal: return the card at the cursor and advance it."""
        card = self._cards[self._current]
        self._current += 1
        return card
    @property
    def has_next(self):
        """True while the cursor still points at an undealt card."""
        return self._current < len(self._cards)
class Player(object):
    """A player: a name plus the cards currently held."""
    def __init__(self, name):
        self._name = name
        self._cards_on_hand = []
    @property
    def name(self):
        return self._name
    @property
    def cards_on_hand(self):
        # the player's current hand
        return self._cards_on_hand
    def get(self, card):
        """Take a dealt card into the hand."""
        self._cards_on_hand.append(card)
    def arrange(self, card_key):
        """Sort the hand in place using the given key function."""
        self._cards_on_hand.sort(key=card_key)
# 排序规则-先根据花色再根据点数排序
def get_key(card):
    """Sort key for a hand: group by suite first, then by face value."""
    return card.suite, card.face
def main():
    """Deal a shuffled deck evenly to four players and print each sorted hand."""
    p = Poker()
    p.shuffle()
    players = [Player('东邪'), Player('西毒'), Player('南帝'), Player('北丐')]
    # 13 rounds of dealing one card to each of the 4 players = 52 cards.
    for _ in range(13):
        for player in players:
            player.get(p.next)
    for player in players:
        print(player.name + ':', end=' ')
        player.arrange(get_key)
        print(player.cards_on_hand)
if __name__ == '__main__':
    main()  # deal a demo game only when executed as a script
| [
"jasonandy@hotmail.com"
] | jasonandy@hotmail.com |
3b3c4b37598cbee3b7cd1bdeb617baa477c1b5b0 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_285/ch63_2019_09_04_16_30_36_096314.py | de45a44ec20c2bdaed00b6cd7662603836b4dfb8 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 200 | py | def pos_arroba(email):
for i in len(email):
for e in range(len(email)):
if e == "@":
return i
| [
"you@example.com"
] | you@example.com |
34d0876905040aa34d7ff199773c19627d21f202 | 61b475c33745dbe11d88ea288cbdee279f89c610 | /src/izi/apps/analytics/models.py | 881f973e80cc48cc81f2c745f541d7868235c9ee | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] | permissive | izi-ecommerce/izi-core | a092ea285d0dbd83d17427de3157a9f1e77d6c51 | 21176be2d41f0cf54ca954f294209c585f643dba | refs/heads/master | 2020-03-30T08:37:39.045514 | 2018-10-08T02:58:46 | 2018-10-08T02:58:46 | 151,029,291 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 824 | py | from izi.apps.analytics.abstract_models import (
AbstractProductRecord, AbstractUserProductView,
AbstractUserRecord, AbstractUserSearch)
from izi.core.loading import is_model_registered
__all__ = []
# Provide a concrete model for each analytics abstract base only when the
# project has not already registered its own override (oscar-style swapping);
# every model actually defined here is exported through __all__.
if not is_model_registered('analytics', 'ProductRecord'):
    class ProductRecord(AbstractProductRecord):
        pass
    __all__.append('ProductRecord')
if not is_model_registered('analytics', 'UserRecord'):
    class UserRecord(AbstractUserRecord):
        pass
    __all__.append('UserRecord')
if not is_model_registered('analytics', 'UserProductView'):
    class UserProductView(AbstractUserProductView):
        pass
    __all__.append('UserProductView')
if not is_model_registered('analytics', 'UserSearch'):
    class UserSearch(AbstractUserSearch):
        pass
    __all__.append('UserSearch')
| [
"diepdt@izi.asia"
] | diepdt@izi.asia |
8b0779994b8a00dc87933b5788fc4e363116d694 | 67583749bab9e87fe2b890dd0aee2c09e8f4bbfb | /yandex-algorithm-training/dz5-b-e.py | 857af6911b1a88bdcb9e2b9def7ab77864cb8e69 | [] | no_license | allburov/codeforces | 41e6d0f5155a0ee635d864053aad996b5880aabe | e7455f9fc85e66988b2a195234efdec155c5f8a6 | refs/heads/master | 2023-04-13T07:28:00.920469 | 2023-04-06T11:53:01 | 2023-04-06T11:53:01 | 160,622,387 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,360 | py | def read_input():
with open('threesum.in') as input_:
s = int(input_.readline())
_, *num1 = list(map(int, input_.readline().split()))
_, *num2 = list(map(int, input_.readline().split()))
_, *num3 = list(map(int, input_.readline().split()))
return s, num1, num2, num3
def findMiddle(numsA, numsB, S):
    """Two-pointer scan over two value-sorted (value, original_index) lists.

    Returns the lexicographically smallest pair of original indices whose
    values sum to S, or None when no such pair exists.
    """
    left = 0
    right = len(numsB) - 1
    matches = []
    while left < len(numsA) and right >= 0:
        total = numsA[left][0] + numsB[right][0]
        if total < S:
            left += 1
        elif total > S:
            right -= 1
        else:
            # Record the pair of original positions, then keep scanning.
            matches.append((numsA[left][1], numsB[right][1]))
            right -= 1
    return min(matches) if matches else None
# num, pos
def task(s, nums1, nums2, nums3):
    """Find indices (i, j, k), one per list, with nums1[i]+nums2[j]+nums3[k] == s.

    Returns the first such triple found (smallest i, then the lexicographically
    smallest (j, k) pair), or [-1] when no triple sums to s.
    """
    # Cheap pruning bounds: the smallest/largest sum nums2+nums3 can reach.
    minSum = min(nums2) + min(nums3)
    maxSum = max(nums2) + max(nums3)
    # Pair each value with its original position, then sort by value for the
    # two-pointer search in findMiddle.
    nums2 = [(num, i) for i, num in enumerate(nums2)]
    nums2.sort()
    nums3 = [(num, i) for i, num in enumerate(nums3)]
    nums3.sort()
    for i, num in enumerate(nums1):
        find = s - num
        if find > maxSum or find < minSum:
            continue  # no pair from nums2/nums3 can possibly reach this target
        res = findMiddle(nums2, nums3, S=s - num)
        if res:
            return i, res[0], res[1]
    return [-1]
if __name__ == "__main__":
    # Read (s, nums1, nums2, nums3) from threesum.in, solve, and print the
    # space-separated answer indices (or -1 when no triple exists).
    args = read_input()
    res = task(*args)
    print(" ".join(map(str, res)))
| [
"allburov@gmail.com"
] | allburov@gmail.com |
1386d20539b4efddf945c6345a59b476a6126afa | e262e64415335060868e9f7f73ab8701e3be2f7b | /.history/read_pyexcal_20201111164148.py | ea7974f00258d36f2ceeacd3dc9d4247825542bd | [] | no_license | Allison001/developer_test | 6e211f1e2bd4287ee26fd2b33baf1c6a8d80fc63 | b8e04b4b248b0c10a35e93128a5323165990052c | refs/heads/master | 2023-06-18T08:46:40.202383 | 2021-07-23T03:31:54 | 2021-07-23T03:31:54 | 322,807,303 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 165 | py | >>> from openpyxl import load_workbook
>>> wb = load_workbook(filename = 'empty_book.xlsx')
>>> sheet_ranges = wb['range names']
>>> print(sheet_ranges['D18'].value) | [
"zhangyingxbba@gmail.com"
] | zhangyingxbba@gmail.com |
334a1f105f8d090005013ca75b048975e5761708 | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-eip/huaweicloudsdkeip/v2/model/list_resource_resp.py | a4d35a9e336cac44e1b8ff9b5883573a8a79921f | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 6,036 | py | # coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ListResourceResp:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'resource_detail': 'object',
'resource_id': 'str',
'resource_name': 'str',
'tags': 'list[ResourceTagResp]'
}
attribute_map = {
'resource_detail': 'resource_detail',
'resource_id': 'resource_id',
'resource_name': 'resource_name',
'tags': 'tags'
}
def __init__(self, resource_detail=None, resource_id=None, resource_name=None, tags=None):
"""ListResourceResp
The model defined in huaweicloud sdk
:param resource_detail: 资源详情。 资源对象,用于扩展。默认为空
:type resource_detail: object
:param resource_id: 资源ID
:type resource_id: str
:param resource_name: 资源名称,没有默认为空字符串
:type resource_name: str
:param tags: 标签列表,没有标签默认为空数组
:type tags: list[:class:`huaweicloudsdkeip.v2.ResourceTagResp`]
"""
self._resource_detail = None
self._resource_id = None
self._resource_name = None
self._tags = None
self.discriminator = None
if resource_detail is not None:
self.resource_detail = resource_detail
if resource_id is not None:
self.resource_id = resource_id
if resource_name is not None:
self.resource_name = resource_name
if tags is not None:
self.tags = tags
@property
def resource_detail(self):
"""Gets the resource_detail of this ListResourceResp.
资源详情。 资源对象,用于扩展。默认为空
:return: The resource_detail of this ListResourceResp.
:rtype: object
"""
return self._resource_detail
@resource_detail.setter
def resource_detail(self, resource_detail):
"""Sets the resource_detail of this ListResourceResp.
资源详情。 资源对象,用于扩展。默认为空
:param resource_detail: The resource_detail of this ListResourceResp.
:type resource_detail: object
"""
self._resource_detail = resource_detail
@property
def resource_id(self):
"""Gets the resource_id of this ListResourceResp.
资源ID
:return: The resource_id of this ListResourceResp.
:rtype: str
"""
return self._resource_id
@resource_id.setter
def resource_id(self, resource_id):
"""Sets the resource_id of this ListResourceResp.
资源ID
:param resource_id: The resource_id of this ListResourceResp.
:type resource_id: str
"""
self._resource_id = resource_id
@property
def resource_name(self):
"""Gets the resource_name of this ListResourceResp.
资源名称,没有默认为空字符串
:return: The resource_name of this ListResourceResp.
:rtype: str
"""
return self._resource_name
@resource_name.setter
def resource_name(self, resource_name):
"""Sets the resource_name of this ListResourceResp.
资源名称,没有默认为空字符串
:param resource_name: The resource_name of this ListResourceResp.
:type resource_name: str
"""
self._resource_name = resource_name
@property
def tags(self):
"""Gets the tags of this ListResourceResp.
标签列表,没有标签默认为空数组
:return: The tags of this ListResourceResp.
:rtype: list[:class:`huaweicloudsdkeip.v2.ResourceTagResp`]
"""
return self._tags
@tags.setter
def tags(self, tags):
"""Sets the tags of this ListResourceResp.
标签列表,没有标签默认为空数组
:param tags: The tags of this ListResourceResp.
:type tags: list[:class:`huaweicloudsdkeip.v2.ResourceTagResp`]
"""
self._tags = tags
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ListResourceResp):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
4ee899fc7c2d5bbf7189cb564d06e4bb4594a523 | 41898ee4cc597a19540d64c333687610d5792168 | /Problems/1200. Minimum Absolute Difference.py | 889e116531cce0b35ad923e6d519e77d91b0f6d4 | [] | no_license | aidardarmesh/leetcode | 82c4e09a85dc5b6cf05bceb089b57b3a81e2406e | 4509f4b2b83e172e6ccc21ff89fc1204e0c6b3f3 | refs/heads/master | 2021-07-06T15:56:04.244369 | 2020-11-15T20:47:16 | 2020-11-15T20:47:16 | 205,086,346 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 705 | py | from typing import *
class Solution:
def minimumAbsDifference(self, arr: List[int]) -> List[List[int]]:
min_delta = 9999999
result = []
n = len(arr)
arr = sorted(arr)
for i in range(0, n-1):
delta = arr[i+1] - arr[i]
if delta < min_delta:
result = []
min_delta = delta
if delta == min_delta:
result.append([arr[i], arr[i+1]])
return result
s = Solution()
assert s.minimumAbsDifference([4,2,1,3]) == [[1,2],[2,3],[3,4]]
assert s.minimumAbsDifference([1,3,6,10,15]) == [[1,3]]
assert s.minimumAbsDifference([3,8,-10,23,19,-4,-14,27]) == [[-14,-10],[19,23],[23,27]] | [
"darmesh.aidar@gmail.com"
] | darmesh.aidar@gmail.com |
49367f0e5d32687bdb5d111a90ac1d482dacc060 | 6121da376efe804fc8d9a5b33731c7c35f6d5fc0 | /python_basics/ex35.py | f0442bfb4afd630f3417b06b74b0aadd4bcdfe64 | [] | no_license | Gitus-Maximus/Skills | 4e67b5cdc19d695aef0ab1f768d9ab5c2a9591ac | 1ba6bd63de18afe2ca698430aaa4b5bd5434351b | refs/heads/main | 2023-04-30T18:35:31.654718 | 2021-05-22T11:56:06 | 2021-05-22T11:56:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 259 | py | week = ["Poniedziałek", "Wtorek", "Środa", "Czwartek", "Piątek", "Sobota", "Niedziela"]
print("$".join (week[1:3])) #metoda .join sprawia, ze mozemy wyświetlić dowolony tekst pomiędzy argumentami z listy.
print("Pozbywam się:", week.pop(4))
print(week) | [
"kakulinski@wp.pl"
] | kakulinski@wp.pl |
a30bc5873f3cbe1ae76f71b47f868d176dd3823c | 998a965258a75e4a9d48805d7b2873afafae2f7d | /dpoll/polls/migrations/0012_auto_20190125_0654.py | 1458061d046401d3c9263a40b9998a90ad538f70 | [
"MIT"
] | permissive | emre/dpoll.xyz | af64cb7d933c579d9cb8720e456fa3d3b7ae8d5e | 15927cb82bc525f99068a0ab92c14087e88f7950 | refs/heads/master | 2022-12-12T18:35:56.351060 | 2022-04-24T22:11:36 | 2022-04-24T22:11:36 | 149,586,879 | 23 | 9 | MIT | 2021-06-10T20:48:55 | 2018-09-20T09:42:45 | Python | UTF-8 | Python | false | false | 449 | py | # Generated by Django 2.1.1 on 2019-01-25 06:54
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('polls', '0011_auto_20190125_0649'),
]
operations = [
migrations.AlterField(
model_name='promotiontransaction',
name='from_user',
field=models.CharField(default=1, max_length=255),
preserve_default=False,
),
]
| [
"mail@emreyilmaz.me"
] | mail@emreyilmaz.me |
502af0bed2bbfa2a50d2e1de9e7666692457d803 | 4d99350a527a88110b7bdc7d6766fc32cf66f211 | /OpenGLCffi/EGL/EXT/NOK/swap_region.py | b2fd2f88da1733562175c045d1e5ec660dfd4f1c | [
"MIT"
] | permissive | cydenix/OpenGLCffi | e790ef67c2f6c9877badd5c38b7d58961c8739cd | c78f51ae5e6b655eb2ea98f072771cf69e2197f3 | refs/heads/master | 2021-01-11T07:31:10.591188 | 2017-04-17T11:04:55 | 2017-04-17T11:04:55 | 80,312,084 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 167 | py | from OpenGLCffi.EGL import params
@params(api='egl', prms=['dpy', 'surface', 'numRects', 'rects'])
def eglSwapBuffersRegionNOK(dpy, surface, numRects, rects):
pass
| [
"cdenizol@gmail.com"
] | cdenizol@gmail.com |
615957840ee474ac9841f70c54cafc727ca1a5c4 | 0e4df81fb59129d23ccc820b11117ba78a372099 | /inheritance.py | e263c9de3b6611c8d780899f66d55edec7963e15 | [] | no_license | crishonsou/hackerrank_30_days_of_code | ffdb51163a4e14d2b8438a8e01183f31b4d9a138 | aa267d82915dd7d3cfb6f5cbfb52b86497044b84 | refs/heads/main | 2022-12-22T18:06:51.595103 | 2020-10-06T15:07:15 | 2020-10-06T15:07:15 | 301,764,193 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,336 | py | ## Inheritance
class Person:
def __init__(self, firstName, lastName, idNumber):
self.firstName = firstName
self.lastName = lastName
self.idNumber = idNumber
def printPerson(self):
print('Name:', self.lastName + ',', self.firstName)
print('ID:', self.idNumber)
class Student(Person):
def __init__(self, firstName, lastName, idNumber, scores):
Person.__init__(self, firstName, lastName, idNumber)
self.testScores = scores
def calculate(self):
average = 0
for i in self.testScores:
average += i
average = average / len(self.testScores)
if(average >= 90):
return 'O' # Outstanding
elif(average >= 80):
return 'E' # Exceeds Expectations
elif(average >= 70):
return 'A' # Acceptable
elif(average >= 55):
return 'P' # Poor
elif(average >= 40):
return 'D' # Dreadful
else:
return 'T' # Troll
line = input().split()
firstName = line[0]
lastName = line[1]
idNum = line[2]
numScores = int(input())
scores = list(map(int, input().split()))
s = Student(firstName, lastName, idNum, scores)
s.printPerson()
print('Grade:', s.calculate())
| [
"noreply@github.com"
] | crishonsou.noreply@github.com |
9b5706ba5278a195707b47a5d60c00805fc6e7e5 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/valid_20200616204038.py | 910525208285c836bf6cb5ec61a6cd7e8d587d35 | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 405 | py | # Ipv4 --> 4 decimal numbers,between 0 to 255
# leading zero's is invalid
# check whethere its a digit between 0 to 255
def valid(str):
address = str.split(".")
numbers = range(0,256)
for a in address:
if int(a) in numbers:
if len(a) == 2 and a[0] == "0":
return False
else:
print(address)
valid("172.16.254.02") | [
"mary.jereh@gmail.com"
] | mary.jereh@gmail.com |
08087af6a1ab27fa92e29b9cc8a336473af168f4 | 8ccc03e848cd180186fec52179740a6007875c32 | /Control/main.py | 239ad03da3601da79bf3dd294a73c23e148fc0f1 | [] | no_license | ManiacalLabs/Laz-A-Sketch | 013166f06ccd63a7237608eec83dbc0a789ebe0f | 2901aecc09b1e4f34e982b59016269ed69950e1d | refs/heads/master | 2020-07-05T09:15:23.999032 | 2019-09-21T18:31:14 | 2019-09-21T18:31:14 | 202,604,166 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,869 | py | from com import Encoder
from grbl import grbl
from time import sleep
# constrain to specific range
def clamp(n, minn, maxn):
return max(min(maxn, n), minn)
# round to a specific increment, such as 0.25
def inc_round(v, inc):
return round(v/inc)*inc
class Control(object):
def __init__(self, g, enc, spm, inc, power, speed, init, size):
self.grbl = g
self.enc = enc
self.spm = spm
self.inc = inc
self.power = power
self.speed = speed
self.init = init
self._x, self._y = 0,0 # internal use "high res" values
self.x, self.y = 0,0 # values constrained to specific increments
self.grbl.unlock()
self.cfg = self.grbl.get_config()
# self.max_x, self.max_y = 130,130
if size:
self.max_x, self.max_y = size[0], size[1]
else:
self.max_x = self.cfg['$130']
self.max_y = self.cfg['$131']
self.grbl.send(self.init)
self.set_speed(self.speed)
self.set_power(self.power)
def home(self):
print('Homing...')
self.x, self.y, _ = self.grbl.home()
self._x, self._y = self.x, self.y
print('Homing complete')
def set_speed(self, speed):
self.speed = speed
self.grbl.send('F{}'.format(self.speed))
def set_power(self, power):
self.power = power
self.grbl.send('S{}'.format(1000*self.power))
def check(self):
# store previous values
lx, ly = self.x, self.y
# read encoder deltas
dx, dy = self.enc.read()
# update and constrain internal values
self._x += (dx / self.spm)
self._y += (dy / self.spm)
self._x = clamp(self._x, 0, self.max_x)
self._y = clamp(self._y, 0, self.max_y)
# round to configured increment
self.x = inc_round(self._x, self.inc)
self.y = inc_round(self._y, self.inc)
return (self.x != lx or self.y != ly)
def move(self):
cmd = 'G1 X{0:.3f} Y{1:.3f}'.format(self.x, self.y)
self.grbl.send(cmd)
MACHINE_CFG = {
"size": None, # X,Y dimensions in mm, None to autodetect
"spm": 100, # encoder steps per mm movement
"inc": 0.01, # constrain move values to this increment
"power": 0.05, # default power level (0.0 - 1.0)
"speed": 5000, # default movement speed,
"init": "G90 G21 G54 M4" # startup gcode
}
def main():
g = grbl()
enc = Encoder()
con = Control(g, enc, **MACHINE_CFG)
con.home()
while True:
if con.check():
con.move()
# print('{0:.2f},{1:.2f}'.format(con.x, con.y))
# sleep(0.05)
if __name__ == '__main__':
main() | [
"adammhaile@gmail.com"
] | adammhaile@gmail.com |
7455209477479b379ca339fc7009bf72424ba4ab | 52c8f780d1b2d6086b0c9e70e4ddfbcba8a8d97a | /sam-app/tests/unit/test_acc_FanManager_with_driver.py | e8aa91dad46f301dcac03dc84b2b99e9da9cc77b | [] | no_license | stevezieglerva/lutils | 75089dec093e3b0377fe6e333844daa6a923acbd | d28fb8269e9dbc10a01b48761b4c706145d45cd1 | refs/heads/master | 2023-07-27T18:17:08.026448 | 2021-09-17T15:33:50 | 2021-09-17T15:33:50 | 276,093,010 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,593 | py | import inspect
import json
import os
import sys
import boto3
from moto import mock_dynamodb2
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
parentdir = os.path.dirname(parentdir) + "/common_layer_hex/python"
sys.path.insert(0, parentdir)
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
print("Updated path:")
print(json.dumps(sys.path, indent=3))
import unittest
from unittest import mock
from common_layer_hex.python.domain.FanManager import *
from FanManagerTestDriver import *
from common_layer_hex.python.infrastructure.repository.InMemoryRepository import (
InMemoryRepository,
)
from common_layer_hex.python.infrastructure.notifications.TestNotifier import (
TestNotifier,
)
from common_layer_hex.python.domain.ProcessDTO import *
from common_layer_hex.python.domain.TaskDTO import *
from common_layer_hex.python.infrastructure.repository.DynamoDB import DynamoDB
from common_layer_hex.python.domain.FanEventDTO import FanEventDTO
class FanManagerDriverUnitTests(unittest.TestCase):
@mock_dynamodb2
def test_start_process__given_valid_inputs__then_process_and_task_in_repo(self):
# Arrange
driver = FanManagerTestDriver()
tasks = driver.create_task_array(
[
{"task_name": "task 01", "message": {"action": "go"}},
{"task_name": "task 02", "message": {"action": "save"}},
]
)
# Act
results = driver.when_start_process("fan manager test", tasks)
# Assert
driver.then_process_in_repo(results.updated_process.process_id)
driver.then_count_of_tasks_in_status(results.updated_process, "fan_out", 2)
driver.then_tasks_linked_to_process(
results.updated_process, results.updated_tasks[0]
)
driver.then_event_created_for(
results.event_notifications[0], EVENT_PROCESS_STARTED
)
self.assertEqual(results.updated_process.information, "special info")
@mock_dynamodb2
def test_fan_out__given_newly_created_tasks__then_tasks_status_changed_and_notifications_sent(
self,
):
# Arrange
driver = FanManagerTestDriver()
tasks = driver.create_task_array(
[
{"task_name": "task 01", "message": {"action": "go"}},
{"task_name": "task 02", "message": {"action": "save"}},
]
)
results = driver.when_start_process("fan manager test", tasks)
process = results.updated_process
# Act
results = driver.when_fan_out(results.updated_tasks)
print(f"\n\n{results}")
# Assert
driver.then_count_of_tasks_in_status(process, "created", 2)
driver.then_event_created_for(
results.event_notifications[0], EVENT_TASK_CREATED
)
@mock_dynamodb2
def test_complete_task__given_some_tasks_open__then_tasks_status_changed_and_process_progress_set(
self,
):
# Arrange
driver = FanManagerTestDriver()
tasks = driver.create_task_array(
[
{"task_name": "task 01", "message": {"action": "go"}},
{"task_name": "task 02", "message": {"action": "save"}},
]
)
results = driver.when_start_process("fan manager test", tasks)
results = driver.when_fan_out(results.updated_tasks)
second_updated_task = results.updated_tasks[1]
# Act
results = driver.when_complete_task(second_updated_task)
print(results)
# Assert
driver.then_count_of_tasks_in_status(
results.updated_process,
"completed",
1,
)
driver.then_progress_is(results.updated_process.progress, 0.5)
driver.then_event_created_for(
results.event_notifications[0], EVENT_TASK_COMPLETED
)
self.assertEqual(results.updated_process.information, "special info")
@mock_dynamodb2
def test_complete_task__given_all_tasks_completed__then_tasks_status_changed_and_process_progress_set(
self,
):
# Arrange
driver = FanManagerTestDriver()
tasks = driver.create_task_array(
[
{"task_name": "task 01", "message": {"action": "go"}},
{"task_name": "task 02", "message": {"action": "save"}},
]
)
results = driver.when_start_process("fan manager test", tasks)
process = results.updated_process
saved_tasks = results.updated_tasks
results = driver.when_fan_out(saved_tasks)
# Act
results = driver.when_complete_task(saved_tasks[0])
results = driver.when_complete_task(saved_tasks[1])
print(results)
# Assert
driver.then_count_of_tasks_in_status(
results.updated_process,
"completed",
2,
)
driver.then_progress_is(results.updated_process.progress, 1)
driver.then_event_created_for(
results.event_notifications[0], EVENT_TASK_COMPLETED
)
@mock_dynamodb2
def test_complete_process__given_all_tasks_completed__then_tasks_status_changed_and_process_progress_set(
self,
):
# Arrange
driver = FanManagerTestDriver()
tasks = driver.create_task_array(
[
{"task_name": "task 01", "message": {"action": "go"}},
{"task_name": "task 02", "message": {"action": "save"}},
]
)
results = driver.when_start_process("fan manager test", tasks)
saved_tasks = results.updated_tasks
results = driver.when_fan_out(saved_tasks)
results = driver.when_complete_task(saved_tasks[0])
results = driver.when_complete_task(saved_tasks[1])
# Act
results = driver.when_complete_process_if_needed(results.updated_process)
# Assert
driver.then_count_of_tasks_in_status(
results.updated_process,
"completed",
2,
)
driver.then_progress_is(results.updated_process.progress, 1)
driver.then_event_created_for(
results.event_notifications[0], EVENT_PROCESS_COMPLETED
)
@mock_dynamodb2
def test_complete_proceess__given_some_tasks_open__then_progress_not_1_and_process_complete_event_not_sent(
self,
):
# Arrange
driver = FanManagerTestDriver()
tasks = driver.create_task_array(
[
{"task_name": "task 01", "message": {"action": "go"}},
{"task_name": "task 02", "message": {"action": "save"}},
]
)
results = driver.when_start_process("fan manager test", tasks)
process = results.updated_process
saved_tasks = results.updated_tasks
results = driver.when_fan_out(saved_tasks)
results = driver.when_complete_task(saved_tasks[0])
results = driver.when_complete_task(saved_tasks[1])
# Act
results = driver.when_complete_process_if_needed(process)
# Assert
driver.then_count_of_tasks_in_status(
results.updated_process,
"completed",
2,
)
driver.then_progress_is(results.updated_process.progress, 1)
driver.then_event_created_for(
results.event_notifications[0], EVENT_PROCESS_COMPLETED
)
| [
"stephen.v.ziegler@gmail.com"
] | stephen.v.ziegler@gmail.com |
79d4842cf8368b7277beb304cd01456d2b9ee061 | f6c6e0ebc18b7b1a28c23367f62c960e86194c88 | /fileIO/hdf5/nexus_tools.py | 421d6569c4932aaca6baeea4dd1b1d2bf5521445 | [] | no_license | TheGrim1/python_work | 9316d6fbb71a4be9bd901f104e939949dfd91174 | 5b34277aed4c06b62276644160e0aa97a4260233 | refs/heads/master | 2021-01-11T13:54:54.366575 | 2019-03-12T12:38:39 | 2019-03-12T12:38:39 | 94,876,671 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,053 | py | import sys, os
import h5py
import numpy as np
from nexusformat.nexus import *
import datetime
# local import for testing:
sys.path.append(os.path.abspath("/data/id13/inhouse2/AJ/skript"))
from fileIO.hdf5.open_h5 import open_h5
import time
def createlink(f,dest,linkdir,linkname,soft=True):
"""
Args:
f (h5py.File|str): hdf5 file
dest (str): destination for the link
linkdir (str): link directory
linkname (str): link name
adapted from spectrocrunch (woutdenolf)
"""
bclose = False
if isinstance(f,h5py.File) or isinstance(f,h5py.Group):
hdf5FileObject = f
elif isinstance(f,str):
hdf5FileObject = h5py.File(f)
bclose = True
else:
raise ValueError("The hdf5 file must be either a string or an hdf5 file object.")
if dest in hdf5FileObject:
if soft:
# Remove the link if it exists
if linkdir in hdf5FileObject:
if linkname in hdf5FileObject[linkdir]:
hdf5FileObject[linkdir].id.unlink(linkname)
# Create the link
hdf5FileObject[linkdir][linkname] = h5py.SoftLink(dest)
else:
b = True
if linkdir in hdf5FileObject:
if linkname in hdf5FileObject[linkdir]:
hdf5FileObject[linkdir][linkname].path = f[dest]
b = False
if b:
hdf5FileObject[linkdir][linkname] = f[dest]
if bclose:
hdf5FileObject.close()
def timestamp(nx_f = None):
'''
timestamps the passed nexus file, returns 1 if succesfull, -1 else
'''
if type(nx_f) == h5py._hl.files.File or type(nx_f) == NXroot:
timestamp = "T".join(str(datetime.datetime.now()).split())
if 'file_time' in list(nx_f.attrs.keys()):
nx_f.attrs['file_update_time'] = timestamp
else:
nx_f.attrs['file_time'] = timestamp
test = 1
else:
test = -1
return test
def find_dataset_path(nx_g, dataset_name):
'''
returns the path to dataset_name within the groups in nx_g.
kind of like to find --maxdepth=1
'''
dataset_path = 'did not find a valid path'
for key in list(nx_g.keys()):
for dataset in nx_g[key]:
if dataset.name == dataset_name:
data_set.path = key + '/' + dataset_name
return dataset_path
def id13_default_units(name):
angles = ['Theta',
'Rot1',
'Rot2',
'Rot3']
piezo = ['nnp1',
'nnp2',
'nnp3']
time = ['time',
'exp']
meter = ['PixelSize1',
'PixelSize2',
'Distance',
'Poni1',
'Poni2',
'Wavelength']
if name in angles:
units = 'degrees'
elif name in meter:
units = 'm'
elif name in piezo:
units = 'um'
elif name in time:
units = 's'
else:
units = 'mm'
return units
| [
"opid13@nanofocus.esrf.fr"
] | opid13@nanofocus.esrf.fr |
1f01193d2bfd5c64c2531378677587ae85d761bf | 8aa1b94626402c0c614128d6061edb771dad05cf | /qt/qt03/qt04_main.py | d39ff9264bb6cdfeb174a973c92f7e9ebe2cd09e | [] | no_license | netfj/Project_Stu02 | 31e76c1b656ee74c54cae2185821dec7ccf50401 | afc1b26b7c586fd6979ab574c7d357a6b9ef4d29 | refs/heads/master | 2023-03-13T22:24:40.364167 | 2021-02-23T09:53:31 | 2021-02-23T09:53:31 | 341,506,093 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 896 | py | #coding:utf-8
"""
@info:
@author:NetFj @software:PyCharm @file:qt04_main.py @time:2018/11/19.19:10
"""
import sys
from PyQt5 import QtWidgets,QtCore,QtGui
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from qt04_lineEdit import Ui_Form
class myForm(QWidget,Ui_Form):
def __init__(self):
super().__init__()
self.setupUi(self)
action = QAction(self)
action.setIcon(QIcon('Warning.ico'))
action.triggered.connect(self.Check)
self.lineEdit.addAction(action, QLineEdit.TrailingPosition)
regx = QRegExp("^[a-zA-Z][0-9A-Za-z]{14}$")
validator = QRegExpValidator(regx, self.lineEdit_4)
self.lineEdit_4.setValidator(validator)
def Check(self):
print('你输入了:',self.lineEdit.text())
app = QApplication(sys.argv)
myshow = myForm()
myshow.show()
sys.exit(app.exec_()) | [
"netfj@sina.com"
] | netfj@sina.com |
e0254a9e2cd5d791b1204e4fbb4bb1b67eaa4c7d | f88f900c0384f6da82eeb749371ad44115527700 | /course-book/09-matching/0908-brisk.py | cd57fd239de27d3da4c9d0d75da5fa6d0aa2061a | [] | no_license | aaron-kr/learning-opencv | eff382e8f0c822400f765451d57b192a63cd1b74 | 158239f0140569aec519fc1fbf255c54ef2567d2 | refs/heads/main | 2023-08-21T11:02:49.775425 | 2021-10-27T00:04:01 | 2021-10-27T00:04:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,671 | py | # 0908.py
# BRISK = Binary Robust Invariant Scalable Keypoints
import cv2
import numpy as np
#1
def distance(f1,f2):
x1,y1 = f1.pt
x2,y2 = f2.pt
return np.sqrt((x2-x1) ** 2 + (y2-y1) ** 2)
def filteringByDistance(kp, distE = 0.5):
size = len(kp)
mask = np.arange(1, size + 1).astype(np.bool8) # all True
for i, f1 in enumerate(kp):
if not mask[i]:
continue
else: # True
for j, f2 in enumerate(kp):
if i == j:
continue
if distance(f1,f2) < distE:
mask[j] = False
np_kp = np.array(kp)
return list(np_kp[mask])
#2
src = cv2.imread('../../img/chessboard.jpg')
gray = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (5,5), 0.0)
briskF = cv2.BRISK_create()
kp = briskF.detect(gray)
print('len(kp) = ', len(kp))
dst = cv2.drawKeypoints(gray, kp, None, color = (0,0,255))
cv2.imshow('dst', dst)
#3
kp = sorted(kp, key = lambda f: f.response, reverse = True)
filtered_kp = list(filter(lambda f: f.response > 50, kp))
filtered_kp = filteringByDistance(kp, 10)
print('len(filtered_kp) = ', len(filtered_kp))
kp, des = briskF.compute(gray, filtered_kp)
print('des.shape = ', des.shape)
print('des = ', des)
#4
dst2 = cv2.drawKeypoints(gray, filtered_kp, None, color = (0,0,255))
for f in filtered_kp:
x,y = f.pt
size = f.size
rect = ((x,y), (size,size), f.angle)
box = cv2.boxPoints(rect).astype(np.int32)
cv2.polylines(dst2, [box], True, (0,255,0), 2)
cv2.circle(dst2, (round(x), round(y)), round(f.size / 2), (255,0,0), 2)
cv2.imshow('dst2', dst2)
cv2.waitKey(0)
cv2.destroyAllWindows() | [
"jekkilekki@gmail.com"
] | jekkilekki@gmail.com |
d9dd5624a3b479ff9480ceb782b0bcd58623699f | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5753053697277952_1/Python/Ifni/A.py | aec0465a73baa41598ab32da433f7e0c9aada43c | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 889 | py | import math
import numpy as numpy
inp=open('A-large.in', 'r')
out=open("A-large.out", 'w')
T=int(inp.readline())
for index in range(T):
N=int(inp.readline())
temp = [int(x) for x in (inp.readline()).split()]
senate=[];
total=0;
for j in range(N):
total=total+temp[j]
senate.append((temp[j],j))
senate.sort(key=lambda tup: tup[0])
result=""
while(total!=0):
temp=senate.pop()
result=result+chr(ord('A')+temp[1]);
temp=(temp[0]-1, temp[1]); total=total-1;
if senate[0][0]>total/2:
result=result+chr(ord('A')+senate[0][1])+" ";
if senate[0][0]==1:
senate.pop(); total=total-1; N=N-1;
else:
senate[0]=(senate[0][0]-1, senate[0][1]); total=total-1;
else:
result=result+" ";
if temp[0]!=0:
senate.append(temp); senate.sort(key=lambda tup: tup[0])
else:
N=N-1
out.write('Case #{}: {}\n'.format(index+1, result))
inp.close()
out.close() | [
"alexandra1.back@gmail.com"
] | alexandra1.back@gmail.com |
bb6314173e62dd8b39229874f63a97bf93583b80 | 8a780cb47eac9da046bdb5d6917f97a086887603 | /problems/sum_of_square_numbers/solution.py | 6f6f83cfeba7a9d7f3b7efee88d11a9bfb0df65a | [] | no_license | dengl11/Leetcode | d16315bc98842922569a5526d71b7fd0609ee9fb | 43a5e436b6ec8950c6952554329ae0314430afea | refs/heads/master | 2022-12-20T03:15:30.993739 | 2020-09-05T01:04:08 | 2020-09-05T01:04:08 | 279,178,665 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 431 | py | from math import sqrt
class Solution(object):
def judgeSquareSum(self, c):
"""
:type c: int
:rtype: bool
"""
if c < 0: return False
i = 0
j = int(sqrt(c))
while i <= j:
curr = i*i + j *j
if curr == c: return True
if curr < c:
i += 1
else:
j -= 1
return False
| [
"ldeng1314@gmail.com"
] | ldeng1314@gmail.com |
bc74bc20b8ccdcdc3a3d27ba681bafa08fb5d157 | 51aeee42378b72c570ed61264e25075c20884b44 | /platzigram_api/users/permissions/users.py | 9dac3c49607e04a6306b2c3d568565be22e1acfc | [
"MIT"
] | permissive | ChekeGT/Platzigram-Api | ee3ac0d04194e70d84b27d39585a2c2341550025 | 0ab05f1bb325b02563aead2e885194e274013150 | refs/heads/master | 2022-06-03T03:28:16.312898 | 2019-07-16T21:07:36 | 2019-07-16T21:07:36 | 182,161,789 | 0 | 0 | MIT | 2022-05-25T01:41:31 | 2019-04-18T21:48:00 | Python | UTF-8 | Python | false | false | 522 | py | """User model related permissions."""
# Django REST Framework
from rest_framework.permissions import BasePermission
class IsAccountOwner(BasePermission):
"""Permission that allows
a user to access a view only if the requesting
user matches with the user object
"""
message = 'You are not the account owner.'
def has_object_permission(self, request, view, user):
"""Returns if the requesting user matches with the user being used by the view."""
return request.user == user
| [
"chekelosos@gmail.com"
] | chekelosos@gmail.com |
4b25dd4310919c082d27c8ddf76559522a033981 | d400c32010a414a2f536c5c0a3490c8b8e2e9d5a | /modules/m16e/i18n/pt_pt/widgets/zip_validator.py | 99374e1459daa4da8474d3b5dc41aa96697bb3c9 | [
"LicenseRef-scancode-public-domain"
] | permissive | CarlosCorreiaM16e/chirico_cms | 3e521eae8f38b732497a2b808950c6a534e69d4f | 73897cbddb230630e13f22333b9094d0a047acb3 | refs/heads/master | 2020-12-30T07:59:04.100330 | 2020-05-02T12:26:58 | 2020-05-02T12:26:58 | 238,917,321 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,078 | py | # -*- coding: utf-8 -*-
import sys
import traceback
from gluon import current
import m16e.term as term
class is_valid_zip( object ):
def __init__( self,
allowBlank=False,
error_message=None ):
self.allowBlank = allowBlank
self.error_message = error_message
def __call__( self, value ):
return self.validate( value )
def formatter( self, value ):
return value
def get_county_zip_code( self, value ):
db = current.db
T = current.T
from m16e.db import db_tables
czc_model = db_tables.get_table_model( 'county_zip_code', db=db )
parts = value.split( '-' )
if len( parts ) < 2:
return None
p1 = parts[0].strip()
p2 = parts[1].strip()
if len( p1 ) == 4 and len( p2 ) == 3:
q_sql = (db.county_zip_code.zip_part_1 == p1)
q_sql &= (db.county_zip_code.zip_part_2 == p2)
czc = czc_model.select( q_sql ).first()
return czc
return None
def validate( self, value ):
db = current.db
T = current.T
try:
# term.printLog( 'zip: %s' % ( repr( value ) ) )
valid = False
blank = not value
if self.allowBlank and blank:
return ( value, None )
if value:
czc = self.get_county_zip_code( value )
if czc:
valid = True
# term.printLog( 'valid: %s' % ( repr( valid ) ) )
if valid:
msg = None
else:
msg = self.error_message
if not msg:
msg = T( 'must be a valid zip code (ex.: 1000-001)' )
# term.printDebug( 'msg: %s' % repr( msg ) )
return ( value, msg )
except Exception, err:
t, v, tb = sys.exc_info()
traceback.print_exception( t, v, tb )
term.printLog( 'error: %s' % ( str( err ) ) )
return ( value, self.error_message )
| [
"carlos@memoriapersistente.pt"
] | carlos@memoriapersistente.pt |
c0a02b8a9cfd9ac990ca398a1120393672a231dc | f60b0c051d8ba5088dc4246679b870f577646bb0 | /59 Wed, 21 Mar 2012 23:58:41.py | 3636882413236459b26b8f671227018953648249 | [] | no_license | joopeed/lp1 | bbd11fe7749356828a16fc45703e010db5d35464 | 117bf769a048ec1dff53f779b26c9e7adec052ba | refs/heads/master | 2021-01-02T22:50:08.600553 | 2014-04-03T21:15:40 | 2014-04-03T21:15:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 323 | py | # Date: Wed, 21 Mar 2012 23:58:41 +0000
# Question 59
# JOAO PEDRO FERREIRA 21211940
meses = ["jan","fev","mar","abr",=
"mai","jun","jul","ago","set&q=
uot;,"out","nov","dez"]
n = map(float,raw_input().split())
m = map(float,raw_input().split())
for i in range(12):
if n[i]-m[i]<0:
print meses[i], n[i]-m[i]
| [
"joopeeds@gmail.com"
] | joopeeds@gmail.com |
32fe7a8ea69ff3766fddde1db073e44f542f5944 | 9e5f89954fae8ac705d3e721d82b7b72d9fbcbaa | /4. ARREGLO/Diez numeros ordenados de forma ascendente.py | abd54618f407fda3a01e54660c8bb74a5a277bfa | [] | no_license | Diego-18/python-algorithmic-exercises | dda950d0fcabd25244ce3ecfc1296d06a7701f5f | 583907b82c3549a4bff7718d23aa3e0d7be2e4a3 | refs/heads/main | 2023-08-12T02:28:49.425894 | 2021-09-26T05:14:07 | 2021-09-26T05:14:07 | 410,439,661 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 842 | py | #!/usr/bin/env python
#-*- coding: UTF8 -*-
################################
# Elaborado por: DIEGO CHAVEZ #
################################
#Realice un algoritmo que cargue 10 numeros a una lista, y ordenelos de menor a mayor
def fILeerI(psTexto):
lfVar=int(raw_input(psTexto))
return lfVar
def fsLeerS(psTexto):
lsVar=raw_input(psTexto)
return lsVar
def faBurbuja (paArreglo): #Ordena de manera ascendente la lista
liTamano=len(paArreglo)
for liX in range (0,liTamano):
for liJ in range (liX, liTamano-1):
if (paArreglo[liX]>paArreglo[liJ+1]):
liAux=paArreglo[liX]
paArreglo[liX]=paArreglo[liJ+1]
paArreglo[liJ+1]=liAux
return paArreglo
laLista=[]
for liN in range (1,11):
liNum=fILeerI("Introduzca el número: ")
laLista.append(liNum)
LaLista=faBurbuja(laLista)
print LaLista
#UPTP S1-T1 | [
"ingdiegochavez18@gmail.com"
] | ingdiegochavez18@gmail.com |
10be9709f131c693640209830d55dfdd6e70b369 | 225469cfb5507fba6770b91e2195b65e3f5ec066 | /bin/django-admin | 180e615386c07b80c6f55dc0a44b19eeaa940414 | [] | no_license | Michkail/portrapit-store | 9bc12a063f5c2810bdc85fb5daf189f14300a849 | c55e5d9879369fe195a1d22a3956ac17d9a71904 | refs/heads/main | 2023-02-15T06:17:56.208851 | 2021-01-15T15:57:11 | 2021-01-15T15:57:11 | 324,973,884 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 298 | #!/home/michkail/Documents/portrapit-store/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from django.core.management import execute_from_command_line
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(execute_from_command_line())
| [
"travis@travis-ci.org"
] | travis@travis-ci.org | |
e707ea93d81b8d0c6720ffcee498fad5390d9260 | f2c7b1befad2e01129a899426d3006d0298aedd6 | /apps/transactions/api/serializer.py | 2db12993f21f85da3c2e7aa29738f439799ed847 | [
"MIT"
] | permissive | AlcindoSchleder/flask_validator | 9821a71660699976ebf161f7e16a809fc08b58c4 | 5d4b73b755ee434daab400a4d69d05237965334e | refs/heads/main | 2023-02-26T23:27:16.246461 | 2021-02-05T02:32:28 | 2021-02-05T02:32:28 | 335,714,960 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 743 | py | from datetime import datetime
from rules.configure_app import ma
from apps.transactions.model import Transactions
from flask_marshmallow.fields import fields
class TransactionsSerializer(ma.SQLAlchemyAutoSchema):
id = fields.Str()
customer_id = fields.Str(required=True)
doc_id = fields.Str(required=True)
score = fields.Float(required=True, default=0.0)
income = fields.Float(required=True, default=0.0)
requested_value = fields.Float(required=True, default=0.0)
installments = fields.Integer(required=True, default=0)
status = fields.Integer(required=True, default=400)
time = fields.DateTime(requested=True, default=datetime.now)
class Meta:
model = Transactions
include_fk = True
| [
"alcindoschleder@gmail.com"
] | alcindoschleder@gmail.com |
5b80e84195b05148e414152bb5e7ec6b8abe981a | d554b1aa8b70fddf81da8988b4aaa43788fede88 | /5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/226/users/4163/codes/1593_1803.py | 0fe4f037b835ec2220a49b366c342d4819a6b0fb | [] | no_license | JosephLevinthal/Research-projects | a3bc3ca3b09faad16f5cce5949a2279cf14742ba | 60d5fd6eb864a5181f4321e7a992812f3c2139f9 | refs/heads/master | 2022-07-31T06:43:02.686109 | 2020-05-23T00:24:26 | 2020-05-23T00:24:26 | 266,199,309 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 155 | py | x = int(input("numero do cartao: "))
a = x//1000
b = x//100%10
c = x//10%10
d = x%10
m = a * 5
n = b*4
o = c*3
p = d*2
s= m+n+o+p
v = (s%11)
print(v)
| [
"jvlo@icomp.ufam.edu.br"
] | jvlo@icomp.ufam.edu.br |
abc7a937b19b83288cf794e0c35454d0c0fac86d | 45b4ff764fafaa08140bced64fc1f0fb0ae36087 | /icecreamratings_projcet/icecreamratings_projcet/users/urls.py | 3db76cdfb4c65666078e3346486be9f1d24bda6e | [
"MIT"
] | permissive | wlgud0402/studying-django-with-twoscoops | c35297cc2c169170bff43c62a6325139a269a7d2 | 78a69ad311aefc59e271c86824196c130b92bb0a | refs/heads/master | 2023-01-08T23:43:33.177538 | 2020-10-27T11:16:05 | 2020-10-27T11:16:05 | 307,382,998 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 381 | py | from django.urls import path
from icecreamratings_projcet.users.views import (
user_detail_view,
user_redirect_view,
user_update_view,
)
app_name = "users"
urlpatterns = [
path("~redirect/", view=user_redirect_view, name="redirect"),
path("~update/", view=user_update_view, name="update"),
path("<str:username>/", view=user_detail_view, name="detail"),
]
| [
"wlgudrlgus@naver.com"
] | wlgudrlgus@naver.com |
46061121dc8d4f34e77a91f018bcfd036c3bfeb4 | 2f963d7989749037a3ec27aaa39b31416b33cbb2 | /ib_users/views/get_user_details_from_usernames/tests/__init__.py | 9ac1df71a9a7863cc83f6fffe550618bd1f21516 | [] | no_license | migsantos121/phd3-backend | 3cd014908856c995de3c4473d82059bc9c1b5794 | 9d1d2bd6f55dc89719ce5a1916c5db3d573aec1e | refs/heads/master | 2022-12-12T17:25:59.334509 | 2020-03-09T09:24:08 | 2020-03-09T09:24:08 | 245,991,086 | 0 | 0 | null | 2022-06-28T14:45:50 | 2020-03-09T09:17:18 | Python | UTF-8 | Python | false | false | 304 | py | # Endpoint Configuration
APP_NAME = "ib_users"
OPERATION_NAME = "get_user_details_from_usernames"
REQUEST_METHOD = "post"
URL_SUFFIX = "users/usernames/"
from .test_case_01 import TestCase01GetUserDetailsFromUsernamesAPITestCase
__all__ = [
"TestCase01GetUserDetailsFromUsernamesAPITestCase"
]
| [
"migsantos121@outlook.com"
] | migsantos121@outlook.com |
8f3a268d0ec0eaaf90cc0c89f0fe0766ee974c5a | d05a59feee839a4af352b7ed2fd6cf10a288a3cb | /xlsxwriter/test/utility/test_xl_col_to_name.py | 2825d4b5c9fe0888c7aa52ba32d837d01dc4e18d | [
"BSD-2-Clause-Views"
] | permissive | elessarelfstone/XlsxWriter | 0d958afd593643f990373bd4d8a32bafc0966534 | bb7b7881c7a93c89d6eaac25f12dda08d58d3046 | refs/heads/master | 2020-09-24T06:17:20.840848 | 2019-11-24T23:43:01 | 2019-11-24T23:43:01 | 225,685,272 | 1 | 0 | NOASSERTION | 2019-12-03T18:09:06 | 2019-12-03T18:09:05 | null | UTF-8 | Python | false | false | 1,515 | py | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2019, John McNamara, jmcnamara@cpan.org
#
import unittest
import warnings
from ...utility import xl_col_to_name
class TestUtility(unittest.TestCase):
"""
Test xl_col_to_name() utility function.
"""
def test_xl_col_to_name(self):
"""Test xl_col_to_name()"""
tests = [
# col, col string
(0, 'A'),
(1, 'B'),
(2, 'C'),
(9, 'J'),
(24, 'Y'),
(25, 'Z'),
(26, 'AA'),
(254, 'IU'),
(255, 'IV'),
(256, 'IW'),
(16383, 'XFD'),
(16384, 'XFE'),
(-1, None),
]
for col, string in tests:
exp = string
got = xl_col_to_name(col)
# Ignore the warnings for negative values.
warnings.filterwarnings('ignore')
self.assertEqual(got, exp)
def test_xl_col_to_name_abs(self):
"""Test xl_col_to_name() with absolute references"""
tests = [
# col, col_abs, col string
(0, True, '$A'),
(-1, True, None),
]
for col, col_abs, string in tests:
exp = string
got = xl_col_to_name(col, col_abs)
# Ignore the warnings for negative values.
warnings.filterwarnings('ignore')
self.assertEqual(got, exp)
| [
"jmcnamara@cpan.org"
] | jmcnamara@cpan.org |
acfe1e5fdabe708e131174f39c9bc40f741babb0 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_maltreating.py | 6f0f0ae372087c9a1569c183fc0592e2807f290b | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 256 | py |
from xai.brain.wordbase.verbs._maltreat import _MALTREAT
#calss header
class _MALTREATING(_MALTREAT, ):
def __init__(self,):
_MALTREAT.__init__(self)
self.name = "MALTREATING"
self.specie = 'verbs'
self.basic = "maltreat"
self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
26b7a7b1e9f6bb8b0d7ab6d73e12dc2bffdcb7e7 | d3efc82dfa61fb82e47c82d52c838b38b076084c | /run/bulk_order/insert_canceled_SjSzNHG-2.py | c25c05a95105d64ba05e733196db6e86eccf3877 | [] | no_license | nantongzyg/xtp_test | 58ce9f328f62a3ea5904e6ed907a169ef2df9258 | ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f | refs/heads/master | 2022-11-30T08:57:45.345460 | 2020-07-30T01:43:30 | 2020-07-30T01:43:30 | 280,388,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,214 | py | #!/usr/bin/python
# -*- encoding: utf-8 -*-
import sys
import json
import random
import time
sys.path.append("/home/yhl2/workspace/xtp_test/xtp/api")
from xtp_test_case import *
sys.path.append("/home/yhl2/workspace/xtp_test/mysql")
from QueryStkpriceDBNHG import QueryStkSz
from QueryStkpriceDBNHG import QueryStkSh
# 总下单数
order_count = 50
# 下单计数
count = 0
# 存放撤单用的原xtpid
xtpids = []
stk_sz = QueryStkSz()
stk_info_sz = [ele for ele in stk_sz][0:10]
stk_sh = QueryStkSh()
stk_info_sh = [ele for ele in stk_sh][0:10]
class Order(xtp_test_case):
def test_order(self):
# insert_all_traded()
insert_all_canceled()
#insert_all_random()
# 全成
def insert_all_traded():
global count
while count < order_count:
for i in xrange(len(stk_info_sz)):
all_traded_common(stk_info_sz, i, 2)
for i in xrange(len(stk_info_sh)):
all_traded_common(stk_info_sh, i, 1)
def all_traded_common(stk_list, index, market):
global count
market = Api.const.XTP_MARKET_TYPE['XTP_MKT_SH_A'] if market == 1 else \
Api.const.XTP_MARKET_TYPE['XTP_MKT_SZ_A'],
if count == order_count:
return
count += 1
wt_reqs = {
'business_type': Api.const.XTP_BUSINESS_TYPE[
'XTP_BUSINESS_TYPE_CASH'],
'order_client_id': 2,
'market': Api.const.XTP_MARKET_TYPE['XTP_MKT_SZ_A'],
'ticker': stk_list[index][0],
'side': Api.const.XTP_SIDE_TYPE['XTP_SIDE_BUY'],
'price_type': Api.const.XTP_PRICE_TYPE['XTP_PRICE_LIMIT'],
'price': stk_list[index][1] / 10000.0,
'quantity': 200
}
Api.trade.InsertOrder(wt_reqs)
# 撤单
def insert_all_canceled():
count = 0
while count < order_count:
for i in xrange(len(stk_info_sz)):
print 'sz------'
print stk_info_sz, i
all_canceled_common(stk_info_sz, i, 2)
for i in xrange(len(stk_info_sh)):
print 'sh------'
all_canceled_common(stk_info_sh, i, 1)
count += 1
for xtpid in xtpids:
print xtpid
Api.trade.CancelOrder(xtpid)
def all_canceled_common(stk_list, index, market):
global xtpids
market = Api.const.XTP_MARKET_TYPE['XTP_MKT_SH_A'] if market == 1 else \
Api.const.XTP_MARKET_TYPE['XTP_MKT_SZ_A'],
wt_reqs = {
'business_type': Api.const.XTP_BUSINESS_TYPE[
'XTP_BUSINESS_TYPE_CASH'],
'order_client_id': 1,
'market': Api.const.XTP_MARKET_TYPE['XTP_MKT_SZ_A'],
'ticker': stk_list[index][0],
'side': Api.const.XTP_SIDE_TYPE['XTP_SIDE_BUY'],
'price_type': Api.const.XTP_PRICE_TYPE['XTP_PRICE_BEST_OR_CANCEL'],
'price': stk_list[index][1] / 10000.0,
'quantity': 200
}
xtpid = Api.trade.InsertOrder(wt_reqs)
xtpids.append(xtpid)
# 各种成交类型轮旬下单
def insert_all_random():
trade_typ_sz = [1, 2, 2, 3, 5] # 1-订单确认 2-全成 3-部成 5-废单
trade_typ_sh = [1, 2, 2, 3, 4] # 1-订单确认 2-全成 3-部成 4-废单
while count < order_count:
for i in xrange(len(stk_info_sz)):
all_random_common(stk_info_sz, i, 2, trade_typ_sz)
for i in xrange(len(stk_info_sh)):
all_random_common(stk_info_sh, i, 1, trade_typ_sh)
def all_random_common(stk_list, index, market, trade_type):
global count
market = Api.const.XTP_MARKET_TYPE['XTP_MKT_SH_A'] if market == 1 else \
Api.const.XTP_MARKET_TYPE['XTP_MKT_SZ_A'],
if count == order_count:
return
count += 1
wt_reqs = {
'business_type': Api.const.XTP_BUSINESS_TYPE[
'XTP_BUSINESS_TYPE_CASH'],
'order_client_id': trade_type[index % len(trade_type)],
'market': Api.const.XTP_MARKET_TYPE['XTP_MKT_SZ_A'],
'ticker': stk_list[index][0],
'side': Api.const.XTP_SIDE_TYPE['XTP_SIDE_BUY'],
'price_type': Api.const.XTP_PRICE_TYPE['XTP_PRICE_BEST_OR_CANCEL'],
'price': stk_list[index][1] / 10000.0,
'quantity': 200
}
Api.trade.InsertOrder(wt_reqs)
# time.sleep(0.01)
if __name__ == '__main__':
unittest.main()
| [
"418033945@qq.com"
] | 418033945@qq.com |
572247a7fae95f6c0a8df09df833154268dba90e | 5995eb33f64467273db3b63beaf70e648a053fd0 | /Bai6.py | b60a67dfbd34d8e2fb2b5de0a546a87a43de4fc7 | [] | no_license | lbbquoc/Python-Lab | c0068a1c81590850fb2893c01685ee6ea07db9c5 | 8b227ebc1e38c0080abef9284f07717d338e3015 | refs/heads/master | 2022-04-17T11:25:19.011062 | 2020-04-21T02:20:16 | 2020-04-21T02:20:16 | 257,454,800 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,596 | py | import math
import random
class Point2D:
def __init__(self,x,y):
self.x = x
self.y = y
def CalDistance(x1,y1,x2,y2):
diemA = Point2D(x1,y1)
diemB = Point2D(x2,y2)
return round(math.sqrt(pow(diemB.x - diemA.x,2) + pow(diemB.y - diemA.y, 2)),2)
# round dùng để làm tròn cú pháp : round(<số>, <làm tròn đến chữ số hàng ?> )
def createTriangle(x1,y1,x2,y2,x3,y3):
AB = CalDistance(x1,y1,x2,y2)
AC = CalDistance(x1,y1,x3,y3)
BC = CalDistance(x2,y2,x3,y3)
print(AB,AC,BC)
if (AB + BC > AC or AB + AC > BC or BC + AC >AB ):
return True
else:
return False
def calCircumference(a,b,c):
return a+b+c
def calArea(a,b,c):
p = calCircumference(a,b,c) / 2
return math.sqrt(p*(p-a)*(p-b)*(p-c))
def whatIsTriangle(x1,y1,x2,y2,x3,y3):
if createTriangle(x1,y1,x2,y2,x3,y3):
AB = CalDistance(x1,y1,x2,y2)
AC = CalDistance(x1,y1,x3,y3)
BC = CalDistance(x2,y2,x3,y3)
if (AB == AC == BC):
print("It is equilateral triangle\n")
print("Circumference is : ",calCircumference(AB,BC,AC))
print("Area is:", calArea(AB,BC,AC))
return
if ((AB == AC and AB != BC) or (AB == BC and AB != AC) or (BC == AC and BC != AB)):
print("It is isosceles triangle\n")
print("Circumference is : ",calCircumference(AB,BC,AC))
print("Area is:", calArea(AB,BC,AC))
return
if (AB**2 + BC**2 == AC**2) or (AB**2 + AC**2 == BC**2) or (BC**2 + AC**2 == AB**2):
print("It is right triangle\n")
print("Circumference is : ",calCircumference(AB,BC,AC))
print("Area is:", calArea(AB,BC,AC))
return
else:
print("It's normal triangle !")
print("Circumference is : ",calCircumference(AB,BC,AC))
print("Area is:", calArea(AB,BC,AC))
return
else:
return
def GiaiPhuongTrinh2An(x1,y1,x2,y2):
# phương trinh y = Ax + B
list = []
D = x1*1 - 1*x2
Dx = y1*1 - 1*y2
Dy = x1*y2 - y1*x2
print(D,Dx,Dy)
print()
if (D == 0 and Dx == 0):
print("He phuong trinh vo so nghiem !")
return
if (D == 0 and Dx != 0):
print("He phuong trinh vo nghiem !")
return
if (D != 0):
print("Phuong trinh co nghiem: ")
n1 = Dx / D
n2 = Dy / D
print(n1,n2)
list.insert(0,n1)
list.insert(0,n2)
list.reverse()
return list
#list = GiaiPhuongTrinh2An(1,2,4,8)
def vtTuongDoiCua2DT(x1,y1,x2,y2,x3,y3,x4,y4):
listAB = GiaiPhuongTrinh2An(x1,y1,x2,y2)
listCD = GiaiPhuongTrinh2An(x3,y3,x4,y4)
if((listCD[0]/listAB[0] == -1/-1) and (listCD[1]/listAB[1] == -1/-1) ):
print("AB trung CD")
if (listCD[0]/listAB[0] == -1/-1) and (listCD[1]/listAB[1] != -1/-1) :
print("AB // CD")
if listCD[0]/listAB[0] != -1/-1 :
print("AB cat CD")
# lý do b2/b1 luôn bằng -1/-1 là do : mình dùng phương trình y = ax +b (mình chuyển vế y về bên tay phải sẽ có được b luôn bằng -1)
# x1 = round(random.randint(1,100),0)
# x2 = random.randint(1,100)
# x3 = random.randint(1,100)
# y1 = random.randint(1,100)
# y2 = random.randint(1,100)
# y3 = random.randint(1,100)
# x4 = random.randint(1,100)
# y4 = random.randint(1,100)
# print(x1)
# print(createTriangle(x1,y1,x2,y2,x3,y3))
# whatIsTriangle(x1,y1,x2,y2,x3,y3)
# vtTuongDoiCua2DT(x1,y1,x2,y2,x3,y3,x4,y4)
| [
"="
] | = |
de14d2a3fe9fafb2f89c91e98c86b209c7ff70be | cbe264842df4eae3569b28ed4aae9489014ed23c | /RedBook/ch2/my_httpserver.py | 5b49fd27d00328f1582ef2d9d776d3770f7b1a2f | [
"MIT"
] | permissive | zeroam/TIL | 31e176c2f4c3e1ef72b1155353690cc2f7160f96 | 43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1 | refs/heads/master | 2021-07-23T01:43:34.135033 | 2021-07-10T06:47:17 | 2021-07-10T06:47:17 | 167,952,375 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 479 | py | from http.server import HTTPServer, BaseHTTPRequestHandler
class MyHandler(BaseHTTPRequestHandler):
def do_GET(self):
self.send_response_only(200, 'OK')
self.send_header('Content-Type', 'text/plain')
self.end_headers()
self.wfile.write(b"Hello World")
if __name__ == "__main__":
server = HTTPServer(('', 8888), MyHandler)
print("Started WebServer on port 8888...")
print("Press ^C to quit WebServer.")
server.serve_forever() | [
"imdff0803@gmail.com"
] | imdff0803@gmail.com |
d28335641d8721cac96e45ba527c31bd369024bc | c7d6fbccc4e72a2fb3ab9f5dc31bb74e6c86fc1e | /bw2regional/xtables.py | 8a814fe1a3f3211224e8ac45a81cb62d52480171 | [] | permissive | brightway-lca/brightway2-regional | 5c97f5fd1284254e53ef104c12ce2395886f439c | d11bea7555b915348aff6432de6afe9034271256 | refs/heads/main | 2023-06-09T17:27:01.732840 | 2023-04-28T13:49:29 | 2023-04-28T13:49:29 | 246,269,473 | 2 | 2 | BSD-3-Clause | 2023-05-24T14:51:24 | 2020-03-10T10:18:53 | Python | UTF-8 | Python | false | false | 1,601 | py | from .loading import Loading
from .meta import extension_tables, geocollections
from .validate import xtable_validator
class ExtensionTable(Loading):
_metadata = extension_tables
validator = xtable_validator
matrix = "xtable_matrix"
@property
def filename(self):
return super(ExtensionTable, self).filename.replace(".loading", ".xtable")
def write_to_map(self, *args, **kwargs):
raise NotImplementedError
def import_from_map(self, mask=None):
from .utils import get_pandarus_map
geocollection = extension_tables[self.name].get("geocollection")
xt_field = extension_tables[self.name].get("xt_field")
if not geocollection:
raise ValueError("No geocollection for this extension table")
if geocollections[geocollection].get('kind') == 'raster':
raise ValueError("This function is only for vectors.")
map_obj = get_pandarus_map(geocollection)
data = []
if xt_field is None:
raise ValueError("No `xt_field` field name specified")
id_field = geocollections[geocollection].get("field")
if not id_field:
raise ValueError(
"Geocollection must specify ``field`` field name for unique feature ids"
)
for feature in map_obj:
label = feature["properties"][id_field]
value = float(feature["properties"][xt_field])
if mask is not None and value == mask:
continue
data.append((value, (geocollection, label)))
self.write(data)
| [
"cmutel@gmail.com"
] | cmutel@gmail.com |
d06ab9b80ef1af8c850a34a8d5715c716e3849ee | a9f767c9abe9ef645b505ec33661b815e8021432 | /kaybee/plugins/widgets/handlers.py | 6d901d3bdb92ff21877ac86629873051028df02e | [
"Apache-2.0"
] | permissive | pauleveritt/kaybee | bcd402a1f28e3e37f42217d9550c0981a494bfe4 | a00a718aaaa23b2d12db30dfacb6b2b6ec84459c | refs/heads/master | 2022-06-18T04:58:52.286306 | 2018-08-21T13:52:10 | 2018-08-21T13:52:10 | 115,625,247 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,484 | py | import inspect
import os
from typing import List
from docutils import nodes
from docutils.readers import doctree
from sphinx.application import Sphinx
from sphinx.builders.html import StandaloneHTMLBuilder
from sphinx.environment import BuildEnvironment
from sphinx.jinja2glue import SphinxFileSystemLoader
from kaybee.app import kb
from kaybee.plugins.events import SphinxEvent
from kaybee.plugins.widgets.action import WidgetAction
from kaybee.plugins.widgets.node import widget
from kaybee.plugins.widgets.directive import WidgetDirective
@kb.event(SphinxEvent.BI, scope='widgets')
def add_widget_node(kb_app: kb, sphinx_app: Sphinx):
sphinx_app.add_node(widget)
@kb.event(SphinxEvent.EBRD, scope='widgets', system_order=40)
def initialize_widgets_container(kb_app: kb,
sphinx_app: Sphinx,
sphinx_env: BuildEnvironment,
docnames=List[str],
):
if not hasattr(sphinx_app.env, 'widgets'):
sphinx_app.env.widgets = dict()
@kb.event(SphinxEvent.EBRD, scope='widgets', system_order=50)
def register_template_directory(kb_app: kb,
sphinx_app: Sphinx,
sphinx_env: BuildEnvironment,
docnames=List[str],
):
""" Add this widget's templates dir to template paths """
template_bridge = sphinx_app.builder.templates
actions = WidgetAction.get_callbacks(kb_app)
for action in actions:
f = os.path.dirname(inspect.getfile(action))
template_bridge.loaders.append(SphinxFileSystemLoader(f))
@kb.event(SphinxEvent.EBRD, scope='widgets', system_order=60)
def register_widget_directive(kb_app: kb,
sphinx_app: Sphinx,
sphinx_env: BuildEnvironment,
docnames=List[str],
):
# Register a directive
for k, v in list(kb_app.config.widgets.items()):
sphinx_app.add_directive(k, WidgetDirective)
@kb.event(SphinxEvent.DRES, scope='widgets')
def render_widgets(kb_app: kb,
sphinx_app: Sphinx,
doctree: doctree,
fromdocname: str,
):
""" Go through docs and replace widget directive with rendering """
builder: StandaloneHTMLBuilder = sphinx_app.builder
for node in doctree.traverse(widget):
# Render the output
w = sphinx_app.env.widgets.get(node.name)
context = builder.globalcontext.copy()
# Add in certain globals
context['resources'] = sphinx_app.env.resources
context['references'] = sphinx_app.env.references
output = w.render(sphinx_app, context)
# Put the output into the node contents
listing = [nodes.raw('', output, format='html')]
node.replace_self(listing)
@kb.dumper('widgets')
def dump_settings(kb_app: kb, sphinx_env: BuildEnvironment):
# First get the kb app configuration for widgets
config = {
k: v.__module__ + '.' + v.__name__
for (k, v) in kb_app.config.widgets.items()
}
# Next, get the actual widgets in the app.widgets DB
widgets = sphinx_env.widgets
values = {k: v.__json__() for (k, v) in widgets.items()}
widgets = dict(
config=config,
values=values
)
return dict(widgets=widgets)
| [
"pauleveritt@me.com"
] | pauleveritt@me.com |
db82a4ebbfaf27c4005b0b52244b7e7b6cef93d7 | e70a17e8a37847a961f19b136f3bbe74393fa2af | /RPI/build/image_view/catkin_generated/pkg.develspace.context.pc.py | 9059bfd521c98ccd13c2659ef88db64293193e9a | [
"MIT"
] | permissive | Mondiegus/ROS-4x4-CAR-AI | 1413ead6f46a8b16005abeea3e0b215caa45f27e | 124efe39168ce96eec13d57e644f4ddb6dfe2364 | refs/heads/Master | 2023-07-14T23:56:53.519082 | 2021-03-27T17:28:45 | 2021-03-27T17:28:45 | 334,233,839 | 0 | 0 | MIT | 2021-02-02T13:00:30 | 2021-01-29T18:46:16 | Makefile | UTF-8 | Python | false | false | 512 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/pi/catkin_ws/devel/.private/image_view/include".split(';') if "/home/pi/catkin_ws/devel/.private/image_view/include" != "" else []
PROJECT_CATKIN_DEPENDS = "dynamic_reconfigure".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "image_view"
PROJECT_SPACE_DIR = "/home/pi/catkin_ws/devel/.private/image_view"
PROJECT_VERSION = "1.15.3"
| [
"Mondiegus9@gmail.com"
] | Mondiegus9@gmail.com |
4f6ae9cc7f5fb40a62904cfab554ac8996d10568 | f802c49ab73cadfab92253e2c5c7d2dd96180576 | /mcq_v2/mcq_v2/forms.py | cd294af3dc24e988ee58ed79e830f8d0afeb25d4 | [] | no_license | anushiv25/Coding-IDE | 258ed2a0f1fa3ceba97981e543ce665bc39f280e | 81cacbe350d426680a157a9eb617641eb92d3f28 | refs/heads/master | 2021-11-05T19:43:08.175946 | 2019-02-19T14:14:50 | 2019-02-19T14:14:50 | 170,346,050 | 0 | 1 | null | 2021-10-31T18:26:18 | 2019-02-12T15:54:30 | CSS | UTF-8 | Python | false | false | 2,314 | py | from django import forms
from django.contrib.auth import get_user_model
User=get_user_model()
CHOICES= [
('CS', 'Computer Science'),
('IT', 'Information Technology'),
('Civil', 'Civil'),
('Mechanical', 'Mechanical'),
('EC', 'Electronics & Communication'),
]
class signup_form(forms.Form):
name=forms.CharField(widget=forms.TextInput(attrs={"class":"form-control","id":"defaultForm-name"}))
branch=forms.CharField(widget=forms.Select(choices=CHOICES,attrs={"class":"multiple","id":"defaultForm-college"}))
#branch=forms.CharField(widget=forms.TextInput(attrs={"class":"form-control","id":"defaultForm-branch"}))
year=forms.DecimalField(max_value=3,min_value=1,widget=forms.NumberInput(attrs={"class":"form-control","id":"defaultForm-year"}))
college=forms.CharField(widget=forms.TextInput(attrs={"class":"form-control","id":"defaultForm-college"}))
email=forms.CharField(widget=forms.TextInput(attrs={"class":"form-control","id":"defaultForm-email"}))
password=forms.CharField(min_length=8,widget=forms.PasswordInput(attrs={"class":"form-control","id":"defaultForm-password"}))
confirm_password=forms.CharField(widget=forms.PasswordInput(attrs={"class":"form-control","id":"defaultForm-confirm_pass"}))
def clean_email(self):
email=self.cleaned_data.get("email")
qs=User.objects.filter(username=email)
if qs.exists():
raise forms.ValidationError("Email Taken !")
elif ("@" not in email) or (".com" not in email):
raise forms.ValidationError("Please Enter a Valid Email !")
return email
# def clean(self):
# data=self.cleaned_data
# password=self.cleaned_data.get("password")
# password1=self.cleaned_data.get("confirm_password")
# if password1 != password:
# raise forms.ValidationError("Password must Match !")
# return data
def clean_confirm_password(self):
data=self.cleaned_data
print(data)
password=self.cleaned_data.get('password')
password1=self.cleaned_data.get('confirm_password')
if password1!=password:
raise forms.ValidationError("Password must Match !!")
return data
class login_form(forms.Form):
email=forms.CharField(widget=forms.TextInput(attrs={"class":"form-control","id":"defaultForm-email"}))
password=forms.CharField(widget=forms.PasswordInput(attrs={"class":"form-control","id":"defaultForm-pass"}))
| [
"dwevediar@gmail.com"
] | dwevediar@gmail.com |
7953dfc4ee37a3ff7e911c16aee6627b68443f3b | ff734af8ae77fd1e0af7ebee85ec7321b50cbdea | /challenges/repeated_word/repeated_word.py | 9e56bac66e9f3d2f73da71faebc510050dcaa536 | [] | no_license | LeoKuhorev/data-structures-and-algorithms | 005c0ae4a84762437b966d284fb28d3cf5a17b17 | 59d9d9ccc35ef7e475aeea820f1800db8bf42807 | refs/heads/master | 2023-01-20T12:46:03.141941 | 2022-05-16T21:44:08 | 2022-05-16T21:44:08 | 215,949,237 | 0 | 0 | null | 2023-01-07T15:28:59 | 2019-10-18T05:42:41 | JavaScript | UTF-8 | Python | false | false | 658 | py | def repeated_word(text: str) -> str:
"""Return first repeated word in the given text. If all words are unique returns None
Args:
text (str): Given text
Raises:
TypeError: If the passed in text is not a string
Returns:
str: First repeated word
"""
if not type(text) is str:
raise TypeError('text must be a string!')
text_lst = text.split(' ')
existing_words = set()
for word in text_lst:
word = ''.join(char for char in word if char. isalnum()).lower()
if word in existing_words:
return word
else:
existing_words.add(word)
return None
| [
"kuhorev.leonid@gmail.com"
] | kuhorev.leonid@gmail.com |
59c6e82f2a13acca52e2f000798ad80f03cfdc58 | b33d1d4b74d375a2050baf80cda5b8571aff7462 | /s14/day02/集合.py | 2fee61ee0195463eae45991ab61b369b0541380b | [] | no_license | sunwang33/code | e979e1b11209200fba07a99d926d76f09c83b514 | 377f3e919555bf0f02ef56c9395d57992c84fcfd | refs/heads/master | 2021-01-16T18:10:08.358744 | 2018-01-01T02:58:43 | 2018-01-01T02:58:43 | 100,045,002 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 946 | py | __author__ = "sun wang"
list_1 = [1,4,5,7,3,6,7,9]
list_1 = set(list_1)
print(list_1 , type(list_1))
list_2 = set([2,6,0,66,22,4])
print(list_1 , list_2 )
print(list_1.intersection(list_2))
print(list_1.union(list_2))
print(list_1.difference(list_2))
print(list_2.difference(list_1))
print(list_1.issubset(list_2))
print(list_1.issuperset(list_2))
list_3 = set([1,3,7])
print(list_3.issubset(list_1))
print (list_1.symmetric_difference(list_2))
name_1="分割线"
print(name_1.center(50,'-'))
list_4 = set([5,6,8])
print(list_3.isdisjoint(list_4))
#求交集
print (list_1 & list_2)
print (list_1 | list_2 )
print (list_1 - list_2 )
print (list_1 ^ list_2)
list_1.add(999)
print (list_1)
list_1.update([10,37,42])
print(list_1)
list_1.remove(42)
print(list_1)
list_1.add(42)
print(list_1)
print(len(list_1))
if 42 in list_1:
print ("42 in list_1")
print(list_1.pop())
print(list_1)
list_1.discard(888)
list_1.discard(3)
print(list_1)
| [
"330463670@qq.com"
] | 330463670@qq.com |
605c5ee633eb838f3f770819ac8122cce8f0f6d6 | 078918048099dfa2454cfac2d449ea3d77fbec55 | /1392-longest-happy-prefix.py | c6fd8935b35347c62acda8c328e994b31284a44b | [] | no_license | DmitryVlaznev/leetcode | 931784dcc4b465eebda7d22311f5bf5fa879f068 | b2a2afdfc725330545c9a2869fefc7d45ec594bc | refs/heads/master | 2023-06-10T05:42:34.992220 | 2023-06-05T09:54:10 | 2023-06-05T09:54:30 | 241,064,389 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,419 | py | # 1392. Longest Happy Prefix
# A string is called a happy prefix if is a non-empty prefix which is
# also a suffix (excluding itself).
# Given a string s. Return the longest happy prefix of s .
# Return an empty string if no such prefix exists.
# Example 1:
# Input: s = "level"
# Output: "l"
# Explanation: s contains 4 prefix excluding itself ("l", "le", "lev",
# "leve"), and suffix ("l", "el", "vel", "evel"). The largest prefix
# which is also suffix is given by "l".
# Example 2:
# Input: s = "ababab"
# Output: "abab"
# Explanation: "abab" is the largest prefix which is also suffix. They
# can overlap in the original string.
# Example 3:
# Input: s = "leetcodeleet"
# Output: "leet"
# Example 4:
# Input: s = "a"
# Output: ""
# Constraints:
# 1 <= s.length <= 10^5
# s contains only lowercase English letters.
class Solution:
def longestPrefix(self, s: str) -> str:
pf = [0] * len(s)
for i in range(1, len(s)):
j = pf[i - 1]
while j > 0 and s[i] != s[j]: j = pf[j - 1]
if s[i] == s[j]: j+=1
pf[i] = j
return s[:pf[-1]]
def log(correct, res):
if correct == res:
print("[v]", res)
else:
print(">>> INCORRECT >>>", correct, " | ", res)
t = Solution()
log("l", t.longestPrefix("level"))
log("abab", t.longestPrefix("ababab"))
log("leet", t.longestPrefix("leetcodeleet"))
log("", t.longestPrefix("a")) | [
"dmitry.vlaznev@datadvance.net"
] | dmitry.vlaznev@datadvance.net |
7cc28369b53704bdce93420c3d80c2239a0c75b0 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03732/s203879619.py | 5aea1f8b02c534b21ae12dcaf3b7a37028bead41 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,955 | py | #!/usr/bin/env python3
import sys
# import time
# import math
# import numpy as np
# import scipy.sparse.csgraph as cs # csgraph_from_dense(ndarray, null_value=inf), bellman_ford(G, return_predecessors=True), dijkstra, floyd_warshall
# import random # random, uniform, randint, randrange, shuffle, sample
# import string # ascii_lowercase, ascii_uppercase, ascii_letters, digits, hexdigits
# import re # re.compile(pattern) => ptn obj; p.search(s), p.match(s), p.finditer(s) => match obj; p.sub(after, s)
# from bisect import bisect_left, bisect_right # bisect_left(a, x, lo=0, hi=len(a)) returns i such that all(val<x for val in a[lo:i]) and all(val>-=x for val in a[i:hi]).
# from collections import deque # deque class. deque(L): dq.append(x), dq.appendleft(x), dq.pop(), dq.popleft(), dq.rotate()
# from collections import defaultdict # subclass of dict. defaultdict(facroty)
# from collections import Counter # subclass of dict. Counter(iter): c.elements(), c.most_common(n), c.subtract(iter)
# from datetime import date, datetime # date.today(), date(year,month,day) => date obj; datetime.now(), datetime(year,month,day,hour,second,microsecond) => datetime obj; subtraction => timedelta obj
# from datetime.datetime import strptime # strptime('2019/01/01 10:05:20', '%Y/%m/%d/ %H:%M:%S') returns datetime obj
# from datetime import timedelta # td.days, td.seconds, td.microseconds, td.total_seconds(). abs function is also available.
# from copy import copy, deepcopy # use deepcopy to copy multi-dimentional matrix without reference
# from functools import reduce # reduce(f, iter[, init])
# from functools import lru_cache # @lrucache ...arguments of functions should be able to be keys of dict (e.g. list is not allowed)
# from heapq import heapify, heappush, heappop # built-in list. heapify(L) changes list in-place to min-heap in O(n), heappush(heapL, x) and heappop(heapL) in O(lgn).
# from heapq import nlargest, nsmallest # nlargest(n, iter[, key]) returns k-largest-list in O(n+klgn).
# from itertools import count, cycle, repeat # count(start[,step]), cycle(iter), repeat(elm[,n])
# from itertools import groupby # [(k, list(g)) for k, g in groupby('000112')] returns [('0',['0','0','0']), ('1',['1','1']), ('2',['2'])]
# from itertools import starmap # starmap(pow, [[2,5], [3,2]]) returns [32, 9]
# from itertools import product, permutations # product(iter, repeat=n), permutations(iter[,r])
# from itertools import combinations, combinations_with_replacement
from itertools import accumulate # accumulate(iter[, f])
# from operator import itemgetter # itemgetter(1), itemgetter('key')
# from fractions import gcd # for Python 3.4 (previous contest @AtCoder)
def main():
mod = 1000000007 # 10^9+7
inf = float('inf') # sys.float_info.max = 1.79...e+308
# inf = 2 ** 64 - 1 # (for fast JIT compile in PyPy) 1.84...e+19
sys.setrecursionlimit(10**6) # 1000 -> 1000000
def input(): return sys.stdin.readline().rstrip()
def ii(): return int(input())
def mi(): return map(int, input().split())
def mi_0(): return map(lambda x: int(x)-1, input().split())
def lmi(): return list(map(int, input().split()))
def lmi_0(): return list(map(lambda x: int(x)-1, input().split()))
def li(): return list(input())
n, w = mi()
L = [lmi() for _ in range(n)]
w1 = L[0][0]
if n * (w1 + 3) <= w:
print(sum(map(lambda x: x[1], L)))
elif n * w <= 10**6:
# 普通の dp 戦略
dp = [0 for _ in range(w+1)]
for i in range(n):
weight, value = L[i]
for j in range(w, 0, -1):
if j - weight >= 0:
dp[j] = max(dp[j], dp[j - weight] + value)
print(dp[w])
else:
group_by_weight = [[] for _ in range(4)]
for weight, value in L:
group_by_weight[weight - w1].append(value)
w1_0 = sorted(group_by_weight[0], reverse=True)
w1_1 = sorted(group_by_weight[1], reverse=True)
w1_2 = sorted(group_by_weight[2], reverse=True)
w1_3 = sorted(group_by_weight[3], reverse=True)
accum_0, accum_1, accum_2, accum_3 = map(lambda x: [0] + list(accumulate(x)), [w1_0, w1_1, w1_2, w1_3])
ans = -1
for i in range(len(w1_0)+1):
for j in range(len(w1_1)+1):
for k in range(len(w1_2)+1):
for l in range(len(w1_3)+1):
if (i+j+k+l)*w1 + j + 2*k + 3*l <= w:
ans = max(ans, accum_0[i] + accum_1[j] + accum_2[k] + accum_3[l])
print(ans)
if __name__ == "__main__":
main()
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
d396a8f8b20811e04bbd7ca37975e7cbd6e02b53 | e1ef3cf26898340de4b1a6f64d3ec399a169e873 | /organizer/migrations/0003_startup_data.py | c6ca357021080b8d600e05246675cc5d4466fadb | [] | no_license | JMorris1575/djututorial | 1e6db5b2513a92adc5016c77998af759b99d80db | 307b4859ca5cc8992ec113144e174c9238449cbf | refs/heads/master | 2021-01-10T16:30:28.600985 | 2016-03-31T00:23:11 | 2016-03-31T00:23:11 | 50,468,857 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,309 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from datetime import date
from django.db import migrations
STARTUPS = [
{
"name": "Arachnobots",
"slug": "arachnobots",
"contact": "contact@arachnobots.com",
"description":
"Remote-controlled internet-enabled "
"Spider Robots.",
"founded_date": date(2014, 10, 31),
"tags": ["mobile", "augmented-reality"],
"website":
"http://frightenyourroommate.com/",
},
{
"name": "Boundless Software",
"slug": "boundless-software",
"contact": "hello@boundless.com",
"description": "The sky was the limit.",
"founded_date": date(2013, 5, 15),
"tags": ["big-data"],
"website": "http://boundless.com/",
},
{
"name": "Game Congress",
"slug": "game-congress",
"contact": "vote@gamecongress.com",
"description":
"By gamers, for gamers, of gamers.",
"founded_date": date(2012, 7, 4),
"tags": ["video-games"],
"website": "http://gamecongress.com/",
},
{
"name": "JamBon Software",
"slug": "jambon-software",
"contact": "django@jambonsw.com",
"description":
"JamBon Software is a consulting "
"company that specializes in web and "
"mobile products. They can carry out "
"full end-to-end development of new "
"products, or review and advise on "
"existing projects. They also offer "
"hands-on training in Django.",
"founded_date": date(2013, 1, 18),
"tags": ["django"],
"website": "http://jambonsw.com/",
},
{
"name": "Lightning Rod Consulting",
"slug": "lightning-rod-consulting",
"contact": "help@lightningrod.com",
"description":
"Channel the storm. "
"Trouble shoot the cloud.",
"founded_date": date(2014, 4, 1),
"tags":
["ipython", "jupyter", "big-data"],
"website": "http://lightningrod.com/",
},
{
"name": "Monkey Software",
"slug": "monkey-software",
"contact": "shakespeare@monkeysw.com",
"description":
"1000 code monkeys making software.",
"founded_date": date(2014, 12, 10),
"tags": ["video-games"],
"website": "http://monkeysw.com/",
},
{
"name": "Simple Robots",
"slug": "simple-robots",
"contact": "yoshimi@simplerobots.com",
"description":
"Your resource to understanding "
"computer, robots, and technology.",
"founded_date": date(2010, 1, 2),
"tags": ["python", "augmented-reality"],
"website": "http://simplerobots.com/",
},
{
"name": "Thingies",
"slug": "thingies",
"contact": "help@lightningrod.com",
"description":
"A marketplace for arduino, "
"raspberry pi, and other "
"homemade stuff.",
"founded_date": date(2015, 4, 7),
"tags": ["python"],
"website": "http://buythingies.com/",
},
]
def add_startup_data(apps, schema_editor):
    """Forward data migration: create Startup rows from STARTUPS and link tags."""
    # Use the historical model state supplied by the migration framework,
    # never the live models module.
    Startup = apps.get_model(
        'organizer', 'Startup')
    Tag = apps.get_model('organizer', 'Tag')
    for startup in STARTUPS:
        startup_object = Startup.objects.create(
            name=startup['name'],
            slug=startup['slug'],
            contact=startup['contact'],
            description=startup['description'],
            founded_date=startup['founded_date'],
            website=startup['website'])
        # Attach each tag by slug; assumes the Tag rows already exist
        # (created by the 0002_tag_data migration this one depends on).
        for tag_slug in startup['tags']:
            startup_object.tags.add(
                Tag.objects.get(
                    slug=tag_slug))
def remove_startup_data(apps, schema_editor):
    """Reverse data migration: delete the Startup rows seeded from STARTUPS."""
    Startup = apps.get_model(
        'organizer', 'Startup')
    for startup in STARTUPS:
        # Look rows up by slug, the unique key used when they were created.
        startup_object = Startup.objects.get(
            slug=startup['slug'])
        startup_object.delete()
class Migration(migrations.Migration):
    """Data migration: seed Startup rows via RunPython (reversible)."""
    dependencies = [
        ('organizer', '0002_tag_data'),
    ]
    operations = [
        # Providing remove_startup_data as the second argument makes
        # `migrate` able to unapply this migration cleanly.
        migrations.RunPython(
            add_startup_data,
            remove_startup_data)
    ]
| [
"FrJamesMorris@gmail.com"
] | FrJamesMorris@gmail.com |
992cea9f56e775cee4b0e905475671a7ec84941a | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/180/usersdata/276/112623/submittedfiles/matriz1.py | 827d37ca5f8c411adcdb865dc0e68dabe5c30d8d | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,244 | py | # -*- coding: utf-8 -*-
import numpy as np
def primeiralinha (matriz):
    """Return the index of the first row of ``matriz`` containing a 1 (None if absent)."""
    total_rows, total_cols = matriz.shape
    for row in range(total_rows):
        # Stop at the first row that holds at least one 1.
        if any(matriz[row, col] == 1 for col in range(total_cols)):
            return row
    return None
def ultimalinha (matriz):
    """Return the index of the last row of ``matriz`` that contains a 1.

    Returns None when the matrix has no 1 at all (the original raised
    UnboundLocalError in that case).
    """
    l = None
    for i in range (0,matriz.shape[0],1):
        for j in range (0,matriz.shape[1],1):
            if matriz[i,j]==1:
                # BUG FIX: the original stored the *column* index (l = j),
                # so the caller's bounding-box slice used a column where a
                # row index was expected.  Keep the row index instead.
                l = i
    return (l)
def primeiracoluna (matriz):
    """Return the index of the first column of ``matriz`` containing a 1 (None if absent)."""
    total_rows, total_cols = matriz.shape
    for col in range(total_cols):
        # Scan column by column; the first column holding a 1 wins.
        if any(matriz[row, col] == 1 for row in range(total_rows)):
            return col
    return None
def ultimacoluna (matriz):
    """Return the index of the last column of ``matriz`` that contains a 1.

    Returns None when the matrix has no 1 at all (the original raised
    UnboundLocalError in that case).
    """
    c = None
    for j in range (0,matriz.shape[1],1):
        for i in range (0,matriz.shape[0],1):
            if matriz[i,j]==1:
                # BUG FIX: the original stored the *row* index (c = i),
                # so the caller's bounding-box slice used a row where a
                # column index was expected.  Keep the column index instead.
                c = j
    return (c)
# Read the matrix dimensions, then fill the matrix element by element.
linhas = int (input('Digite a quantidade de linhas: '))
colunas = int (input('Digite a quantidade de colunas: '))
a = np.zeros ((linhas, colunas))
for i in range (0,linhas,1):
    for j in range (0,colunas,1):
        a[i,j] = int(input('Digite o elemento da matriz:'))
# Bounding box of the 1-entries: first/last row and column that contain a 1.
menorlinha = primeiralinha (a)
maiorlinha = ultimalinha (a)
menorcoluna = primeiracoluna (a)
maiorcoluna = ultimacoluna (a)
# Slice out the sub-matrix delimited by that bounding box and display it.
b = a[menorlinha:maiorlinha+1,menorcoluna:maiorcoluna+1]
print (b)
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
943d06d2f6e39a13dc30d6fb4ec46fdbb3e6bf10 | baaaa9f2d3049a5bd8ec266af84b00b43eab8bbf | /core/migrations/0045_image_size.py | dedac82a43cd86f1fa2394f393e51d6aaf13745b | [] | no_license | mary-lev/edu_test | 13efd9e566ad99db4e2be03a391c48be609be336 | 51a18e6be3098b488e98db41f1226cb40a9b13d1 | refs/heads/master | 2023-01-29T08:01:57.701848 | 2020-12-13T16:31:58 | 2020-12-13T16:31:58 | 302,146,712 | 1 | 0 | null | 2020-12-06T22:11:44 | 2020-10-07T20:00:10 | Python | UTF-8 | Python | false | false | 426 | py | # Generated by Django 3.1.2 on 2020-12-11 14:40
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a required ``size`` CharField to the ``Image`` model."""
    dependencies = [
        ('core', '0044_auto_20201211_1338'),
    ]
    operations = [
        migrations.AddField(
            model_name='image',
            name='size',
            # default=1 only back-fills existing rows during this migration;
            # preserve_default=False drops it from the final field definition.
            field=models.CharField(default=1, max_length=100),
            preserve_default=False,
        ),
    ]
| [
"marylevchenko@gmail.com"
] | marylevchenko@gmail.com |
a48349645033c2bb643c69ebbb80cd98c1b165f8 | cbda89443b351bb2047180dad4e300c13dc3df7f | /Crystals/Morpurgo_all_sp_Reorgs_qsplit_TholeExp_emp_a/Jobs/C8_BTBT/C8_BTBT_anion_neut_inner1_outer0/C8_BTBT_anion_neut_inner1_outer0.py | 4307ea1e54784e8a85b5358d45654ab5db863908 | [] | no_license | sheridanfew/pythonpolarisation | 080f52979f98d26360a46412a10c8e3f51ee4549 | 178e2684e9a239a8e60af5f7b1eb414ac5f31e92 | refs/heads/master | 2021-07-10T01:07:40.978790 | 2021-03-11T16:56:37 | 2021-03-11T16:56:37 | 96,101,351 | 0 | 0 | null | 2017-07-03T13:37:06 | 2017-07-03T10:54:52 | null | UTF-8 | Python | false | false | 7,466 | py | import sys
sys.path.append('../../../../../')
from BasicElements import *
from BasicElements.Register import GetRegister
from BasicElements.MoleculeFactory import ReadMoleculeType
from BasicElements.MoleculeFactory import GetMolecule
from BasicElements.Crystal import *
from Polarizability.GetDipoles import get_dipoles,split_dipoles_onto_atoms
from Polarizability import *
from Polarizability.GetEnergyFromDips import *
from Polarizability.JMatrix import JMatrix
import numpy as np
from math import *
from time import gmtime, strftime
import os
print strftime("%a, %d %b %Y %X +0000", gmtime())
# Mapping from charge state name to the point charge placed on the central molecule.
qdict={"anion": -1.0, "neut": 0.0, "cation": 1.0}
name='C8_BTBT_anion_neut_inner1_outer0'
#For crystals here, all cubic and centred at centre
insize=1
#number of TVs in each dir central mol is from edge of inner region
outsize=0
state='anion'
mols_cen=['sp_C8_BTBT_mola_neut.xyz','sp_C8_BTBT_molb_neut.xyz']
mols_sur=['sp_C8_BTBT_mola_neut.xyz','sp_C8_BTBT_molb_neut.xyz']
mols_outer=['sp_C8_BTBT_mola_neut.xyz','sp_C8_BTBT_molb_neut.xyz']
Natoms=16
#From cif:
'''
C8_BTBT
_cell_length_a 5.927(7)
_cell_length_b 7.88(1)
_cell_length_c 29.18(4)
_cell_angle_alpha 90
_cell_angle_beta 92.443(4)
_cell_angle_gamma 90
_cell_volume 1361.61
'''
#Get translation vectors:
# Cell lengths are converted from Angstrom to Bohr (divide by 0.529177...).
a=5.9277/0.5291772109217
b=7.881/0.5291772109217
c=29.184/0.5291772109217
alpha=90*(pi/180)
beta=92.4434*(pi/180)
gamma=90*(pi/180)
cif_unit_cell_volume=1361.61/(a*b*c*(0.5291772109217**3))
cell_volume=sqrt(1 - (cos(alpha)**2) - (cos(beta)**2) - (cos(gamma)**2) + (2*cos(alpha)*cos(beta)*cos(gamma)))
#Converts frac coords to carts
matrix_to_cartesian=np.matrix( [[a, b*cos(gamma), c*cos(beta)],
[0, b*sin(gamma), c*(cos(alpha) - cos(beta)*cos(gamma))/sin(gamma)],
[0, 0, c*cell_volume/sin(gamma)]])
#carts to frac
matrix_to_fractional=matrix_to_cartesian.I
#TVs, TV[0,1,2] are the three translation vectors.
TV=matrix_to_cartesian.T
cut=8.0
totsize=insize+outsize
#number of TVs in each dir nearest c inner mol is from edge of outer region
cenpos=[totsize,totsize,totsize]
length=[2*totsize+1,2*totsize+1,2*totsize+1]
maxTVs=insize
outer_maxTVs=insize+outsize
#for diamond outer, don't specify for cube and will fill to cube edges.
print 'name: ',name,'mols_cen: ', mols_cen,' mols_sur: ',mols_sur,' TVs: ', TV
# Place Molecules
prot_neut_cry=Crystal(name=name,mols_cen=mols_cen,mols_sur=mols_sur,cenpos=cenpos,length=length,TVs=TV,maxTVs=maxTVs,mols_outer=mols_outer,outer_maxTVs=outer_maxTVs)
#prot_neut_cry._mols contains all molecules.
#mols[0] contains a list of all molecules in position a, mols[1] all mols in pos'n b, etc.
#mols[0][x,y,z] contains molecule a in position x,y,z
#mols may as such be iterated over in a number of ways to consider different molecules.
print 'state',state
print 'q: ', qdict[state]
# Put the charge for the chosen state on every atom of the central molecule.
for atom in prot_neut_cry()._mols[0][prot_neut_cry()._cenpos[0]][prot_neut_cry()._cenpos[1]][prot_neut_cry()._cenpos[2]]():
	atom()._crg=qdict[state]
prot_neut_cry().print_posns()
#Calculate Properties:
# Energies below are converted from Hartree to eV via the factor 27.211.
print strftime("%a, %d %b %Y %X +0000", gmtime())
E0 = np.matrix([0.,0.,0.])
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'Calc jm'
screenradius=1.7278
# Thole paper screenradius value for fit to components of pol. tensor divided by no. atoms in mol. We choose this screenradius value for smearing of charge as, with near planar mols, in some dirs we have molecule-like polarisabilities with near atom-like separations.
#This form of screenradius will result in charge being smeared along the separation axis of molecules by NAtoms*(Thole's value for a single atom)
jm = JMatrix(jmtype='TholeExp',screenradius=screenradius)
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'Calc dips:'
d = get_dipoles(E0=E0,jm=jm._m,cutoff=cut)
print strftime("%a, %d %b %Y %X +0000", gmtime())
Efield = get_electric_field(E0)
potential = get_potential()
print strftime("%a, %d %b %Y %X +0000", gmtime())
#print 'dips', d
print 'splitting dips onto atoms'
split_d = split_dipoles_onto_atoms(d)
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'summing dips:'
tot = np.matrix([0.,0.,0.])
for dd in split_d:
	tot += dd
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'total dip moment', tot
Uqq = np.multiply(get_U_qq(potential=potential),27.211)
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'Uqq', Uqq
Uqd = np.multiply(get_U_qdip(dips=d,Efield=Efield),27.211)
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'Uqd', Uqd
Udd = np.multiply(get_U_dipdip(jm=jm._m,dips=d.T),27.211)
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'Udd', Udd
energyev = Udd+Uqd+Uqq
print 'energyev', energyev
energy=energyev/27.211
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'Making .dat cross sections for gnuplot'
# print TVs
if not os.path.exists('Dips_Posns_TVs'): os.makedirs('Dips_Posns_TVs')
f = open('Dips_Posns_TVs/%s_TVs.dat' % name, 'w')
TVstr=str(str(TV[0,0]) + ' ' + str(TV[0,1]) + ' ' + str(TV[0,2]) + '\n' + str(TV[1,0]) + ' ' + str(TV[1,1]) + ' ' + str(TV[1,2]) + '\n' + str(TV[2,0]) + ' ' + str(TV[2,1]) + ' ' + str(TV[2,2])+ '\n')
f.write(TVstr)
f.flush()
f.close()
# print dipoles
if not os.path.exists('Dips_Posns_TVs'): os.makedirs('Dips_Posns_TVs')
f = open('Dips_Posns_TVs/%s_dipoles.dat' % name, 'w')
# One induced-dipole vector per atom, one per line.
for dd in split_d:
	dstr=str(dd)
	f.write(dstr)
	f.write('\n')
f.flush()
f.close()
# print properties for charge in centrepos
time=strftime("%a, %d %b %Y %X +0000", gmtime())
f = open('%s_properties.csv' % name, 'w')
# Tab-separated header row followed by a single data row.
f.write ('time\tname\tmols_cen\tmols_sur\tmols_outer\tinsize\toutsize\tenergyev\tUqq\tUqd\tUdd\tTotdip_x\tTotdip_y\tTotdip_z')
f.write ('\n%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s' % (time,name,mols_cen,mols_sur,mols_outer,insize,outsize,energyev,Uqq,Uqd,Udd,tot[0,0],tot[0,1],tot[0,2]))
f.flush()
f.close()
# print header for reorgs
f = open('reorg_energies_%s_properties.csv' % name, 'w')
f.write ('time\tname\tmols_cen\tmols_sur\tmols_outer\tinsize\toutsize\ta\tb\tc\tmolincell\tReorg(eV)')
f.flush()
f.close()
# REORGANISATION ENERGIES
#Note that this assumes a cube, and values for which
# Sweep shells of increasing distance from the central cell; for each cell and
# each molecule in the unit cell, compute and record the reorganisation energy.
for dist in range(0,(length[0]/2)+1,1):
	print '\n\nDIST: ', dist, '\n'
	for a in range(prot_neut_cry()._cenpos[0]-dist,prot_neut_cry()._cenpos[0]+dist+1,1):
		for b in range(prot_neut_cry()._cenpos[1]-dist,prot_neut_cry()._cenpos[1]+dist+1,1):
			for c in range(prot_neut_cry()._cenpos[2]-dist,prot_neut_cry()._cenpos[2]+dist+1,1):
				print strftime("%a, %d %b %Y %X +0000", gmtime())
				print 'a,b,c',a,b,c
				for molincell in range(0,len(prot_neut_cry()._mols),1):
					prot_neut_cry().calc_reorg_shareq(a1=prot_neut_cry()._cenpos[0],b1=prot_neut_cry()._cenpos[1],c1=prot_neut_cry()._cenpos[2],molincell1=0,a2=a,b2=b,c2=c,molincell2=molincell,jm=jm._m,oldUqd=Uqd)
					print 'Reorg: ', prot_neut_cry()._reorgs_shareq[molincell][a][b][c]
					# Append each result immediately so partial data survives a crash.
					f = open('reorg_energies_%s_properties.csv' % name, 'a')
					f.write ('\n%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s' % (time,name,mols_cen,mols_sur,mols_outer,insize,outsize,a,b,c,molincell,prot_neut_cry()._reorgs_shareq[molincell][a][b][c]))
					f.flush()
					f.close()
	# Redo this and overwrite after each set to ensure we have some even if not all reorgs complete
	prot_neut_cry().print_reorgs_shareq()
print 'Job Completed Successfully.'
| [
"sheridan.few@gmail.com"
] | sheridan.few@gmail.com |
a7c965bda295df19c2aaa63b9a6c4712b018af08 | 01ec983c0a3cc35857d1900b8f26bb7e8ad93c2f | /Image_Processing_in_OpenCV/Geometric_Transformations_of_Images/Image_Thresholding/Otsu's_Binarization.py | 2121e75d27dd1263e93dd4b74226da0d5eac8936 | [] | no_license | Joevaen/Opencv_On_CT | f88560c2eb3655e09e21ffe012d5c531c0742c94 | 3c896b4791d99c5be4fc21054aeeb25feb5d2e33 | refs/heads/main | 2023-03-18T21:10:28.772470 | 2021-03-04T07:34:02 | 2021-03-04T07:34:02 | 343,656,576 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,182 | py | import cv2 as cv
import numpy as np
from matplotlib import pyplot as plt
img = cv.imread('/home/qiao/PythonProjects/Opencv_On_CT/Test_Img/10.jpg',0)
# global thresholding
ret1,th1 = cv.threshold(img,200,255,cv.THRESH_BINARY)
# Otsu's thresholding
ret2,th2 = cv.threshold(img,0,255,cv.THRESH_BINARY+cv.THRESH_OTSU)
# Otsu's thresholding after Gaussian filtering
blur = cv.GaussianBlur(img,(5,5),0)
ret3,th3 = cv.threshold(blur,0,255,cv.THRESH_BINARY+cv.THRESH_OTSU)
# plot all the images and their histograms
images = [img, 0, th1,
img, 0, th2,
blur, 0, th3]
titles = ['Original Noisy Image','Histogram','Global Thresholding (v=127)',
'Original Noisy Image','Histogram',"Otsu's Thresholding",
'Gaussian filtered Image','Histogram',"Otsu's Thresholding"]
for i in range(3):
plt.subplot(3,3,i*3+1),plt.imshow(images[i*3],'gray')
plt.title(titles[i*3]), plt.xticks([]), plt.yticks([])
plt.subplot(3,3,i*3+2),plt.hist(images[i*3].ravel(),256)
plt.title(titles[i*3+1]), plt.xticks([]), plt.yticks([])
plt.subplot(3,3,i*3+3),plt.imshow(images[i*3+2],'gray')
plt.title(titles[i*3+2]), plt.xticks([]), plt.yticks([])
plt.show() | [
"joevaen@126.com"
] | joevaen@126.com |
831fdc372ca74798f62c475972295be51a4f1e8b | 78c4ccb183a99ebaabcdc3a3a69f029e4aee0f5c | /AlgorithmStudy/백준/19 해시맵/1620 나는야 포켓몬 마스터 이다솜(10.26).py | f5af375ee7308b90025d55f3cf56d9a6be83d7c7 | [] | no_license | cladren123/study | ef2c45bc489fa658dbc9360fb0b0de53250500e5 | 241326e618f1f3bb1568d588bf6f53b78920587a | refs/heads/master | 2023-09-02T02:21:24.560967 | 2021-11-05T12:20:06 | 2021-11-05T12:20:06 | 368,753,950 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 598 | py |
import sys
input = sys.stdin.readline
# n : 포켓몬 개수, m : 문제의 개수
n, m = map(int, input().split())
# 시간복잡도를 피하기 위해 딕셔너리를 2개 사용
dogam = dict()
dogam2 = dict()
# 한개의 딕셔너리는 숫자,이름 나머지 하나는 이름,숫자 순으로 만든다.
for number in range(1,n+1) :
name = input().strip()
dogam[str(number)] = name
dogam2[name] = str(number)
for _ in range(m) :
one = input().strip()
# 숫자인지 확인
if one.isdigit() :
print(dogam[one])
else :
print(dogam2[one])
| [
"48821942+cladren123@users.noreply.github.com"
] | 48821942+cladren123@users.noreply.github.com |
2f754b68ad9d915c95dc469bc7caf5a5e105f0a6 | 1882ba2b04e2230692e7da0b963f20ccf859ce34 | /Collect/VIIRS/LST_daily.py | b464ba525a4313c4144963048046d4c6ac92bb0a | [
"Apache-2.0"
] | permissive | TimHessels/watertools | 908230ae0f45de5379e6808fec827c55245c1cc2 | 2fc3680bfc6ad34bd2a11fba4cf302c5b84e5d78 | refs/heads/master | 2023-08-16T16:18:47.003632 | 2023-08-06T15:35:49 | 2023-08-06T15:35:49 | 158,684,796 | 4 | 6 | null | null | null | null | UTF-8 | Python | false | false | 743 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Dec 9 13:16:00 2022
@author: timhe
"""
import sys
from watertools.Collect.VIIRS.DataAccess import DownloadData
def main(Dir, Startdate, Enddate, latlim, lonlim, Waitbar = 1):
    """
    Download daily LST (land surface temperature) data from VIIRS for the
    given date interval and spatial extent.

    Keyword arguments:
    Dir -- 'C:/file/to/path/' output directory
    Startdate -- 'yyyy-mm-dd'
    Enddate -- 'yyyy-mm-dd'
    latlim -- [ymin, ymax]
    lonlim -- [xmin, xmax]
    Waitbar -- 1 (Default) will print a waitbar
    """
    print('\nDownload daily VIIRS LST data for period %s till %s' %(Startdate, Enddate))
    # Delegates the actual download to the shared VIIRS DataAccess helper.
    DownloadData(Dir, Startdate, Enddate, latlim, lonlim, Waitbar)
if __name__ == '__main__':
    # NOTE(review): main(sys.argv) passes the whole argv list as `Dir` and
    # supplies none of the other required arguments, so running this module
    # as a script raises TypeError -- looks like boilerplate; verify intent.
    main(sys.argv)
"timhessels@hotmail.com"
] | timhessels@hotmail.com |
595cccc02dc74de53021e84d1eae7a581cf94a0d | e17a64f1a063fca4fc7d833f85239a39a973c119 | /tests/test_weight_allocations.py | 503a1d58c1e6ce57a2d21d9b2e518549b625373a | [
"Apache-2.0"
] | permissive | stjordanis/moonshot | ff18694a3977fd01a40cabaae32907aae56bdb9f | d79cf26e7fb5ce3fcb34060771ea4992e19dc46a | refs/heads/master | 2023-07-03T08:24:24.729332 | 2021-04-13T15:48:47 | 2021-04-13T15:48:47 | 271,562,585 | 0 | 0 | Apache-2.0 | 2020-06-11T14:07:20 | 2020-06-11T14:07:19 | null | UTF-8 | Python | false | false | 4,746 | py | # Copyright 2018 QuantRocket LLC - All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# To run: python3 -m unittest discover -s tests/ -p test_*.py -t . -v
import os
import unittest
from unittest.mock import patch
import glob
import pandas as pd
from moonshot import Moonshot
from moonshot.cache import TMP_DIR
class WeightAllocationsTestCase(unittest.TestCase):
    """Unit tests for Moonshot's signal-to-weight allocation helpers."""
    def test_allocate_equal_weights(self):
        """
        Tests that the allocate_equal_weights returns the expected
        DataFrames.
        """
        signals = pd.DataFrame(
            data={
                "FI12345": [1, 1, 1, 0, 0],
                "FI23456": [0, -1, 1, 0, -1],
            }
        )
        # Each period's total absolute allocation equals the cap, split
        # equally among that period's nonzero signals.
        target_weights = Moonshot().allocate_equal_weights(signals, cap=1.0)
        self.assertDictEqual(
            target_weights.to_dict(orient="list"),
            {"FI12345": [1.0, 0.5, 0.5, 0.0, 0.0],
             "FI23456": [0.0, -0.5, 0.5, 0.0, -1.0]}
        )
        target_weights = Moonshot().allocate_equal_weights(signals, cap=0.5)
        self.assertDictEqual(
            target_weights.to_dict(orient="list"),
            {"FI12345": [0.5, 0.25, 0.25, 0.0, 0.0],
             "FI23456": [0.0, -0.25, 0.25, 0.0, -0.5]}
        )
    def test_allocate_fixed_weights(self):
        """
        Tests that the allocate_fixed_weights returns the expected
        DataFrames.
        """
        signals = pd.DataFrame(
            data={
                "FI12345": [1, 1, 1, 0, 0],
                "FI23456": [0, -1, 1, 0, -1],
                "FI34567": [1, 1, 1, -1, -1]
            }
        )
        # Every nonzero signal receives exactly +/-0.34 regardless of how
        # many signals fire in the same period.
        target_weights = Moonshot().allocate_fixed_weights(signals, 0.34)
        self.assertDictEqual(
            target_weights.to_dict(orient="list"),
            {"FI12345": [0.34, 0.34, 0.34, 0.0, 0.0],
             "FI23456": [0.0, -0.34, 0.34, 0.0, -0.34],
             "FI34567": [0.34, 0.34, 0.34, -0.34, -0.34]}
        )
    def test_allocate_fixed_weights_capped(self):
        """
        Tests that the allocate_fixed_weights_capped returns the expected
        DataFrames.
        """
        signals = pd.DataFrame(
            data={
                "FI12345": [1, 1, 1, 0, 0],
                "FI23456": [0, -1, 1, 0, -1],
                "FI34567": [1, 1, 1, -1, -1]
            }
        )
        # Cap above the largest possible gross exposure: behaves like the
        # uncapped fixed-weight allocation.
        target_weights = Moonshot().allocate_fixed_weights_capped(signals, 0.34, cap=1.5)
        self.assertDictEqual(
            target_weights.to_dict(orient="list"),
            {"FI12345": [0.34, 0.34, 0.34, 0.0, 0.0],
             "FI23456": [0.0, -0.34, 0.34, 0.0, -0.34],
             "FI34567": [0.34, 0.34, 0.34, -0.34, -0.34]}
        )
        # Tighter cap: weights are scaled down pro rata in the periods
        # whose gross exposure would otherwise exceed the cap.
        target_weights = Moonshot().allocate_fixed_weights_capped(signals, 0.34, cap=0.81)
        self.assertDictEqual(
            target_weights.to_dict(orient="list"),
            {"FI12345": [0.34, 0.27, 0.27, 0.0, 0.0],
             "FI23456": [0.0, -0.27, 0.27, 0.0, -0.34],
             "FI34567": [0.34, 0.27, 0.27, -0.34, -0.34]}
        )
    def test_allocate_market_neutral_fixed_weights_capped(self):
        """
        Tests that the allocate_market_neutral_fixed_weights_capped returns
        the expected DataFrames.
        """
        signals = pd.DataFrame(
            data={
                "FI12345": [1, 1, 1, 0, 0],
                "FI23456": [0, -1, 1, 1, -1],
                "FI34567": [1, 1, -1, -1, -1]
            }
        )
        # With neutralize_weights=False only the cap is applied per side.
        target_weights = Moonshot().allocate_market_neutral_fixed_weights_capped(
            signals, 0.34, cap=1.2, neutralize_weights=False)
        self.assertDictEqual(
            target_weights.to_dict(orient="list"),
            {"FI12345": [0.3, 0.3, 0.3, 0.0, 0.0],
             "FI23456": [0.0, -0.34, 0.3, 0.34, -0.3],
             "FI34567": [0.3, 0.3, -0.34, -0.34, -0.3]}
        )
        # With neutralize_weights=True long and short gross exposure are
        # additionally balanced against each other in each period.
        target_weights = Moonshot().allocate_market_neutral_fixed_weights_capped(
            signals, 0.34, cap=1.2, neutralize_weights=True)
        self.assertDictEqual(
            target_weights.to_dict(orient="list"),
            {"FI12345": [0.0, 0.17, 0.17, 0.0, 0.0],
             "FI23456": [0.0, -0.34, 0.17, 0.34, -0.0],
             "FI34567": [0.0, 0.17, -0.34, -0.34, -0.0]}
        )
| [
"brian@quantrocket.com"
] | brian@quantrocket.com |
6b0c978d955aa41ed67d4d22e3bf81f7d1c4269f | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_decorating.py | 664550501ff25b0218fee67c62a52ded0ce21da4 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 254 | py |
from xai.brain.wordbase.verbs._decorate import _DECORATE
# class header
class _DECORATING(_DECORATE, ):
    """Inflected form "decorating" of the base verb class _DECORATE."""
    def __init__(self,):
        _DECORATE.__init__(self)
        # Word metadata: surface form, word class, and lemma.
        self.name = "DECORATING"
        self.specie = 'verbs'
        self.basic = "decorate"
        self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
400b1595acdef016b6a3e38e8563f17404ebaac7 | 7b315bbe8c85ce05e6c51112e985ae1b392d83f5 | /métodos de classe e static metod/metod_staticmetod.py | 65d27469fda2f7bcaba11e7083ced4d807d76800 | [] | no_license | Cica013/aprendendoPython | e9f993b1b144e294a338a53f2bc36673d3cd00a6 | 9c964f2322e3d52b39a811aceec64b169bab4e10 | refs/heads/main | 2023-08-10T20:12:47.640239 | 2021-10-06T21:01:19 | 2021-10-06T21:01:19 | 385,755,818 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 744 | py | from random import randint
class Pessoa:
    """A person with a name and an age, pinned to a fixed reference year."""
    ano_atual = 2021  # reference year used for all age/birth-year arithmetic
    def __init__(self, nome, idade):
        self.nome = nome
        self.idade = idade
    def get_ano_nascimento(self):
        """Print (not return) the birth year implied by the stored age."""
        print(self.ano_atual - self.idade)
    # Class methods receive the class itself rather than an instance.
    @classmethod
    def por_ano_nascimento(cls, nome, ano_nascimento):
        """Alternate constructor: build a Pessoa from a birth year."""
        return cls(nome, cls.ano_atual - ano_nascimento)
    # Static methods receive neither the instance nor the class.
    @staticmethod
    def gera_id():
        """Return a pseudo-random id in the range 10000..19999."""
        return randint(10000, 19999)
p = Pessoa.por_ano_nascimento('Luiz', 1995)  # alternate constructor: age derived from birth year
print(p.nome, p.idade)
print(Pessoa.gera_id())  # static method called on the class...
print(p.gera_id())  # ...or on an instance; both behave identically
| [
"61808853+Cica013@users.noreply.github.com"
] | 61808853+Cica013@users.noreply.github.com |
7c045e6cdaa81eee4b7557bc8cd5d770c25980d1 | 482ffa5e0848030b9327eb58215f6b626f825e5d | /accounts/migrations/0003_transactions.py | 7afe9b9c6cb159569599a8e8daafa4c6def624ed | [] | no_license | syash5/Django-Banking-App | 6400d862a12d2fd4b555346722c756b6f4292e34 | 7dd9ed1286b61e1a508943b05b1616620fbf0118 | refs/heads/master | 2020-04-17T11:17:29.478938 | 2019-01-24T08:12:47 | 2019-01-24T08:12:47 | 166,534,958 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,195 | py | # Generated by Django 2.1 on 2019-01-13 18:14
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Create the ``transactions`` model, linked to ``UserProfile``."""
    # NOTE(review): atomic=False disables wrapping this migration in a
    # transaction -- the reason is not evident here; confirm it is intentional.
    atomic = False
    dependencies = [
        ('accounts', '0002_auto_20190112_0635'),
    ]
    operations = [
        migrations.CreateModel(
            name='transactions',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # All amounts require a minimum value of 10.00.
                ('Diposit_amount', models.DecimalField(decimal_places=2, max_digits=12, validators=[django.core.validators.MinValueValidator('10.00')])),
                ('Withdrawal_amount', models.DecimalField(decimal_places=2, max_digits=12, validators=[django.core.validators.MinValueValidator('10.00')])),
                ('Interest_amount', models.DecimalField(decimal_places=2, max_digits=12, validators=[django.core.validators.MinValueValidator('10.00')])),
                ('timestamp', models.DateTimeField(auto_now_add=True)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='accounts.UserProfile')),
            ],
        ),
    ]
| [
"you@example.com"
] | you@example.com |
1366562def67d68e2dab7e07c82a5d1954cd37b5 | beb9ac9ed895b375fbea240bf7d56281d6a0a481 | /20200715/test4.py | 588c3c362dc327af8a8622e5ff59c276f93f66f9 | [] | no_license | MinjeongSuh88/python_workspace | 5b0c7e2a7b3543e65df1f07066e4a52f23294ac5 | b13afdc8cf4e42496fa2b5c8df3c5effc7f7488d | refs/heads/master | 2022-11-30T11:05:52.347243 | 2020-08-14T09:04:42 | 2020-08-14T09:04:42 | 285,185,265 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 658 | py | print('python')
# 주석 : 한줄 주석
a=10 #a변수에 10의 값을 대입한다.
b=1+1 #b변수에 1+1의 값을 대입
c=a+b #변수에 a변수의 값과 b변수의 값을 더해서 대입
print('--------------')
print('=================')
msg="문자열" #문자열은 싱글, 더블 쿼테이션으로 둘 다 묶을 수 있음
print(msg)
print('text')
print("text2")
print('''text''')
'''
#없이 문자열로 주석을 다는 경우도 있음. 어차피 출력되지 않기 때문에
코드에 설명이나 해야할 일 등을 적기도 함
나중에 코드를 볼 때 무얼하기 위해 사용한 코드인지 설명을 붙이기도 함
''' | [
"69196506+MinjeongSuh88@users.noreply.github.com"
] | 69196506+MinjeongSuh88@users.noreply.github.com |
7bfc60af5297c781220498970f2bba57d33b8fe6 | 523f8f5febbbfeb6d42183f2bbeebc36f98eadb5 | /120.py | 665b598078de636f20809fca40b17315cda10277 | [] | no_license | saleed/LeetCode | 655f82fdfcc3000400f49388e97fc0560f356af0 | 48b43999fb7e2ed82d922e1f64ac76f8fabe4baa | refs/heads/master | 2022-06-15T21:54:56.223204 | 2022-05-09T14:05:50 | 2022-05-09T14:05:50 | 209,430,056 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 581 | py | class Solution(object):
def minimumTotal(self, triangle):
"""
:type triangle: List[List[int]]
:rtype: int
"""
if len(triangle)==0:
return 0
if len(triangle[0])==0:
return 0
dp=len(triangle[-1])*[0]
dp[0]=triangle[0][0]
for i in range(1,len(triangle)):
dp[i]=dp[i-1]+triangle[i][-1]
for j in list(reversed(range(1,len(triangle[i])-1))):
dp[j]=min(dp[j],dp[j-1])+triangle[i][j]
dp[0]=triangle[i][0]+dp[0]
return min(dp)
| [
"1533441387@qq.com"
] | 1533441387@qq.com |
4840a6a9eb97cf1d681992490fb82a335d0548ee | 5a25f4f5f9c7cba03f9b5848eafc01a760c88768 | /reduction/pipeline_scripts/member.uid___A001_X1296_X1d3.hifa_calimage.casa_pipescript.py | 225beed4b342927fd883933387885ab8b02b6a9f | [] | no_license | ALMA-IMF/reduction | b3579a548fe20193b807a7415a040f351c879beb | de606cc6bc542f088223ce84082ff333739c9007 | refs/heads/master | 2023-06-22T13:21:13.841999 | 2023-06-12T09:17:50 | 2023-06-12T09:17:50 | 115,018,799 | 9 | 29 | null | 2023-06-12T09:17:51 | 2017-12-21T15:13:55 | Python | UTF-8 | Python | false | false | 2,919 | py | from recipes.almahelpers import fixsyscaltimes # SACM/JAO - Fixes
__rethrow_casa_exceptions = True
context = h_init()
context.set_state('ProjectSummary', 'proposal_code', '2017.1.01355.L')
context.set_state('ProjectSummary', 'piname', 'unknown')
context.set_state('ProjectSummary', 'proposal_title', 'unknown')
context.set_state('ProjectStructure', 'ous_part_id', 'X1218271487')
context.set_state('ProjectStructure', 'ous_title', 'Undefined')
context.set_state('ProjectStructure', 'ppr_file', '/opt/dared/opt/qa56.1712.1/mnt/dataproc/2017.1.01355.L_2018_07_16T04_10_47.494/SOUS_uid___A001_X1296_X1cf/GOUS_uid___A001_X1296_X1d0/MOUS_uid___A001_X1296_X1d3/working/PPR_uid___A001_X1296_X1d4.xml')
context.set_state('ProjectStructure', 'ps_entity_id', 'uid://A001/X1220/Xddd')
context.set_state('ProjectStructure', 'recipe_name', 'hifa_calimage')
context.set_state('ProjectStructure', 'ous_entity_id', 'uid://A001/X1220/Xdd9')
context.set_state('ProjectStructure', 'ousstatus_entity_id', 'uid://A001/X1296/X1d3')
try:
hifa_importdata(vis=['uid___A002_Xcfae6a_Xe0b'], session=['session_1'])
fixsyscaltimes(vis = 'uid___A002_Xcfae6a_Xe0b.ms')# SACM/JAO - Fixes
h_save() # SACM/JAO - Finish weblog after fixes
h_init() # SACM/JAO - Restart weblog after fixes
hifa_importdata(vis=['uid___A002_Xcfae6a_Xe0b'], session=['session_1'])
hifa_flagdata(pipelinemode="automatic")
hifa_fluxcalflag(pipelinemode="automatic")
hif_rawflagchans(pipelinemode="automatic")
hif_refant(pipelinemode="automatic")
h_tsyscal(pipelinemode="automatic")
hifa_tsysflag(pipelinemode="automatic")
hifa_antpos(pipelinemode="automatic")
hifa_wvrgcalflag(pipelinemode="automatic")
hif_lowgainflag(pipelinemode="automatic")
hif_setmodels(pipelinemode="automatic")
hifa_bandpassflag(pipelinemode="automatic")
hifa_spwphaseup(pipelinemode="automatic")
hifa_gfluxscaleflag(pipelinemode="automatic")
hifa_gfluxscale(pipelinemode="automatic")
hifa_timegaincal(pipelinemode="automatic")
hif_applycal(pipelinemode="automatic")
hifa_imageprecheck(pipelinemode="automatic")
hif_makeimlist(intent='PHASE,BANDPASS,CHECK')
hif_makeimages(pipelinemode="automatic")
hif_checkproductsize(maxcubelimit=40.0, maxproductsize=400.0, maxcubesize=30.0)
hifa_exportdata(pipelinemode="automatic")
hif_mstransform(pipelinemode="automatic")
hifa_flagtargets(pipelinemode="automatic")
hif_makeimlist(specmode='mfs')
hif_findcont(pipelinemode="automatic")
hif_uvcontfit(pipelinemode="automatic")
hif_uvcontsub(pipelinemode="automatic")
hif_makeimages(pipelinemode="automatic")
hif_makeimlist(specmode='cont')
hif_makeimages(pipelinemode="automatic")
hif_makeimlist(pipelinemode="automatic")
hif_makeimages(pipelinemode="automatic")
hif_makeimlist(specmode='repBW')
hif_makeimages(pipelinemode="automatic")
finally:
h_save()
| [
"keflavich@gmail.com"
] | keflavich@gmail.com |
dd760e6f9b4ff2ec08119811edbb68ae2a2b08b9 | f22d31484a12d001826c1775a6f2d245a720fce8 | /Introdução à Programação com Python/Do autor/Códigi fonte e listagem/listagem/capitulo 11/11.25 - Acessando um campo do tipo data.py | 0501a6557cd74191d14b35613f9ec6f3f88ff792 | [] | no_license | eduardoprograma/linguagem_Python | 9eb55f0a5a432a986e047b091eb7ed7152b7da67 | 942aba9146800fc33bbea98778467f837396cb93 | refs/heads/master | 2021-07-07T20:48:37.673101 | 2020-07-31T21:24:17 | 2020-07-31T21:24:17 | 159,852,510 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,007 | py | ##############################################################################
# Parte do livro Introdução à Programação com Python
# Autor: Nilo Ney Coutinho Menezes
# Editora Novatec (c) 2010-2017
# Primeira edição - Novembro/2010 - ISBN 978-85-7522-250-8
# Primeira reimpressão - Outubro/2011
# Segunda reimpressão - Novembro/2012
# Terceira reimpressão - Agosto/2013
# Segunda edição - Junho/2014 - ISBN 978-85-7522-408-3
# Primeira reimpressão - Segunda edição - Maio/2015
# Segunda reimpressão - Segunda edição - Janeiro/2016
# Terceira reimpressão - Segunda edição - Junho/2016
# Quarta reimpressão - Segunda edição - Março/2017
#
# Site: http://python.nilo.pro.br/
#
# Arquivo: listagem\capitulo 11\11.25 - Acessando um campo do tipo data.py
##############################################################################
import sqlite3
# Print every row of the `feriados` (holidays) table from the brasil.db SQLite file.
# The `with` block commits/rolls back the connection automatically on exit.
with sqlite3.connect("brasil.db") as conexão:
    for feriado in conexão.execute("select * from feriados"):
        print(feriado)
| [
"eduardo.candido@fatec.sp.gov.br"
] | eduardo.candido@fatec.sp.gov.br |
6f7fae7437a7d13832deec4352974a08fee85933 | effdd4579ce829f0965e59d3282504ccdca3278e | /apps/users/models.py | a7e464a9005dd9491e878679604a3b0ee1e071fe | [
"Apache-2.0",
"MIT"
] | permissive | Liuyanzhi/OnlineMooc | 55cad27d8f2168dd6a18f850b923d6c866024c24 | 88e49e0bd8ab3002c3150b6ad8bd2a8ef7b6deb8 | refs/heads/master | 2022-11-29T12:06:02.193585 | 2020-07-29T15:30:40 | 2020-07-29T15:30:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,352 | py | from django.db import models
from datetime import datetime
from django.contrib.auth.models import AbstractUser
# Create your models here.
class UserProfile(AbstractUser):
    """Custom user model extending Django's AbstractUser with profile fields."""
    # Allowed gender values: (stored value, display label).
    GENDER_CHOICES = (
        ("male", u"男"),
        ("female", u"女")
    )
    # Nickname shown in the UI.
    nick_name = models.CharField(max_length=50, verbose_name=u"昵称", default="")
    # Birthday; optional.
    birthday = models.DateField(verbose_name=u"生日", null=True, blank=True)
    # Gender, restricted to male/female; defaults to female.
    gender = models.CharField(
        max_length=6,
        verbose_name=u"性别",
        choices=GENDER_CHOICES,
        default="female")
    # Address.
    address = models.CharField(max_length=100, verbose_name="地址", default="")
    # Phone number; optional.
    mobile = models.CharField(
        max_length=11,
        null=True,
        blank=True,
        verbose_name=u"电话")
    # Avatar image, uploaded under image/<year>/<month>; falls back to default.png.
    image = models.ImageField(
        upload_to="image/%Y/%m",
        default=u"image/default.png",
        max_length=100,
        verbose_name=u"头像"
    )

    # Admin site display names for this model.
    class Meta:
        verbose_name = "用户信息"
        verbose_name_plural = verbose_name

    def __str__(self):
        # `username` is inherited from AbstractUser.
        return self.username

    def unread_nums(self):
        """Return the number of unread messages for this user."""
        # Imported lazily here (presumably to avoid a circular import with
        # operation.models -- confirm before moving to module level).
        from operation.models import UserMessage
        return UserMessage.objects.filter(has_read=False, user=self.id).count()
class EmailVerifyRecord(models.Model):
    """Email verification code sent for registration, password reset or email change."""
    # (stored value, display label) pairs for the code's purpose.
    SEND_CHOICES = (
        ("register", u"注册"),
        ("forget", u"找回密码"),
        ("update_email", u"修改邮箱"),
    )
    code = models.CharField(max_length=20, verbose_name=u"验证码")
    # null/blank are not set, so the field is required by default.
    email = models.EmailField(max_length=50, verbose_name=u"邮箱")
    send_type = models.CharField(
        choices=SEND_CHOICES,
        max_length=20,
        verbose_name=u"验证码类型")
    # Pass `datetime.now` WITHOUT calling it: `now()` would be evaluated once
    # at class-definition time instead of when each record is created.
    send_time = models.DateTimeField(
        default=datetime.now, verbose_name=u"发送时间")

    # Admin site display names for this model.
    class Meta:
        verbose_name = "邮箱验证码"
        verbose_name_plural = verbose_name

    def __str__(self):
        # Readable label so the admin does not show a bare "object".
        return '{0}({1})'.format(self.code, self.email)
class Banner(models.Model):
    """Carousel (banner) image shown on the site's front page."""
    title = models.CharField(max_length=100, verbose_name=u"标题")
    # Banner image, uploaded under banner/<year>/<month>.
    image = models.ImageField(
        upload_to="banner/%Y/%m",
        verbose_name=u"轮播图",
        max_length=100)
    url = models.URLField(max_length=200, verbose_name=u"访问地址")
    # Large default index sorts the banner last; lower the value to move it forward.
    index = models.IntegerField(default=100, verbose_name=u"顺序")
    add_time = models.DateTimeField(default=datetime.now, verbose_name=u"添加时间")

    # Admin site display names for this model.
    class Meta:
        verbose_name = u"轮播图"
        verbose_name_plural = verbose_name

    def __str__(self):
        # Readable label so the admin does not show a bare "object".
        return '{0}(位于第{1}位)'.format(self.title, self.index)
| [
"1147727180@qq.com"
] | 1147727180@qq.com |
274b7e630d7bb6048b374afa3e25f1d914cc5f4f | 50948d4cb10dcb1cc9bc0355918478fb2841322a | /azure-mgmt-resource/azure/mgmt/resource/resources/v2016_02_01/models/deployment_extended_py3.py | 095e187951cbf55221db70bf9c50748087ff0832 | [
"MIT"
] | permissive | xiafu-msft/azure-sdk-for-python | de9cd680b39962702b629a8e94726bb4ab261594 | 4d9560cfd519ee60667f3cc2f5295a58c18625db | refs/heads/master | 2023-08-12T20:36:24.284497 | 2019-05-22T00:55:16 | 2019-05-22T00:55:16 | 187,986,993 | 1 | 0 | MIT | 2020-10-02T01:17:02 | 2019-05-22T07:33:46 | Python | UTF-8 | Python | false | false | 1,569 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
# NOTE: this class is auto-generated by AutoRest (see the file header);
# manual edits may be lost on regeneration.
class DeploymentExtended(Model):
    """Deployment information.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar id: The ID of the deployment.
    :vartype id: str
    :param name: Required. The name of the deployment.
    :type name: str
    :param properties: Deployment properties.
    :type properties:
     ~azure.mgmt.resource.resources.v2016_02_01.models.DeploymentPropertiesExtended
    """

    # msrest validation rules: `id` is server-populated (read-only),
    # `name` must be supplied by the caller.
    _validation = {
        'id': {'readonly': True},
        'name': {'required': True},
    }

    # Maps Python attribute names to wire-format keys and msrest types.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'properties': {'key': 'properties', 'type': 'DeploymentPropertiesExtended'},
    }

    def __init__(self, *, name: str, properties=None, **kwargs) -> None:
        super(DeploymentExtended, self).__init__(**kwargs)
        # Read-only on the wire; populated by the service, never by the client.
        self.id = None
        self.name = name
        self.properties = properties
| [
"noreply@github.com"
] | xiafu-msft.noreply@github.com |
24bb14de8d57979dddcf4ba16b0a388f44b51dc0 | 27fd33abe12f48b0cfdafef7a624e4a96b311744 | /stock_invoice_chained_delivery/__openerp__.py | 520dac5979944730e9f09fa6a13a0d84e2876e7f | [] | no_license | mgielissen/julius-openobject-addons | 7b9966ed2894ce82b5fb396bca1bd9984f263737 | 3e35f7ba7246c54e5a5b31921b28aa5f1ab24999 | refs/heads/master | 2021-01-16T20:41:49.131160 | 2016-03-01T12:31:05 | 2016-03-01T12:31:05 | 52,984,093 | 1 | 0 | null | 2016-03-02T17:53:42 | 2016-03-02T17:53:42 | null | UTF-8 | Python | false | false | 1,456 | py | # -*- coding: utf-8 -*-
#################################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013 Julius Network Solutions SARL <contact@julius.fr>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#################################################################################
{
"name" : "Stock Picking invoice the chained delivery",
"version" : "0.1",
"author" : "Julius Network Solutions",
"website" : "http://julius.fr",
"category" : "Warehouse Management",
"depends" : [
"stock",
],
"description": """
Stock Picking invoice the chained delivery.
""",
"demo" : [],
"data" : [],
'installable' : False,
'active' : False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | [
"mathieu@julius.fr"
] | mathieu@julius.fr |
302aad1cf1976fdcf53cdcf52935b5bf7730306f | a3f4fc66a64aaf359fbc7176b3b2335db73c5dd9 | /features/environment.py | 821222beae4c53daad3f8e45a39a3bed7f4531f2 | [] | no_license | hpifu/go-cloud | cd6b13fe0f757a5d961f8be15cbc3844d6d5282f | fd4edabddca679c19c887c8a019e1a1ecce02e0b | refs/heads/master | 2021-08-02T09:21:05.245600 | 2020-01-03T13:07:28 | 2020-01-03T13:07:28 | 203,243,843 | 0 | 0 | null | 2021-07-26T23:44:47 | 2019-08-19T20:16:13 | Go | UTF-8 | Python | false | false | 3,043 | py | #!/usr/bin/env python3
import pymysql
import redis
import subprocess
import time
import requests
import datetime
import json
import socket
from behave import *
# Custom behave step-parameter converters: {:int} parses integers,
# {:str} maps the sentinel "N/A" to an empty string, {:bool} parses "true".
register_type(int=int)
register_type(str=lambda x: x if x != "N/A" else "")
register_type(bool=lambda x: True if x == "true" else False)
# Test-environment configuration: build output location, service endpoints,
# and backing stores (Elasticsearch, account service, MySQL, Redis).
# `deploy()` copies the relevant values into the service's cloud.json.
config = {
    "prefix": "output/go-cloud",
    "service": {
        "port": 16061,
        "cookieSecure": False,
        "allowOrigins": ["http://127.0.0.1:4000"],
        "cookieDomain": "127.0.0.1"
    },
    "es": {
        "uri": "http://test-elasticsearch:9200"
    },
    "account": {
        "address": "test-go-account:16060",
        "maxConn": 20,
        "connTimeout": "200ms",
        "recvtimeout": "200ms"
    },
    "mysqldb": {
        "host": "test-mysql",
        "port": 3306,
        "user": "hatlonely",
        "password": "keaiduo1",
        "db": "hads"
    },
    "redis": {
        "host": "test-redis",
        "port": 6379
    }
}
def wait_for_port(port, host="localhost", timeout=5.0):
    """Block until a TCP server on (host, port) accepts connections.

    Polls roughly every 10 ms by attempting a real connection; the probe
    connection is closed immediately on success.

    Args:
        port: TCP port number to probe.
        host: host name or address to probe.
        timeout: overall time budget in seconds.

    Raises:
        TimeoutError: if no connection succeeds within `timeout` seconds.
    """
    start_time = time.perf_counter()
    while True:
        # Cap each attempt by the remaining budget so the total wait cannot
        # overshoot `timeout` (previously each attempt used the full timeout,
        # allowing up to ~2x the requested wait).
        remaining = timeout - (time.perf_counter() - start_time)
        try:
            with socket.create_connection((host, port), timeout=max(remaining, 0.001)):
                return
        except OSError as ex:
            if time.perf_counter() - start_time >= timeout:
                raise TimeoutError("Waited too long for the port {} on host {} to start accepting connections.".format(
                    port, host
                )) from ex
            time.sleep(0.01)
def deploy():
    """Rewrite the service's cloud.json under `config["prefix"]`.

    Loads the packaged config file, overwrites the service/account/es
    settings with the values from the module-level `config`, and writes
    the file back (pretty-printed).
    """
    path = "{}/configs/cloud.json".format(config["prefix"])
    # `with` guarantees the handles are closed even if json parsing or the
    # write fails (the original left files open on exception).
    with open(path) as fp:
        cf = json.loads(fp.read())
    cf["service"]["port"] = ":{}".format(config["service"]["port"])
    cf["service"]["cookieSecure"] = config["service"]["cookieSecure"]
    cf["service"]["cookieDomain"] = config["service"]["cookieDomain"]
    cf["service"]["allowOrigins"] = config["service"]["allowOrigins"]
    cf["account"]["address"] = config["account"]["address"]
    cf["es"]["uri"] = config["es"]["uri"]
    print(cf)
    with open(path, "w") as fp:
        fp.write(json.dumps(cf, indent=4))
def start():
    """Launch the cloud binary in the background, then block until its port accepts connections."""
    # shell=True is acceptable here: the command is built from our own test
    # config, not from untrusted input.
    subprocess.Popen(
        "cd {} && nohup bin/cloud &".format(config["prefix"]), shell=True
    )
    wait_for_port(config["service"]["port"], timeout=5)
def stop():
    """Best-effort shutdown: kill any running bin/cloud processes via ps/grep/kill."""
    # getstatusoutput swallows the exit status, so a failed kill is ignored.
    subprocess.getstatusoutput(
        "ps aux | grep bin/cloud | grep -v grep | awk '{print $2}' | xargs kill"
    )
def before_all(context):
    """behave hook: deploy config, start the service, and open DB/Redis clients for the steps."""
    config["url"] = "http://127.0.0.1:{}".format(config["service"]["port"])
    deploy()
    start()
    context.config = config
    # Dict-cursor rows let step implementations access columns by name.
    context.mysql_conn = pymysql.connect(
        host=config["mysqldb"]["host"],
        user=config["mysqldb"]["user"],
        port=config["mysqldb"]["port"],
        password=config["mysqldb"]["password"],
        db=config["mysqldb"]["db"],
        charset="utf8",
        cursorclass=pymysql.cursors.DictCursor
    )
    context.redis_client = redis.Redis(
        config["redis"]["host"], port=6379, db=0
    )
def after_all(context):
    """behave hook: shut the service down once the whole run is finished."""
    stop()
| [
"hatlonely@gmail.com"
] | hatlonely@gmail.com |
2b775a08b758b5a4abcdf87c7a14a1b6965a4cdc | 22bec7f5d2c2bc9b6fa0c23d6733bf6d81d41982 | /src/스택_큐/Level_2_주식가격.py | 7f564eab4d0a16ee36b6b487f386b67870da5a90 | [
"MIT"
] | permissive | taki0112/coding_interview | 3cb5eeb5b545cc15de84551370923e307a93293d | 06b61646c3fafb63ac74b1170a4d0a77f02231a0 | refs/heads/master | 2021-08-07T08:09:13.392815 | 2020-04-22T10:11:52 | 2020-04-22T10:11:52 | 158,521,986 | 7 | 3 | null | null | null | null | UTF-8 | Python | false | false | 391 | py | def solution(prices):
answer = []
for i in range(len(prices) - 1) :
for j in range(i+1, len(prices)) :
if prices[i] > prices[j] :
answer.append(j - i)
break
else :
answer.append(len(prices) - 1 - i)
else :
answer.append(0)
return answer
prices = [498,501,470,489]
x = solution(prices)
print(x) | [
"takis0112@gmail.com"
] | takis0112@gmail.com |
57a213ea3ecfa383451d374b3a1edfcc716243f0 | 98590747113ca3022c67c8bc6332b2bf48d7073e | /remove_element.py | 3cf2b6fba59b3b62c77e92e65fe5b24b3c65aee4 | [] | no_license | buxizhizhoum/leetcode | a54291519a23fe82e9f9620e5a2266833696f005 | cf4235170db3629b65790fd0855a8a72ac5886f7 | refs/heads/master | 2022-06-04T02:54:26.381077 | 2022-04-01T06:58:19 | 2022-04-01T06:58:19 | 116,791,542 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 995 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Given an array and a value, remove all instances of that value in-place and return the new length.
Do not allocate extra space for another array, you must do this by modifying the input array in-place with O(1) extra memory.
The order of elements can be changed. It doesn't matter what you leave beyond the new length.
Example:
Given nums = [3,2,2,3], val = 3,
Your function should return length = 2, with the first two elements of nums being 2.
"""
class Solution(object):
    def removeElement(self, nums, val):
        """Remove all occurrences of val from nums in place.

        :type nums: List[int]
        :type val: int
        :rtype: int

        Keeps the surviving elements in their original order and truncates
        the list, so callers can rely on both the return value and len(nums).
        Single O(n) pass via slice assignment; the original deleted elements
        one by one with `del`, which shifts the tail each time (O(n^2) total).
        """
        nums[:] = [x for x in nums if x != val]
        return len(nums)
if __name__ == "__main__":
# test_list = range(10)
test_list = [3, 2, 2, 3]
print Solution().removeElement(test_list, 3)
| [
"mapeaks@126.com"
] | mapeaks@126.com |
94cc1dad31232e4c2af4388ced4955c649c7b260 | 1287ad54942fd2020a217ab12004a541abb62558 | /pythonexercicios/Ex108/moeda.py | 0a519a376cc21a1e962759b43bc9ca7004e980db | [] | no_license | LuPessoa/exerciciospy- | 637f24581722e547a62380973ca645b55ff65d90 | b5faad818f978bb13a65922edceb17888b73a407 | refs/heads/master | 2023-05-12T04:16:39.847184 | 2021-06-04T03:02:24 | 2021-06-04T03:02:24 | 374,410,212 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 375 | py | def aumentar(preço=0, taxa=0):
res = preço + (preço * taxa/100)
return res
def diminuir(preço=0, taxa=0):
    """Return *preço* reduced by *taxa* percent."""
    return preço - preço * taxa / 100
def dobro(preço=0):
    """Return twice *preço*."""
    return preço * 2
def metade(preço=0):
    """Return half of *preço* (true division, so ints become floats)."""
    metade_valor = preço / 2
    return metade_valor
def moeda(preço=0, moeda='R$'):
    """Format *preço* with two decimals and a comma separator, prefixed by *moeda*."""
    texto = '{}{:>.2f}'.format(moeda, preço)
    # Swap the decimal point for a comma (Brazilian convention), exactly as
    # the original did on the fully assembled string.
    return texto.replace('.', ',')
"lulenemacedo29@gmail.com"
] | lulenemacedo29@gmail.com |
2372ed755ddb8bc26b62dd243e35889f5f63cb2a | 2a3743ced45bd79826dcdc55f304da049f627f1b | /venv/lib/python3.7/site-packages/jedi/third_party/typeshed/third_party/2and3/click/termui.pyi | 95b685076433b2403becc6ca2d84a6d9f8a366c8 | [
"MIT",
"Apache-2.0"
] | permissive | Dimasik007/Deribit_funding_rate_indicator | 12cc8cd7c0be564d6e34d9eae91940c62492ae2a | 3251602ae5249069489834f9afb57b11ff37750e | refs/heads/master | 2023-05-26T10:14:20.395939 | 2019-08-03T11:35:51 | 2019-08-03T11:35:51 | 198,705,946 | 5 | 3 | MIT | 2023-05-22T22:29:24 | 2019-07-24T20:32:19 | Python | UTF-8 | Python | false | false | 3,356 | pyi | from typing import (
Any,
Callable,
Generator,
Iterable,
IO,
List,
Optional,
Text,
overload,
Tuple,
TypeVar,
)
from click.core import _ConvertibleType
from click._termui_impl import ProgressBar as _ProgressBar
def hidden_prompt_func(prompt: str) -> str:
...
def _build_prompt(
text: str,
suffix: str,
show_default: bool = ...,
default: Optional[str] = ...,
) -> str:
...
def prompt(
text: str,
default: Optional[str] = ...,
hide_input: bool = ...,
confirmation_prompt: bool = ...,
type: Optional[_ConvertibleType] = ...,
value_proc: Optional[Callable[[Optional[str]], Any]] = ...,
prompt_suffix: str = ...,
show_default: bool = ...,
err: bool = ...,
show_choices: bool = ...,
) -> Any:
...
def confirm(
text: str,
default: bool = ...,
abort: bool = ...,
prompt_suffix: str = ...,
show_default: bool = ...,
err: bool = ...,
) -> bool:
...
def get_terminal_size() -> Tuple[int, int]:
...
def echo_via_pager(text: str, color: Optional[bool] = ...) -> None:
...
_T = TypeVar('_T')
@overload
def progressbar(
iterable: Iterable[_T],
length: Optional[int] = ...,
label: Optional[str] = ...,
show_eta: bool = ...,
show_percent: Optional[bool] = ...,
show_pos: bool = ...,
item_show_func: Optional[Callable[[_T], str]] = ...,
fill_char: str = ...,
empty_char: str = ...,
bar_template: str = ...,
info_sep: str = ...,
width: int = ...,
file: Optional[IO] = ...,
color: Optional[bool] = ...,
) -> _ProgressBar[_T]:
...
@overload
def progressbar(
iterable: None = ...,
length: Optional[int] = ...,
label: Optional[str] = ...,
show_eta: bool = ...,
show_percent: Optional[bool] = ...,
show_pos: bool = ...,
item_show_func: Optional[Callable[[_T], str]] = ...,
fill_char: str = ...,
empty_char: str = ...,
bar_template: str = ...,
info_sep: str = ...,
width: int = ...,
file: Optional[IO] = ...,
color: Optional[bool] = ...,
) -> _ProgressBar[int]:
...
def clear() -> None:
...
def style(
text: str,
fg: Optional[str] = ...,
bg: Optional[str] = ...,
bold: Optional[bool] = ...,
dim: Optional[bool] = ...,
underline: Optional[bool] = ...,
blink: Optional[bool] = ...,
reverse: Optional[bool] = ...,
reset: bool = ...,
) -> str:
...
def unstyle(text: str) -> str:
...
# Styling options copied from style() for nicer type checking.
def secho(
text: str,
file: Optional[IO] = ...,
nl: bool = ...,
err: bool = ...,
color: Optional[bool] = ...,
fg: Optional[str] = ...,
bg: Optional[str] = ...,
bold: Optional[bool] = ...,
dim: Optional[bool] = ...,
underline: Optional[bool] = ...,
blink: Optional[bool] = ...,
reverse: Optional[bool] = ...,
reset: bool = ...,
):
...
def edit(
text: Optional[str] = ...,
editor: Optional[str] = ...,
env: Optional[str] = ...,
require_save: bool = ...,
extension: str = ...,
filename: Optional[str] = ...,
) -> str:
...
def launch(url: str, wait: bool = ..., locate: bool = ...) -> int:
...
def getchar(echo: bool = ...) -> Text:
...
def pause(
info: str = ..., err: bool = ...
) -> None:
...
| [
"dmitriy00vn@gmail.com"
] | dmitriy00vn@gmail.com |
d7f559bd279f22f7496147ac75813dd0b5c527ec | 21590487701d2dcbe1a1c1dd81c6e983f7523cb6 | /opentelemetry-proto/src/opentelemetry/proto/common/v1/common_pb2.pyi | 304feec5abb7924061a837d4a64891a288db52b4 | [
"Apache-2.0"
] | permissive | open-telemetry/opentelemetry-python | 837199e541c03cff311cad075401791ee2a23583 | d8490c5f557dd7005badeb800095cb51b553c98c | refs/heads/main | 2023-08-26T06:47:23.837997 | 2023-08-17T22:35:13 | 2023-08-17T22:35:13 | 185,478,926 | 1,361 | 668 | Apache-2.0 | 2023-09-14T20:48:40 | 2019-05-07T21:13:30 | Python | UTF-8 | Python | false | false | 6,760 | pyi | """
@generated by mypy-protobuf. Do not edit manually!
isort:skip_file
"""
import builtins
import google.protobuf.descriptor
import google.protobuf.internal.containers
import google.protobuf.message
import typing
import typing_extensions
DESCRIPTOR: google.protobuf.descriptor.FileDescriptor = ...
class AnyValue(google.protobuf.message.Message):
"""AnyValue is used to represent any type of attribute value. AnyValue may contain a
primitive value such as a string or integer or it may contain an arbitrary nested
object containing arrays, key-value lists and primitives.
"""
DESCRIPTOR: google.protobuf.descriptor.Descriptor = ...
STRING_VALUE_FIELD_NUMBER: builtins.int
BOOL_VALUE_FIELD_NUMBER: builtins.int
INT_VALUE_FIELD_NUMBER: builtins.int
DOUBLE_VALUE_FIELD_NUMBER: builtins.int
ARRAY_VALUE_FIELD_NUMBER: builtins.int
KVLIST_VALUE_FIELD_NUMBER: builtins.int
BYTES_VALUE_FIELD_NUMBER: builtins.int
string_value: typing.Text = ...
bool_value: builtins.bool = ...
int_value: builtins.int = ...
double_value: builtins.float = ...
@property
def array_value(self) -> global___ArrayValue: ...
@property
def kvlist_value(self) -> global___KeyValueList: ...
bytes_value: builtins.bytes = ...
def __init__(self,
*,
string_value : typing.Text = ...,
bool_value : builtins.bool = ...,
int_value : builtins.int = ...,
double_value : builtins.float = ...,
array_value : typing.Optional[global___ArrayValue] = ...,
kvlist_value : typing.Optional[global___KeyValueList] = ...,
bytes_value : builtins.bytes = ...,
) -> None: ...
def HasField(self, field_name: typing_extensions.Literal["array_value",b"array_value","bool_value",b"bool_value","bytes_value",b"bytes_value","double_value",b"double_value","int_value",b"int_value","kvlist_value",b"kvlist_value","string_value",b"string_value","value",b"value"]) -> builtins.bool: ...
def ClearField(self, field_name: typing_extensions.Literal["array_value",b"array_value","bool_value",b"bool_value","bytes_value",b"bytes_value","double_value",b"double_value","int_value",b"int_value","kvlist_value",b"kvlist_value","string_value",b"string_value","value",b"value"]) -> None: ...
def WhichOneof(self, oneof_group: typing_extensions.Literal["value",b"value"]) -> typing.Optional[typing_extensions.Literal["string_value","bool_value","int_value","double_value","array_value","kvlist_value","bytes_value"]]: ...
global___AnyValue = AnyValue
class ArrayValue(google.protobuf.message.Message):
"""ArrayValue is a list of AnyValue messages. We need ArrayValue as a message
since oneof in AnyValue does not allow repeated fields.
"""
DESCRIPTOR: google.protobuf.descriptor.Descriptor = ...
VALUES_FIELD_NUMBER: builtins.int
@property
def values(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___AnyValue]:
"""Array of values. The array may be empty (contain 0 elements)."""
pass
def __init__(self,
*,
values : typing.Optional[typing.Iterable[global___AnyValue]] = ...,
) -> None: ...
def ClearField(self, field_name: typing_extensions.Literal["values",b"values"]) -> None: ...
global___ArrayValue = ArrayValue
class KeyValueList(google.protobuf.message.Message):
"""KeyValueList is a list of KeyValue messages. We need KeyValueList as a message
since `oneof` in AnyValue does not allow repeated fields. Everywhere else where we need
a list of KeyValue messages (e.g. in Span) we use `repeated KeyValue` directly to
avoid unnecessary extra wrapping (which slows down the protocol). The 2 approaches
are semantically equivalent.
"""
DESCRIPTOR: google.protobuf.descriptor.Descriptor = ...
VALUES_FIELD_NUMBER: builtins.int
@property
def values(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___KeyValue]:
"""A collection of key/value pairs of key-value pairs. The list may be empty (may
contain 0 elements).
The keys MUST be unique (it is not allowed to have more than one
value with the same key).
"""
pass
def __init__(self,
*,
values : typing.Optional[typing.Iterable[global___KeyValue]] = ...,
) -> None: ...
def ClearField(self, field_name: typing_extensions.Literal["values",b"values"]) -> None: ...
global___KeyValueList = KeyValueList
class KeyValue(google.protobuf.message.Message):
"""KeyValue is a key-value pair that is used to store Span attributes, Link
attributes, etc.
"""
DESCRIPTOR: google.protobuf.descriptor.Descriptor = ...
KEY_FIELD_NUMBER: builtins.int
VALUE_FIELD_NUMBER: builtins.int
key: typing.Text = ...
@property
def value(self) -> global___AnyValue: ...
def __init__(self,
*,
key : typing.Text = ...,
value : typing.Optional[global___AnyValue] = ...,
) -> None: ...
def HasField(self, field_name: typing_extensions.Literal["value",b"value"]) -> builtins.bool: ...
def ClearField(self, field_name: typing_extensions.Literal["key",b"key","value",b"value"]) -> None: ...
global___KeyValue = KeyValue
class InstrumentationScope(google.protobuf.message.Message):
"""InstrumentationScope is a message representing the instrumentation scope information
such as the fully qualified name and version.
"""
DESCRIPTOR: google.protobuf.descriptor.Descriptor = ...
NAME_FIELD_NUMBER: builtins.int
VERSION_FIELD_NUMBER: builtins.int
ATTRIBUTES_FIELD_NUMBER: builtins.int
DROPPED_ATTRIBUTES_COUNT_FIELD_NUMBER: builtins.int
name: typing.Text = ...
"""An empty instrumentation scope name means the name is unknown."""
version: typing.Text = ...
@property
def attributes(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___KeyValue]:
"""Additional attributes that describe the scope. [Optional].
Attribute keys MUST be unique (it is not allowed to have more than one
attribute with the same key).
"""
pass
dropped_attributes_count: builtins.int = ...
def __init__(self,
*,
name : typing.Text = ...,
version : typing.Text = ...,
attributes : typing.Optional[typing.Iterable[global___KeyValue]] = ...,
dropped_attributes_count : builtins.int = ...,
) -> None: ...
def ClearField(self, field_name: typing_extensions.Literal["attributes",b"attributes","dropped_attributes_count",b"dropped_attributes_count","name",b"name","version",b"version"]) -> None: ...
global___InstrumentationScope = InstrumentationScope
| [
"noreply@github.com"
] | open-telemetry.noreply@github.com |
97563f2780dada381587ccbf25d41f2ad572c094 | 07eaef75c6bc0066d56a8810711e82b0e8b01dda | /options/option_generation/MOMI.py | 45e9f0749ce75b147a73a712866dff4b238e63c4 | [] | no_license | jinnaiyuu/Optimal-Options-ICML-2019 | eb10da610d8ad7828f364c1bdb2e058aa35e7d65 | 4f5cd1776b47f9b16c1022d22b2cc91d6044775b | refs/heads/master | 2021-06-21T06:54:41.968578 | 2021-02-18T09:14:09 | 2021-02-18T09:14:09 | 186,303,067 | 13 | 3 | null | null | null | null | UTF-8 | Python | false | false | 902 | py | #!/usr/bin/env python
# Libraries
import numpy as np
from options.graph.set_cover import SC_APPROX, SC_APPROX2, SC_OPT
from options.util import DeriveGraph
def MOMI(mdp, distance, l, solver):
    """Build option candidates by solving a set-cover over the l-step graph.

    Args:
        mdp: the MDP (unused directly here; kept for interface compatibility).
        distance: pairwise state-distance matrix.
        l: step budget used to derive the reachability graph.
        solver: one of 'chvatal', 'hochbaum', 'optimal'.

    Returns:
        The cover C produced by the chosen set-cover solver.

    Raises:
        ValueError: if `solver` is not one of the supported names.
    """
    # Reachability within l-1 steps, plus self-loops on the diagonal.
    X = DeriveGraph(distance, l - 1) + np.identity(distance.shape[0])

    # NOTE(review): `xg` is computed but never used below -- presumably meant
    # to filter out states already reachable within l steps; confirm intent.
    xg = []
    for s in range(X.shape[0]):
        if all(X[s] <= l):
            xg.append(s)

    if solver == 'chvatal':
        print("MOMI(l =", l, ", chvatal)")
        C = SC_APPROX2(X.transpose())
    elif solver == 'hochbaum':
        print("MOMI(l =", l, ", hochbaum)")
        C = SC_APPROX(X)
    elif solver == 'optimal':
        print("MOMI(l =", l, ", OPT)")
        C = SC_OPT(X.transpose())
    else:
        # Bug fix: the original referenced the undefined name `approx`
        # (NameError) and relied on assert(False), which is stripped under -O.
        raise ValueError('unknown solver for set cover: {}'.format(solver))
    return C
if __name__ == "__main__":
pass
| [
"ddyuudd@gmail.com"
] | ddyuudd@gmail.com |
46c77cb564c9a110aa518a527e32b9cf01996707 | 35d16ac49032083cafbc8304aebaf462d5346808 | /server/utils.py | 156d104b0b830c9816696397e17076e04be03ac4 | [
"MIT"
] | permissive | panuta/wealth-crawler-advanced | 48c601b29c505f1f31d48a98bbf60c8032136232 | 3b1bfb7f5efd9080514fa40ecdc1325f02f1a78f | refs/heads/master | 2021-04-06T19:31:24.255296 | 2018-03-15T14:56:36 | 2018-03-15T14:56:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,248 | py | import sys
def load_module_from_file(module_name, filepath, sys_path=None):
    """Import and return a module from an arbitrary file path.

    Args:
        module_name: name to register the module under.
        filepath: path to the .py file to execute.
        sys_path: optional directory temporarily prepended to sys.path while
            the module executes (so its own imports resolve).

    Returns:
        The executed module object.
    """
    if sys_path:
        sys.path.insert(0, sys_path)

    import importlib.util
    try:
        spec = importlib.util.spec_from_file_location(module_name, filepath)
        cls = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(cls)
    finally:
        # Bug fix: the original only removed sys_path on the success path,
        # leaking the entry into sys.path whenever exec_module raised.
        if sys_path:
            try:
                sys.path.remove(sys_path)
            except ValueError:
                pass
    return cls
def datetime_to_str(dt):
    """Format a datetime as 'YYYY-MM-DDTHH:MM' (minute precision)."""
    return format(dt, '%Y-%m-%dT%H:%M')
def merge_dict(existing_dict, new_dict):
    """Merge new_dict into existing_dict in place and return existing_dict.

    Keys present in both take new_dict's value. Idiomatic fix: the original
    hand-rolled loop re-implemented dict.update().
    """
    existing_dict.update(new_dict)
    return existing_dict
def crontab_hour_to_utc(crontab_hour, timezone):
    """Shift a crontab hour field (e.g. '0-5,12') from a UTC offset to UTC.

    Args:
        crontab_hour: hour field made of integers joined by '-' and ','.
        timezone: integer UTC offset of the original schedule.

    Returns:
        The shifted field, or the original string unchanged if any piece is
        not a valid integer.
    """
    import re

    rebuild_hour_items = []
    # Split on '-' and ',' while keeping the separators so the field's
    # structure is preserved verbatim.
    for hour_item in re.split(r'([-,])', crontab_hour):
        if hour_item in ('-', ','):
            rebuild_hour_items.append(hour_item)
            continue
        try:
            hour_num = int(hour_item)
        except ValueError:
            # Error, return original
            return crontab_hour
        # Bug fix: wrap into 0-23 in both directions. The original only
        # added 24 for negative results, so a negative `timezone` could
        # produce invalid hours >= 24 (e.g. 23 - (-5) -> 28).
        rebuild_hour_items.append(str((hour_num - timezone) % 24))
    return ''.join(rebuild_hour_items)
| [
"panuta@gmail.com"
] | panuta@gmail.com |
8ed83e1c7ee7d3368da4cd31284945c72863762f | 40b27bdd261a0d8a9e100bc4e83c9f76b9ef710e | /contests/ABC1-100/ABC100/d.py | 68f44a57690db03c79955432af5eba4426879e22 | [] | no_license | kouma1990/AtCoder-code-collection | 486d612ae1def6df49f4aa3632e06aae7ff73d2f | a3040a6025b43fb7dd3522945dce05a2626a92aa | refs/heads/master | 2020-04-16T22:42:39.023009 | 2019-08-29T07:05:43 | 2019-08-29T07:05:43 | 165,980,129 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 346 | py | n, m = (int(i) for i in input().split())
xyz = [[int(i) for i in input().split()] for i in range(n)]
res = 0
for i in [-1, 1]:
for j in [-1, 1]:
for k in [-1, 1]:
l = []
for x,y,z in xyz:
l.append(x*i + y*j + z*k)
res = max(res, abs(sum(sorted(l, reverse=True)[:m])))
print(res) | [
"kouma1990@gmail.com"
] | kouma1990@gmail.com |
b5e452698c80343c60fc868829630680ebdc41e0 | d3440843f0b3ed85a41e1697ed9862d50b763056 | /8.Regression/test.py | 32267e2fb85c94b891cb888205bae41c09a44ead | [] | no_license | keepingoner/ml | 6f2d800b9e37a6324b2e2e10edd9d64b1bad6fb2 | 0b0091f08c1f77ec0fd176aa3375ada4153d8732 | refs/heads/master | 2020-04-02T16:11:10.616674 | 2018-11-05T05:47:01 | 2018-11-05T05:47:01 | 154,601,937 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,429 | py | # -*- encoding:utf-8 -*-
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression, Lasso, Ridge
from sklearn.model_selection import GridSearchCV
# Ridge regression on the Advertising dataset (Python 2 script):
# grid-search alpha by 5-fold CV, report errors, and plot test vs predicted.
data = pd.read_csv('8.Advertising.csv')
x = data[['TV', 'Radio', 'Newspaper']]
y = data['Sales']
x_train, x_test, y_train, y_test = train_test_split(x, y, random_state=1)
# model = Lasso()
model = Ridge()
# Candidate regularization strengths: 10 values log-spaced in [1e-3, 1e2].
alpha_can = np.logspace(-3, 2, 10)
print(alpha_can)
lasso_model = GridSearchCV(model, param_grid={'alpha': alpha_can}, cv=5)
# NOTE(review): fit uses the FULL data (x, y), not the train split -- the
# held-out metrics below are therefore optimistic; confirm intent.
lasso_model.fit(x, y)
print '验证参数:\n', lasso_model.best_params_
y_hat = lasso_model.predict(np.array(x_test))
mse = np.average((y_hat - np.array(y_test)) ** 2)  # Mean Squared Error
rmse = np.sqrt(mse)  # Root Mean Squared Error
print mse, rmse
# Prediction accuracy (R^2 on the held-out split).
print("预测准确率{}".format(lasso_model.score(x_test, y_test)))
# Best cross-validation score.
print("交叉验证中最好的结果{}".format(lasso_model.best_score_))
# Best estimator found by the grid search.
print("最好的模型{}".format(lasso_model.best_estimator_))
# Per-candidate cross-validation results.
print("每个k的验证结果{}".format(lasso_model.cv_results_))
t = np.arange(len(x_test))
plt.plot(t, y_test, 'r-', linewidth=2, label='Test')
plt.plot(t, y_hat, 'g-', linewidth=2, label='Predict')
plt.legend(loc='upper right')
plt.grid()
plt.show()
| [
"keepingoner@163.com"
] | keepingoner@163.com |
f4aeca722de031bfba81b683aa5645494895f05c | 01d982d22d214265eeb7a00b2b8bdd8c869d9064 | /tests/test_invest.py | 316b3bc47753e1d729297ad3a606d0768ec3a132 | [
"BSD-3-Clause",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | hkotaro1215/invest | ad6874ea1a9ac73813292fb88c138d13279988b5 | 1ba08bd746977bfa8a4600ad8c821fc43598c421 | refs/heads/master | 2022-11-12T06:06:22.826122 | 2018-03-26T21:08:18 | 2018-03-26T21:08:18 | 142,378,565 | 0 | 1 | NOASSERTION | 2022-10-15T06:47:29 | 2018-07-26T02:36:20 | Python | UTF-8 | Python | false | false | 2,321 | py | """General InVEST tests."""
import unittest
import os
class FileRegistryTests(unittest.TestCase):
    """Tests for the InVEST file registry builder."""

    def test_build_file_registry_duplicate_paths(self):
        """InVEST test that file registry recognizes duplicate paths."""
        from natcap.invest import utils
        with self.assertRaises(ValueError):
            utils.build_file_registry(
                [({'a': 'a.tif'}, ''), ({'b': 'a.tif'}, '')], '')

    def test_build_file_registry_duplicate_keys(self):
        """InVEST test that file registry recognizes duplicate keys."""
        from natcap.invest import utils
        with self.assertRaises(ValueError):
            utils.build_file_registry(
                [({'a': 'a.tif'}, ''), ({'a': 'b.tif'}, '')], '')

    def test_build_file_registry(self):
        """InVEST test a complicated file registry creation."""
        from natcap.invest import utils
        dict_a = {
            'a': 'aggregated_results.shp',
            'b': 'P.tif',
            '': 'CN.tif',
            'l_avail_path': ''}

        dict_b = {
            'apple': '.shp',
            'bear': 'tif',
            'cat': 'CN.tif'}

        dict_c = {}

        result = utils.build_file_registry(
            [(dict_a, ''), (dict_b, 'foo'), (dict_c, 'garbage')], '')

        expected_dict = {
            'a': 'aggregated_results.shp',
            'b': 'P.tif',
            '': 'CN.tif',
            'l_avail_path': '',
            'apple': os.path.join('foo', '.shp'),
            'bear': os.path.join('foo', 'tif'),
            'cat': os.path.join('foo', 'CN.tif'),
        }

        # Bug fix: `iteritems()` is Python 2 only and raises AttributeError on
        # Python 3; `items()` works on both. Also untangled the swapped
        # expected/actual variable names from the original loop.
        unexpected_paths = []
        for key, expected_path in expected_dict.items():
            actual_path = os.path.normpath(result[key])
            if os.path.normpath(expected_path) != actual_path:
                unexpected_paths.append(
                    (key, os.path.normpath(expected_path), actual_path))
        extra_keys = set(result.keys()).difference(set(expected_dict.keys()))
        if len(unexpected_paths) > 0 or len(extra_keys) > 0:
            raise AssertionError(
                "Unexpected paths or keys: %s %s" % (
                    str(unexpected_paths), str(extra_keys)))
| [
"richpsharp@gmail.com"
] | richpsharp@gmail.com |
7e80b0a0be78686787eaafec4793b508eea9b27d | b332e9e5b63db27b23250ddbbb85b470ceaf92a1 | /List/minSwaps.py | e7f6e4eead0c4705df276774356d0ebc8d20dc2a | [] | no_license | huangketsudou/leetcode_python | 66fcc695b0a4f94a35cc52e161ae4bfdb1138dc2 | e983f42d245b69f9bddd9855f51ee59648a2039e | refs/heads/master | 2021-08-07T23:25:45.532458 | 2020-08-23T06:15:22 | 2020-08-23T06:15:22 | 214,324,229 | 2 | 0 | null | 2020-04-12T14:40:47 | 2019-10-11T02:16:43 | Python | UTF-8 | Python | false | false | 973 | py | from typing import List
class Solution:
def minSwaps(self, grid: List[List[int]]) -> int:
if not grid: return 0
# 统计每一行 从右向左连续0的个数
n = len(grid)
zero_nums = []
for i in range(n):
j = n - 1
while j >= 0 and grid[i][j] == 0: j -= 1
zero_nums.append(n - 1 - j)
# 贪心算法,从上到下查找满足条件的最小下标,即为交换到当前行的次数
cnt = 0
for i in range(n - 1):
need_zeros = n - 1 - i
j = i
while j < len(zero_nums) and zero_nums[j] < need_zeros: j += 1
# 没找到则说明不满足条件
if j == len(zero_nums): return -1
# 增加交换次数
cnt += j - i
# 交换数值
while j > i:
zero_nums[j], zero_nums[j-1]= zero_nums[j-1], zero_nums[j]
j -= 1
return cnt
| [
"1941161938@qq.com"
] | 1941161938@qq.com |
f99682103fd1863b63d36bb3fd3f33ba90d0dd06 | d4224cb20c48933909fc2a1834b75f4f062bd3c9 | /google_or_tools/who_killed_agatha.py | 4003b896e3136b99551cfd6911f118f9296d3f80 | [] | no_license | ajgappmark/hakank | dfe3255fd9d0bcdeb2e3eef7ad68d3428b0dc9f2 | 7c4265d109cfc3f1bf379c1140d434ccf537f982 | refs/heads/master | 2020-05-18T10:33:07.592353 | 2014-08-17T19:34:39 | 2014-08-17T19:34:39 | 23,218,111 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,967 | py | # Copyright 2010 Hakan Kjellerstrand hakank@bonetmail.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Who killed agatha? (The Dreadsbury Mansion Murder Mystery) in Google CP Solver.
This is a standard benchmark for theorem proving.
http://www.lsv.ens-cachan.fr/~goubault/H1.dist/H1.1/Doc/h1003.html
'''
Someone in Dreadsbury Mansion killed Aunt Agatha.
Agatha, the butler, and Charles live in Dreadsbury Mansion, and
are the only ones to live there. A killer always hates, and is no
richer than his victim. Charles hates noone that Agatha hates. Agatha
hates everybody except the butler. The butler hates everyone not richer
than Aunt Agatha. The butler hates everyone whom Agatha hates.
Noone hates everyone. Who killed Agatha?
'''
Originally from F. J. Pelletier:
Seventy-five problems for testing automatic theorem provers.
Journal of Automated Reasoning, 2: 216, 1986.
Note1: Since Google CP Solver/Pythons (currently) don't have
special support for logical operations on decision
variables (i.e. ->, <->, and, or, etc), this model
use some IP modeling tricks.
Note2: There are 8 different solutions, all stating that Agatha
killed herself
Compare with the following models:
* Choco : http://www.hakank.org/choco/WhoKilledAgatha.java
* Choco : http://www.hakank.org/choco/WhoKilledAgatha_element.java
* Comet : http://www.hakank.org/comet/who_killed_agatha.co
* ECLiPSE : http://www.hakank.org/eclipse/who_killed_agatha.ecl
* Gecode : http://www.hakank.org/gecode/who_killed_agatha.cpp
* JaCoP : http://www.hakank.org/JaCoP/WhoKilledAgatha.java
* JaCoP : http://www.hakank.org/JaCoP/WhoKilledAgatha_element.java
* MiniZinc: http://www.hakank.org/minizinc/who_killed_agatha.mzn
* Tailor/Essence': http://www.hakank.org/tailor/who_killed_agatha.eprime
* SICStus : http://hakank.org/sicstus/who_killed_agatha.pl
* Zinc :http://hakank.org/minizinc/who_killed_agatha.zinc
This model was created by Hakan Kjellerstrand (hakank@bonetmail.com)
Also see my other Google CP Solver models: http://www.hakank.org/google_or_tools/
"""
from collections import defaultdict
from constraint_solver import pywrapcp
def var_matrix_array(solver, rows, cols, lb, ub, name):
  """Build a rows x cols matrix of solver IntVars in [lb, ub].

  Each variable is named '<name>[i,j]' after its matrix position.
  """
  return [[solver.IntVar(lb, ub, '%s[%i,%i]' % (name, i, j))
           for j in range(cols)]
          for i in range(rows)]
def flatten_matrix(solver, m, rows, cols):
  """Return the entries of matrix m in row-major order as a flat list.

  The solver argument is unused; it is kept for signature compatibility
  with the other helpers in this model.
  """
  flat = []
  for i in range(rows):
    for j in range(cols):
      flat.append(m[i][j])
  return flat
def print_flat_matrix(m_flat, rows, cols):
  # Print a flattened rows x cols matrix of solved IntVars as a grid,
  # one matrix row per output line, followed by a blank line.
  # (Python 2 print statements; trailing comma suppresses the newline.)
  for i in range(rows):
    for j in range(cols):
      # row-major index into the flat list
      print m_flat[i*cols+j].Value(),
    print
  print
def main(the_killers):
  """Solve the Dreadsbury Mansion puzzle once and tally the killer.

  :param the_killers: counter mapping person index -> number of solutions
      in which that person is the killer; mutated in place while the
      solver enumerates all solutions.
  """

  # Create the solver.
  solver = pywrapcp.Solver('Who killed agatha?')

  #
  # data
  #
  n = 3
  agatha = 0
  butler = 1
  charles = 2

  #
  # declare variables
  #
  the_killer = solver.IntVar(0,2, 'the_killer')
  the_victim = solver.IntVar(0,2, 'the_victim' )

  # hates[i][j] = 1 iff person i hates person j; similarly for richer.
  hates = var_matrix_array(solver, n, n, 0, 1, 'hates')
  richer = var_matrix_array(solver, n, n, 0, 1, 'richer')
  # Flattened views are needed because Element() indexes a 1-D array.
  hates_flat = flatten_matrix(solver, hates, n, n)
  richer_flat = flatten_matrix(solver, richer, n, n)

  #
  # constraints
  #
  # NOTE: implications between 0/1 variables are encoded with `<=`:
  # (a == 1) <= (b == 1) states "a implies b" (see Note1 in the module
  # docstring about the lack of direct logical operators).

  # Agatha, the butler, and Charles live in Dreadsbury Mansion, and
  # are the only ones to live there.

  # A killer always hates, and is no richer than his victim.
  # solver.Add(hates[the_killer, the_victim] == 1)
  solver.Add(solver.Element(hates_flat,the_killer*n+the_victim) == 1)

  # solver.Add(richer[the_killer, the_victim] == 0)
  solver.Add(solver.Element(richer_flat,the_killer*n+the_victim) == 0)

  # define the concept of richer: no one is richer than him-/herself
  for i in range(n):
    solver.Add(richer[i][i] == 0)

  # (contd...) if i is richer than j then j is not richer than i
  #  (i != j) => (richer[i,j] = 1) <=> (richer[j,i] = 0),
  for i in range(n):
    for j in range(n):
      if i != j:
        solver.Add((richer[i][j] == 1) == (richer[j][i] == 0))

  # Charles hates noone that Agatha hates.
  #forall i : Range .
  #   (hates[agatha, i] = 1) => (hates[charles, i] = 0),
  for i in range(n):
    solver.Add((hates[agatha][i]==1) <= (hates[charles][i] == 0))

  # Agatha hates everybody except the butler.
  solver.Add(hates[agatha][charles] == 1)
  solver.Add(hates[agatha][agatha] == 1)
  solver.Add(hates[agatha][butler] == 0)

  # The butler hates everyone not richer than Aunt Agatha.
  # forall i : Range .
  #  (richer[i, agatha] = 0) => (hates[butler, i] = 1),
  for i in range(n):
    solver.Add((richer[i][agatha]==0) <= (hates[butler][i]==1))

  # The butler hates everyone whom Agatha hates.
  #forall i : Range .
  #   (hates[agatha, i] = 1) => (hates[butler, i] = 1),
  for i in range(n):
    solver.Add((hates[agatha][i]==1) <= (hates[butler][i]==1))

  # Noone hates everyone.
  # forall i : Range .
  #   (sum j : Range . hates[i,j]) <= 2,
  for i in range(n):
    solver.Add(solver.Sum([hates[i][j] for j in range(n)]) <= 2)

  # Who killed Agatha?
  solver.Add(the_victim == agatha)

  #
  # solution and search
  #
  solution = solver.Assignment()
  solution.Add(the_killer)
  solution.Add(the_victim)
  solution.Add(hates_flat)
  solution.Add(richer_flat)

  # db: DecisionBuilder
  db = solver.Phase(hates_flat + richer_flat,
                    solver.CHOOSE_FIRST_UNBOUND,
                    solver.ASSIGN_MIN_VALUE)

  solver.NewSearch(db)
  num_solutions = 0
  # Enumerate every solution; the killer tally is accumulated in the
  # caller-supplied dict.
  while solver.NextSolution():
    print "the_killer:", the_killer.Value()
    the_killers[the_killer.Value()] += 1
    print "the_victim:", the_victim.Value()
    print "hates:"
    print_flat_matrix(hates_flat,n,n)
    print "richer:"
    print_flat_matrix(richer_flat,n,n)
    print

    num_solutions += 1

  solver.EndSearch()

  print
  print "num_solutions:", num_solutions
  print "failures:", solver.Failures()
  print "branches:", solver.Branches()
  print "WallTime:", solver.WallTime()
# Tally of how many solutions name each person index as the killer.
the_killers = defaultdict(int)
# Index -> person name, matching the agatha/butler/charles constants in main().
p = ["agatha", "butler", "charles"]

if __name__ == '__main__':
  main(the_killers)
  print "\n"
  for k in the_killers:
    print "the killer %s was choosen in %i solutions" % (p[k], the_killers[k])
| [
"hakank@gmail.com"
] | hakank@gmail.com |
3490eb215b290ce4f27cdec5797fab3a54c5595b | 9d30115d59ed821a5c7aecf2318b5e0ed22c9676 | /src/codewars/python/6kyu/alphabet_position.py | ea394f02b1c84aeb367855d718532ac9ca2829c7 | [] | no_license | garigari-kun/til | 02c7bf05274d1077b454e1f7d4a7355849441524 | b71f36a66045ab7da7f4a97f7e18de2aaa05f493 | refs/heads/master | 2020-04-16T02:13:45.727909 | 2018-12-16T01:26:40 | 2018-12-16T01:26:40 | 56,369,670 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 840 | py | """
Welcome.
In this kata you are required to, given a string, replace every letter with its position in the alphabet.
If anything in the text isn't a letter, ignore it and don't return it.
a being 1, b being 2, etc.
As an example:
alphabet_position("The sunset sets at twelve o' clock.")
Should return "20 8 5 19 21 14 19 5 20 19 5 20 19 1 20 20 23 5 12 22 5 15 3 12 15 3 11" as a string.
"""
import string
def alphabet_position(text):
    """Return the 1-based alphabet positions of each letter in text.

    Non-letter characters are skipped; positions are joined with single
    spaces ('a' -> 1, 'b' -> 2, ..., case-insensitive).
    """
    return ' '.join(
        str(string.ascii_lowercase.index(ch.lower()) + 1)
        for ch in text
        if ch in string.ascii_letters)
return ' '.join(pos_list)
if __name__ == '__main__':
    # Demo run using the example sentence from the kata description.
    print(alphabet_position("The sunset sets at twelve o' clock."))
| [
"keisuke.cs@gmail.com"
] | keisuke.cs@gmail.com |
7cdc9598584590dacb3d6b8a0f07716b5b178462 | 97a09265d7898765a3f561c1b4a12e5b46346db8 | /30DaysOfCode/day27_testing.py | cdd71b0cc82b218eaa507470534ddd6b8be43b44 | [] | no_license | 14E47/Hackerrank | 35e7b5520fe00ae98377624b8429d42d237cbd46 | c2af2fa7ee49c2a94304ee543900425f5a3b6551 | refs/heads/master | 2020-03-26T21:22:45.492630 | 2019-10-04T03:37:14 | 2019-10-04T03:37:14 | 145,384,365 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 885 | py | def minimum_index(seq):
if len(seq) == 0:
raise ValueError("Cannot get the minimum value index from an empty sequence")
min_idx = 0
for i in range(1, len(seq)):
if seq[i] < seq[min_idx]:
min_idx = i
return min_idx
class TestDataEmptyArray(object):
    """Test fixture supplying an empty input sequence."""

    @staticmethod
    def get_array():
        """Return the (empty) array under test."""
        return []
class TestDataUniqueValues(object):
    """Test fixture: an array whose values are all distinct."""

    @staticmethod
    def get_array():
        """Return the array under test."""
        return [5, 7, 2, 4]

    @staticmethod
    def get_expected_result():
        """Return the index of the minimum element of get_array()."""
        return 2
class TestDataExactlyTwoDifferentMinimums(object):
    """Test fixture: the minimum value occurs at exactly two positions."""

    @staticmethod
    def get_array():
        """Return the array under test."""
        return [5, 4, 2, 3, 2, 7]

    @staticmethod
    def get_expected_result():
        """Return the index of the FIRST occurrence of the minimum."""
        return 2
"wasique3@gmail.com"
] | wasique3@gmail.com |
7ab151207135bb1d3da3bcd2c20b4b0233a5da8d | e821f62aead9a6a4911435224ecf3ff9ccb2be96 | /CNN/tok.py | 76f589c72efc2394d98d31273775f77d2e6868c5 | [] | no_license | dagrawa2/toxic_comments | 799bcaabf8d8bf461fd5a011e2fc124379d021ea | aaccdc3184b48ff6086093a70cda9bbd20ff0f02 | refs/heads/master | 2022-12-28T23:03:03.139883 | 2018-04-29T21:00:00 | 2018-04-29T21:00:00 | 302,200,948 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 871 | py | import pickle
import numpy as np
import pandas as pd
from keras.preprocessing.text import Tokenizer
# Vocabulary size: keep only the top num_words most frequent tokens.
num_words = 10000

# Fit the tokenizer on the training comments only, so the vocabulary is
# derived exclusively from training data.
data = pd.read_csv("../Data/train.csv")
comments = data["comment_text"].tolist()

T = Tokenizer(num_words=num_words)
T.fit_on_texts(comments)

# Tokenized training comments as lists of word indices.
X_train = T.texts_to_sequences(comments)
with open("Objects/X_train.list", "wb") as fp:
    pickle.dump(X_train, fp)

# Columns 2+ hold the six binary toxicity labels (id/text are cols 0-1).
# NOTE(review): DataFrame.as_matrix() is deprecated in newer pandas;
# .values / .to_numpy() is the modern equivalent — confirm pandas version.
Y_train = data.iloc[:,2:].as_matrix()
np.save("Objects/Y_train.npy", Y_train)

# Apply the same (training-fitted) tokenizer to the test comments.
data = pd.read_csv("../Data/test.csv")
comments = data["comment_text"].tolist()
X_test = T.texts_to_sequences(comments)
with open("Objects/X_test.list", "wb") as fp:
    pickle.dump(X_test, fp)

# Persist the fitted tokenizer for later decoding/reuse.
with open("Objects/T.tok", "wb") as fp:
    pickle.dump(T, fp)

labels = ["toxic", "severe_toxic", "obscene", "threat", "insult", "identity_hate"]
with open("Objects/labels.list", "wb") as fp:
    pickle.dump(labels, fp)
| [
"agrawald@goldmail.etsu.edu"
] | agrawald@goldmail.etsu.edu |
8722b14a680985dc607c5e90cf3a7732dc440d27 | f8d2521a88e465eed01adc3981c7a173d5c2554b | /round/round0401-0425/round0408/a1.py | 74f23d763b4d58d544c619ea8758163784228ab0 | [] | no_license | clarinet758/codeforces | b2a8a349bba40e7761a8ce50dd5ff9a57477b60d | d79870c47bdb109547891a0d076dd173d6d647cf | refs/heads/main | 2021-12-15T05:46:51.000160 | 2021-12-01T12:01:33 | 2021-12-01T12:01:33 | 41,968,658 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 335 | py | #!/usr/bin/env pypy3
# -*- coding: UTF-8 -*-
n,m,k=map(int,input().split())
a=[int(i) for i in input().split()]
ans=100
for x,i in enumerate(range(m,n,1)):
if a[i]>0 and a[i]<=k:
ans=x+1
break
for y,j in enumerate(range(m-2,-1,-1)):
if a[j]>0 and a[j]<=k:
ans=min(ans,y+1)
break
print(ans*10)
| [
"clarinet758@gmail.com"
] | clarinet758@gmail.com |
230a5484a1735e92d8f44f8d59512c0924044e05 | 6bf4867b690f59a77f7caddc1238c3bae6b3e1c3 | /rally/benchmark/scenarios/sahara/utils.py | c0acb424077e0f750aafd32e32798f1cb9f58cd6 | [
"Apache-2.0"
] | permissive | kambiz-aghaiepour/rally | 641c044cc24c10eb15e4d6b4ab3bc4885779e076 | be708bacf0bc898a9538b9b6cb0ba4e1c015c1f2 | refs/heads/master | 2021-01-15T19:35:15.318291 | 2014-08-18T23:51:30 | 2014-08-18T23:51:30 | 23,090,342 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,610 | py | # Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from saharaclient.api import base as sahara_base
from rally.benchmark.scenarios import base
from rally.benchmark import utils as bench_utils
from rally.openstack.common import log as logging
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CREATE_CLUSTER_OPTS = [
cfg.IntOpt("cluster_create_timeout", default=600,
help="A timeout in seconds for a cluster create operation"),
cfg.IntOpt("cluster_check_interval", default=5,
help="Cluster status polling interval in seconds")
]
benchmark_group = cfg.OptGroup(name='benchmark', title='benchmark options')
CONF.register_opts(CREATE_CLUSTER_OPTS, group=benchmark_group)
class SaharaScenario(base.Scenario):
RESOURCE_NAME_LENGTH = 20
# TODO(nkonovalov): Add other provisioning plugins
NODE_PROCESSES = {
"vanilla": {
"1.2.1": {
"master": ["namenode", "jobtracker"],
"worker": ["datanode", "tasktracker"]
},
"2.3.0": {
"master": ["namenode", "resourcemanager", "historyserver"],
"worker": ["datanode", "nodemanager"]
}
}
}
REPLICATION_CONFIGS = {
"vanilla": {
"1.2.1": {
"target": "HDFS",
"config_name": "dfs.replication"
},
"2.3.0": {
"target": "HDFS",
"config_name": "dfs.replication"
}
}
}
@base.atomic_action_timer('sahara.list_node_group_templates')
def _list_node_group_templates(self):
"""Returns user Node Group Templates list."""
return self.clients("sahara").node_group_templates.list()
@base.atomic_action_timer('sahara.create_master_node_group_template')
def _create_master_node_group_template(self, flavor_id, plugin_name,
hadoop_version):
"""Creates a master Node Group Template with a random name.
:param flavor_id: The required argument for the Template
:param plugin_name: Sahara provisioning plugin name
:param hadoop_version: The version of Hadoop distribution supported by
the plugin
:return: The created Template
"""
name = self._generate_random_name(prefix="master-ngt-")
return self.clients("sahara").node_group_templates.create(
name=name,
plugin_name=plugin_name,
hadoop_version=hadoop_version,
flavor_id=flavor_id,
node_processes=self.NODE_PROCESSES[plugin_name][hadoop_version]
["master"])
@base.atomic_action_timer('sahara.create_worker_node_group_template')
def _create_worker_node_group_template(self, flavor_id, plugin_name,
hadoop_version):
"""Creates a worker Node Group Template with a random name.
:param flavor_id: The required argument for the Template
:param plugin_name: Sahara provisioning plugin name
:param hadoop_version: The version of Hadoop distribution supported by
the plugin
:return: The created Template
"""
name = self._generate_random_name(prefix="worker-ngt-")
return self.clients("sahara").node_group_templates.create(
name=name,
plugin_name=plugin_name,
hadoop_version=hadoop_version,
flavor_id=flavor_id,
node_processes=self.NODE_PROCESSES[plugin_name][hadoop_version]
["worker"])
@base.atomic_action_timer('sahara.delete_node_group_template')
def _delete_node_group_template(self, node_group):
"""Deletes a Node Group Template by id.
:param node_group: The Node Group Template to be deleted
:return:
"""
self.clients("sahara").node_group_templates.delete(node_group.id)
@base.atomic_action_timer('sahara.launch_cluster')
def _launch_cluster(self, plugin_name, hadoop_version, flavor_id,
image_id, node_count):
"""Creates a cluster and wait until it becomes Active.
The cluster is created with two node groups. The master Node Group is
created with one instance. The worker node group contains
node_count - 1 instances.
:param plugin_name: The provisioning plugin name
:param hadoop_version: Hadoop version supported by the plugin
:param flavor_id: The flavor which will be used to create instances
:param image_id: The image id that will be used to boot instances
:param node_count: The total number of instances. 1 master node, others
for the workers
:return: The created cluster
"""
node_groups = [
{
"name": "master-ng",
"flavor_id": flavor_id,
"node_processes": self.NODE_PROCESSES[plugin_name]
[hadoop_version]["master"],
"count": 1
}, {
"name": "worker-ng",
"flavor_id": flavor_id,
"node_processes": self.NODE_PROCESSES[plugin_name]
[hadoop_version]["worker"],
"count": node_count - 1
}
]
name = self._generate_random_name(prefix="sahara-cluster-")
replication_value = min(node_count - 1, 3)
# 3 is a default Hadoop replication
conf = self.REPLICATION_CONFIGS[plugin_name][hadoop_version]
LOG.debug("Using replication factor: %s" % replication_value)
cluster_object = self.clients("sahara").clusters.create(
name=name,
plugin_name=plugin_name,
hadoop_version=hadoop_version,
node_groups=node_groups,
default_image_id=image_id,
cluster_configs={conf["target"]: {
conf["config_name"]: replication_value}
}
)
def is_active(cluster_id):
return self.clients("sahara").clusters.get(
cluster_id).status.lower() == "active"
bench_utils.wait_for(
resource=cluster_object.id, is_ready=is_active,
timeout=CONF.benchmark.cluster_create_timeout,
check_interval=CONF.benchmark.cluster_check_interval)
return self.clients("sahara").clusters.get(cluster_object.id)
@base.atomic_action_timer('sahara.delete_cluster')
def _delete_cluster(self, cluster):
"""Calls a Cluster delete by id and waits for complete deletion.
:param cluster: The Cluster to be deleted
:return:
"""
self.clients("sahara").clusters.delete(cluster.id)
def is_deleted(cl_id):
try:
self.clients("sahara").clusters.get(cl_id)
return False
except sahara_base.APIException:
return True
bench_utils.wait_for(resource=cluster.id, is_ready=is_deleted)
| [
"nkonovalov@mirantis.com"
] | nkonovalov@mirantis.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.