blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4f9d62aa2da06ea70094554246c08561f472dcb1 | b5a2aff7633943eda013175c170edfb24e7151cd | /auth-api/migrations/versions/2023_07_12_13121bcf368a_affilation_invitations.py | 4e475a8ceb825ad9403256ffe76a83352827b9c5 | [
"Apache-2.0"
] | permissive | bcgov/sbc-auth | 2b6bbc09b731edc0c986a261ade625d4f8ad344c | 923cb8a3ee88dcbaf0fe800ca70022b3c13c1d01 | refs/heads/main | 2023-08-07T18:41:18.624886 | 2023-08-04T21:58:30 | 2023-08-04T21:58:30 | 168,407,060 | 13 | 127 | Apache-2.0 | 2023-09-14T21:54:11 | 2019-01-30T20:03:54 | Python | UTF-8 | Python | false | false | 3,123 | py | """affilation_invitations
Revision ID: 13121bcf368a
Revises: d53a79e9cc89
Create Date: 2023-07-12 14:23:09.044117
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '13121bcf368a'
down_revision = 'd53a79e9cc89'
branch_labels = None
depends_on = None
def upgrade():
    """Create the affiliation-invitation lookup table, seed its rows, and
    create the main affiliation_invitations table (with FKs to users, orgs,
    entities, invitation_statuses and the new type table)."""
    # Lookup table of invitation flavours; keep a handle so we can bulk-seed it.
    affiliation_invitation_type_table = op.create_table('affiliation_invitation_types',
        sa.Column('code', sa.String(length=15), nullable=False),
        sa.Column('description', sa.String(length=100), nullable=False),
        sa.Column('default', sa.Boolean(), nullable=False),
        sa.PrimaryKeyConstraint('code')
    )
    # Seed the two supported invitation types; EMAIL is the default flavour.
    op.bulk_insert(
        affiliation_invitation_type_table,
        [
            {'code': 'EMAIL', 'description': 'An affiliation invitation initiated through email on file for an entity', 'default': True},
            {'code': 'PASSCODE', 'description': 'An affiliation invitation initiated through a valid passcode for an entity', 'default': False}
        ]
    )
    # Main table; must be created after the type table so the 'type' FK resolves.
    op.create_table('affiliation_invitations',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('from_org_id', sa.Integer(), nullable=False),
        sa.Column('to_org_id', sa.Integer(), nullable=False),
        sa.Column('entity_id', sa.Integer(), nullable=False),
        sa.Column('affiliation_id', sa.Integer(), nullable=True),
        sa.Column('sender_id', sa.Integer(), nullable=False),
        sa.Column('approver_id', sa.Integer(), nullable=True),
        sa.Column('recipient_email', sa.String(length=100), nullable=False),
        sa.Column('sent_date', sa.DateTime(), nullable=False),
        sa.Column('accepted_date', sa.DateTime(), nullable=True),
        sa.Column('token', sa.String(length=150), nullable=True),
        sa.Column('invitation_status_code', sa.String(length=15), nullable=False),
        sa.Column('created', sa.DateTime(), nullable=True),
        sa.Column('modified', sa.DateTime(), nullable=True),
        sa.Column('created_by_id', sa.Integer(), nullable=True),
        sa.Column('modified_by_id', sa.Integer(), nullable=True),
        sa.Column('login_source', sa.String(length=20), nullable=True),
        sa.Column('type', sa.String(length=15), nullable=False),
        sa.ForeignKeyConstraint(['created_by_id'], ['users.id'], ),
        sa.ForeignKeyConstraint(['modified_by_id'], ['users.id'], ),
        sa.ForeignKeyConstraint(['invitation_status_code'], ['invitation_statuses.code'], ),
        sa.ForeignKeyConstraint(['sender_id'], ['users.id'], ),
        sa.ForeignKeyConstraint(['approver_id'], ['users.id'], ),
        sa.ForeignKeyConstraint(['from_org_id'], ['orgs.id'], ),
        sa.ForeignKeyConstraint(['to_org_id'], ['orgs.id'], ),
        sa.ForeignKeyConstraint(['entity_id'], ['entities.id'], ),
        sa.ForeignKeyConstraint(['type'], ['affiliation_invitation_types.code'], ),
        sa.PrimaryKeyConstraint('id')
    )
def downgrade():
    """Drop both tables; the child table goes first so its FK to the type
    lookup table does not block the second drop."""
    op.drop_table('affiliation_invitations')
    op.drop_table('affiliation_invitation_types')
| [
"noreply@github.com"
] | bcgov.noreply@github.com |
0f0915539bc2c348b99db0c151703a827352700d | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /benchmark/startQiskit1970.py | c64955e26b46381d388da22726e8a1db51f4cdbc | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,881 | py | # qubit number=4
# total number=31
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
    """XOR two bit strings character-wise.

    Iteration is driven by ``len(s)`` (so a shorter ``t`` raises IndexError,
    as in the original).  NOTE: the result is deliberately reversed
    (little-endian convention, ``[::-1]``) relative to the inputs.
    """
    out = [str(int(s[k]) ^ int(t[k])) for k in range(len(s))]
    return ''.join(out[::-1])
def bitwise_dot(s: str, t: str) -> str:
    """Inner product of two bit strings modulo 2, returned as '0' or '1'.

    Driven by ``len(s)``, so a shorter ``t`` raises IndexError (same as the
    original loop-based version).
    """
    total = sum(int(s[k]) * int(t[k]) for k in range(len(s)))
    return str(total % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
    """Build the bit-flip oracle O_f on n control qubits plus 1 target qubit.

    For every n-bit string ``rep`` with ``f(rep) == "1"``, the target qubit is
    flipped when the controls are in state |rep>.  This enumerates all 2**n
    inputs, so it is exponential in n (fine for the small n used here).
    """
    # implement the oracle O_f
    # NOTE: use multi_control_toffoli_gate ('noancilla' mode)
    # https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
    # https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
    # https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
    controls = QuantumRegister(n, "ofc")
    target = QuantumRegister(1, "oft")
    oracle = QuantumCircuit(controls, target, name="Of")
    for i in range(2 ** n):
        rep = np.binary_repr(i, n)
        if f(rep) == "1":
            # X-sandwich: map |rep> onto |11...1> so the multi-controlled
            # Toffoli fires exactly for this bit pattern, then undo the Xs.
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
            oracle.mct(controls, target[0], None, mode='noancilla')
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
    # oracle.barrier()
    return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
    """Build the benchmark circuit: an oracle for ``f`` wrapped in a fixed,
    auto-generated gate sequence (the ``# number=`` comments are generator
    IDs), ending with measurement of all n qubits.

    NOTE(review): the gate sequence appears machine-generated/mutated for
    benchmarking purposes; the exact ordering is significant, do not reorder.
    """
    # circuit begin
    input_qubit = QuantumRegister(n,"qc")
    classical = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classical)
    prog.cx(input_qubit[0],input_qubit[3]) # number=28
    prog.x(input_qubit[3]) # number=29
    prog.cx(input_qubit[0],input_qubit[3]) # number=30
    prog.h(input_qubit[1]) # number=2
    prog.h(input_qubit[2]) # number=3
    prog.h(input_qubit[3]) # number=4
    prog.h(input_qubit[0]) # number=5
    # Oracle acts on the first n-1 qubits as controls, last qubit as target.
    oracle = build_oracle(n-1, f)
    prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
    prog.h(input_qubit[1]) # number=6
    prog.h(input_qubit[2]) # number=7
    prog.h(input_qubit[3]) # number=8
    prog.y(input_qubit[1]) # number=19
    prog.h(input_qubit[0]) # number=9
    prog.y(input_qubit[2]) # number=10
    prog.y(input_qubit[2]) # number=11
    prog.y(input_qubit[3]) # number=20
    prog.y(input_qubit[1]) # number=12
    prog.rx(-2.158274153016188,input_qubit[3]) # number=24
    prog.h(input_qubit[0]) # number=16
    prog.cz(input_qubit[2],input_qubit[0]) # number=17
    prog.h(input_qubit[0]) # number=18
    prog.cx(input_qubit[1],input_qubit[0]) # number=21
    prog.z(input_qubit[1]) # number=22
    prog.cx(input_qubit[1],input_qubit[0]) # number=23
    prog.h(input_qubit[0]) # number=25
    prog.cz(input_qubit[2],input_qubit[0]) # number=26
    prog.h(input_qubit[0]) # number=27
    # circuit end
    # Measure every qubit into its matching classical bit.
    for i in range(n):
        prog.measure(input_qubit[i], classical[i])
    return prog
if __name__ == '__main__':
    # f(rep) = (a . rep) XOR b — a Bernstein–Vazirani-style inner-product
    # function over 3-bit strings (the oracle is built on n-1 = 3 qubits).
    a = "111"
    b = "0"
    f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
    prog = make_circuit(4,f)
    # Simulate the ideal circuit, then transpile against a fake device model.
    backend = BasicAer.get_backend('qasm_simulator')
    sample_shot =8000
    info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
    backend = FakeVigo()
    circuit1 = transpile(prog,backend,optimization_level=2)
    # Dump counts, transpiled depth and the circuit itself for offline diffing.
    writefile = open("../data/startQiskit1970.csv","w")
    print(info,file=writefile)
    print("results end", file=writefile)
    print(circuit1.__len__(),file=writefile)
    print(circuit1,file=writefile)
    writefile.close()
| [
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
c3614c7f16d58de96e7bedd2b614ed8ebe0e7908 | bcd6de19314b6c895caa8697261a1324afed30ab | /Numpy/NumpyRandom/Exercise2.py | 524feeffd60efea372712cbc57a90fa41ebe8d1f | [] | no_license | ErenBtrk/Python-Fundamentals | 2f84c146a1123692181b27aa87547dc13b6b3ad4 | 97cb5fea19c9f17af14c36a646c044ce22cfb058 | refs/heads/master | 2023-08-29T10:38:11.652809 | 2021-10-30T13:22:27 | 2021-10-30T13:22:27 | 398,052,285 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 159 | py | '''
2. Write a NumPy program to generate six random integers between 10 and 30.
'''
import numpy as np
# randint's upper bound is exclusive, so this draws six integers in [10, 30)
# i.e. 10..29 — "between 10 and 30" per the exercise statement.
np_array = np.random.randint(10,30,6)
print(np_array)
"="
] | = |
f96eb8dbb3d2cfaa07c2822db4f73ccfa1989ae0 | 49536aafb22a77a6caf249c7fadef46d63d24dfe | /tensorflow/tensorflow/python/kernel_tests/string_join_op_test.py | 9a0c3c5dd8c36e679023dcb740fa59e868df7650 | [
"Apache-2.0"
] | permissive | wangzhi01/deeplearning-1 | 4e5ad93f0d9ecd302b74352f80fe1fa6ae70bf0d | 46ab82253d956953b8aa98e97ceb6cd290e82288 | refs/heads/master | 2020-05-28T03:14:55.687567 | 2018-09-12T16:52:09 | 2018-09-12T16:52:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,948 | py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for string_join_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test
class StringJoinOpTest(test.TestCase):
  """Tests for tf.string_join: element-wise join with broadcasting of
  scalars/compatible shapes, optional separator, and shape checking."""

  def testStringJoin(self):
    """Covers list+scalar broadcasting, custom separators, repeated inputs,
    nested shapes, and the inconsistent-shape error path."""
    input0 = ["a", "b"]
    input1 = "a"
    input2 = [["b"], ["c"]]

    with self.test_session():
      # Scalar input1 broadcasts against the 2-element input0.
      output = string_ops.string_join([input0, input1])
      self.assertAllEqual(output.eval(), [b"aa", b"ba"])

      output = string_ops.string_join([input0, input1], separator="--")
      self.assertAllEqual(output.eval(), [b"a--a", b"b--a"])

      output = string_ops.string_join([input0, input1, input0], separator="--")
      self.assertAllEqual(output.eval(), [b"a--a--a", b"b--a--b"])

      output = string_ops.string_join([input1] * 4, separator="!")
      self.assertEqual(output.eval(), b"a!a!a!a")

      # Joining a 2-D input with itself keeps the 2-D shape.
      output = string_ops.string_join([input2] * 2, separator="")
      self.assertAllEqual(output.eval(), [[b"bb"], [b"cc"]])

      with self.assertRaises(ValueError):  # Inconsistent shapes
        string_ops.string_join([input0, input2]).eval()
if __name__ == "__main__":
test.main()
| [
"hanshuobest@163.com"
] | hanshuobest@163.com |
5e5b973c105628eeef0fadb521fdad9df59abc3c | a8289cb7273245e7ec1e6079c7f266db4d38c03f | /Django_Ninja_myFirst/djangonautic/djangonautic/settings.py | 2d37838eee629288565d08c59f2c3ed5d0e56a5e | [] | no_license | palmarytech/Python_Snippet | 6acbd572d939bc9d5d765800f35a0204bc044708 | 41b4ebe15509d166c82edd23b713a1f3bf0458c5 | refs/heads/master | 2022-10-06T22:51:00.469383 | 2020-03-13T08:32:11 | 2020-03-13T08:32:11 | 272,350,189 | 1 | 0 | null | 2020-06-15T05:30:44 | 2020-06-15T05:30:44 | null | UTF-8 | Python | false | false | 3,299 | py | """
Django settings for djangonautic project.
Generated by 'django-admin startproject' using Django 2.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control — rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = 'l&bf_yq5lh^jmpk*kghlr7ok)!$t3!dw@bdx80l)77dq83+yxn'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []

# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Project apps: "articles" by module name, "accounts" via its AppConfig.
    "articles",
    "accounts.apps.AccountsConfig",
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'djangonautic.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # Project-level "templates" dir searched before per-app template dirs.
        'DIRS': ["templates"],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'djangonautic.wsgi.application'

# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/

STATIC_URL = '/static/'

# Extra static-asset source directory (project-level "assets").
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, "assets"),
)

# User-uploaded media.
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, "media")
| [
"leamon.lee13@gmail.com"
] | leamon.lee13@gmail.com |
f9b7f2a6703e75d3dd2514c1de417aea71516645 | 7174b27cd79cad398ffa1add9b59da6e9adbeae4 | /python_algorithm/02_sort_algorithm_second/05_merge_sort_second.py | dafbfc57b5f73a3e8206cd5f693a5eee4564f450 | [] | no_license | UULIN/py | ddf037021afce04e46d51c133bfa06257ef7200a | a5d32597fc91fbd5ec41f54fb942c82300766299 | refs/heads/master | 2021-07-18T08:20:49.342072 | 2020-10-21T14:41:42 | 2020-10-21T14:41:42 | 222,977,134 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 635 | py | # 归并排序 时间复杂度O(nlogn),算法为稳定的算法
def merge_sort(list):
    """Top-down merge sort, O(n log n): split in half, sort each half
    recursively, then merge.  Returns a new sorted list; the input is not
    modified.  (Parameter name kept for backward compatibility even though
    it shadows the builtin.)"""
    if len(list) <= 1:
        return list
    middle = len(list) // 2
    left_half = merge_sort(list[:middle])
    right_half = merge_sort(list[middle:])
    return merge(left_half, right_half)


def merge(left, right):
    """Merge two sorted lists into one sorted list.

    Uses strict ``<``, so on ties the element from ``right`` is emitted
    first (preserved from the original implementation).
    """
    merged = []
    i = j = 0
    while i < len(left) and j < len(right):
        if left[i] < right[j]:
            merged.append(left[i])
            i += 1
        else:
            merged.append(right[j])
            j += 1
    merged.extend(left[i:])
    merged.extend(right[j:])
    return merged
if __name__ == '__main__':
    # Quick manual check of merge_sort on a small unsorted list.
    list = [44,2,12,5,222,54]
    print(merge_sort(list))
"1036190402@qq.com"
] | 1036190402@qq.com |
7411b7839ff70ca1d62490f2a7ecff2155421bd4 | 3da45c63f83b5acac33da5be8d300b384721c26b | /evolution/evolution_019/examples/simple/simple.py | 323e6053295c28eef1c11d1606958fc10ab61be7 | [] | no_license | lanzhiwang/japronto_evolution | f18a9fb46c12add9336838c9d716d427a7845eac | 3948c213d1465f8ad82ad8d21555cf70d9323e16 | refs/heads/master | 2021-06-28T02:20:39.589983 | 2019-11-11T11:28:16 | 2019-11-11T11:28:16 | 217,689,114 | 1 | 0 | null | 2021-02-26T03:06:20 | 2019-10-26T09:56:09 | C | UTF-8 | Python | false | false | 1,673 | py | import asyncio
import uvloop
import argparse
import os.path
import sys
import socket
sys.path.insert(0, os.path.abspath(os.path.dirname(__file__) + '/../..'))
import protocol.handler
from router.cmatcher import Matcher
from router import Router
from app import Application
def slash(request):
    """Handler for '/': reply with a fixed plain-text greeting."""
    body = 'Hello slash!'
    return request.Response(text=body)
def hello(request):
    """Handler for '/hello': reply with a fixed plain-text greeting."""
    body = 'Hello hello!'
    return request.Response(text=body)
async def sleep(request):
    """Async handler: wait 3 s without blocking the event loop, then reply."""
    await asyncio.sleep(3)
    return request.Response(text='I am sleepy')
async def loop(request):
    """Async handler: ten 1-second ticks (printing 0..9), then respond."""
    for tick in range(10):
        await asyncio.sleep(1)
        print(tick)
    return request.Response(text='Loop finished')
def dump(request):
    """Debug handler: echo the parsed request attributes plus the socket's
    TCP_NODELAY flag as plain text, with a custom X-Version header."""
    # Reach through the transport to the raw socket to read TCP_NODELAY.
    sock = request.transport.get_extra_info('socket')
    no_delay = sock.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY)
    text = """
Method: {0.method}
Path: {0.path}
Version: {0.version}
Headers: {0.headers}
Match: {0.match_dict}
Body: {0.body}
QS: {0.query_string}
query: {0.query}
mime_type: {0.mime_type}
encoding: {0.encoding}
form: {0.form}
keep_alive: {0.keep_alive}
no_delay: {1}
""".strip().format(request, no_delay)
    return request.Response(text=text, headers={'X-Version': '123'})
# Wire up the application: register all demo routes, and when run as a
# script pick the protocol handler flavor from the -p flag.
app = Application()

r = app.get_router()
r.add_route('/', slash)
r.add_route('/hello', hello)
r.add_route('/dump/{this}/{that}', dump)
r.add_route('/sleep/{pinch}', sleep)
r.add_route('/loop', loop)

if __name__ == '__main__':
    argparser = argparse.ArgumentParser('server')
    argparser.add_argument(
        '-p', dest='flavor', default='block')
    args = argparser.parse_args(sys.argv[1:])

    app.serve(protocol.handler.make_class(args.flavor))
| [
"hzhilamp@163.com"
] | hzhilamp@163.com |
0b8c7992c57b64ed235b8758e9031b6bc070b0e4 | 135f293948b38aa8eaa6ac31dde79f8bc091fe5b | /Estruturas de repetição em Python/Exercicio6.py | 1b6f04289d83f3d6c0ad823072233fd274246f12 | [] | no_license | Kaiquenakao/Python | 70e42b5fee08a3b50c34913f2f5763f07e754c25 | 008eefce828522b5107917dc699fc3590c7ef52c | refs/heads/master | 2023-08-12T17:03:24.317996 | 2021-09-25T22:31:46 | 2021-09-25T22:31:46 | 279,083,005 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 267 | py | """
6. Faça um programa que leia 10 inteiros e imprima a sua média.
"""
import statistics

# Read 10 integers from the user (positions 1..10) and print their mean.
lista = []
for num in range(1,10+1):
    numero = int(input(f'Posição{num}:digite o seu número:'))
    lista.append(numero)
res = statistics.mean(lista)
print(res)
"noreply@github.com"
] | Kaiquenakao.noreply@github.com |
592bf070a40a376611e2edb7073c328b0f797fea | d4412fbe37540e2c4cbe59ed6503d3661ccb7d9c | /colossalai/auto_parallel/tensor_shard/node_handler/view_handler.py | 7dff89d1d7a39a6e4fe73514bfb16abe2e3e7bea | [
"BSD-3-Clause",
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0",
"BSD-2-Clause",
"MIT"
] | permissive | hpcaitech/ColossalAI | a082ed08a3807b53c49d1f86835b9808590d9042 | c7b60f75470f067d1342705708810a660eabd684 | refs/heads/main | 2023-09-01T04:13:13.834565 | 2023-08-30T15:07:21 | 2023-08-30T15:07:21 | 422,274,596 | 32,044 | 4,084 | Apache-2.0 | 2023-09-14T15:19:54 | 2021-10-28T16:19:44 | Python | UTF-8 | Python | false | false | 1,963 | py | from typing import Dict, List
import torch
from ..sharding_strategy import OperationData, OperationDataType
from .node_handler import NodeHandler
from .registry import operator_registry
from .strategy import StrategyGenerator, ViewGenerator
__all__ = ['ViewHandler']
# Registered for all three reshape-style torch entry points so the auto
# parallel tracer dispatches them to this handler.
@operator_registry.register(torch.Tensor.reshape)
@operator_registry.register(torch.reshape)
@operator_registry.register(torch.Tensor.view)
class ViewHandler(NodeHandler):
    """
    A ViewHandler which deals with the sharding strategies for Reshape Op, such as torch.reshape.
    """

    def get_strategy_generator(self) -> List[StrategyGenerator]:
        """Return the strategy generators for this node (a single
        ViewGenerator, which also needs the input node for its analysis)."""
        op_data_mapping = self.get_operation_data_mapping()
        generators = []
        generators.append(ViewGenerator(op_data_mapping, self.device_mesh, self.node.args[0]))
        return generators

    def get_operation_data_mapping(self) -> Dict[str, OperationData]:
        """Build the input / tgt_shape / output OperationData mapping that
        the strategy generator consumes."""
        # use transposed shape for strategies
        # the strategies will be transformed back to its original shape in self.post_process

        # check if the input operand is a parameter
        if isinstance(self.node.args[0]._meta_data, torch.nn.parameter.Parameter):
            data_type = OperationDataType.PARAM
        else:
            data_type = OperationDataType.ARG

        input_data = self.node.args[0]._meta_data
        physical_input_operand = OperationData(name=str(self.node.args[0]), type=data_type, data=input_data)

        # Target shape is taken from the traced node's meta output, not from
        # the raw call args (which may contain -1 placeholders).
        target_shape = self.node._meta_data.shape
        physical_shape_operand = OperationData(name='tgt_shape', type=OperationDataType.ARG, data=target_shape)

        output_data = self.node._meta_data
        physical_output_operand = OperationData(name=str(self.node), type=OperationDataType.OUTPUT, data=output_data)
        mapping = {
            "input": physical_input_operand,
            "tgt_shape": physical_shape_operand,
            "output": physical_output_operand
        }

        return mapping
| [
"noreply@github.com"
] | hpcaitech.noreply@github.com |
3da1e5262469df9c14cd952f0747b4e3bef46952 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /v2k5hKnb4d5srYFvE_4.py | 5b7640777bb53834f2eb74dae83f1e5af5b779fc | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 830 | py | """
The function is given a string containing digits from `2` to `9`. Return a set
of all possible letter combinations that could represent the digit-string.
### Digits to Letters Mapping
d = { "2": "abc", "3": "def", "4": "ghi", "5": "jkl", "6": "mno", "7": "pqrs", "8": "tuv", "9": "wxyz" }
### Examples
letters_combinations("23") ➞ { "ad", "ae", "af", "bd", "be", "bf", "cd", "ce", "cf" }
letters_combinations("") ➞ set()
letters_combinations("2") ➞ { "a", "b", "c" }
### Notes
N/A
"""
from itertools import product as PD
def letters_combinations(digits):
    """Return the set of all letter strings spelled by a phone-keypad
    digit string (digits '2'..'9').  An empty input yields an empty set."""
    keypad = {
        "2": "abc", "3": "def", "4": "ghi", "5": "jkl",
        "6": "mno", "7": "pqrs", "8": "tuv", "9": "wxyz",
    }
    if not digits:
        return set()
    letter_groups = [keypad[digit] for digit in digits]
    return {''.join(combo) for combo in PD(*letter_groups)}
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
4488f1ad332e4e5b0aa60af09fa2aea20a06dc43 | 8997a0bf1e3b6efe5dd9d5f307e1459f15501f5a | /qt__pyqt__pyside__pyqode/QImageReader.supportedImageFormats.py | cc5d7e46505ef0e11a9fbb95760f2868b47f8933 | [
"CC-BY-4.0"
] | permissive | stepik/SimplePyScripts | 01092eb1b2c1c33756427abb2debbd0c0abf533f | 3259d88cb58b650549080d6f63b15910ae7e4779 | refs/heads/master | 2023-05-15T17:35:55.743164 | 2021-06-11T22:59:07 | 2021-06-11T22:59:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 351 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-

__author__ = 'ipetrash'


from PyQt5.QtGui import QImageReader

# supportedImageFormats() returns QByteArray items; decode each to a str
# to get a plain list of format names.
print(
    list(map(lambda x: x.data().decode(), QImageReader.supportedImageFormats()))
)
# ['bmp', 'cur', 'gif', 'icns', 'ico', 'jpeg', 'jpg', 'pbm', 'pgm', 'png', 'ppm', 'svg', 'svgz', 'tga', 'tif', 'tiff', 'wbmp', 'webp', 'xbm', 'xpm']
| [
"ilya.petrash@inbox.ru"
] | ilya.petrash@inbox.ru |
c89128f095fb57cf33b927c35a9545a9bfcbfba0 | 46aa6f17c8c37a2c9e9a4fc7dad71ed2424f9a4c | /cvbase/io.py | 4e1bfacec3f5c1115efc1d107839236124e9277b | [] | no_license | standardgalactic/cvbase | 216d5946acc13a1a53f26f0168d4aa1d8e56a76e | e48d2f76aaeea6472b2175c37780f7b90ae7f1b6 | refs/heads/master | 2023-03-19T07:36:25.891140 | 2018-10-12T16:58:24 | 2018-10-12T16:58:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,906 | py | import json
import os
import sys
try:
import cPickle as pickle
except:
import pickle
import yaml
try:
from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
from yaml import Loader, Dumper
from multiprocessing import Process, Queue
from os import path
def json_load(file):
    """Load a JSON document from a filename or a readable file object.

    Raises:
        TypeError: if *file* is neither a str nor file-like.
    """
    if isinstance(file, str):
        with open(file, 'r') as f:
            return json.load(f)
    if hasattr(file, 'read'):
        return json.load(file)
    raise TypeError('"file" must be a filename str or a file-object')
def json_dump(obj, file=None, **kwargs):
    """Serialize *obj* as JSON.

    Returns the JSON string when *file* is None; otherwise writes to the
    given filename or writable file object (and returns None).
    Raises TypeError for any other *file* argument.
    """
    if file is None:
        return json.dumps(obj, **kwargs)
    if isinstance(file, str):
        with open(file, 'w') as f:
            json.dump(obj, f, **kwargs)
    elif hasattr(file, 'write'):
        json.dump(obj, file, **kwargs)
    else:
        raise TypeError('"file" must be a filename str or a file-object')
def yaml_load(file, **kwargs):
    """Load a YAML document from a filename or a readable file object.

    Uses the module-level ``Loader`` (the C-accelerated one when available)
    unless a ``Loader=`` kwarg overrides it.
    """
    kwargs.setdefault('Loader', Loader)
    if isinstance(file, str):
        with open(file, 'r') as f:
            return yaml.load(f, **kwargs)
    if hasattr(file, 'read'):
        return yaml.load(file, **kwargs)
    raise TypeError('"file" must be a filename str or a file-object')
def yaml_dump(obj, file=None, **kwargs):
    """Serialize *obj* as YAML.

    Returns the YAML string when *file* is None; otherwise writes to the
    given filename or writable file object (and returns None).  Uses the
    module-level ``Dumper`` unless overridden via kwargs.
    """
    kwargs.setdefault('Dumper', Dumper)
    if file is None:
        return yaml.dump(obj, **kwargs)
    if isinstance(file, str):
        with open(file, 'w') as f:
            yaml.dump(obj, f, **kwargs)
    elif hasattr(file, 'write'):
        yaml.dump(obj, file, **kwargs)
    else:
        raise TypeError('"file" must be a filename str or a file-object')
def pickle_load(file, **kwargs):
    """Unpickle an object from a filename or a readable binary file object."""
    if isinstance(file, str):
        with open(file, 'rb') as f:
            return pickle.load(f, **kwargs)
    if hasattr(file, 'read'):
        return pickle.load(file, **kwargs)
    raise TypeError('"file" must be a filename str or a file-object')
def pickle_dump(obj, file=None, **kwargs):
    """Pickle *obj*.

    Returns the pickle bytes when *file* is None; otherwise writes to the
    given filename or writable binary file object (and returns None).
    Defaults to protocol 2 (Python-2 compatible) unless overridden.
    """
    kwargs.setdefault('protocol', 2)
    if file is None:
        return pickle.dumps(obj, **kwargs)
    if isinstance(file, str):
        with open(file, 'wb') as f:
            pickle.dump(obj, f, **kwargs)
    elif hasattr(file, 'write'):
        pickle.dump(obj, file, **kwargs)
    else:
        raise TypeError('"file" must be a filename str or a file-object')
def load(file, format=None, **kwargs):
    """Load contents from json/yaml/pickle files, and also supports
    custom arguments for each file format.

    This method provides a unified api for loading from serialized files.

    Args:
        file(str or file-like object): filename or the file-like object
        format(None or str): if it is None, file format is inferred from the
                             file extension, otherwise use the specified one.
                             Currently supported formats are "json", "yaml",
                             "yml", "pickle" and "pkl"

    Returns:
        The content from the file
    """
    handlers = {
        'json': json_load,
        'yaml': yaml_load,
        'yml': yaml_load,
        'pickle': pickle_load,
        'pkl': pickle_load,
    }
    if format is None and isinstance(file, str):
        # Infer the format from the file extension.
        format = file.split('.')[-1]
    handler = handlers.get(format)
    if handler is None:
        raise TypeError('Unsupported format: ' + format)
    return handler(file, **kwargs)
def dump(obj, file=None, format=None, **kwargs):
    """Dump contents to json/yaml/pickle strings or files.

    This method provides a unified api for dumping to files, and also supports
    custom arguments for each file format.

    Args:
        obj(any): the python object to be dumped
        file(None or str or file-like object): if None, dump to a string and
            return it, otherwise dump to the given filename or file object
        format(None or str): same as :func:`load`

    Returns:
        The serialized string when *file* is None, otherwise the format
        handler's return value (normally None).
    """
    handlers = {
        'json': json_dump,
        'yaml': yaml_dump,
        'yml': yaml_dump,
        'pickle': pickle_dump,
        'pkl': pickle_dump,
    }
    if format is None:
        if isinstance(file, str):
            format = file.split('.')[-1]
        elif file is None:
            # Dumping to a string with no extension to infer from.
            raise ValueError('format must be specified')
    handler = handlers.get(format)
    if handler is None:
        raise TypeError('Unsupported format: ' + format)
    return handler(obj, file, **kwargs)
def list_from_file(filename, prefix='', offset=0, max_num=0):
    """Read a text file into a list of stripped lines.

    Args:
        filename(str): file to read
        prefix(str): string prepended to every returned item
        offset(int): number of leading lines to skip
        max_num(int): maximum number of items to return (0 means no limit)

    Returns:
        list[str]: the collected lines (without trailing newlines)
    """
    items = []
    with open(filename, 'r') as f:
        for _ in range(offset):
            f.readline()
        for line in f:
            if max_num > 0 and len(items) >= max_num:
                break
            items.append(prefix + line.rstrip('\n'))
    return items
def dict_from_file(filename, key_type=str):
    """Parse a whitespace-separated mapping file into a dict.

    Each line must have at least two space-separated fields.  The first
    field (converted with *key_type*) is the key; a single remaining field
    becomes a str value, multiple remaining fields become a list.
    """
    mapping = {}
    with open(filename, 'r') as f:
        for line in f:
            parts = line.rstrip('\n').split(' ')
            assert len(parts) >= 2
            value = parts[1] if len(parts) == 2 else parts[1:]
            mapping[key_type(parts[0])] = value
    return mapping
class AsyncDumper(Process):
    """Background process that pickles objects to files asynchronously.

    Usage: ``d = AsyncDumper(); d.start(); d.dump(obj, 'out.pkl'); ...;
    d.stop(); d.join()``.

    Fix: the worker loop exits on a ``(None, ...)`` sentinel, but there was
    previously no public way to send it — callers had to poke the private
    queue.  ``stop()`` now provides a clean shutdown path.
    """

    def __init__(self):
        # The queue must exist before Process.__init__ so it is inherited
        # by the child process.
        self._io_queue = Queue()
        super(AsyncDumper, self).__init__()

    def run(self):
        """Worker loop: pickle each queued (obj, filename) pair until the
        None sentinel arrives."""
        while True:
            data, out_file = self._io_queue.get()
            if data is None:
                break
            pickle_dump(data, out_file)

    def dump(self, obj, filename):
        """Queue *obj* to be pickled to *filename* by the worker process."""
        self._io_queue.put((obj, filename))

    def stop(self):
        """Ask the worker to exit after draining already-queued dumps.

        (Named ``stop`` rather than ``close`` to avoid clashing with
        ``multiprocessing.Process.close`` on Python 3.7+.)
        """
        self._io_queue.put((None, None))
def check_file_exist(filename, msg_tmpl='file "{}" not exist:'):
    """Raise if *filename* does not point to an existing regular file.

    Raises FileNotFoundError on Python > 3.3 and IOError on older
    interpreters (they are the same class on modern Python anyway).
    """
    if path.isfile(filename):
        return
    message = msg_tmpl.format(filename)
    if sys.version_info > (3, 3):
        raise FileNotFoundError(message)
    raise IOError(message)
def mkdir_or_exist(dir_name):
    """Create *dir_name* (including parents) if it does not already exist.

    Fix: the original ``if not path.isdir(...): os.makedirs(...)`` pattern
    was racy (TOCTOU) — a directory created by another process between the
    check and the call made ``makedirs`` raise.  Attempt the creation first
    and only re-raise when the path still is not a directory, which keeps
    the original behaviour (raising when the path exists as a file) while
    staying compatible with Python 2 (no ``exist_ok``).
    """
    try:
        os.makedirs(dir_name)
    except OSError:
        if not path.isdir(dir_name):
            raise
def _scandir_py35(dir_path, suffix=None):
for entry in os.scandir(dir_path):
if not entry.is_file():
continue
filename = entry.name
if suffix is None:
yield filename
elif filename.endswith(suffix):
yield filename
def _scandir_py(dir_path, suffix=None):
for filename in os.listdir(dir_path):
if not path.isfile(path.join(dir_path, filename)):
continue
if suffix is None:
yield filename
elif filename.endswith(suffix):
yield filename
def scandir(dir_path, suffix=None):
    """Yield names of regular files in *dir_path*, optionally filtered by a
    *suffix* string or tuple of strings.

    Bug fix: the original version-gate read ``sys.version[0] == 3 and
    sys.version[1] >= 5`` — but ``sys.version`` is a *string*, so
    ``sys.version[0]`` is the character ``'3'`` (never equal to the int 3)
    and ``sys.version[1]`` is ``'.'``.  The os.scandir fast path was
    therefore never taken.  ``sys.version_info`` is the tuple that was
    intended.
    """
    if suffix is not None and not isinstance(suffix, (str, tuple)):
        raise TypeError('"suffix" must be a string or tuple of strings')
    if sys.version_info >= (3, 5):
        return _scandir_py35(dir_path, suffix)
    return _scandir_py(dir_path, suffix)
| [
"chenkaidev@gmail.com"
] | chenkaidev@gmail.com |
d019ed1143b156e9db31274659858439040608b0 | 57e35b1595483ae7b3ce248d7b24214111d00bc8 | /tests/tet_inheritance_customizing.py | b743774179ecfd78bd0c4bc9299116649cc494fe | [
"MIT"
] | permissive | bnzk/django-svg-tag | 68814a2057cb0361d7f31d8dedd33ddc2142c07a | 638f00091cb34d3ccb514c472a8f9ed546c6a949 | refs/heads/master | 2021-06-18T00:10:39.799499 | 2019-09-16T16:36:48 | 2019-09-16T16:36:48 | 145,836,201 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,144 | py | # -*- coding: utf-8 -*-
from django.core.urlresolvers import reverse
from selenium.webdriver.support.expected_conditions import visibility_of
from selenium.webdriver.support.wait import WebDriverWait
from djangocms_baseplugins.tests.utils.django_utils import create_superuser
from djangocms_baseplugins.tests.utils.selenium_utils import SeleniumTestCase, CustomWebDriver, \
invisibility_of
from djangocms_baseplugins.tests.test_app.models import TestModelSingle, TestModelAdvanced
class FormFieldStashAdminTests(SeleniumTestCase):
    """Browser-level admin tests: form fields are shown or hidden depending
    on the value of a "selection" / "set" control (field-stash widget)."""

    def setUp(self):
        """Create one empty and one populated instance of each test model,
        plus a superuser and a fresh WebDriver per test."""
        self.single_empty = TestModelSingle()
        self.single_empty.save()
        self.single = TestModelSingle(**{'selection': 'octopus', })
        self.single.save()
        self.advanced_empty = TestModelAdvanced()
        self.advanced_empty.save()
        self.advanced = TestModelAdvanced(**{'set': 'set1', })
        self.advanced.save()
        self.superuser = create_superuser()
        # Instantiating the WebDriver will load your browser
        self.wd = CustomWebDriver()

    def tearDown(self):
        # Always shut the browser down, even if the test failed.
        self.wd.quit()

    def test_app_index_get(self):
        """Smoke test: the test app appears on the admin index page."""
        self.login()
        self.open(reverse('admin:index'))
        self.wd.find_css(".app-test_app")

    def test_single_stash_empty(self):
        """With no selection chosen, all stashed fields stay hidden."""
        self.login()
        self.open(reverse('admin:test_app_testmodelsingle_change', args=[self.single_empty.id]))
        horse = self.wd.find_css("div.field-horse")
        # why wait? widget init delays initialization for 20ms, for other widgets to initialize.
        wait = WebDriverWait(self.wd, 1)
        wait.until(invisibility_of(horse))
        # self.assertFalse(horse.is_displayed())
        bear = self.wd.find_css("div.field-bear")
        wait.until(invisibility_of(bear))
        # self.assertFalse(bear.is_displayed())
        octo = self.wd.find_css("div.field-octopus")
        wait.until(invisibility_of(octo))
        # self.assertFalse(octo.is_displayed())

    def test_single_stash(self):
        """With selection='octopus' only the octopus field shows; switching
        the select to 'horse' swaps visibility immediately."""
        self.login()
        self.open(reverse('admin:test_app_testmodelsingle_change', args=[self.single.id]))
        horse = self.wd.find_css("div.field-horse")
        wait = WebDriverWait(self.wd, 1)
        wait.until(invisibility_of(horse))
        # self.assertFalse(horse.is_displayed())
        bear = self.wd.find_css("div.field-bear")
        wait.until(invisibility_of(bear))
        # self.assertFalse(bear.is_displayed())
        octo = self.wd.find_css("div.field-octopus")
        wait.until(visibility_of(octo))
        # self.assertTrue(octo.is_displayed())
        # change select value
        self.wd.find_css("div.field-selection select > option[value=horse]").click()
        horse = self.wd.find_css("div.field-horse")
        self.assertTrue(horse.is_displayed())
        octo = self.wd.find_css("div.field-octopus")
        self.assertFalse(octo.is_displayed())

    def test_multi_stash_empty(self):
        """With no set chosen, the inline group and all set fields stay hidden."""
        self.login()
        self.open(reverse('admin:test_app_testmodeladvanced_change', args=[self.advanced_empty.id]))
        inline = self.wd.find_css("#testinlinemodel_set-group")
        wait = WebDriverWait(self.wd, 1)
        wait.until(invisibility_of(inline))
        # self.assertFalse(inline.is_displayed())
        f11 = self.wd.find_css("div.field-set1_1")
        wait.until(invisibility_of(f11))
        # self.assertFalse(f11.is_displayed())
        f31 = self.wd.find_css("div.field-set3_1")
        wait.until(invisibility_of(f31))
        # self.assertFalse(f31.is_displayed())

    def test_multi_stash(self):
        """With set='set1', the inline group and set1 fields show while
        fields from other sets stay hidden."""
        self.login()
        self.open(reverse('admin:test_app_testmodeladvanced_change', args=[self.advanced.id]))
        inline = self.wd.find_css("#testinlinemodel_set-group")
        wait = WebDriverWait(self.wd, 1)
        wait.until(visibility_of(inline))
        # self.assertTrue(inline.is_displayed())
        f11 = self.wd.find_css("div.field-set1_1")
        wait.until(visibility_of(f11))
        # self.assertTrue(f11.is_displayed())
        f31 = self.wd.find_css("div.field-set3_1")
        wait.until(invisibility_of(f31))
        # self.assertFalse(f31.is_displayed())
| [
"bnzk@bnzk.ch"
] | bnzk@bnzk.ch |
8b78350da9457b861ef7bd05ee58e3fafe4f9d12 | a2ed68f3ccd04ab77912601740a3ad2ff8514c9f | /www/site/webroot/app/controller/__init__.py | f1d6f97bb349c37d3ecc9c024ccad6aa9c282fc2 | [] | no_license | lincolnhuang/www-tp4a-com | 1701c757a9d3469fa52bf9445b4557292ca87b73 | 49de46741ff43fed510653d1bd2b2fb7778c47ef | refs/heads/master | 2023-04-07T15:39:42.869839 | 2021-04-15T12:03:05 | 2021-04-15T12:03:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,282 | py | # -*- coding: utf-8 -*-
from . import download
from . import support
from . import index
from . import donate
from . import util
__all__ = ['controllers']
controllers = [
(r'/', index.IndexHandler),
# ====================================================
# 下载页面
# ====================================================
(r'/download/', download.IndexHandler),
(r'/download', download.IndexHandler),
(r'/download/old', download.OldHandler),
(r'/download/refresh', download.RefreshHandler),
(r'/download/refresh-old', download.RefreshOldHandler),
(r'/download/get-file/(.*)', download.GetFileHandler),
# ====================================================
# 捐助页面
# ====================================================
(r'/donate/', donate.IndexHandler),
(r'/donate', donate.IndexHandler),
# ====================================================
# 支持页面
# ====================================================
(r'/support/', support.IndexHandler),
(r'/support', support.IndexHandler),
(r'/util/myip/(.*)/(.*)', util.MyIpHandler),
# 最后加入一个 catch all 的handler,展示404错误
(r'/.*', index.CatchAllHandler),
]
| [
"apex.liu@qq.com"
] | apex.liu@qq.com |
a16d08cd7865026335a2f835ad573cba8f62b30d | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/contrib/cv/semantic_segmentation/DeeplabV3_for_Pytorch/configs/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K.py | 7ac5db1493bf51068871fffb4456d8054c307791 | [
"Apache-2.0",
"GPL-1.0-or-later",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 2,044 | py | # Copyright 2021 Huawei
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
_base_ = [
'../_base_/models/upernet_swin.py', '../_base_/datasets/ade20k.py',
'../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
]
model = dict(
pretrained=\
'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth', # noqa
backbone=dict(
embed_dims=96,
depths=[2, 2, 6, 2],
num_heads=[3, 6, 12, 24],
window_size=7,
use_abs_pos_embed=False,
drop_path_rate=0.3,
patch_norm=True,
pretrain_style='official'),
decode_head=dict(in_channels=[96, 192, 384, 768], num_classes=150),
auxiliary_head=dict(in_channels=384, num_classes=150))
# AdamW optimizer, no weight decay for position embedding & layer norm
# in backbone
optimizer = dict(
_delete_=True,
type='AdamW',
lr=0.00006,
betas=(0.9, 0.999),
weight_decay=0.01,
paramwise_cfg=dict(
custom_keys={
'absolute_pos_embed': dict(decay_mult=0.),
'relative_position_bias_table': dict(decay_mult=0.),
'norm': dict(decay_mult=0.)
}))
lr_config = dict(
_delete_=True,
policy='poly',
warmup='linear',
warmup_iters=1500,
warmup_ratio=1e-6,
power=1.0,
min_lr=0.0,
by_epoch=False)
# By default, models are trained on 8 GPUs with 2 images per GPU
data = dict(samples_per_gpu=2)
| [
"wangjiangben@huawei.com"
] | wangjiangben@huawei.com |
3bfcc4ee3ccb467ff52a7e4705fa06b8973d0d66 | df11a41c67dae050e4b11026e92d914c02141d79 | /spinoffs/oryx/oryx/bijectors/__init__.py | 449240cd826b7b53ee7797fea84742c6b3eb9fe0 | [
"Apache-2.0"
] | permissive | frankfan007/probability | 65824ed405e134c0d496befb47a50f181864ac1d | 257e614b4b658f48aea32f8be9721077d5acdf5c | refs/heads/master | 2022-11-08T07:35:43.534546 | 2020-06-26T21:42:43 | 2020-06-26T21:43:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,210 | py | # Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Module for probability bijectors and related functions."""
import inspect
from tensorflow_probability.python.experimental.substrates import jax as tfp
from oryx.bijectors import bijector_extensions
tfb = tfp.bijectors
_bijectors = {}
for name in dir(tfb):
bij = getattr(tfb, name)
if inspect.isclass(bij) and issubclass(bij, tfb.Bijector):
if bij is not tfb.Bijector:
bij = bijector_extensions.make_type(bij)
_bijectors[name] = bij
for key, val in _bijectors.items():
locals()[key] = val
del _bijectors
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
bc652ec645dff6961d5dd2ac88a1811ec724b9e0 | eb64b799ff1d7ef3a244bf8e6f9f4e9118d5cfcd | /homeassistant/components/yamaha_musiccast/__init__.py | d984aaceb96f9f32ea3889a425b31b2058a8666f | [
"Apache-2.0"
] | permissive | JeffLIrion/home-assistant | 53966b81b5d5816679f12fc761f79e8777c738d6 | 8f4ec89be6c2505d8a59eee44de335abe308ac9f | refs/heads/dev | 2023-08-22T09:42:02.399277 | 2022-02-16T01:26:13 | 2022-02-16T01:26:13 | 136,679,169 | 5 | 2 | Apache-2.0 | 2023-09-13T06:59:25 | 2018-06-09T00:58:35 | Python | UTF-8 | Python | false | false | 7,680 | py | """The MusicCast integration."""
from __future__ import annotations
from datetime import timedelta
import logging
from aiomusiccast import MusicCastConnectionException
from aiomusiccast.capabilities import Capability
from aiomusiccast.musiccast_device import MusicCastData, MusicCastDevice
from homeassistant.components import ssdp
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_HOST, Platform
from homeassistant.core import HomeAssistant
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.device_registry import CONNECTION_NETWORK_MAC, format_mac
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
UpdateFailed,
)
from .const import (
BRAND,
CONF_SERIAL,
CONF_UPNP_DESC,
DEFAULT_ZONE,
DOMAIN,
ENTITY_CATEGORY_MAPPING,
)
PLATFORMS = [Platform.MEDIA_PLAYER, Platform.NUMBER, Platform.SELECT]
_LOGGER = logging.getLogger(__name__)
SCAN_INTERVAL = timedelta(seconds=60)
async def get_upnp_desc(hass: HomeAssistant, host: str):
"""Get the upnp description URL for a given host, using the SSPD scanner."""
ssdp_entries = await ssdp.async_get_discovery_info_by_st(hass, "upnp:rootdevice")
matches = [w for w in ssdp_entries if w.ssdp_headers.get("_host", "") == host]
upnp_desc = None
for match in matches:
if upnp_desc := match.ssdp_location:
break
if not upnp_desc:
_LOGGER.warning(
"The upnp_description was not found automatically, setting a default one"
)
upnp_desc = f"http://{host}:49154/MediaRenderer/desc.xml"
return upnp_desc
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up MusicCast from a config entry."""
if entry.data.get(CONF_UPNP_DESC) is None:
hass.config_entries.async_update_entry(
entry,
data={
CONF_HOST: entry.data[CONF_HOST],
CONF_SERIAL: entry.data["serial"],
CONF_UPNP_DESC: await get_upnp_desc(hass, entry.data[CONF_HOST]),
},
)
client = MusicCastDevice(
entry.data[CONF_HOST],
async_get_clientsession(hass),
entry.data[CONF_UPNP_DESC],
)
coordinator = MusicCastDataUpdateCoordinator(hass, client=client)
await coordinator.async_config_entry_first_refresh()
coordinator.musiccast.build_capabilities()
hass.data.setdefault(DOMAIN, {})
hass.data[DOMAIN][entry.entry_id] = coordinator
await coordinator.musiccast.device.enable_polling()
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
entry.async_on_unload(entry.add_update_listener(async_reload_entry))
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload a config entry."""
unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
if unload_ok:
hass.data[DOMAIN][entry.entry_id].musiccast.device.disable_polling()
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok
async def async_reload_entry(hass: HomeAssistant, entry: ConfigEntry) -> None:
"""Reload config entry."""
await hass.config_entries.async_reload(entry.entry_id)
class MusicCastDataUpdateCoordinator(DataUpdateCoordinator[MusicCastData]):
"""Class to manage fetching data from the API."""
def __init__(self, hass: HomeAssistant, client: MusicCastDevice) -> None:
"""Initialize."""
self.musiccast = client
super().__init__(hass, _LOGGER, name=DOMAIN, update_interval=SCAN_INTERVAL)
self.entities: list[MusicCastDeviceEntity] = []
async def _async_update_data(self) -> MusicCastData:
"""Update data via library."""
try:
await self.musiccast.fetch()
except MusicCastConnectionException as exception:
raise UpdateFailed() from exception
return self.musiccast.data
class MusicCastEntity(CoordinatorEntity):
"""Defines a base MusicCast entity."""
coordinator: MusicCastDataUpdateCoordinator
def __init__(
self,
*,
name: str,
icon: str,
coordinator: MusicCastDataUpdateCoordinator,
enabled_default: bool = True,
) -> None:
"""Initialize the MusicCast entity."""
super().__init__(coordinator)
self._enabled_default = enabled_default
self._icon = icon
self._name = name
@property
def name(self) -> str:
"""Return the name of the entity."""
return self._name
@property
def icon(self) -> str:
"""Return the mdi icon of the entity."""
return self._icon
@property
def entity_registry_enabled_default(self) -> bool:
"""Return if the entity should be enabled when first added to the entity registry."""
return self._enabled_default
class MusicCastDeviceEntity(MusicCastEntity):
"""Defines a MusicCast device entity."""
_zone_id: str = DEFAULT_ZONE
@property
def device_id(self):
"""Return the ID of the current device."""
if self._zone_id == DEFAULT_ZONE:
return self.coordinator.data.device_id
return f"{self.coordinator.data.device_id}_{self._zone_id}"
@property
def device_name(self):
"""Return the name of the current device."""
return self.coordinator.data.zones[self._zone_id].name
@property
def device_info(self) -> DeviceInfo:
"""Return device information about this MusicCast device."""
device_info = DeviceInfo(
name=self.device_name,
identifiers={
(
DOMAIN,
self.device_id,
)
},
manufacturer=BRAND,
model=self.coordinator.data.model_name,
sw_version=self.coordinator.data.system_version,
)
if self._zone_id == DEFAULT_ZONE:
device_info["connections"] = {
(CONNECTION_NETWORK_MAC, format_mac(mac))
for mac in self.coordinator.data.mac_addresses.values()
}
else:
device_info["via_device"] = (DOMAIN, self.coordinator.data.device_id)
return device_info
async def async_added_to_hass(self):
"""Run when this Entity has been added to HA."""
await super().async_added_to_hass()
# All entities should register callbacks to update HA when their state changes
self.coordinator.musiccast.register_callback(self.async_write_ha_state)
async def async_will_remove_from_hass(self):
"""Entity being removed from hass."""
await super().async_will_remove_from_hass()
self.coordinator.musiccast.remove_callback(self.async_write_ha_state)
class MusicCastCapabilityEntity(MusicCastDeviceEntity):
"""Base Entity type for all capabilities."""
def __init__(
self,
coordinator: MusicCastDataUpdateCoordinator,
capability: Capability,
zone_id: str = None,
) -> None:
"""Initialize a capability based entity."""
if zone_id is not None:
self._zone_id = zone_id
self.capability = capability
super().__init__(name=capability.name, icon="", coordinator=coordinator)
self._attr_entity_category = ENTITY_CATEGORY_MAPPING.get(capability.entity_type)
@property
def unique_id(self) -> str:
"""Return the unique ID for this entity."""
return f"{self.device_id}_{self.capability.id}"
| [
"noreply@github.com"
] | JeffLIrion.noreply@github.com |
a4bec651ffed468d41715fb02ab6959c496ee803 | e014e5324ac8c05f14fc96227295dbf2007054e5 | /2018/delete_inteface_18058.py | a1481b2476747ba5d4a6c69ad8dddeb3144cd99a | [] | no_license | analylx/Py_script | 6fa75c02caa53d57c3ffdf5f098de43a62b38346 | 68098875a2fb3668e4d15ba11079de6adebc6b7f | refs/heads/master | 2020-06-22T11:29:47.660370 | 2019-07-19T06:18:00 | 2019-07-19T06:18:00 | 197,707,076 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 407 | py | import time
import os
path1 = os.path.dirname(__file__)
path2 = os.path.abspath(path1)
file_path = path2 + "\\script\\"+ os.path.basename(__file__) + time.strftime('%Y-%m-%d_%H_%M_%S',time.localtime(time.time()))+".ps"
def do_write(unit):
with open(file_path, 'a') as fi:
fi.write(""" delete interfaces ge-ts14/2 unit {0}
""".format(unit))
for unit in range(2000,2254):
do_write(unit) | [
"a322494@ecitele.com"
] | a322494@ecitele.com |
d1d80510462153641b3db531d5ee83d2bbdb7e9e | aff88e0922ae5c75f18b624cb1c81c263d12f2af | /demo1.py | ca6ff371ae12957bb3281cbb1a7044d31462a285 | [] | no_license | TianJin85/Qtwindow | 44f42c8972382bcdbde7bc26a4a7f5121736e0aa | 3af712d8528d825cb3cecd6bc21c8f836232e775 | refs/heads/master | 2020-10-01T23:05:30.271773 | 2020-02-09T14:39:12 | 2020-02-09T14:39:12 | 227,642,240 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 233 | py | from functools import reduce
import multiprocessing as mp
def power(x):
return pow(x, 2)
if __name__ == '__main__':
with mp.Pool(processes=4) as mpp:
print(reduce(lambda a,b:a+b, mpp.map(power, range(1000000)), 0))
| [
"307440205@qq.com"
] | 307440205@qq.com |
e5c0da5013ff544c40fa5ea29e684739ca379b5b | ade7b1699bbe32d028c5de31ce46456279495be3 | /hr_ykp_appraisal/models/survey.py | bfcb1bf292ae9dac2be6d950a6aacaa751c7d75f | [] | no_license | Jacky-odoo/HRIS-YKP-Flectra | 1a3806f672cfb85fc5ccdb2aae03875dc323a2a3 | 3528dd0d7999f66ed1f891fc09a9fd19b54568f2 | refs/heads/main | 2023-06-22T18:23:52.746293 | 2021-07-16T06:48:37 | 2021-07-16T06:48:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,263 | py | import datetime
from flectra import models, fields, api, exceptions
class survey_department(models.Model):
_inherit = 'survey.survey'
survey_department = fields.Boolean('Survey Department ?')
survey_department_pengurus = fields.Boolean('Survey Department ?')
class survey_department_input_employee(models.Model):
_inherit = 'survey.user_input'
semester = fields.Selection([
('semester_1', 'Semester 1'),
('semester_2', 'Semester 2')
], string='Semester', default='semester_1')
quarter = fields.Selection([
('q1', 'Kuartal 1'),
('q2', 'Kuartal 2'),
('q3', 'Kuartal 3'),
('q4', 'Kuartal 4'),
], string='Kuartal', default="q1")
year = fields.Char('Tahun')
employee_id = fields.Many2one('hr.employee')
class survey_department_input_line_employee(models.Model):
_inherit = 'survey.user_input_line'
quarter = fields.Selection(related='user_input_id.quarter', store=True)
job_id = fields.Many2one(related='user_input_id.employee_id.job_id', store=True)
survey_department_pengurus = fields.Boolean(related='survey_id.survey_department_pengurus',
string='Survey Department Pengurus ?')
class DepartmentAppraisal(models.Model):
_name = 'hr.department.appraisal'
def _default_employee(self):
return self.env['hr.employee'].search([('user_id', '=', self.env.user.id)])
def _default_survey_department(self):
return self.env['survey.survey'].search([('survey_department', '=', True)], limit=1)
def _default_survey_department_pengurus(self):
return self.env['survey.survey'].search([('survey_department_pengurus', '=', True)], limit=1)
def name_get(self):
result = []
for record in self:
name = 'Appraisal {} {}'.format(record.semester, record.year)
result.append((record.id, name))
return result
semester = fields.Selection([
('semester_1', 'Semester 1'),
('semester_2', 'Semester 2')
], string='Semester', default='semester_1')
quarter = fields.Selection([
('q1', 'Kuartal 1'),
('q2', 'Kuartal 2'),
('q3', 'Kuartal 3'),
('q4', 'Kuartal 4'),
], string='Kuartal', default="q1")
year = fields.Char('Tahun', default=datetime.datetime.now().year)
employee_id = fields.Many2one('hr.employee', default=_default_employee, string='Karyawan')
user_id = fields.Many2one(related='employee_id.user_id')
survey_id = fields.Many2one('survey.survey', domain=[('survey_department', '=', True)],
default=_default_survey_department)
survey_department_pengurus = fields.Boolean(related='survey_id.survey_department_pengurus',
string='Survey Department Pengurus ?')
answer_id = fields.Many2one('survey.user_input', 'Jawaban')
@api.model
def create(self, vals):
quarter = vals['quarter']
year = vals['year']
employee_id = self._default_employee().id
appraisal = self.env['hr.department.appraisal'].search(
[('quarter', '=', quarter), ('year', '=', year), ('employee_id', '=', employee_id)], limit=1)
if appraisal:
raise exceptions.ValidationError('Penilaian untuk {} dan Tahun {} sudah ada'.format(quarter, year))
if self.env.user.has_group('hr_ykp_employees.group_hr_pengurus') or self.env.user.has_group(
'hr_ykp_training.group_pengurus'):
vals['survey_id'] = self._default_survey_department_pengurus().id
else:
vals['survey_id'] = self._default_survey_department().id
result = super(DepartmentAppraisal, self).create(vals)
response = self.env['survey.user_input'].create(
{'survey_id': result.survey_id.id, 'employee_id': result.employee_id.id, 'quarter': result.quarter,
'year': result.year})
result.write({'answer_id': response.id})
result.survey_id.response_ids = [(6, 0, [response.id])]
return result
@api.multi
def action_start_stage_survey(self):
self.ensure_one()
if self.survey_id and self.employee_id:
response = self.env['survey.user_input'].search([('employee_id', '=', self.employee_id.id)], limit=1)
if not response.id:
response = self.env['survey.user_input'].create(
{'survey_id': self.survey_id.id, 'employee_id': self.employee_id.id, 'quarter': self.quarter,
'year': self.year})
self.survey_id.response_ids = [(6, 0, [response.id])]
# grab the token of the response and start surveying
return self.survey_id.with_context(survey_token=response.token).action_start_survey()
class SurveyUserInputLine(models.Model):
_inherit = 'survey.user_input_line'
value_suggested = fields.Many2one('survey.label', string="Suggested answer", group_operator='max')
quizz_mark = fields.Float('Skor', digits=(12, 2), group_operator='avg')
employee_id = fields.Many2one(related='user_input_id.employee_id', store=True)
page_id = fields.Many2one(related='question_id.page_id', string="Page", store=True)
| [
"87510026+bambangbc@users.noreply.github.com"
] | 87510026+bambangbc@users.noreply.github.com |
9c4aed42fc02d50ba07c50728a9b03ece7bb1970 | 9f1b8a1ada57198e2a06d88ddcdc0eda0c683df7 | /submission - lab9/set 2/CAMPBELL BRUCE MOUSSEAU_19357_assignsubmission_file_mousseau lab 9/Lab 9/F_lab9.py | bb9c65726ec9bdb0d3488718768f1c37904455b6 | [] | no_license | sendurr/spring-grading | 90dfdced6327ddfb5c311ae8f42ae1a582768b63 | 2cc280ee3e0fba02e95b6e9f45ad7e13bc7fad54 | refs/heads/master | 2020-04-15T17:42:10.781884 | 2016-08-29T20:38:17 | 2016-08-29T20:38:17 | 50,084,068 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 281 | py | from math import sin, exp
class F:
def __init__(self, a, w):
self.a=a
self.w=w
def value(self, x):
a=self.a
w=self.w
return (exp(-a*w) * sin(w*x))
from math import *
f=F(a=1.0, w=0.1)
print (f.value(x=pi))
f.a=2
print (f.value(pi)) | [
"sendurr@hotmail.com"
] | sendurr@hotmail.com |
c0827833120ab94824dfba053e146e015ac45730 | 2426d928a67177cfb19a4ff292003c825ed47867 | /ipfy.py | b43a7fcbe3d8b930c4c08d1bdbd470664930b061 | [] | no_license | Cicadadenis/ipfy | b6678e59802bce4656a94affd1f00f8b94d2289b | e8c07e930132419e2810d9d84f6e445ddf420096 | refs/heads/master | 2023-04-11T02:16:48.125099 | 2021-04-24T09:53:30 | 2021-04-24T09:53:30 | 245,186,411 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,576 | py | #!/data/data/com.termux/files/bin/python
# Importing Modules \(o,,,o)/
import argparse
import requests, json
import sys
from sys import argv
import os
# Arguments *_*
parser = argparse.ArgumentParser()
parser.add_argument("-t", help="target ip address", type=str, dest='target', required=True)
args = parser.parse_args()
# I love Colors !
lightblue = '\033[94m'
lightgreen = '\033[92m'
clear = '\033[0m'
boldblue = '\033[01m''\033[94m'
cyan = '\033[36m'
bold = '\033[01m'
red = '\033[31m'
lightcyan = '\033[96m'
yellow = '\033[93m'
# Clear The Terminal
os.system('clear')
ip = args.target
# Let's Begin
api = "http://ip-api.com/json/"
# Sending Requests And Getting Data
try:
data = requests.get(api+ip).json()
sys.stdout.flush()
a = yellow+bold+"[~]"
# Printing,Not Phising ; P
print(a, "Target:", data['query'])
print(a, "ISP:", data['isp'])
print(a, "Organisation:", data['org'])
print(a, "City:", data['city'])
print(a, "Region:", data['region'])
print(a, "Region name:", data['regionName'])
print(a, "Latitude:", data['lat'])
print(a, "Longitude:", data['lon'])
print(a, "Timezone:", data['timezone'])
print(a, "Zip code:", data['zip'])
print(" "+clear)
# Error Handling
except KeyboardInterrupt:
print('Exiting,Good Bye'+clear)
sys.exit(0)
except requests.exceptions.ConnectionError as e:
print(red+bold+"[!]"+" Please Check Your Internet Connection!"+clear)
sys.exit(1)
# Done!
| [
"noreply@github.com"
] | Cicadadenis.noreply@github.com |
86cd49263a94bae1a4f9756c13dc8b219517d7a6 | ee8c4c954b7c1711899b6d2527bdb12b5c79c9be | /stride/code/test_model_isnumber.py | 20c13f6595ef3ad50a89ca6a482a549016b2dc7a | [] | no_license | sqlconsult/byte | 02ac9899aebea4475614969b594bfe2992ffe29a | 548f6cb5038e927b54adca29caf02c981fdcecfc | refs/heads/master | 2021-01-25T14:45:42.120220 | 2018-08-11T23:45:31 | 2018-08-11T23:45:31 | 117,135,069 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 726 | py | #!/usr/bin/env python3
import model as model
def test_int():
"""
Tests string to integer conversion method
:return:
"""
valid = model.is_number('123', 'int')
invalid = model.is_number('abc', 'int')
assert valid
assert not invalid
def test_float():
"""
Tests string to float conversion method
:return:
"""
valid = model.is_number('12.3', 'float')
invalid = model.is_number('abc', 'float')
assert valid
assert not invalid
def test_invalid_type():
"""
Tests invalid type converions fail
:return:
"""
invalid = model.is_number(123, 'int_xxx')
assert not invalid
invalid = model.is_number(12.3, 'float_xxx')
assert not invalid
| [
"sqlconsult@hotmail.com"
] | sqlconsult@hotmail.com |
2e80a8ba7a2b19d5882eeb8be77b4fba4f3fc1aa | dd3b8bd6c9f6f1d9f207678b101eff93b032b0f0 | /basis/AbletonLive10.1_MIDIRemoteScripts/Push2/device_decorator_factory.py | 5bac61c8d827735830b66c528955aade31520982 | [] | no_license | jhlax/les | 62955f57c33299ebfc4fca8d0482b30ee97adfe7 | d865478bf02778e509e61370174a450104d20a28 | refs/heads/master | 2023-08-17T17:24:44.297302 | 2019-12-15T08:13:29 | 2019-12-15T08:13:29 | 228,120,861 | 3 | 0 | null | 2023-08-03T16:40:44 | 2019-12-15T03:02:27 | Python | UTF-8 | Python | false | false | 1,633 | py | # uncompyle6 version 3.4.1
# Python bytecode 2.7 (62211)
# Decompiled from: Python 2.7.16 (v2.7.16:413a49145e, Mar 2 2019, 14:32:10)
# [GCC 4.2.1 Compatible Apple LLVM 6.0 (clang-600.0.57)]
# Embedded file name: /Users/versonator/Jenkins/live/output/mac_64_static/Release/python-bundle/MIDI Remote Scripts/Push2/device_decorator_factory.py
# Compiled at: 2019-05-08 17:06:57
from __future__ import absolute_import, print_function, unicode_literals
from ableton.v2.control_surface import DeviceDecoratorFactory as DeviceDecoratorFactoryBase
from .auto_filter import AutoFilterDeviceDecorator
from .compressor import CompressorDeviceDecorator
from .device_decoration import SamplerDeviceDecorator, PedalDeviceDecorator, DrumBussDeviceDecorator, UtilityDeviceDecorator
from .delay import DelayDeviceDecorator
from .echo import EchoDeviceDecorator
from .eq8 import Eq8DeviceDecorator
from .operator import OperatorDeviceDecorator
from .simpler import SimplerDeviceDecorator
from .wavetable import WavetableDeviceDecorator
class DeviceDecoratorFactory(DeviceDecoratorFactoryBase):
DECORATOR_CLASSES = {'OriginalSimpler': SimplerDeviceDecorator,
'Operator': OperatorDeviceDecorator,
'MultiSampler': SamplerDeviceDecorator,
'AutoFilter': AutoFilterDeviceDecorator,
'Eq8': Eq8DeviceDecorator,
'Compressor2': CompressorDeviceDecorator,
'Pedal': PedalDeviceDecorator,
'DrumBuss': DrumBussDeviceDecorator,
'Echo': EchoDeviceDecorator,
'InstrumentVector': WavetableDeviceDecorator,
'StereoGain': UtilityDeviceDecorator,
'Delay': DelayDeviceDecorator} | [
"jharrington@transcendbg.com"
] | jharrington@transcendbg.com |
cd9e73d41dc2241b16dfbc7daaa4630b0dd1ec66 | 535503dc18c38b92f8520289da5b4fa42b0a722a | /code/alp/dick_vs_full_rabi.py | 2f173bc6a297c98315ac4e5f63c8fee61dbd48e7 | [] | no_license | jamesbate/phd_code | fbbbf7657c428a0a1f18768edca1dfce56801cc1 | 7e71d7f041835497fb421dd741c644ab5c8e3805 | refs/heads/master | 2023-05-07T10:31:22.168217 | 2021-05-26T15:00:40 | 2021-05-26T15:00:40 | 371,073,491 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,173 | py | """This code produces plot comparing full rabi flops to dicke approxiamtion
"""
import matplotlib.pyplot as plt
import numpy as np
from lib import rabi_flops_LD_blue, rabi_flops_LD_red, rabi_flops_LD_carrier, full_Rabi
#prepare figure
plt.rcParams.update({'font.size': 16})
colours = ['b','m','c','r','tab:orange', 'tab:pink']
fig, axes = plt.subplots(1 ,2, constrained_layout=True, figsize=(18, 9))
plt.grid()
t = np.linspace(0,1000,1000)
#in microseconds
#Now let out pi time be 30us (i.e. how well you can identify the carrier)
Rabi = np.pi/50
#Rabi now in MHz
#corresponds to axial frequency of 1MHz
eta = 0.068
n_av = 20
params_dict = {
'Rabi': np.pi/50,
'dicke_factor': 0.068,
'n_av': 20,
'detuning': 0,
'phase': 0,
'amplitude': 1,
}
#These are the plots in the lambe dicke regime (up to 2nd order)
axes[0].plot(t, rabi_flops_LD_blue(t, Rabi, eta, n_av), ':', c = colours[0])
axes[0].plot(t, rabi_flops_LD_red(t, Rabi, eta, n_av), ':', c = colours[1])
axes[0].plot(t, rabi_flops_LD_carrier(t, Rabi, eta, n_av), ':', c = colours[2])
#This is the full solution
axes[0].plot(full_Rabi(t, params_dict , m = 1), c = colours[0], label = "BSB")
axes[0].plot(full_Rabi(t, params_dict , m = 0), c = colours[2], label = "Carrier")
axes[0].plot(full_Rabi(t, params_dict , m = -1), c = colours[1], label = "RSB")
axes[0].set_title("$n_{av} = 20$")
n_av = 0.5
params_dict.update({'n_av': 0.5})
#These are the plots in the lambe dicke regime (up to 2nd order)
axes[1].plot(t, rabi_flops_LD_blue(t, Rabi, eta, n_av), ':', c = colours[0])
axes[1].plot(t, rabi_flops_LD_red(t, Rabi, eta, n_av), ':', c = colours[1])
axes[1].plot(t, rabi_flops_LD_carrier(t, Rabi, eta, n_av), ':', c = colours[2])
#This is the full solution
axes[1].plot(full_Rabi(t, params_dict , m = 1), c = colours[0], label = "BSB")
axes[1].plot(full_Rabi(t, params_dict , m = 0), c = colours[2], label = "Carrier")
axes[1].plot(full_Rabi(t, params_dict, m = -1), c = colours[1], label = "RSB")
axes[1].set_title("$n_{av} = 0.5$")
plt.legend()
plt.savefig('C:/Users/James/OneDrive - OnTheHub - The University of Oxford/phd/images/dickevsfabiflops.png', dpi = 1000)
plt.show()
| [
"james.bate@oriel.ox.ac.uk"
] | james.bate@oriel.ox.ac.uk |
87d7fd11524661149b94c7e22c6ec66379ffd56d | ac5e52a3fc52dde58d208746cddabef2e378119e | /exps-mrsp.0/mrsp_ut=3.5_rd=0.5_rw=0.06_rn=4_u=0.075-0.325_p=harmonic-2/sched=RUN_trial=51/sched.py | c6369ed663e2d0e6f57eee467dc23159100ebf39 | [] | no_license | ricardobtxr/experiment-scripts | 1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1 | 7bcebff7ac2f2822423f211f1162cd017a18babb | refs/heads/master | 2023-04-09T02:37:41.466794 | 2021-04-25T03:27:16 | 2021-04-25T03:27:16 | 358,926,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 401 | py | -S 0 -X RUN -Q 0 -L 3 79 250
-S 0 -X RUN -Q 0 -L 3 69 200
-S 0 -X RUN -Q 0 -L 3 65 400
-S 1 -X RUN -Q 1 -L 3 60 200
-S 1 -X RUN -Q 1 -L 3 60 300
-S 2 -X RUN -Q 2 -L 2 55 200
-S 2 -X RUN -Q 2 -L 2 49 300
-S 1 -X RUN -Q 3 -L 2 40 150
-S 1 -X RUN -Q 3 -L 2 38 200
-S 3 38 200
-S 3 28 150
-S 3 26 125
-S 4 25 175
-S 4 24 175
-S 3 16 100
-S 3 15 100
-S 4 13 125
| [
"ricardo.btxr@gmail.com"
] | ricardo.btxr@gmail.com |
5482ff8f6ceaa7941208ff40c42c458cd189178e | 2c8ef7156d4aae04da7b88722e71acc69f19d532 | /tests/test_logout.py | 8a2e051d1a5f66109620e54af7f144950a274b20 | [
"ISC"
] | permissive | iSevenDays/aiohttp-login | 4b02c1239385b506f8afb461a290a34fc83a6bc8 | 77ed94def31f8e8ba12683a9f8431f68b76662a5 | refs/heads/master | 2021-06-18T12:24:37.416232 | 2021-06-11T22:19:54 | 2021-06-11T22:19:54 | 84,658,045 | 0 | 0 | ISC | 2021-06-11T22:20:17 | 2017-03-11T15:00:36 | Python | UTF-8 | Python | false | false | 764 | py | from utils import log_client_in
from utils import * # noqa
from aiohttp_login import cfg, url_for
async def test_logout(client):
    """End-to-end logout flow: a logged-in client loses access after logout.

    Log in, confirm a protected page is reachable, log out (landing on the
    login page), then confirm the protected page now redirects to login.
    """
    protected_url = url_for('auth_change_email')
    login_page_url = url_for('auth_login')
    user = await log_client_in(client)
    # While authenticated, the protected page is served at its own path.
    resp = await client.get(protected_url)
    assert resp.url_obj.path == protected_url.path
    # Logging out succeeds and lands on the login page.
    resp = await client.get(url_for('auth_logout'))
    assert resp.status == 200
    assert resp.url_obj.path == login_page_url.path
    # Now logged out: the protected page redirects back to login.
    resp = await client.get(protected_url)
    assert resp.url_obj.path == login_page_url.path
    # Remove the user created by the login helper.
    await cfg.STORAGE.delete_user(user)
if __name__ == '__main__':
    import pytest
    # Allow running this test module directly; abort after the first failure.
    pytest.main([__file__, '--maxfail=1'])
| [
"imbolc@imbolc.name"
] | imbolc@imbolc.name |
c214386013b9ebdaba8ab7e3a0eb31018ac59dcd | a331955dc07caab0d29199cdc3b7b410d37ec4cc | /api/migrations/0065_auto_20191220_1849.py | cf0160ed208867ef7e02c253ee5b29765568e851 | [] | no_license | JobCore/api | 7a890be853ada4fd24bb5e809ad7d48f9573fbcc | 188585150f2a969aa1303381bf5ddd0897f5444c | refs/heads/master | 2023-02-23T18:18:09.186768 | 2022-04-15T18:02:14 | 2022-04-15T18:02:14 | 143,315,392 | 3 | 6 | null | 2023-02-10T22:46:18 | 2018-08-02T15:46:46 | Python | UTF-8 | Python | false | false | 971 | py | # Generated by Django 2.2.6 on 2019-12-20 18:49
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add an approval ``status`` to Employer and redefine the Employee
    ``employment_verification_status`` choice list."""
    dependencies = [
        ('api', '0064_clockin_automatically_closed'),
    ]
    operations = [
        # New approval-workflow field; pre-existing employer rows default to APPROVED.
        migrations.AddField(
            model_name='employer',
            name='status',
            field=models.CharField(blank=True, choices=[('NOT_APPROVED', 'Not Approved'), ('PENDING', 'Pending'), ('BEING_REVIEWED', 'Being Reviewed'), ('DELETED', 'Deleted'), ('APPROVED', 'Approved')], default='APPROVED', max_length=25),
        ),
        # Redeclare the choice list (now includes MISSING_DOCUMENTS).
        migrations.AlterField(
            model_name='employee',
            name='employment_verification_status',
            field=models.CharField(blank=True, choices=[('NOT_APPROVED', 'Not Approved'), ('PENDING', 'Pending'), ('MISSING_DOCUMENTS', 'Missing Documents'), ('BEING_REVIEWED', 'Being Reviewed'), ('APPROVED', 'Approved')], default='NOT_APPROVED', max_length=25),
        ),
    ]
| [
"aalejo@gmail.com"
] | aalejo@gmail.com |
72cc13393588d1c4fe146f9c3048f8ea8edcdfdd | 9ceacf33fd96913cac7ef15492c126d96cae6911 | /regress/sys/netinet/frag/frag_overreplace.py | 0335d94638555699e30f12aa9b282a63aaed7d3f | [
"ISC"
] | permissive | openbsd/src | ab97ef834fd2d5a7f6729814665e9782b586c130 | 9e79f3a0ebd11a25b4bff61e900cb6de9e7795e9 | refs/heads/master | 2023-09-02T18:54:56.624627 | 2023-09-02T15:16:12 | 2023-09-02T15:16:12 | 66,966,208 | 3,394 | 1,235 | null | 2023-08-08T02:42:25 | 2016-08-30T18:18:25 | C | UTF-8 | Python | false | false | 2,096 | py | #!/usr/local/bin/python3
print("ping fragment that overlaps fragment at index boundary and replace it")
# index boundary 4096 |
# |--------------|
# ....
# |--------------|
# |XXXX-----|
# |--------------|
# |--------------|
# this should trigger "frag tail overlap %d" and "frag head overlap %d"
import os
from addr import *
from scapy.all import *
# NOTE(review): `time`, `sendp`, `sniff`, `Ether`, `IP`, `ICMP`, `icmptypes`
# and ETH_P_IP come in through the star imports above (addr / scapy.all).
pid=os.getpid()
# The pid doubles as ICMP echo id and IP fragment id so replies from
# concurrent test runs can be told apart.
eid=pid & 0xffff
payload=b"ABCDEFGHIJKLMNOP"
dummy=b"01234567"
fragsize=1024
boundary=4096
fragnum=int(boundary/fragsize)
# Echo request whose payload spans one fragsize past the reassembly boundary.
packet=IP(src=LOCAL_ADDR, dst=REMOTE_ADDR)/ \
    ICMP(type='echo-request', id=eid)/ \
    (int((boundary+fragsize)/len(payload)) * payload)
frag=[]
fid=pid & 0xffff
# Regular fragments up to (but not including) the one that ends at the
# boundary; the 20-byte offsets skip the IPv4 header of `packet`.
for i in range(fragnum-1):
    frag.append(IP(src=LOCAL_ADDR, dst=REMOTE_ADDR, proto=1, id=fid,
        frag=(i*fragsize)>>3, flags='MF')/
        bytes(packet)[20+i*fragsize:20+(i+1)*fragsize])
# Fragment at offset boundary-8: 8 bogus bytes (the XXXX overlap in the
# diagram) followed by the first 8 genuine bytes past the boundary.
frag.append(IP(src=LOCAL_ADDR, dst=REMOTE_ADDR, proto=1, id=fid,
    frag=(boundary-8)>>3, flags='MF')/
    (dummy+bytes(packet)[20+boundary:20+boundary+8]))
# Genuine fragment covering [boundary-fragsize, boundary): its tail should
# replace the bogus bytes queued above ("frag tail overlap").
frag.append(IP(src=LOCAL_ADDR, dst=REMOTE_ADDR, proto=1, id=fid,
    frag=(boundary-fragsize)>>3, flags='MF')/
    bytes(packet)[20+boundary-fragsize:20+boundary])
# Last fragment (no MF): everything from the boundary on; its head overlaps
# the 8 genuine bytes already queued ("frag head overlap").
frag.append(IP(src=LOCAL_ADDR, dst=REMOTE_ADDR, proto=1, id=fid,
    frag=(boundary)>>3)/bytes(packet)[20+boundary:])
eth=[]
for f in frag:
    eth.append(Ether(src=LOCAL_MAC, dst=REMOTE_MAC)/f)
# Child process sends the frames after a short delay while the parent sniffs.
if os.fork() == 0:
    time.sleep(1)
    for e in eth:
        sendp(e, iface=LOCAL_IF)
        time.sleep(0.001)
    os._exit(0)
ans=sniff(iface=LOCAL_IF, timeout=3, filter=
    "ip and src "+REMOTE_ADDR+" and dst "+LOCAL_ADDR+" and icmp")
# Success iff an unfragmented echo reply with our id comes back.
for a in ans:
    if a and a.type == ETH_P_IP and \
       a.payload.proto == 1 and \
       a.payload.frag == 0 and \
       icmptypes[a.payload.payload.type] == 'echo-reply':
        id=a.payload.payload.id
        print("id=%#x" % (id))
        if id != eid:
            print("WRONG ECHO REPLY ID")
            exit(2)
        exit(0)
print("NO ECHO REPLY")
exit(1)
| [
"bluhm@openbsd.org"
] | bluhm@openbsd.org |
fd667264c7818081662e5f557191bd01a6e2da41 | 96a34a048c783a75736bf0ec775df22142f9ee53 | /services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/volumes.py | e00a4177cdd72396a0fad3a1a9a4a47204bc18bd | [
"MIT"
] | permissive | ITISFoundation/osparc-simcore | 77e5b9f7eb549c907f6ba2abb14862154cc7bb66 | f4c57ffc7b494ac06a2692cb5539d3acfd3d1d63 | refs/heads/master | 2023-08-31T17:39:48.466163 | 2023-08-31T15:03:56 | 2023-08-31T15:03:56 | 118,596,920 | 39 | 29 | MIT | 2023-09-14T20:23:09 | 2018-01-23T10:48:05 | Python | UTF-8 | Python | false | false | 7,030 | py | import os
from pathlib import Path
from typing import Any
from models_library.projects import ProjectID
from models_library.projects_nodes_io import NodeID
from models_library.services import RunID
from models_library.users import UserID
from servicelib.docker_constants import PREFIX_DYNAMIC_SIDECAR_VOLUMES
from settings_library.r_clone import S3Provider
from ...core.settings import RCloneSettings
from .errors import DynamicSidecarError
# In-container mount point of the sidecar's shared-store volume
# (see DynamicSidecarVolumesPathsResolver.mount_shared_store).
DY_SIDECAR_SHARED_STORE_PATH = Path("/shared-store")
def _get_s3_volume_driver_config(
    r_clone_settings: RCloneSettings,
    project_id: ProjectID,
    node_uuid: NodeID,
    storage_directory_name: str,
) -> dict[str, Any]:
    """Assemble the docker volume ``DriverConfig`` for an rclone-backed S3 mount.

    The mount points at ``<bucket>/<project_id>/<node_uuid>/<storage_directory_name>``;
    ``storage_directory_name`` must therefore be a single path component.
    """
    assert "/" not in storage_directory_name  # nosec
    s3_settings = r_clone_settings.R_CLONE_S3
    common_options: dict[str, Any] = {
        "type": "s3",
        "s3-access_key_id": s3_settings.S3_ACCESS_KEY,
        "s3-secret_access_key": s3_settings.S3_SECRET_KEY,
        "s3-endpoint": s3_settings.S3_ENDPOINT,
        "path": f"{s3_settings.S3_BUCKET_NAME}/{project_id}/{node_uuid}/{storage_directory_name}",
        "allow-other": "true",
        "vfs-cache-mode": r_clone_settings.R_CLONE_VFS_CACHE_MODE.value,
        # Directly connected to how much time it takes for
        # files to appear on remote s3, please see discussion
        # SEE https://forum.rclone.org/t/file-added-to-s3-on-one-machine-not-visible-on-2nd-machine-unless-mount-is-restarted/20645
        # SEE https://rclone.org/commands/rclone_mount/#vfs-directory-cache
        "dir-cache-time": f"{r_clone_settings.R_CLONE_DIR_CACHE_TIME_SECONDS}s",
        "poll-interval": f"{r_clone_settings.R_CLONE_POLL_INTERVAL_SECONDS}s",
    }
    # Provider-specific rclone flags, appended after the common ones.
    provider_specific_options: dict[S3Provider, dict[str, str]] = {
        S3Provider.MINIO: {
            "s3-provider": "Minio",
            "s3-region": "us-east-1",
            "s3-location_constraint": "",
            "s3-server_side_encryption": "",
        },
        S3Provider.CEPH: {
            "s3-provider": "Ceph",
            "s3-acl": "private",
        },
        S3Provider.AWS: {
            "s3-provider": "AWS",
            "s3-region": "us-east-1",
            "s3-acl": "private",
        },
    }
    provider = r_clone_settings.R_CLONE_PROVIDER
    if provider not in provider_specific_options:
        raise DynamicSidecarError(
            f"Unexpected, all {S3Provider.__name__} should be covered"
        )
    return {
        "Name": "rclone",
        "Options": {**common_options, **provider_specific_options[provider]},
    }
class DynamicSidecarVolumesPathsResolver:
    """Derives docker volume names/targets for dynamic-sidecar services and
    builds the corresponding mount specifications."""

    # Every mounted path is re-rooted under this folder inside the container.
    BASE_PATH: Path = Path("/dy-volumes")

    @classmethod
    def target(cls, path: Path) -> str:
        """Returns a folder path within `/dy-volumes` folder"""
        target_path = cls.BASE_PATH / path.relative_to("/")
        return f"{target_path}"

    @classmethod
    def _volume_name(cls, path: Path) -> str:
        # "/a/b/c" -> "_a_b_c": docker volume names cannot contain path separators.
        return f"{path}".replace(os.sep, "_")

    @classmethod
    def source(cls, path: Path, node_uuid: NodeID, run_id: RunID) -> str:
        """Returns a valid and unique volume name that is composed out of identifiers, namely
        - relative target path
        - node_uuid
        - run_id
        Guarantees that the volume name is unique between runs while also
        taking into consideration the limit for the volume name's length
        (255 characters).
        SEE examples in `tests/unit/test_modules_dynamic_sidecar_volumes_resolver.py`
        """
        # NOTE: issues can occur when the paths of the mounted outputs, inputs
        # and state folders are very long and share the same subdirectory path.
        # Reversing volume name to prevent these issues from happening.
        reversed_volume_name = cls._volume_name(path)[::-1]
        unique_name = f"{PREFIX_DYNAMIC_SIDECAR_VOLUMES}_{run_id}_{node_uuid}_{reversed_volume_name}"
        # Docker caps volume names at 255 characters; truncation drops the tail
        # of the reversed path (i.e. the least discriminating leading folders).
        return unique_name[:255]

    @classmethod
    def mount_entry(
        cls,
        swarm_stack_name: str,
        path: Path,
        node_uuid: NodeID,
        run_id: RunID,
        project_id: ProjectID,
        user_id: UserID,
        volume_size_limit: str | None,
    ) -> dict[str, Any]:
        """
        Creates specification for mount to be added to containers created as part of a service
        """
        return {
            "Source": cls.source(path, node_uuid, run_id),
            "Target": cls.target(path),
            "Type": "volume",
            "VolumeOptions": {
                # Labels let other services locate/garbage-collect these volumes.
                "Labels": {
                    "source": cls.source(path, node_uuid, run_id),
                    "run_id": f"{run_id}",
                    "node_uuid": f"{node_uuid}",
                    "study_id": f"{project_id}",
                    "user_id": f"{user_id}",
                    "swarm_stack_name": swarm_stack_name,
                },
                # None -> no DriverConfig entry at all (callers pass None when
                # size quotas are not supported, see mount_shared_store).
                "DriverConfig": (
                    {"Options": {"size": volume_size_limit}}
                    if volume_size_limit is not None
                    else None
                ),
            },
        }

    @classmethod
    def mount_shared_store(
        cls,
        run_id: RunID,
        node_uuid: NodeID,
        project_id: ProjectID,
        user_id: UserID,
        swarm_stack_name: str,
        has_quota_support: bool,
    ) -> dict[str, Any]:
        # Shared-store volume; capped at 1M only when the driver honors quotas.
        return cls.mount_entry(
            swarm_stack_name=swarm_stack_name,
            path=DY_SIDECAR_SHARED_STORE_PATH,
            node_uuid=node_uuid,
            run_id=run_id,
            project_id=project_id,
            user_id=user_id,
            volume_size_limit="1M" if has_quota_support else None,
        )

    @classmethod
    def mount_r_clone(
        cls,
        swarm_stack_name: str,
        path: Path,
        node_uuid: NodeID,
        run_id: RunID,
        project_id: ProjectID,
        user_id: UserID,
        r_clone_settings: RCloneSettings,
    ) -> dict[str, Any]:
        # Same spec shape as mount_entry, but backed by the rclone S3 driver.
        return {
            "Source": cls.source(path, node_uuid, run_id),
            "Target": cls.target(path),
            "Type": "volume",
            "VolumeOptions": {
                "Labels": {
                    "source": cls.source(path, node_uuid, run_id),
                    "run_id": f"{run_id}",
                    "node_uuid": f"{node_uuid}",
                    "study_id": f"{project_id}",
                    "user_id": f"{user_id}",
                    "swarm_stack_name": swarm_stack_name,
                },
                "DriverConfig": _get_s3_volume_driver_config(
                    r_clone_settings=r_clone_settings,
                    project_id=project_id,
                    node_uuid=node_uuid,
                    # strip("_") turns "_a_b" into "a_b" (a single S3 key part)
                    storage_directory_name=cls._volume_name(path).strip("_"),
                ),
            },
        }
| [
"noreply@github.com"
] | ITISFoundation.noreply@github.com |
c5ef0a3265d4cd7a91dfae206cdb325501e60056 | 132b261b16338cb7b9297bd04eaaaafe34bde89e | /sendSMSSkillLambda/package/ask_sdk_model/slu/entityresolution/resolution.py | 23cdb26aa058fc10f7e1bea83da45533228073ea | [
"Apache-2.0"
] | permissive | ziniman/aws-alexa-lambda-workshop | 2835b998272b01856d3dbea6481e9ee4457da2f2 | d1e291ebd3e20132098541c92735d29491bfc932 | refs/heads/master | 2020-06-25T22:58:04.814822 | 2019-09-08T10:37:00 | 2019-09-08T10:37:00 | 199,446,036 | 0 | 3 | Apache-2.0 | 2019-09-05T09:03:12 | 2019-07-29T12:11:58 | Python | UTF-8 | Python | false | false | 4,131 | py | # coding: utf-8
#
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file
# except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.
#
import pprint
import re # noqa: F401
import six
import typing
from enum import Enum
if typing.TYPE_CHECKING:
from typing import Dict, List, Optional, Union
from datetime import datetime
from ask_sdk_model.slu.entityresolution.status import Status
from ask_sdk_model.slu.entityresolution.value_wrapper import ValueWrapper
class Resolution(object):
    """
    Represents a possible authority for entity resolution

    :param authority:
    :type authority: (optional) str
    :param status:
    :type status: (optional) ask_sdk_model.slu.entityresolution.status.Status
    :param values:
    :type values: (optional) list[ask_sdk_model.slu.entityresolution.value_wrapper.ValueWrapper]
    """
    # Attribute name -> declared type string; presumably consumed by the SDK's
    # (de)serializer — confirm against the ask-sdk serialization layer.
    deserialized_types = {
        'authority': 'str',
        'status': 'ask_sdk_model.slu.entityresolution.status.Status',
        'values': 'list[ask_sdk_model.slu.entityresolution.value_wrapper.ValueWrapper]'
    }  # type: Dict
    # Attribute name -> wire/JSON key (identical for this model).
    attribute_map = {
        'authority': 'authority',
        'status': 'status',
        'values': 'values'
    }  # type: Dict

    def __init__(self, authority=None, status=None, values=None):
        # type: (Optional[str], Optional[Status], Optional[List[ValueWrapper]]) -> None
        """Represents a possible authority for entity resolution

        :param authority:
        :type authority: (optional) str
        :param status:
        :type status: (optional) ask_sdk_model.slu.entityresolution.status.Status
        :param values:
        :type values: (optional) list[ask_sdk_model.slu.entityresolution.value_wrapper.ValueWrapper]
        """
        self.__discriminator_value = None  # type: str
        self.authority = authority
        self.status = status
        self.values = values

    def to_dict(self):
        # type: () -> Dict[str, object]
        """Returns the model properties as a dict"""
        result = {}  # type: Dict
        for attr, _ in six.iteritems(self.deserialized_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Convert each element: nested models via to_dict, enums via .value.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else
                    x.value if isinstance(x, Enum) else x,
                    value
                ))
            elif isinstance(value, Enum):
                result[attr] = value.value
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Convert dict values the same way, keeping the original keys.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else
                    (item[0], item[1].value)
                    if isinstance(item[1], Enum) else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result

    def to_str(self):
        # type: () -> str
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        # type: () -> str
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        # type: (object) -> bool
        """Returns true if both objects are equal"""
        if not isinstance(other, Resolution):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        # type: (object) -> bool
        """Returns true if both objects are not equal"""
        return not self == other
| [
"oritalul@amazon.com"
] | oritalul@amazon.com |
7ad29cf02e7da5b8275d98000702b09da5d173cd | 6c92fddbdfce8407758632d48b77a01c77ab49c8 | /contratospr/users/migrations/0001_initial.py | 03329cc854caa139ea22f8960674e0814b7a2538 | [
"Apache-2.0"
] | permissive | jycordero/contratospr-api | 1ed0245b4b2291ed83c5bb32fb0d55d6f797c3d2 | 6778b02b42305aa7ce65c956a0d89029ddd857a4 | refs/heads/main | 2023-03-25T01:05:13.170671 | 2021-02-22T16:48:43 | 2021-02-22T16:48:43 | 343,995,328 | 0 | 0 | Apache-2.0 | 2021-03-03T03:57:23 | 2021-03-03T03:57:23 | null | UTF-8 | Python | false | false | 4,835 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-20 13:29
from __future__ import unicode_literals
import django.contrib.auth.models
import django.contrib.auth.validators
import django.utils.timezone
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: create the project's custom ``User`` model
    (mirrors Django's stock auth user fields)."""
    initial = True
    dependencies = [("auth", "0008_alter_user_username_max_length")]
    operations = [
        migrations.CreateModel(
            name="User",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("password", models.CharField(max_length=128, verbose_name="password")),
                (
                    "last_login",
                    models.DateTimeField(
                        blank=True, null=True, verbose_name="last login"
                    ),
                ),
                (
                    "is_superuser",
                    models.BooleanField(
                        default=False,
                        help_text="Designates that this user has all permissions without explicitly assigning them.",
                        verbose_name="superuser status",
                    ),
                ),
                (
                    "username",
                    models.CharField(
                        error_messages={
                            "unique": "A user with that username already exists."
                        },
                        help_text="Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.",
                        max_length=150,
                        unique=True,
                        validators=[
                            django.contrib.auth.validators.UnicodeUsernameValidator()
                        ],
                        verbose_name="username",
                    ),
                ),
                (
                    "first_name",
                    models.CharField(
                        blank=True, max_length=30, verbose_name="first name"
                    ),
                ),
                (
                    "last_name",
                    models.CharField(
                        blank=True, max_length=30, verbose_name="last name"
                    ),
                ),
                (
                    "email",
                    models.EmailField(
                        blank=True, max_length=254, verbose_name="email address"
                    ),
                ),
                (
                    "is_staff",
                    models.BooleanField(
                        default=False,
                        help_text="Designates whether the user can log into this admin site.",
                        verbose_name="staff status",
                    ),
                ),
                (
                    "is_active",
                    models.BooleanField(
                        default=True,
                        help_text="Designates whether this user should be treated as active. Unselect this instead of deleting accounts.",
                        verbose_name="active",
                    ),
                ),
                (
                    "date_joined",
                    models.DateTimeField(
                        default=django.utils.timezone.now, verbose_name="date joined"
                    ),
                ),
                (
                    "groups",
                    models.ManyToManyField(
                        blank=True,
                        help_text="The groups this user belongs to. A user will get all permissions granted to each of their groups.",
                        related_name="user_set",
                        related_query_name="user",
                        to="auth.Group",
                        verbose_name="groups",
                    ),
                ),
                (
                    "user_permissions",
                    models.ManyToManyField(
                        blank=True,
                        help_text="Specific permissions for this user.",
                        related_name="user_set",
                        related_query_name="user",
                        to="auth.Permission",
                        verbose_name="user permissions",
                    ),
                ),
            ],
            options={
                "verbose_name_plural": "users",
                "verbose_name": "user",
                "abstract": False,
            },
            # Keep Django's stock manager so createsuperuser etc. work.
            managers=[("objects", django.contrib.auth.models.UserManager())],
        )
    ]
| [
"jpadilla@webapplicate.com"
] | jpadilla@webapplicate.com |
3371395815c8d3c4a5fde21fed0ed3cf0ae7156e | 0eeec1dc7d76f40be7e048480dcfeaebb2638706 | /src/box.py | 3d697459431650eba947026561a68c842b908b38 | [] | no_license | amlucas/meshTools | 6e69f56d9e7c9074378f8030fe73290b829d6df7 | 15523e4719935606a3aa47ef04eb2d6bc23aacdf | refs/heads/master | 2020-04-07T12:39:54.960705 | 2019-08-09T13:06:44 | 2019-08-09T13:06:44 | 158,376,167 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,324 | py | #! /usr/bin/env python
import pymesh, argparse
def create_box_simple(box_min, box_max):
    """Return a pymesh box (8 vertices, 12 triangles) spanning box_min..box_max.

    Vertex order: the four corners at z = box_min[2] in ring order, then the
    same four corners at z = box_max[2]; the face table indexes that order.
    """
    import numpy as np
    # xy corners in the ring order assumed by the face table below.
    xy_ring = [
        (box_min[0], box_min[1]),
        (box_max[0], box_min[1]),
        (box_max[0], box_max[1]),
        (box_min[0], box_max[1]),
    ]
    vertices = np.array(
        [[x, y, z] for z in (box_min[2], box_max[2]) for x, y in xy_ring]
    )
    # Two triangles per side of the box.
    faces = np.array([
        [0, 2, 1], [0, 3, 2],  # z = min side
        [0, 1, 4], [1, 5, 4],  # y = min side
        [0, 7, 3], [0, 4, 7],  # x = min side
        [3, 7, 6], [3, 6, 2],  # y = max side
        [2, 6, 5], [2, 5, 1],  # x = max side
        [4, 5, 6], [4, 6, 7],  # z = max side
    ])
    return pymesh.form_mesh(vertices, faces)
if __name__ == '__main__':
    # CLI entry point: build an axis-aligned box mesh and save it to --out.
    parser = argparse.ArgumentParser(description='create a simple rectangular box mesh')
    parser.add_argument('--out', help='output file name', type=str, required=True)
    parser.add_argument('--box_min', type=float, nargs=3, default=[-1, -2, -3])
    parser.add_argument('--box_max', type=float, nargs=3, default=[1, 2, 3])
    args = parser.parse_args()
    mesh = create_box_simple(args.box_min, args.box_max)
    pymesh.save_mesh(args.out, mesh)
| [
"lucas.amoudruz@wanadoo.fr"
] | lucas.amoudruz@wanadoo.fr |
3e85d0358cd1bb8e64df6ae82dc60a06a038a5f8 | ce083128fa87ca86c65059893aa8882d088461f5 | /python/pytest-labs/.venv/lib/python3.6/site-packages/facebook_business/adobjects/pagepaymentoptions.py | f1879619b1cdd28a05be92d0a9ad31805bdfff03 | [] | no_license | marcosptf/fedora | 581a446e7f81d8ae9a260eafb92814bc486ee077 | 359db63ff1fa79696b7bc803bcfa0042bff8ab44 | refs/heads/master | 2023-04-06T14:53:40.378260 | 2023-03-26T00:47:52 | 2023-03-26T00:47:52 | 26,059,824 | 6 | 5 | null | 2022-12-08T00:43:21 | 2014-11-01T18:48:56 | null | UTF-8 | Python | false | false | 2,036 | py | # Copyright 2014 Facebook, Inc.
# You are hereby granted a non-exclusive, worldwide, royalty-free license to
# use, copy, modify, and distribute this software in source code or binary
# form for use in connection with the web services and APIs provided by
# Facebook.
# As with any software that integrates with the Facebook platform, your use
# of this software is subject to the Facebook Developer Principles and
# Policies [http://developers.facebook.com/policy/]. This copyright notice
# shall be included in all copies or substantial portions of the software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from facebook_business.adobjects.abstractobject import AbstractObject
"""
This class is auto-generated.
For any issues or feature requests related to this class, please let us know on
github and we'll fix in our codegen framework. We'll not be able to accept
pull request for this class.
"""
class PagePaymentOptions(
    AbstractObject,
):
    """Graph API node describing the payment methods a Page accepts.

    NOTE: auto-generated by the codegen framework (see the file header);
    limit manual edits to comments.
    """
    def __init__(self, api=None):
        super(PagePaymentOptions, self).__init__()
        self._isPagePaymentOptions = True
        self._api = api

    class Field(AbstractObject.Field):
        # Graph API field names exposed by this node.
        amex = 'amex'
        cash_only = 'cash_only'
        discover = 'discover'
        mastercard = 'mastercard'
        visa = 'visa'

    # Field name -> declared API type.
    _field_types = {
        'amex': 'unsigned int',
        'cash_only': 'unsigned int',
        'discover': 'unsigned int',
        'mastercard': 'unsigned int',
        'visa': 'unsigned int',
    }

    @classmethod
    def _get_field_enum_info(cls):
        # This node has no enum-typed fields.
        field_enum_info = {}
        return field_enum_info
| [
"marcosptf@yahoo.com.br"
] | marcosptf@yahoo.com.br |
272af5cd8cfc1ba7d535185b62c6a901be3a0963 | 428ef63ed64396b5a5b7f43fb231048651ec182b | /project_test/project/settings.py | 017e4aa20099fbc544eec6618d13b4052cb87a2a | [
"MIT"
] | permissive | Air-Mark/djangocodemirror | fbda8fe07fbe5b923def48e542fe10bdd3f7ead6 | 977386e63526f7ea260d14eebb163cb430dd9d44 | refs/heads/master | 2021-01-19T21:06:12.190215 | 2017-04-18T09:33:29 | 2017-04-18T09:33:29 | 88,607,558 | 0 | 0 | null | 2017-04-18T09:33:07 | 2017-04-18T09:33:07 | null | UTF-8 | Python | false | false | 4,016 | py | """
Django settings for sample project.
Generated by 'django-admin startproject' using Django 1.8.13.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
NOTE: Currently not used yet.
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'cLiI!d*X=(%#?HyW]0!v"T-DFRk>JaukodHalf]&BLO5qkwB}S-_2'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
#'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
#'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
"""
NOTE:
* Every things above comes from default generated settings file (from Django startproject);
* Every things below are needed settings for djangocodemirror;
* Don't edit default generated settings, instead override them below;
"""
PROJECT_PATH = os.path.join(BASE_DIR, 'project')
# Absolute filesystem path to the directory that contain tests fixtures files
TESTS_FIXTURES_DIR = os.path.join('project_test', 'tests', 'data_fixtures')
INSTALLED_APPS = INSTALLED_APPS+(
'djangocodemirror',
)
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = os.path.join(PROJECT_PATH, 'media')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = os.path.join(PROJECT_PATH, 'static')
# Template dir
TEMPLATES[0]['DIRS'] = (os.path.join(PROJECT_PATH, "templates"),)
#
# DjangoCodemirror settings
#
from djangocodemirror.settings import *
from djangocodemirror_app_settings import *
| [
"sveetch@gmail.com"
] | sveetch@gmail.com |
489dd39f8d643749a7275fcd58a8d90f341dafe0 | 254ef44b90485767a3aea8cbe77dc6bf77dddaeb | /38外观数列.py | 1ae7c83502ef1ff137cd74dc3076b10236396e59 | [] | no_license | XinZhaoFu/leetcode_moyu | fae00d52a52c090901021717df87b78d78192bdb | e80489923c60ed716d54c1bdeaaf52133d4e1209 | refs/heads/main | 2023-06-19T02:50:05.256149 | 2021-07-09T00:50:41 | 2021-07-09T00:50:41 | 331,243,022 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,145 | py | """
给定一个正整数 n ,输出外观数列的第 n 项。
「外观数列」是一个整数序列,从数字 1 开始,序列中的每一项都是对前一项的描述。
你可以将其视作是由递归公式定义的数字字符串序列:
countAndSay(1) = "1"
countAndSay(n) 是对 countAndSay(n-1) 的描述,然后转换成另一个数字字符串。
"""
class Solution(object):
    def countAndSay(self, n):
        """
        Return the n-th term of the look-and-say ("count and say") sequence.

        Each term describes the previous one: every maximal run of equal
        digits becomes "<run length><digit>".

        :type n: int
        :rtype: str
        """
        from itertools import groupby
        term = '1'
        for _ in range(n - 1):
            # groupby yields one (digit, run) pair per maximal run of equal
            # digits.  This replaces the hand-rolled index/counter scan, which
            # initialized its run counter to 1 but reset it to 0 between runs
            # and only worked because `s[i] == s[i]` self-corrected the count.
            term = ''.join(
                str(len(list(run))) + digit for digit, run in groupby(term)
            )
        return term
| [
"948244817@qq.com"
] | 948244817@qq.com |
d6c0124d31535c92b743ddd1f398fb6f785d9e54 | 95739fd7e64b0b733793d30c99e2f1a5ebe1a382 | /Strings/splitIntoLRStrings/split.py | 5d8b176b55bfb524e562d18463a78b2415fda763 | [] | no_license | vcchang-zz/coding-practice | 183f2ef6cc0aa76c819ec69e2bfbe4310ee95109 | 67ef6117a0e475345ddd6c1010885d6942da8b67 | refs/heads/master | 2022-02-28T19:10:33.297383 | 2019-11-18T08:26:11 | 2019-11-18T08:26:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,514 | py | # Find maximum number of 'LR'/'RL' substrings
# Time: O(n)  -> single left-to-right scan of s
# Space: O(1) -> just indices/counters (the previous version kept a stack of
#                up to len(s) characters but only ever compared its top)
def split(s: str):
    """Count the 'LR'/'RL' boundaries found by a greedy run scan.

    Walks maximal runs of one character; whenever a run is immediately
    followed by the opposite character, that counts as one split and the
    single opposite character is consumed before the next run starts —
    exactly the behavior of the original stack-based implementation.
    Assumes s contains only 'L' and 'R'.  Returns 0 for an empty string.
    """
    result = 0
    i = 0
    n = len(s)
    while i < n:
        run_char = s[i]
        # Swallow the maximal run of identical characters.
        while i < n and s[i] == run_char:
            i += 1
        if i < n:
            # A differing character terminates the run: one more split.
            result += 1
            i += 1
    return result
if __name__ == "__main__":
    # Self-checks: each expected value is the LR/RL split count for the input.
    s = "RLRRLLRLRL"
    expected = 4
    actual = split(s)
    assert actual == expected
    print(f"Split {s} into {actual} LR/RL strings!")
    s = "RLLLLRRRLR"
    expected = 3
    actual = split(s)
    assert actual == expected
    print(f"Split {s} into {actual} LR/RL strings!")
    s = "LLLLRRRR"
    expected = 1
    actual = split(s)
    assert actual == expected
    print(f"Split {s} into {actual} LR/RL strings!")
    s = "RRLRRLRLLLRL"
    expected = 4
    actual = split(s)
    assert actual == expected
print(f"Split {s} into {actual} LR/RL strings!") | [
"vcchang@users.noreply.github.com"
] | vcchang@users.noreply.github.com |
00998cfd683190bc9daba43cd0c849f733c23379 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_buttressing.py | 496291bd1bd114cdd3783ae3bd5eccbebcb82839 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 256 | py |
from xai.brain.wordbase.verbs._buttress import _BUTTRESS
# class header
class _BUTTRESSING(_BUTTRESS):
    """Word entry for "buttressing", an inflected form of the verb "buttress"."""
    def __init__(self):
        _BUTTRESS.__init__(self)
        self.name = "BUTTRESSING"
        self.specie = 'verbs'
        self.basic = "buttress"
        self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
abd55fff1b001ff3becce11b8f04db976569f22e | 1f3194a70bb89affa0281e5463269df7f64fa994 | /hackerrank/maximise-it.py | e5ab8e61d86568a799b6cab5351249a2aad38723 | [] | no_license | deepakchandh/competitive | 58e4143c232c0a343d0747c022548f23990c2fc4 | 1a18e7470b6c2887e49361dd464a1706c371f0be | refs/heads/master | 2020-04-06T18:18:54.928405 | 2018-11-15T10:39:23 | 2018-11-15T10:39:23 | 157,693,702 | 1 | 0 | null | 2018-11-15T10:31:04 | 2018-11-15T10:31:03 | null | UTF-8 | Python | false | false | 218 | py | k, m = map(int, raw_input().split())
import itertools
# ls[i] holds the squared elements of the i-th input list
# (the first token on each line is the element count, which is skipped).
ls = []
for _ in xrange(k):
    ls.append(map(lambda x:x**2, map(int, raw_input().split()[1:])))
print max(map(lambda x:x%m, map(sum, itertools.product(*ls)))) | [
"avi.aryan123@gmail.com"
] | avi.aryan123@gmail.com |
1d278c8edab4c51c36daaf1d3b3a55ac92845f91 | 48da1cc4ff2148f42a9713920b7e4e8cd5240b2b | /booker/settings.py | fa9bfbc6588524f7b485618a87fc9c2ffb287dd8 | [] | no_license | win777g/booker_backend | 91ff197688d025c4a7286ac70014a5b19a001685 | 593417eef1ccb1779e4eb0eabfe67fab2ed37b33 | refs/heads/master | 2022-06-08T15:30:22.757206 | 2020-01-28T12:35:27 | 2020-01-28T12:35:27 | 236,739,191 | 0 | 0 | null | 2022-05-25T02:44:17 | 2020-01-28T13:16:17 | Python | UTF-8 | Python | false | false | 5,070 | py | """
Django settings for dj_backend project.
Generated by 'django-admin startproject' using Django 2.2.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '$%vp&u!q&j5zm6j5rj9mt)=g%-$v6h=#n*7eaj)tm(qidygx$s'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['sergbooker.pythonanywhere.com']
# ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'corsheaders',
'djoser',
'index',
'api',
'bill',
'categories',
'events',
'users'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
# добавим corsheaders
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
]
# добавим corsheaders
CORS_ORIGIN_ALLOW_ALL = True
# CORS_URLS_REGEX = r'^/api/.*$'
CORS_ORIGIN_WHITELIST = (
'http://localhost:4200/',
)
ROOT_URLCONF = 'booker.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'booker.wsgi.application'
# ACCOUNT_USER_MODEL_USERNAME_FIELD = None
# ACCOUNT_EMAIL_REQUIRED = True
# ACCOUNT_USERNAME_REQUIRED = False
# ACCOUNT_AUTHENTICATION_METHOD = 'email'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, "static")
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES':(
# 'rest_framework.permissions.IsAdminUser',
'rest_framework.permissions.AllowAny',
# 'rest_framework.permissions.IsAuthenticated',
),
# 'PAGE_SIZE':10,
'DEFAULT_AUTHENTICATION_CLASSES':(
# 'rest_framework_simplejwt.authentication.JWTAuthentication',
'rest_framework.authentication.TokenAuthentication',
# 'rest_framework.authentication.BasicAuthentication',
# 'rest_framework.authentication.SessionAuthentication',
),
'DEFAULT_FILTER_BACKENDS': ('django_filters.rest_framework.DjangoFilterBackend',)
# 'EXCEPTION_HANDLER':
# 'rest_framework_json_api.exceptions.exception_handler',
# # 'DEFAULT_PAGINATION_CLASS':
# # 'rest_framework_json_api.pagination.PageNumberPagination',
# 'DEFAULT_PARSER_CLASSES':(
# # 'rest_framework_json_api.parsers.JSONParser',
# 'rest_framework.parsers.FormParser',
# 'rest_framework.parsers.MultiPartParser',
# ),
# 'DEFAULT_RENDERER_CLASSES':(
# # 'rest_framework_json_api.renderers.JSONRenderer',
# 'rest_framework.renderers.BrowsableAPIRenderer',
# ),
# 'DEFAULT_METADATA_CLASS':'rest_framework_json_api.metadata.JSONAPIMetadata'
}
| [
"win21g@mail.ru"
] | win21g@mail.ru |
ec2d9e087a2156a013066577811e66b731e52bfb | dd6297fc583860c9bf5f47b407e3430cda004dde | /tests/test_darts.py | 6005768f7cc5402241cb0ec3dcd72452951351da | [
"Apache-2.0"
] | permissive | veritynoob/python-darts | 0d32e4b92d91c709dee97dc14066347889e85061 | fbebf1b3fc379afa99944cca50acbc02ba72411f | refs/heads/master | 2020-05-23T08:10:48.165357 | 2014-03-07T10:21:21 | 2014-03-07T14:10:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 641 | py | # -*- coding: utf-8 -*-
import sys
import os
import unittest
from nose.tools import *
err = sys.stderr
from darts import Darts
class TestDarts(unittest.TestCase):
    """Tests for the Darts (double-array trie) wrapper, using mock_data.darts."""
    def setUp(self):
        # The dictionary fixture lives next to this test module.
        data_path = os.path.join(os.path.dirname(__file__), 'mock_data.darts')
        self._ins = Darts(data_path)
    def test_common_prefix_search(self):
        # All dictionary entries that are prefixes of the query are returned.
        eq_(['star'], self._ins.common_prefix_search('star'))
        eq_(['star', 'star wars'], self._ins.common_prefix_search('star wars'))
    def test_longest_match(self):
        # Only the single longest matching entry is returned.
        eq_('star', self._ins.longest_match('star'))
        eq_('star wars', self._ins.longest_match('star wars'))
| [
"ikuya@ikuya.net"
] | ikuya@ikuya.net |
157c3a0c40e118adfd794b9feb813995acaf6ae4 | 8e5d44c70978b7c9ac259adc31f6f9156ac936db | /helusers/urls.py | 9f886f236d9467ccc2837ccac5458d9adc260dc9 | [
"BSD-2-Clause"
] | permissive | tommimanbytes/django-helusers | 60a950c5030a63356b7e2c75e74336474ddfd047 | f31d22a59e78a5a119f721d7ae5afa16602028de | refs/heads/master | 2021-03-08T13:22:57.787460 | 2020-01-28T12:21:40 | 2020-01-28T12:21:40 | 246,348,419 | 0 | 0 | BSD-2-Clause | 2020-03-10T16:12:00 | 2020-03-10T16:11:59 | null | UTF-8 | Python | false | false | 563 | py | """URLs module"""
from django.urls import path
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from . import views
# URL namespace, so routes reverse as e.g. 'helusers:auth_logout'.
app_name = 'helusers'
urlpatterns = [
    path('logout/', views.LogoutView.as_view(), name='auth_logout'),
    path('logout/complete/', views.LogoutCompleteView.as_view(), name='auth_logout_complete'),
    path('login/', views.LoginView.as_view(), name='auth_login'),
]
# Fail fast at import time rather than at logout time: these views rely on
# Django's LOGOUT_REDIRECT_URL setting being configured.
if not settings.LOGOUT_REDIRECT_URL:
    raise ImproperlyConfigured("You must configure LOGOUT_REDIRECT_URL to use helusers views.")
| [
"juha.yrjola@iki.fi"
] | juha.yrjola@iki.fi |
4e801cd7f9fa6def00c0bec6926f95c343631736 | 584db1be8b6bdedaa56d186692ad72da5ee07164 | /patron/tests/unit/scheduler/filters/test_retry_filters.py | 03b0e77d37fadcfea2b203a179a5741c78eff48b | [
"Apache-2.0"
] | permissive | casbin/openstack-patron | 66006f57725cf1c3d735cd5529d3459fd77384c8 | b41b1262f3a52c8cc9f6b6bdf87be5a1abcf6d25 | refs/heads/master | 2023-05-31T05:23:37.721768 | 2015-12-31T12:18:17 | 2015-12-31T12:18:17 | 382,054,546 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,935 | py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from patron.scheduler.filters import retry_filter
from patron import test
from patron.tests.unit.scheduler import fakes
class TestRetryFilter(test.NoDBTestCase):
    """Unit tests for the scheduler's RetryFilter.

    The filter rejects a host/node pair that already appears in the request's
    retry history (``filter_properties['retry']['hosts']``); with no retry
    info at all, every host passes.
    """
    def setUp(self):
        super(TestRetryFilter, self).setUp()
        self.filt_cls = retry_filter.RetryFilter()
    def test_retry_filter_disabled(self):
        # Test case where retry/re-scheduling is disabled.
        host = fakes.FakeHostState('host1', 'node1', {})
        filter_properties = {}
        self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
    def test_retry_filter_pass(self):
        # Node not previously tried.
        host = fakes.FakeHostState('host1', 'nodeX', {})
        retry = dict(num_attempts=2,
                     hosts=[['host1', 'node1'], # same host, different node
                            ['host2', 'node2'], # different host and node
                            ])
        filter_properties = dict(retry=retry)
        self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
    def test_retry_filter_fail(self):
        # Node was already tried.
        host = fakes.FakeHostState('host1', 'node1', {})
        retry = dict(num_attempts=1,
                     hosts=[['host1', 'node1']])
        filter_properties = dict(retry=retry)
        self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
| [
"hsluoyz@qq.com"
] | hsluoyz@qq.com |
201462d73fbc31fcfb0a63611fb330dc99024f9a | 5cec1ff43bf38cf31316254dabe3f972d38744ad | /src/hydrat/corpora/langid/acl2011.py | fdbd31fb88a5161451f55b4d3ed1863efe994fce | [] | no_license | eyadsibai/hydrat | 7fb63f3c54f1fca25d04ab7266712c1077ffa2e3 | 5a68c6b8f32bc6bad59c3f002340bf7ef62e868c | refs/heads/master | 2016-09-06T14:16:46.082697 | 2013-08-06T05:14:02 | 2013-08-06T05:14:02 | 33,199,904 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 805 | py | from hydrat import config
from hydrat.dataset.iso639 import ISO639_1
from hydrat.dataset.text import ByteUBT, SingleDir
from hydrat.dataset.encoded import CodepointUBT, UTF8
from hydrat.dataset.split import CrossValidation
from hydrat.configuration import Configurable, DIR
class Wiki10k(Configurable, ISO639_1, UTF8, CrossValidation, ByteUBT, CodepointUBT, SingleDir):
  """ACL2011 langid Wikipedia-10k dataset, read as one directory of documents."""
  # Configuration key / directory this dataset requires at runtime.
  requires={ ('corpora', 'acl2011-langid-wiki10k') : DIR('wiki10k') }
  def data_path(self):
    # Resolve the corpus directory from hydrat's global config.
    return config.getpath('corpora', 'acl2011-langid-wiki10k')
  def cm_iso639_1(self):
    # Class map: label each instance with the token before the first '_' in
    # its id — presumably an ISO 639-1 language code (per the method name).
    ids = self.tokenstream('byte').keys()
    return dict( (i,[i.split('_')[0]]) for i in ids )
  def sp_crossvalidation(self):
    # 10-fold cross-validation split over the iso639_1 classes, seeded for
    # reproducibility.
    from numpy.random.mtrand import RandomState
    return self.crossvalidation('iso639_1', 10, RandomState(61383441363))
| [
"saffsd@gmail.com"
] | saffsd@gmail.com |
e0eb80c1116dd85abdd4c4f684c80f82dcb013ec | 53e99ac63c093cf9d6b6061386229a52ae851fef | /_1_PythonBasic/Modules.py | 8e65b767e7cfd787c7ca9b8a70bfc054a4b84f08 | [] | no_license | zouwen198317/TensorflowPY36CPU | d33c1ef3fd0580f498caf512eb5e2db078202c53 | 97d1d815efbef7e531aa6fbd10c8806bef0710e7 | refs/heads/master | 2021-08-31T04:22:19.828203 | 2017-12-20T10:11:05 | 2017-12-20T10:11:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 165 | py | import math
class ModulesDemo:
    """Tiny demonstration of calling into the built-in ``math`` module."""

    def buildin_Modules(self):
        """Print the square roots of 100 and 200 (name kept for callers)."""
        for value in (100, 200):
            print(math.sqrt(value))
m = ModulesDemo()
m.buildin_Modules() | [
"em3888@gmail.com"
] | em3888@gmail.com |
c233bacb97efe39e05892848a99b0eee5d880136 | 7702446393583be7e29e26ca0911c938bb61f79f | /tests/test_objects.py | 2b208055b4d50b376b3c7879d888bceb2bd2b9f3 | [] | no_license | RussellLuo/easyconfig | d554953600d78a0563d5b2f035e561541b41d5f6 | a599ae95750be92547f6b5df15872cd347222f61 | refs/heads/master | 2020-05-19T12:57:22.751638 | 2017-11-02T01:57:36 | 2017-11-02T01:57:36 | 28,290,302 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,258 | py | # -*- coding: utf-8 -*-
import os
import pytest
from easyconfig import str_object, envvar_object
class TestObjects(object):
    """Tests for easyconfig's str_object()/envvar_object() loaders.

    ``str_object`` imports a config object from a dotted module path, or —
    with ``is_envvar=True`` — from a path stored in the named environment
    variable. ``silent=True`` turns load failures into a ``None`` return
    instead of an exception. ``envvar_object`` is the envvar shorthand.
    """
    def test_valid_str_object(self):
        obj = str_object('configs.default_config')
        assert obj.DEBUG
        assert obj.PORT == 5000
        assert obj.SECRET_KEY == '123***456'
    def test_invalid_str_object(self):
        # An empty module path is rejected outright.
        with pytest.raises(ValueError) as exc:
            str_object('')
        assert str(exc.value) == 'Empty module name'
    def test_invalid_str_object_in_silent(self):
        obj = str_object('', silent=True)
        assert obj is None
    def test_valid_str_object_from_envvar(self):
        os.environ['EASY_CONFIG'] = 'configs.default_config'
        obj = str_object('EASY_CONFIG', is_envvar=True)
        assert obj.DEBUG
        assert obj.PORT == 5000
        assert obj.SECRET_KEY == '123***456'
    def test_nonexistent_str_object_from_envvar(self):
        # Unset variable -> RuntimeError with a descriptive message.
        os.environ.pop('EASY_CONFIG', None)
        with pytest.raises(RuntimeError) as exc:
            str_object('EASY_CONFIG', silent=False, is_envvar=True)
        assert str(exc.value) == (
            "The environment variable 'EASY_CONFIG' is not set "
            "and as such configuration could not be "
            "loaded. Set this variable and make it "
            "point to a configuration file"
        )
    def test_nonexistent_str_object_from_envvar_in_silent(self):
        os.environ.pop('EASY_CONFIG', None)
        obj = str_object('EASY_CONFIG', silent=True, is_envvar=True)
        assert obj is None
    def test_empty_str_object_from_envvar(self):
        # Variable set but empty behaves like an empty module path.
        os.environ['EASY_CONFIG'] = ''
        with pytest.raises(ValueError) as exc:
            str_object('EASY_CONFIG', silent=False, is_envvar=True)
        assert str(exc.value) == 'Empty module name'
    def test_empty_str_object_from_envvar_in_silent(self):
        os.environ['EASY_CONFIG'] = ''
        obj = str_object('EASY_CONFIG', silent=True, is_envvar=True)
        assert obj is None
    def test_valid_envvar_object(self):
        os.environ['EASY_CONFIG'] = 'configs.default_config'
        obj = envvar_object('EASY_CONFIG')
        assert obj.DEBUG
        assert obj.PORT == 5000
        assert obj.SECRET_KEY == '123***456'
| [
"luopeng.he@gmail.com"
] | luopeng.he@gmail.com |
387482384e4dd046d7add4e9d91323b0feff67a1 | 90d06e9cacd52f2ba07d55dd852cb3cb7171c452 | /leetcode/_380_InsertDeleteGetRandomO1.py | 13512bbfe2e5287bd9d8b1aa137a925219712dc1 | [] | no_license | shahidul2k9/problem-solution | efd91af08e103b552a225bca37660c51c60b98f2 | 0e970ac9c72f2ba13e66c180b208a2ec53886cd1 | refs/heads/master | 2022-10-16T04:04:31.556454 | 2022-09-24T08:09:46 | 2022-09-24T08:09:46 | 19,431,424 | 51 | 92 | null | 2022-09-24T08:09:47 | 2014-05-04T17:13:28 | C++ | UTF-8 | Python | false | false | 1,013 | py | import random
class RandomizedSet:
    """Set supporting O(1) average-time insert, remove and getRandom.

    Backed by a list of the values (for uniform random access by index) plus
    a dict mapping each value to its position in that list.
    """

    def __init__(self):
        self.index_map = dict()  # value -> index into self.val_seq
        self.val_seq = []        # the stored values, in arbitrary order

    def insert(self, val: int) -> bool:
        """Insert ``val``; return False if it was already present."""
        if val in self.index_map:
            return False
        self.index_map[val] = len(self.val_seq)
        self.val_seq.append(val)
        return True

    def remove(self, val: int) -> bool:
        """Remove ``val``; return False if it was not present.

        Uses the swap-with-last trick to keep removal O(1): the final list
        element is moved into the removed slot, then the tail is popped.
        """
        # dict.pop doubles as the membership test and the deletion.
        index = self.index_map.pop(val, None)
        if index is None:
            return False
        last_index = len(self.val_seq) - 1
        if index != last_index:
            last_val = self.val_seq[last_index]
            self.val_seq[index] = last_val
            self.index_map[last_val] = index
        self.val_seq.pop()
        return True

    def getRandom(self) -> int:
        """Return a uniformly random stored value (set must be non-empty)."""
        return random.choice(self.val_seq)
| [
"shahidul2k9@gmail.com"
] | shahidul2k9@gmail.com |
c4b8c62f5834dc6196a1cdfc3d83e3eda3d86ded | 2a1b8a671aceda6bc446f8ce26400aa84fa444a6 | /Packs/ForescoutEyeInspect/Scripts/ForescoutEyeInspectButtonGetVulnerabilityInfo/ForescoutEyeInspectButtonGetVulnerabilityInfo.py | 6d2dfea341558f8ac93a1816c1bfe95c7672c67c | [
"MIT"
] | permissive | demisto/content | 6d4722d46f0ff0beea2748e9f7de585bf91a78b4 | 890def5a0e0ae8d6eaa538148249ddbc851dbb6b | refs/heads/master | 2023-09-04T00:02:25.618032 | 2023-09-03T21:56:22 | 2023-09-03T21:56:22 | 60,525,392 | 1,023 | 1,921 | MIT | 2023-09-14T20:55:24 | 2016-06-06T12:17:02 | Python | UTF-8 | Python | false | false | 595 | py | import demistomock as demisto
from CommonServerPython import *
def get_cve():
    """Fetch Forescout EyeInspect vulnerability info for the cve_id argument.

    Reads ``cve_id`` from the script arguments; aborts via return_error()
    when it is missing, otherwise delegates to the integration command
    ``forescout-ei-vulnerability-info-get`` and returns its results.
    """
    cve_id = demisto.args().get('cve_id')
    if not cve_id:
        return_error('Missing CVE ID.')
    return demisto.executeCommand('forescout-ei-vulnerability-info-get', {'cve_id': cve_id})
def main():
    """Entry point: surface the vulnerability info as command results.

    Any failure is logged with a cleaned traceback and reported to the user
    via return_error().
    """
    try:
        return_results(get_cve())
    except Exception as e:
        demisto.error(fix_traceback_line_numbers(traceback.format_exc()))
        # Fixed copy/paste in the user-facing message: this script retrieves
        # vulnerability info (see get_cve), not a pcap.
        return_error(f'Failed to get vulnerability info from Forescout EyeInspect.\nError:\n{e}')
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
| [
"noreply@github.com"
] | demisto.noreply@github.com |
556da0869ff035d8d7bacb7fd7b934185e14653b | a55a2d6af33fcb46283bee27a930cbae67cc0d83 | /hparams.py | e68e76ebe0c7e89f09f8e07cc47911b861a89152 | [] | no_license | xcmyz/GST-Tacotron | b5a2eed58def49120d3dc2fd0588ed17c783bf0e | 7ff97fe6b3ca522d42dd32f1089ee829dc801cd4 | refs/heads/master | 2020-04-24T14:33:13.384029 | 2019-02-22T14:04:56 | 2019-02-22T14:04:56 | 172,024,521 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,050 | py | # Preprocess
cleaners = 'english_cleaners'
# Audio:
num_mels = 80
n_mels = 80
num_freq = 1025
sample_rate = 20000
frame_length_ms = 50
frame_shift_ms = 12.5
preemphasis = 0.97
min_level_db = -100
ref_level_db = 20
griffin_lim_iters = 60
power = 1.5
n_fft = 2048
n_iter = 50
# max_db = 100
# ref_db = 20
# Model:
E = 256
r = 5
hidden_size = 128
embedding_size = 256
teacher_forcing_ratio = 1.0
max_iters = 200
max_Ty = 200
# reference encoder
ref_enc_filters = [32, 32, 64, 64, 128, 128]
ref_enc_size = [3, 3]
ref_enc_strides = [2, 2]
ref_enc_pad = [1, 1]
ref_enc_gru_size = E // 2
# style token layer
token_num = 10
num_heads = 8
K = 16
decoder_K = 8
embedded_size = E
dropout_p = 0.5
num_banks = 15
num_highways = 4
vocab = "PE abcdefghijklmnopqrstuvwxyz'.?"
# Training:
outputs_per_step = 5
batch_size = 12
epochs = 100
lr = 0.00005
clip_value = 1.
loss_weight = 0.5
# decay_step = [500000, 1000000, 2000000]
decay_step = [20, 60]
# save_step = 2000
save_step = 100
# log_step = 200
log_step = 5
clear_Time = 20
checkpoint_path = './model_new'
| [
"noreply@github.com"
] | xcmyz.noreply@github.com |
85363835deab82e3430cbf858b2eb89c7ee9e4c6 | 5f4e13201d4c5b7edc8dbbda289380682a187bec | /nlpfr/nltk/stem/wordnet.py | e34292f9efaa40eaf268bd03c313c4d2f95de5ba | [] | no_license | intellivoid/CoffeeHousePy | 92f4fb344de757837c3d3da05cb5513e90408039 | 57c453625239f28da88b88ddd0ae5f1ecdd4de3c | refs/heads/master | 2023-02-23T14:32:01.606630 | 2021-01-28T02:57:10 | 2021-01-28T02:57:10 | 324,419,067 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,322 | py | # Natural Language Toolkit: WordNet stemmer interface
#
# Copyright (C) 2001-2019 NLTK Project
# Author: Steven Bird <stevenbird1@gmail.com>
# Edward Loper <edloper@gmail.com>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
from nltk.corpus.reader.wordnet import NOUN
from nltk.corpus import wordnet
class WordNetLemmatizer(object):
    """
    WordNet Lemmatizer
    Lemmatize using WordNet's built-in morphy function.
    Returns the input word unchanged if it cannot be found in WordNet.
    >>> from nltk.stem import WordNetLemmatizer
    >>> wnl = WordNetLemmatizer()
    >>> print(wnl.lemmatize('dogs'))
    dog
    >>> print(wnl.lemmatize('churches'))
    church
    >>> print(wnl.lemmatize('aardwolves'))
    aardwolf
    >>> print(wnl.lemmatize('abaci'))
    abacus
    >>> print(wnl.lemmatize('hardrock'))
    hardrock
    """
    def __init__(self):
        # Stateless: all work happens in wordnet._morphy at lemmatize time.
        pass
    def lemmatize(self, word: str, pos=NOUN) -> str:
        # _morphy returns every candidate base form for (word, pos); pick the
        # shortest candidate, or echo the word back when nothing was found.
        lemmas = wordnet._morphy(word, pos)
        return min(lemmas, key=len) if lemmas else word
    def __repr__(self):
        return "<WordNetLemmatizer>"
# unload wordnet
def teardown_module(module=None):
    # Test-framework (nose/pytest) module-teardown hook: release the memory
    # held by the loaded WordNet corpus after this module's doctests run.
    from nltk.corpus import wordnet
    wordnet._unload()
| [
"netkas@intellivoid.info"
] | netkas@intellivoid.info |
cb9151a77cd1ec635a88b99df80f378fbcf242fb | eb9597c78d4bee2a0ebdcf147490eb514cb7a565 | /samples/module_sample/usqlite/test_python/test_python.py | c70fda3f32e239bfd1594dfb9f508081bab05fca | [] | no_license | Bayonetta5/socketpro | fede51bcc6d1e6f91d8060e1e9c4149b6a8c2163 | ddefe87e0572fde6f12930d867c2c10aed8a980e | refs/heads/master | 2020-03-09T15:51:15.000640 | 2018-04-07T18:48:01 | 2018-04-07T18:48:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,711 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from spa import Pair
from spa.udb import *
from spa.clientside import CSocketPool, CConnectionContext, CSqlite, CUQueue
import datetime
with CSocketPool(CSqlite) as spSqlite:
print('Remote async sqlite server host: ')
cc = CConnectionContext(sys.stdin.readline(), 20901, 'PythonUser', 'TooMuchSecret')
ok = spSqlite.StartSocketPool(cc, 1, 1)
sqlite = spSqlite.AsyncHandlers[0]
if not ok:
print('No connection error code = ' + str(sqlite.AttachedClientSocket.ErrorCode))
spSqlite.ShutdownPool()
exit(0)
    def cb(sqlite, res, errMsg):
        # Generic async-completion callback: echo the result code and message.
        print('res = ' + str(res) + ', errMsg: ' + errMsg)
    def cbExecute(sqlite, res, errMsg, affected, fail_ok, lastRowId):
        # SQL-execution callback. `fail_ok` packs two counters into one int:
        # high 32 bits = failed statements, low 32 bits = successful ones.
        print('affected = ' + str(affected) + ', fails = ' + str(fail_ok >> 32) + ', oks = ' + str(fail_ok & 0xffffffff) + ', res = ' + str(res) + ', errMsg: ' + errMsg + ', last insert id = ' + str(lastRowId))
def TestCreateTables():
ok = sqlite.ExecuteSql('CREATE TABLE COMPANY(ID INT8 PRIMARY KEY NOT NULL, name CHAR(64) NOT NULL, ADDRESS varCHAR(256) not null, Income float not null)', cbExecute)
ok = sqlite.ExecuteSql("CREATE TABLE EMPLOYEE(EMPLOYEEID INT8 PRIMARY KEY NOT NULL unique, CompanyId INT8 not null, name NCHAR(64) NOT NULL, JoinDate DATETIME not null default(datetime('now')), IMAGE BLOB, DESCRIPTION NTEXT, Salary real, FOREIGN KEY(CompanyId) REFERENCES COMPANY(id))", cbExecute)
ra = []
def cbRows(sqlite, lstData):
index = len(ra) - 1
ra[index].second.append(lstData)
def cbRowHeader(sqlite):
vColInfo = sqlite.ColumnInfo
ra.append(Pair(vColInfo, []))
def TestPreparedStatements():
sql_insert_parameter = "Select datetime('now');INSERT OR REPLACE INTO COMPANY(ID, NAME, ADDRESS, Income) VALUES (?, ?, ?, ?)"
ok = sqlite.Prepare(sql_insert_parameter, cb)
vData = []
vData.append(1)
vData.append("Google Inc.")
vData.append("1600 Amphitheatre Parkway, Mountain View, CA 94043, USA")
vData.append(66000000000.0)
vData.append(2)
vData.append("Microsoft Inc.")
vData.append("700 Bellevue Way NE- 22nd Floor, Bellevue, WA 98804, USA")
vData.append(93600000000.0)
vData.append(3)
vData.append("Apple Inc.")
vData.append("1 Infinite Loop, Cupertino, CA 95014, USA")
vData.append(234000000000.0)
return sqlite.ExecuteParameters(vData, cbExecute, cbRows, cbRowHeader)
def InsertBLOBByPreparedStatement():
wstr = ""
while len(wstr) < 128 * 1024:
wstr += u'近日,一则极具震撼性的消息,在中航工业的干部职工中悄然流传:中航工业科技委副主任、总装备部先进制造技术专家组组长、原中航工业制造所所长郭恩明突然失联。老郭突然失联,在中航工业和国防科技工业投下了震撼弹,也给人们留下了难以解开的谜团,以正面形象示人的郭恩明,为什么会涉足谍海,走上不归路,是被人下药被动失足?还是没能逃过漂亮“女间谍“的致命诱惑?还是仇视社会主义,仇视航空工业,自甘堕落与国家与人民为敌?'
str = ""
while len(str) < 256 * 1024:
str += 'The epic takedown of his opponent on an all-important voting day was extraordinary even by the standards of the 2016 campaign -- and quickly drew a scathing response from Trump.'
sqlInsert = u'insert or replace into employee(EMPLOYEEID, CompanyId, name, JoinDate, image, DESCRIPTION, Salary) values(?, ?, ?, ?, ?, ?, ?);select * from employee where employeeid = ?'
ok = sqlite.Prepare(sqlInsert, cb)
vData = []
sbBlob = CUQueue()
#first set of data
vData.append(1)
vData.append(1) #google company id
vData.append("Ted Cruz")
vData.append(datetime.datetime.now())
sbBlob.SaveString(wstr)
vData.append(sbBlob.GetBuffer())
vData.append(wstr)
vData.append(254000.0)
vData.append(1)
#second set of data
vData.append(2)
vData.append(1) #google company id
vData.append("Donald Trump")
vData.append(datetime.datetime.now())
sbBlob.SetSize(0)
sbBlob.SaveAString(str)
vData.append(sbBlob.GetBuffer())
vData.append(str)
vData.append(20254000.0)
vData.append(2)
#third set of data
vData.append(3)
vData.append(2) #Microsoft company id
vData.append("Hillary Clinton")
vData.append(datetime.datetime.now())
sbBlob.SaveString(wstr)
vData.append(sbBlob.GetBuffer())
vData.append(wstr)
vData.append(6254000.0)
vData.append(3)
return sqlite.ExecuteParameters(vData, cbExecute, cbRows, cbRowHeader)
ok = sqlite.Open(u'', cb)
ok = TestCreateTables()
ok = sqlite.BeginTrans(tagTransactionIsolation.tiReadCommited, cb)
ok = TestPreparedStatements()
ok = InsertBLOBByPreparedStatement()
ok = sqlite.EndTrans(tagRollbackPlan.rpDefault, cb)
sqlite.WaitAll()
print('')
print('+++++ Start rowsets +++')
index = 0
for a in ra:
if len(a.first) > 0:
print('Statement index = ' + str(index) + ', rowset with columns = ' + str(len(a.first)) + ', records = ' + str(len(a.second)) + '.')
else:
print('Statement index = ' + str(index) + ', no rowset received.')
index += 1
print('+++++ End rowsets +++')
print('')
print('Press any key to close the application ......')
sys.stdin.readline()
| [
"support@udaparts.com"
] | support@udaparts.com |
1f502fe59baef7019192d84d80899a3a869ae157 | 0072acbd082e994d7fc51b00ba7ef1470042dac3 | /movies/movie/models.py | f5fa2ed7b40db0347c64788bf3157dccc6eec7be | [] | no_license | jaybenaim/day22-data-modeling | fd8a5e512115f1f235c987a93afadbb610a86ee6 | cdf8e087c17e4b47e2f32fd24e1e14452a0b61f8 | refs/heads/master | 2022-11-04T00:33:59.103810 | 2019-08-01T04:28:27 | 2019-08-01T04:28:27 | 199,929,719 | 0 | 1 | null | 2022-10-20T08:58:40 | 2019-07-31T21:12:04 | Python | UTF-8 | Python | false | false | 701 | py | from django.db import models
class Viewer(models.Model):
    """A person who watches films."""
    name = models.CharField(max_length=255)
    # NOTE(review): Film also declares a ManyToManyField back to Viewer, so the
    # many-to-many relation is modelled twice (two separate join tables);
    # one side is probably redundant — confirm intended schema.
    films = models.ManyToManyField('Film')
    def __str__(self):
        return f'{self.name} '
class Film(models.Model):
    """A film that viewers can watch."""
    name_of_film = models.CharField(max_length=255)
    # viewers = models.ForeignKey(Viewer, on_delete=models.CASCADE)
    # NOTE(review): field name is capitalized and matches the Viewer class
    # name; Django convention would be a lowercase `viewers`. Renaming would
    # change the DB schema/API, so it is only flagged here. This M2M also
    # mirrors Viewer.films (see note on Viewer).
    Viewer = models.ManyToManyField('Viewer')
    def __str__(self):
        return f'{self.name_of_film}'
class Movies_Viewed(models.Model):
    """Explicit viewer/film pairing table.

    NOTE(review): this looks like a hand-rolled join table, but it is not
    wired into either ManyToManyField via ``through=`` — confirm whether it
    should be.
    """
    viewer = models.ForeignKey(Viewer, on_delete=models.CASCADE)
    film = models.ForeignKey(Film, on_delete=models.CASCADE)
    def __str__(self):
        return f'{self.viewer} {self.film}'
| [
"benaimjacob@gmail.com"
] | benaimjacob@gmail.com |
7e64fba06f1c66052bced856711f1e9be047b79e | 0a6d0ea3a60ece86ec477e490605b063b2e8c060 | /calcAverageDurationAs2ndWordInFile.py | 0acddb31ed92ef920406e9a3d975e9e4ebb603b1 | [] | no_license | alclass/bin | bdbe063d629c05e17f64dc27f71211e5881e1f3b | b4c5642c8d5843846d529630f8d93a7103676539 | refs/heads/master | 2023-07-24T11:36:50.009838 | 2023-07-09T20:26:42 | 2023-07-09T20:26:42 | 17,493,545 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,490 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import glob, sys
DEFAULT_EXTENSION = 'mp4'
def calcAverageDurationAs2ndWordInFile(files):
    """Average the minute durations encoded as the 2nd word of each filename.

    Two encodings are recognized (see show_cli_help()):
      * "<minutes>'"        e.g. "31'"  -> 31 minutes
      * "<hours>h<minutes>" e.g. "1h29" -> 89 minutes ("1h" = exactly 1 hour)
    Filenames that do not follow the convention are skipped. Each recognized
    duration is echoed to stdout with its filename. Returns the rounded
    average in minutes, or 0 when nothing could be parsed.
    """
    parsed_minutes = []
    for filename in files:
        try:
            token = filename.split()[1]  # IndexError -> no second word
            if 'h' in token:
                hours, _, minutes = token.partition('h')
                duration = int(hours) * 60 + int(minutes or 0)
            else:
                duration = int(token.strip(" ',;-\r"))
        except (ValueError, IndexError):
            continue  # not duration-encoded; leave it out of the average
        print(duration, filename)
        parsed_minutes.append(duration)
    # Average only over successfully parsed names, never over len(files).
    if not parsed_minutes:
        return 0
    return round(sum(parsed_minutes) / len(parsed_minutes))
def show_cli_help():
  """Print the CLI usage text (filename conventions and the -e flag); returns None."""
  print('''
  This scripts calculates the average duration in minutes from
  files that have names based on the following conventions:
  1) the duration is put as a second word in the filename;
  2) the duration is:
    2a) either <minutes>' (ie number of minutes [from 0 to 59] followed by ' (plics)
       example: 31' (meaning 31 minutes integer rounded)
    2b) or number of hours followed by an "h" followed by number of minutes [from 0 to 59]
       example: 2h17 (meaning 2 hours and 17 minutes, minutes integer rounded)
  Argument required:
    The argument required is -e=<extension>
      -e=mp4 (ie, take the average duration for mp4 files,
             all those in which name convention holds)
  ''')
def get_extension_from_args():
  """Return the extension given via -e=<ext> on the command line, else None.

  NOTE(review): on -h/--help this returns show_cli_help(), which is None, so
  the caller falls back to the default extension and processing continues
  after the help text is printed — probably the script should stop instead.
  """
  ext = None
  for arg in sys.argv:
    if arg in ['-h', '--help']:
      return show_cli_help()
    elif arg.startswith('-e='):
      ext = arg[len('-e=') : ]
  return ext
def get_files_from_args():
  '''
  Glob the current directory for files with the CLI-selected extension,
  falling back to DEFAULT_EXTENSION when -e= was not given.
  '''
  ext = get_extension_from_args()
  if ext is None:
    ext = DEFAULT_EXTENSION
  files = glob.glob("*." + ext)
  return files
def process():
  '''
  Entry point: collect matching files from the current directory and print
  their average duration.
  '''
  files = get_files_from_args()
  print('Calculation duration average calculation with %d files.' %len(files))
  if len(files) > 0:
    average = calcAverageDurationAs2ndWordInFile(files)
    print('Duration average is', average)
  else:
    # NOTE(review): this hint assumes the extension was wrong, but the same
    # branch is reached when the directory simply has no matching files.
    print('Please use the -e=<extension> to pick up a file extension to be used for the duration average calculation.')
if __name__ == '__main__':
process()
| [
"livrosetc@yahoo.com.br"
] | livrosetc@yahoo.com.br |
4e8647f661d6e79fbed4f1c02d6a128b5a187d7b | cc87a78151e3229ab408f8562fb2ee0c90af9d43 | /hothouse/cli.py | 2d889b2baa4df074a8e517df5d50809000252903 | [
"MIT"
] | permissive | cropsinsilico/hothouse | c6823bbde426d2dfced10e314af1b4a34181f474 | 11bda5941ee84ad024f8b73a4cdbe82b4be16f18 | refs/heads/master | 2023-01-12T08:59:11.871734 | 2021-06-09T17:51:06 | 2021-06-09T17:51:06 | 179,533,155 | 0 | 3 | MIT | 2022-12-26T20:47:17 | 2019-04-04T16:12:12 | Python | UTF-8 | Python | false | false | 401 | py | # -*- coding: utf-8 -*-
"""Console script for hothouse."""
import sys
import click
@click.command()
def main(args=None):
    """Console script for hothouse."""
    # Cookiecutter placeholder body. `args` is unused: click parses sys.argv
    # itself when the command is invoked from the console entry point.
    click.echo("Replace this message by putting your code into " "hothouse.cli.main")
    click.echo("See click documentation at http://click.pocoo.org/")
    return 0
if __name__ == "__main__":
sys.exit(main()) # pragma: no cover
| [
"matthewturk@gmail.com"
] | matthewturk@gmail.com |
e7a32e42f05f7c64f62178c14d630f5e48ad7c81 | 2a4ad073755ff447926e44b7c2e0b56b5ded37d2 | /NowCoder/56J_撤除链表中的重复节点_A.py | e8d1baf1e93c34bbd24a4c52285a92849ae3f96c | [] | no_license | wcb2213/Learning_notes | 3a9b3fdb7df5c6844a9031db8dd7e9dd858e093c | d481e1754c15c91557027bee872f4d97da3c0fca | refs/heads/master | 2021-07-06T15:54:56.199655 | 2020-09-04T14:05:50 | 2020-09-04T14:05:50 | 174,832,296 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,526 | py | #!/usr/bin/env/ python
# -*- coding:utf-8 -*-
# Created by: Vanish
# Created on: 2019/4/2
# 30ms
# In a sorted linked list with duplicate nodes, delete every duplicated node
# (duplicates are not kept) and return the head pointer.
# E.g. the list 1->2->3->3->4->4->5 becomes 1->2->5 after processing.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    # Hash-based two-pass approach (original comment: "哈希")
    def deleteDuplication(self, pHead):
        """Remove every node whose value occurs more than once in the list.

        E.g. 1->2->3->3->4->4->5 becomes 1->2->5. Pass 1 records per value
        whether it is unique (True) or duplicated (False); pass 2 relinks
        only the unique nodes behind a dummy head. NOTE: ``ListNode`` is
        supplied by the judge (see the commented-out definition above).
        """
        # write code here
        p=pHead
        if not p or not p.next: return p
        dic={}
        while p:
            if p.val not in dic: dic[p.val]=True  # first sighting: unique so far
            else: dic[p.val]=False  # seen again: mark as duplicated
            p=p.next
        res=ListNode(0)  # dummy head simplifies relinking
        q,p=res,pHead
        while p:
            if dic[p.val]:
                q.next=p
                q=q.next
            p=p.next
        q.next=None  # detach any trailing duplicated nodes
        return res.next
# # 直接处理
# def deleteDuplication(self, pHead):
# # write code here
# p = pHead
# if not p or not p.next: return p
# res = ListNode(0)
# q = res
# while p:
# if not p.next or p.val != p.next.val:
# q.next = p
# q = q.next
# p = p.next
# else:
# val=p.val
# while p.next and p.next.val==val:
# p=p.next
# p = p.next
# if not p:
# q.next=None
# break
# return res.next | [
"wcb2213@163.com"
] | wcb2213@163.com |
05ec1415c241cba02bf60e5d92821b0af3d152b0 | bb33e6be8316f35decbb2b81badf2b6dcf7df515 | /source/res/battle_royale/scripts/client/battle_royale/gui/prb_control/entities/regular/squad/actions_validator.py | adb24bb413320f6de9fbdcfab72bae70f4f1fdfc | [] | no_license | StranikS-Scan/WorldOfTanks-Decompiled | 999c9567de38c32c760ab72c21c00ea7bc20990c | d2fe9c195825ececc728e87a02983908b7ea9199 | refs/heads/1.18 | 2023-08-25T17:39:27.718097 | 2022-09-22T06:49:44 | 2022-09-22T06:49:44 | 148,696,315 | 103 | 39 | null | 2022-09-14T17:50:03 | 2018-09-13T20:49:11 | Python | UTF-8 | Python | false | false | 2,704 | py | # Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: battle_royale/scripts/client/battle_royale/gui/prb_control/entities/regular/squad/actions_validator.py
from gui.prb_control.entities.base.actions_validator import ActionsValidatorComposite
from gui.prb_control.entities.base.squad.actions_validator import SquadActionsValidator, SquadVehiclesValidator
from gui.prb_control.entities.base.unit.actions_validator import UnitSlotsValidator, CommanderValidator
from gui.prb_control.items import ValidationResult
from gui.prb_control.settings import UNIT_RESTRICTION
from helpers import dependency
from skeletons.gui.game_control import IBattleRoyaleController
from gui.periodic_battles.models import PrimeTimeStatus
from constants import IS_DEVELOPMENT
class _BattleRoyaleVehiclesValidator(SquadVehiclesValidator):
    """Vehicle check: only Battle-Royale-only vehicles are considered valid."""
    def _isValidMode(self, vehicle):
        return vehicle.isOnlyForBattleRoyaleBattles
    def _isVehicleSuitableForMode(self, vehicle):
        # None means "no restriction"; otherwise flag UNSUITABLE_VEHICLE.
        return ValidationResult(False, UNIT_RESTRICTION.UNSUITABLE_VEHICLE) if not self._isValidMode(vehicle) else None
class _UnitSlotsValidator(UnitSlotsValidator):
    """Slots check that additionally requires the unit to have no free slots."""
    def _validate(self):
        stats = self._entity.getStats()
        # Any free slot blocks the action with UNIT_NOT_FULL; otherwise defer
        # to the base class slot validation.
        return ValidationResult(False, UNIT_RESTRICTION.UNIT_NOT_FULL) if stats.freeSlotsCount > 0 else super(_UnitSlotsValidator, self)._validate()
class _BattleRoyaleValidator(CommanderValidator):
def _validate(self):
brController = dependency.instance(IBattleRoyaleController)
status, _, _ = brController.getPrimeTimeStatus()
return ValidationResult(False, UNIT_RESTRICTION.CURFEW) if status != PrimeTimeStatus.AVAILABLE else super(_BattleRoyaleValidator, self)._validate()
class BattleRoyaleSquadActionsValidator(SquadActionsValidator):
def _createVehiclesValidator(self, entity):
validators = [_BattleRoyaleVehiclesValidator(entity), _UnitSlotsValidator(entity), _BattleRoyaleValidator(entity)]
if not IS_DEVELOPMENT:
validators.append(_UnitSlotsValidator(entity))
return ActionsValidatorComposite(entity, validators=validators)
def _createSlotsValidator(self, entity):
baseValidator = super(BattleRoyaleSquadActionsValidator, self)._createSlotsValidator(entity)
return ActionsValidatorComposite(entity, validators=[baseValidator, BattleRoyalSquadSlotsValidator(entity)])
class BattleRoyalSquadSlotsValidator(CommanderValidator):
def _validate(self):
stats = self._entity.getStats()
pInfo = self._entity.getPlayerInfo()
return ValidationResult(False, UNIT_RESTRICTION.COMMANDER_VEHICLE_NOT_SELECTED) if stats.occupiedSlotsCount > 1 and not pInfo.isReady else None
| [
"StranikS_Scan@mail.ru"
] | StranikS_Scan@mail.ru |
7a4f3c708d4bdec13d5f9cd966a09d72fcc35f87 | 9827086f2a2a21d5b21bed9ee3b0ca888dc1bb50 | /m01_basics/l_08_enums.py | 77804d1a7a61664461be0cb37d0e42babe1beaf9 | [
"MIT"
] | permissive | decolnz/python-52-weeks | d011918e39660b3b96ee5bbe1f9978c6d1c3679c | b604b47983df16a6e931ed241ce0439dd1764c56 | refs/heads/main | 2023-01-22T19:46:44.697916 | 2020-11-16T18:46:54 | 2020-11-16T18:46:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,624 | py | from random import choice
import string
from tabulate import tabulate
from operator import itemgetter
from enum import Enum
class Vendor(Enum):
cisco = "cisco"
juniper = "juniper"
arista = "arista"
devices = list() # CREATE EMPTY LIST FOR HOLDING DEVICES
# FOR LOOP TO CREATE LARGE NUMBER OF DEVICES
for index in range(20):
# CREATE DEVICE DICTIONARY
device = dict()
# RANDOM DEVICE NAME
device["name"] = (
choice(["r2", "r3", "r4", "r6", "r10"])
+ choice(["L", "U"])
+ choice(string.ascii_letters)
)
# RANDOM VENDOR FROM CHOICE OF CISCO, JUNIPER, ARISTA
device["vendor"] = choice([Vendor.cisco.value, Vendor.juniper.value, Vendor.arista.value])
if device["vendor"] == Vendor.cisco.value:
device["os"] = choice(["ios", "iosxe", "iosxr", "nexus"])
device["version"] = choice(["12.1(T).04", "14.07X", "8.12(S).010", "20.45"])
elif device["vendor"] == Vendor.juniper.value:
device["os"] = "junos"
device["version"] = choice(["J6.23.1", "8.43.12", "6.45", "6.03"])
elif device["vendor"] == Vendor.arista.value:
device["os"] = "eos"
device["version"] = choice(["2.45", "2.55", "2.92.145", "3.01"])
device["ip"] = "10.0.0." + str(index)
# NICELY FORMATTED PRINT OF THIS ONE DEVICE
print()
for key, value in device.items():
print(f"{key:>16s} : {value}")
# ADD THIS DEVICE TO THE LIST OF DEVICES
devices.append(device)
# USE 'TABULATE' TO PRINT TABLE OF DEVICES
print()
print(tabulate(sorted(devices, key=itemgetter("vendor", "os", "version")), headers="keys"))
| [
"chuck.a.black@gmail.com"
] | chuck.a.black@gmail.com |
1a13ba5ba28bd37616d3053fdcef0493368a562c | 1c594498900dd6f25e0a598b4c89b3e33cec5840 | /iqps/upload/google_connect.py | 22e9bcdd1ea0b105c7b3a968296f7235797b4dcf | [
"MIT"
] | permissive | thealphadollar/iqps | cef42ed8c86e4134e724a5f4967e96a83d672fcd | 187f6b134d82e2dce951b356cb0c7151994ca3ab | refs/heads/master | 2023-07-14T04:41:13.190595 | 2020-06-25T14:51:17 | 2020-06-25T14:51:17 | 277,360,692 | 0 | 0 | MIT | 2020-07-05T18:29:17 | 2020-07-05T18:29:16 | null | UTF-8 | Python | false | false | 3,088 | py | from __future__ import print_function
import pickle
import os.path
from googleapiclient.discovery import build
from googleapiclient.http import MediaFileUpload
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
# Source: Google Drive API Python Quickstart
SCOPES = ['https://www.googleapis.com/auth/drive.metadata.readonly',
'https://www.googleapis.com/auth/drive',
'https://www.googleapis.com/auth/drive.file',
'https://www.googleapis.com/auth/drive.appdata']
def connect():
creds = None
if os.path.exists(os.path.join('conf', 'token.pickle')):
with open(os.path.join('conf', 'token.pickle'), 'rb') as token:
creds = pickle.load(token)
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
os.path.join('conf', 'credentials.json'), SCOPES)
creds = flow.run_local_server(port=0)
# Save the credentials for the next run
with open(os.path.join('conf', 'token.pickle'), 'wb') as token:
pickle.dump(creds, token)
service = build('drive', 'v3', credentials=creds)
return service
def upload_file(local_path, remote_name, folderId=None, service=None):
if service is None:
service = connect()
file_metadata = {'name': remote_name}
if folderId:
file_metadata['parents'] = [folderId]
media = MediaFileUpload(local_path)
file = service.files().create(body=file_metadata,
media_body=media,
fields='webContentLink').execute()
return file.get('webContentLink', None)
def get_or_create_folder(remote_name, public=False, service=None):
if service is None:
service = connect()
folder_id = None
query = "name='{}' and mimeType='application/vnd.google-apps.folder'".format(remote_name)
try:
response = service.files().list(q=query,
spaces='drive',
fields='nextPageToken, files(id, name)',
pageToken=None).execute()
assert response.get('files', []) != []
for folder in response.get('files'):
folder_id = folder.get('id')
break
except Exception as e:
file_metadata = {
'name': remote_name,
'mimeType': 'application/vnd.google-apps.folder'
}
folder = service.files().create(body=file_metadata, fields='id').execute()
folder_id = folder.get('id')
if public:
user_permission = {
'type': 'anyone',
'role': 'reader'
}
folder = service.permissions().create(
fileId=folder_id,
body=user_permission,
fields='id').execute()
finally:
return folder_id
| [
"smishra99.iitkgp@gmail.com"
] | smishra99.iitkgp@gmail.com |
56ce6a890dccd4e3bb8a690789eead2575122f23 | e406b247a64ad3981eae234de9cb0a933b5df6ec | /07. Passing Networks/01. Basic Passing Networks/passing_network.py | bad793b8c09dedd49fe9da3a794924452095f8ac | [] | no_license | Slothfulwave612/Football-Analytics-With-Python | 973f333be3ccb42cd04d3506e16f475dc16f27de | 31de88dd7303fa5b781eb2bbb1d3a2c7db5bceb3 | refs/heads/master | 2023-08-15T17:31:46.548650 | 2023-07-24T11:39:17 | 2023-07-24T11:39:17 | 255,636,264 | 30 | 12 | null | null | null | null | UTF-8 | Python | false | false | 2,581 | py | # -*- coding: utf-8 -*-
"""
Created on Tue May 12 20:31:09 2020
@author: slothfulwave612
Modules Used(4):-
1. numpy -- numerical computing library.
2. matplotlib -- plotting library for the Python.
3. utility_function_io -- Python module for i/o operation.
4. utility_function_viz -- Python module for visualization.
"""
import numpy as np
import matplotlib.pyplot as plt
import utility_function_io as ufio
import utility_function_viz as ufvz
## making dataframe for competitions
comp_df = ufio.get_competitions()
## picking competition_id = 11 and season_id = 22 from comp df
## La Liga Competition, 2010-11 season
comp_id = 11
season_id = 22
## getting match dataframe from our required competiton and season
match_df = ufio.get_matches(comp_id, season_id)
## renaming the required columns
match_df_cols = list(match_df.columns) ## making list of the columns
match_df_cols = ufio.renaming_columns(match_df_cols) ## new list with renamed columns
match_df.columns = match_df_cols ## renaming the columns
## getting required match id
match_id = ufio.getting_match_id(match_df, 'Barcelona', 'Real Madrid')
## making event dataframe for the particular match
events, lineups = ufio.make_event_df(match_id)
## getting our home team players
home_lineup = lineups[0]
players = ufio.get_straters(home_lineup)
## getting event values for Barcelona
team_id = home_lineup['team']['id']
event_barca = [e for e in events if e['team']['id'] == team_id]
## getting the pass events for Barcelona
pass_barca = [e for e in event_barca if e['type']['name'] == 'Pass']
## generating passing matrix
pass_matrix = ufio.passing_matrix(pass_barca)
## generating average player location
avg_location = ufio.get_avg_player_pos(event_barca, players)
## generating volumes of passes exchanged between player
lines, weights = ufio.vol_passes_exchanged(pass_matrix, players, avg_location)
## creating a pitchmap
fig, ax = plt.subplots(figsize=(20, 12))
fig, ax = ufvz.createPitch(120, 80, 'yards', 'gray', fig, ax)
## defining lambda functions
fill_adj = lambda x: 0.8 / (1 + np.exp(-(x-20)*0.2))
weight_adj = lambda x: 2 / (1 + np.exp(-(x-10)*0.2))
## creating lines for passes
ax = ufvz.show_lines(ax, lines, weights, weight_adj, fill_adj)
## drawing each player's position
ax = ufvz.draw_points(ax, [xy for k, xy in avg_location.items()])
## assigning jersey numbers to each player position
ax = ufvz.draw_numbers(ax, avg_location, players)
## setting labels
ax = ufvz.label_players(ax)
## saving the figure
fig.savefig('Passing Network.jpg')
| [
"noreply@github.com"
] | Slothfulwave612.noreply@github.com |
28facf418ae9ab812b638f0ed6bd12ef0dce123d | 777b5c266360b29b6d4af916726abd5d364b74a1 | /src/utils/oauth/facebook.py | cade1f947ec4bd4075814e55b1e2dab7e32b98ce | [] | no_license | uryyyyyyy/django-graphql | 44d08afc3e44514270d1d5c183caa9d1c1cf3f88 | f3d6513d2325a8e675e47500cc71d8ef56c01537 | refs/heads/master | 2021-06-10T11:11:45.110271 | 2019-02-28T07:39:54 | 2019-02-28T07:39:54 | 172,325,424 | 0 | 0 | null | 2021-04-20T17:56:57 | 2019-02-24T10:44:31 | Python | UTF-8 | Python | false | false | 907 | py | import os
from typing import Optional
import requests
from requests import Response
OAUTH2_ID = os.environ['OAUTH2_FACEBOOK_ID']
OAUTH2_SECRET = os.environ['OAUTH2_FACEBOOK_SECRET']
OAUTH2_REDIRECT = os.environ['OAUTH2_FACEBOOK_REDIRECT']
def oauth(code: str)-> Optional[dict]:
url = f'https://graph.facebook.com/v3.2/oauth/access_token?client_id={OAUTH2_ID}' \
f'&redirect_uri={OAUTH2_REDIRECT}&client_secret={OAUTH2_SECRET}&code={code}'
response: Response = requests.get(url)
if response.status_code != 200:
print(response.json())
return None
return response.json()
def get_user_detail(token: str) -> Optional[dict]:
url = f'https://graph.facebook.com/v3.2/me?access_token={token}&fields=id,email'
response: Response = requests.get(url)
if response.status_code != 200:
print(response.json())
return None
return response.json()
| [
"koki@anymindgroup.com"
] | koki@anymindgroup.com |
fd3e0dcb98417ef93842c6bd574ee434d02e33c4 | a5fabc6d6341925b587fecb082dc70c0d1b95619 | /Stage4_Linux_dm_Mod.py | 3be18daac9a82eb2f31f2368fb98b5ea7a4446e5 | [] | no_license | illuminous/pythonScripts | fcfef9c0fb9bd3155edcf02b56bbec563ff08b2a | 1bb69a1bb1e10f6041274c027cc0ab06c7a9efed | refs/heads/master | 2022-10-22T09:35:48.088907 | 2022-09-28T22:35:32 | 2022-09-28T22:35:32 | 120,963,663 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,141 | py | """Stage4.py Creates the input files for fireharm and renames the ascii output files from Stage3.aml. Created by Jason M.\n
Herynk 2010"""
import os
import sys
import shutil
import tempfile
resrange = []
zones =[]
res = []
##############################################################
def chunkRange():
for chunk in range (0, 16):
if chunk < 10:
item1 = '0%s' %(chunk)
res.append(item1)
else:
res.append(chunk)
def createInFiles(zone, path0, path1, path2):
for chunk in res:
myfile = path0 + zone+'_'+ '%s' %(chunk)+'.in'
infile = open(myfile, 'w')
infile.write('FIREHARM Driver File: Filenames and general simulation input parameters\n')
infile.write(path2 +'infiles/%spoly%s.in\n' %(zone, chunk)) ##Name of input file that stores polygon values
infile.write(path1 + 'site.in\n') ##Name of input file that stores site parameters
infile.write(path1 + 'NONE\n') ##Name of input file that stores weather parms day of event (use NONE if temporal simulation)
infile.write(path1 + 'daymet/reference/quadreg.in\n') ##Name of input file that stores DAYMET Quad tiles
infile.write(path1 + 'daymet/maps/daymet_grid_pc.regimg\n') ##Name of input map that stores DAYMET mask location values
infile.write(path1 + 'daymet/maps/daymet_master_index_pc.regimg\n') ##Name of input map that stores DAYMET index values
infile.write(path1 + 'daymet/maps/daymet_us_dem_pc.regimg\n') ##Name of input map that stores DAYMET dem elevation values
infile.write(path1 + 'nfdrsfuel.in\n') ##Name of input file that stores NFDRS fuel parameters
infile.write(path1 + 'fbfmfuel40.in\n') ##Name of input file that stores Fire behavior fuel parameters
infile.write(path1 + 'flmfuel.in\n') ##Name of input file that stores Fuel Char Class fuel parameters
infile.write(path1 + 'tree_list.asc\n') ##Name of input file that stores the tree list
infile.write(path1 + 'fofemspp.in\n') ##Name of input file that stores FOFEM species information
infile.write(path2 +'outfiles/%s_dm_%s.out\n' %(zone, chunk)) ##Name of output file for FIREHARM results
infile.write(""" 1 Simulation option (1-simulate for entire wx record, 2-simulated for wx condition)
0 Verbose flag (0-no intermediate messages, 1-some benchmark messages, 2-all messages)
3 Lapse rate option: (1-use machine constants, 2-user-specified next, 3-daily dynamic computations)
-0.003 0.0004 Starting and ending year for DAYMET weather in FIREHARM analysis
1980 1997 Starting and ending year for DAYMET weather in FIREHARM analysis
5.0 200 Wind speed (mph) and direction (azmiuth) for simulaiton
Parameters for computing probabilities of a fire event assuming ignition
Stat Begday Endday MinDays Threshold Name of the parameter (STAT=0-dont compute,1-ave,2-sum), Begday=beginning day, Endday=Ending Day, Minday=Minimum number days for computation, Threshold=minimum value for computation
1 150 250 1 100.0 NFDRS KDBI
1 150 250 1 6.0 NFDRS Spread Component SC
1 150 250 1 400.0 NFDRS Energy Release Component ERC
1 150 250 1 5.0 NFDRS Burning index BI
1 150 250 1 0.01 NFDRS Ignition Component IC
1 150 250 1 10.0 CFFWIS FFMC Fine fuel moisture code
1 150 250 1 100.0 CFFWIS DMC Duff moisture code
1 150 250 1 10.0 CFFWIS DC Drought code
1 150 250 1 5.0 CFFWIS ISI Initial spread index
1 150 250 1 5.0 CFFWIS FWI Fire Weather index
1 150 250 1 10.0 Spread rate (km\hr)
1 150 250 1 350.0 Fireline intensity (kw\m)
1 150 250 1 2.0 Flame length (m)
1 150 250 1 1000.0 Crown fire intensity (kw\m)
1 150 250 1 50.0 Fuel consumption (%)
1 150 250 1 50.0 Tree mortality (%)
1 150 250 1 60.0 Soil heat at 2 cm
1 150 250 1 10.0 PM2.5 smoke emissions (t\ac)
1 150 250 1 5.0 Scorch height (m)
1 150 250 1 2.0 Burn Severity""")
infile.close()
def createBashFile(zone, path0, path2):
myfile = path0 + zone + '_' + 'fireharm' + '.bash'
print myfile
infile = open(myfile, 'w')
infile.write('#!/bin/bash' + '\n')
infile.write('datadir='+ path2 + '\n')
infile.write('cd ${datadir}\n')
directions = """
qsub -cwd -S /bin/bash -l hostname=compute-0-** ./%s_00.bat
qsub -cwd -S /bin/bash -l hostname=compute-0-** ./%s_01.bat
qsub -cwd -S /bin/bash -l hostname=compute-0-** ./%s_02.bat
qsub -cwd -S /bin/bash -l hostname=compute-0-** ./%s_03.bat
qsub -cwd -S /bin/bash -l hostname=compute-0-** ./%s_04.bat
qsub -cwd -S /bin/bash -l hostname=compute-0-** ./%s_05.bat
qsub -cwd -S /bin/bash -l hostname=compute-0-** ./%s_06.bat
qsub -cwd -S /bin/bash -l hostname=compute-0-** ./%s_07.bat
qsub -cwd -S /bin/bash -l hostname=compute-0-** ./%s_08.bat
qsub -cwd -S /bin/bash -l hostname=compute-0-** ./%s_09.bat
qsub -cwd -S /bin/bash -l hostname=compute-0-** ./%s_10.bat
qsub -cwd -S /bin/bash -l hostname=compute-0-** ./%s_11.bat
qsub -cwd -S /bin/bash -l hostname=compute-0-** ./%s_12.bat
qsub -cwd -S /bin/bash -l hostname=compute-0-** ./%s_13.bat
qsub -cwd -S /bin/bash -l hostname=compute-0-** ./%s_14.bat
qsub -cwd -S /bin/bash -l hostname=compute-0-** ./%s_15.bat
""" %(zone, zone, zone, zone, zone, zone, zone, zone, zone, zone, zone,
zone, zone, zone, zone, zone)
infile.write(directions)
infile.close()
def createBATS(zone, path0, path2, path3):
for chunk in res:
batfile = path0 + '%s_%s' %(zone, chunk) + '.bat'
print batfile
infile = open(batfile, 'w')
infile.write(path3 + 'FIREHARM_v6/bin/fireharm6.0T' + ' ')
infile.write(path2 + '%s_%s' %(zone,chunk) + '.in')
infile.close()
def copyASCII():
for chunk in res:
filename1 = prep +'/' + zone +'/'+'cleanFiles/'+zone + '_' + '%s' %(chunk)
print filename1
filename2 = root + zone + '/' + 'infiles' + '/'+zone + 'poly' + '%s' %(chunk)+'.in'
print filename2
shutil.copy(filename1, filename2)
def buildDirectories(zone_number_lower, zone_number_upper):
for zone in range(zone_number_lower, zone_number_upper):
resrange.append(zone)
if zone < 10: #fix the formating if the zone number is less than 10
path2 = 'z0%s' %(zone)
zones.append(path2)
else:
path1 = 'z%s' %(zone)
zones.append(path1)
##########Run Calls#####
def main():
buildDirectories(32,36)
for z in zones:
try:
path0 = 'N:/data/work/FH/d.daymet/%s/' %(z)
path1 = '/share/jherynk/data/work/FH/d.infiles/'#/mnt/pvfs2/jherynk/' + zone + '/infiles/'
path2 = '/share/jherynk/data/work/FH/d.daymet/%s/' %(z)
path3 = '/share/jherynk/data/work/FH/'
chunkRange()
createInFiles(z, path0, path1, path2)
createBashFile(z, path0, path2)
createBATS(z, path0, path2, path3)
except:
pass
if __name__ == "__main__":
main()
##copyASCII()
| [
"noreply@github.com"
] | illuminous.noreply@github.com |
be35ddd0d45e132bb15e23130777452ff8896226 | 3e24611b7315b5ad588b2128570f1341b9c968e8 | /pacbiolib/pacbio/pythonpkgs/pbreports/bin/ccs_report | 4c5af46e1f2b872922db35a77f270f162a0c83b2 | [
"BSD-2-Clause"
] | permissive | bioCKO/lpp_Script | dc327be88c7d12243e25557f7da68d963917aa90 | 0cb2eedb48d4afa25abc2ed7231eb1fdd9baecc2 | refs/heads/master | 2022-02-27T12:35:05.979231 | 2019-08-27T05:56:33 | 2019-08-27T05:56:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 213 | #! python
# -*- coding: utf-8 -*-
import re
import sys
from pbreports.report.ccs import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"409511038@qq.com"
] | 409511038@qq.com | |
a0fafc33252870bd04f6a5089915d8837494b042 | 553b309379b95ee0fa411e90cabd1cecada21daf | /tests/training/test_parameters.py | aad49b921270c42f334c1fcba797956bc3332387 | [
"BSD-2-Clause"
] | permissive | gitter-badger/delira | 523c2a4f0ab4866c9862086a136c65e2b8622d62 | e79072eb33ba76bb444d93b424a5318232d5fba3 | refs/heads/master | 2020-04-25T23:37:56.562572 | 2019-02-26T12:44:34 | 2019-02-26T12:44:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,327 | py | import pytest
from delira.training import Parameters
from delira.utils import LookupConfig
@pytest.mark.parametrize("fixed_model_params,variable_model_params,"
"fixed_training_params,variable_training_params,"
"valid_nested_key,valid_nested_value,"
"doubled_key,invalid_key",
[
(
{"a": 1, "b": [1, 2], "c": {"d": 3, "e": 56}},
{"f": 1, "g": {"h": {"i": {"a": 3}}}},
{"j": 1, "k": 2},
{},
"e",
56,
"a",
"q"
)
])
def test_parameters(fixed_model_params, variable_model_params,
fixed_training_params, variable_training_params,
valid_nested_key, valid_nested_value,
doubled_key, invalid_key):
def to_lookup_config(dictionary):
tmp = LookupConfig()
tmp.update(dictionary)
return tmp
fixed_model_params = to_lookup_config(fixed_model_params)
variable_model_params = to_lookup_config(variable_model_params)
fixed_training_params = to_lookup_config(fixed_training_params)
variable_training_params = to_lookup_config(variable_training_params)
params = Parameters(
fixed_params={
"model": fixed_model_params,
"training": fixed_training_params
},
variable_params={
"model": variable_model_params,
"training": variable_training_params
}
)
assert params.training_on_top == False
assert params.variability_on_top
assert params.fixed == to_lookup_config({
"model": fixed_model_params,
"training": fixed_training_params
})
assert params.variable == to_lookup_config({
"model": variable_model_params,
"training": variable_training_params
})
params = params.permute_training_on_top()
assert params.variability_on_top == False
assert params.training_on_top
print(params.model.difference_config(to_lookup_config({
"fixed": fixed_model_params,
"variable": variable_model_params
})))
assert params.model == to_lookup_config({
"fixed": fixed_model_params,
"variable": variable_model_params
})
assert params.training == to_lookup_config({
"fixed": fixed_training_params,
"variable": variable_training_params
})
params_copy = params.deepcopy()
params = params.permute_variability_on_top().permute_training_on_top()
assert params_copy == params
assert params.nested_get(valid_nested_key) == valid_nested_value
try:
params.nested_get(doubled_key)
assert False
except KeyError:
assert True
try:
params.nested_get(invalid_key)
assert False
except KeyError:
assert True
assert "default" == params.nested_get(invalid_key, "default")
assert "default" == params.nested_get(invalid_key, default="default")
if __name__ == '__main__':
test_parameters()
| [
"justus.schock@rwth-aachen.de"
] | justus.schock@rwth-aachen.de |
8304d52d475e946ac42da523f39ddf4b264e53cd | ff6248be9573caec94bea0fa2b1e4b6bf0aa682b | /StudentProblem/10.21.11.24/7/1569573204.py | 13cb49303c014c62aa005ac69bc3e71d01c49d55 | [] | no_license | LennartElbe/codeEvo | 0e41b1a7705204e934ef71a5a28c047366c10f71 | e89b329bc9edd37d5d9986f07ca8a63d50686882 | refs/heads/master | 2020-12-21T17:28:25.150352 | 2020-03-26T10:22:35 | 2020-03-26T10:22:35 | 236,498,032 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 708 | py | import functools
import typing
import string
import random
import pytest
## Lösung Teil 1.
def divisors(n: int) -> list:
"""Gibt alle Divisoren einer zahl zurück
Arg: n: int > 0
Return: list von allen teilern
leere Liste falls n <= 0
"""
if n <= 0:
return []
else:
returnlist = []
for i in range(1, n+1):
if n % i == 0:
returnlist.append(i)
return returnlist
######################################################################
## Lösung Teil 2. (Tests)
def test_divisors():
assert divisors(-2) == []
assert divisors(1) == [1]
######################################################################
| [
"lenni.elbe@gmail.com"
] | lenni.elbe@gmail.com |
6cc1749926f29e51919453e49fa53f1bc15a3b34 | 2b502aae9bc33bac6c4b28d1e702591f2cbed690 | /terrascript/cobbler/r.py | 7711613b1868ac135b7e9876ca62f7612f9aa84b | [
"Python-2.0",
"BSD-2-Clause"
] | permissive | LeeroyC710/python-terrascript | 4c8fbe032e9b7dd8844d962f888c28f87a26ff77 | b8f3c3549b149c124e3e48e0cea0396332ad1a1d | refs/heads/master | 2020-12-28T03:58:04.502969 | 2020-01-19T21:46:52 | 2020-01-19T21:46:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 380 | py | # terrascript/cobbler/r.py
import terrascript
class cobbler_distro(terrascript.Resource):
pass
class cobbler_kickstart_file(terrascript.Resource):
pass
class cobbler_profile(terrascript.Resource):
pass
class cobbler_repo(terrascript.Resource):
pass
class cobbler_snippet(terrascript.Resource):
pass
class cobbler_system(terrascript.Resource):
pass
| [
"markus@juenemann.net"
] | markus@juenemann.net |
3587d1ceb6ec5894fae9423eda952a68b75d5044 | b4477b89c0e43c85a8e91b152a29003a89d0211e | /game_stats.py | f43c631024928ce718c71d13d0abdb3c3023bad7 | [] | no_license | WillMartin32/Project-1-Alien-Invasion | 0b1935e27a7e63075167ac274e66beb9b3bb2681 | 5197a40c5eebf7cdab7d9958b7e05d65ee3663b6 | refs/heads/master | 2023-08-30T22:03:16.511054 | 2021-10-24T00:37:51 | 2021-10-24T00:37:51 | 410,123,105 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 556 | py | class GameStats:
"""Track statistics for Alien Invasion."""
def __init__(self, ai_game):
"""Initialize statistics."""
self.settings = ai_game.settings
self.reset_stats()
# Start Alien Invasion in an inactive state.
self.game_active = False
# High score should never be reset.
self.high_score = 0
def reset_stats(self):
"""Initialize statistics that can change during the game."""
self.ships_left = self.settings.ship_limit
self.score = 0
self.level = 1 | [
"you@example.com"
] | you@example.com |
429f458c11766686359fa797c199a0e1934200de | 71894f980d1209017837d7d02bc38ffb5dbcb22f | /multi/DIYSmartphone/apps/settings/settings.py | 967fb98c0538c57b96db03efc7e40f24ff09a816 | [
"MIT"
] | permissive | masomel/py-iot-apps | 0f2418f8d9327a068e5db2cdaac487c321476f97 | 6c22ff2f574a37ba40a02625d6ed68d7bc7058a9 | refs/heads/master | 2021-03-22T04:47:59.930338 | 2019-05-16T06:48:32 | 2019-05-16T06:48:32 | 112,631,988 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,073 | py | #settings App
#copyright (c) 2015 Tyler Spadgenske
#MIT License
###############################
#To be packaged with stock TYOS
###############################
from subprocess import Popen
import sys
import pygame
class Run():
def __init__(self, fona):
self.fona = fona
self.headset = False
self.get_audio_mode()
#Setup colors
self.RED = (255,0,0)
self.GREEN = (0,255,0)
self.WHITE = (255,255,255)
self.menu = pygame.image.load('/home/pi/tyos/apps/settings/menu.png')
self.menu_rect = self.menu.get_rect()
self.font = pygame.font.Font('/home/pi/tyos/fonts/arial.ttf', 32)
self.off = self.font.render('OFF', True, self.RED, self.WHITE)
# fona power Text
self.fona_power = self.font.render('ON', True, self.GREEN, self.WHITE)
self.fona_power_rect = self.off.get_rect()
self.fona_power_rect.centerx = 280
self.fona_power_rect.centery = 223
self.on = self.font.render('ON', True, self.GREEN, self.WHITE)
self.rect = self.off.get_rect()
self.rect.centerx = 280
self.rect.y = 158
#Stuff to follow app protocol
self.exit = False
self.blit_one_surface = {'surface':[], 'rects':[]}
self.blit = {'surfaces':[self.menu, self.fona_power, self.off], 'rects':[self.menu_rect, self.fona_power_rect, self.rect]}
#Set audio mode text
if self.headset:
self.blit['surfaces'][2] = self.on
else:
self.blit['surfaces'][2] = self.off
self.next_app = None
def get_audio_mode(self):
audio_config = open('/home/pi/tyos/configure/audio.conf', 'r')
file = audio_config.readlines()
for i in range(0, len(file)):#Parse file
if file[i][0] == '#':
pass
#Do Nothing. Line is comment
else:
file[i] = file[i].rstrip()
if 'mode' in file[i]: #Extract audio mode: 1=Built in, 0=External
mode = file[i]
mode = mode.split('=')
self.mode = int(mode[1])
if self.mode == 1:
self.headset = False
else:
self.headset = True
def run_app(self):
pass
def get_events(self, event):
if event.pos[1] > 95 and event.pos[1] < 152:
self.delete_sms()
if event.pos[1] > 153 and event.pos[1] < 201:
self.set_headset()
if event.pos[1] > 251 and event.pos[1] < 303:
self.exit = True
def on_first_run(self):
self.exit = False
def delete_sms(self):
self.fona.transmit('AT+CMGD=1,4')
self.exit = True
def set_headset(self):
if self.headset:
self.blit['surfaces'][2] = self.off
self.headset = False
self.fona.transmit('AT+CHFA=1')
else:
self.blit['surfaces'][2] = self.on
self.headset = True
self.fona.transmit('AT+CHFA=0')
| [
"msmelara@gmail.com"
] | msmelara@gmail.com |
a669aae80ce81db10f67b0a18af9b9f2aae71c7c | 7134e45563b2045837296cb5c4f1974a025e4f2b | /.history/dices_throw_20200311233054.py | 5e54f81580878e0bf8757aa4c5e2a992cce563d7 | [] | no_license | Nordenbox/Nordenbox_Python_Fundmental | dca175c471ac2c64453cc4bcf291dd0773be4add | 9c79fd5d0dada580072b523d5aa1d72f996e3a22 | refs/heads/master | 2022-01-21T06:37:15.084437 | 2022-01-06T13:55:30 | 2022-01-06T13:55:30 | 240,154,945 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,617 | py | """n 个骰子都扔出某一面的概率计算"""
import random
class ThrowDices:
def __init__(self, dices_numbers, face_want, throw_times):
self.dices_numbers = dices_numbers
self.face_want = face_want
self.throw_times = throw_times
def get_probabilities(self):
list_calculate = self.create_dices_throwing()
faces_final_get = list_calculate.values()
dices_log = list_calculate.keys()
print(faces_final_get, len(faces_final_get))
# print(dices_log)
n = 0
faces_final_get_cut =[faces_final_get[s:s+self.throw_times] for s in range(0, len(faces_final_get),self.throw_times)]
for i in faces_final_get_cut:
if self.face_want in i:
n += 1
print('{}次以后,{}个骰子掷出{}点面的次数为{},占比为{}%'.format(self.throw_times,
self.dices_numbers, face_want, n, 100*n/len(faces_final_get)))
def create_dices_throwing(self):
dices_throwing_final= []
for i in range(3):
dices_throwing = []
for s in range(11):
dices_throwing.append(random.randint(1, 6))
print(dices_throwing, '\n')
dices_throwing_final.append(dices_throwing)
print(dices_throwing_final)
return dices_throwing
dices_numbers = int(input('你要几个骰子: '))
throw_times = int(input('你要投掷几次: '))
face_want = int(input('你要哪个点数(1-6): '))
task1 = ThrowDices(dices_numbers, face_want, throw_times)
task1.get_probabilities()
| [
"nordenbox@gmail.com"
] | nordenbox@gmail.com |
f8a87828e500d77821f9c8ce31080c6413b22225 | b28d13b2e785398f1a8074e0034080539009c837 | /django-rest-doc/snippets/views.py | f3fd32d0a2886ad4eedfc9fcc007937511625611 | [] | no_license | sdugaro/django | c58f1c290a1cadf90d723083c1bceefbbac99073 | 1704f1796cb3f25cac260c6120becd70e9f1c33f | refs/heads/main | 2023-02-06T22:06:41.872202 | 2020-12-27T09:04:12 | 2020-12-27T09:04:12 | 311,162,303 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,226 | py | from rest_framework import generics
from rest_framework import permissions
from rest_framework import viewsets
from rest_framework import permissions
from rest_framework.decorators import action
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.reverse import reverse
from rest_framework.response import Response
from rest_framework.metadata import BaseMetadata
from rest_framework import renderers
from django.contrib.auth.models import User
from snippets.models import Snippet
from snippets.serializers import SnippetSerializer
from snippets.serializers import UserSerializer
from snippets.permissions import IsOwnerOrReadOnly
# Root View
class APIRoot(APIView):
"""
This is the landing page for the Snippet API.
It allows you to create and save snippets of code that
will have syntax highlighting applied by `pygments`
"""
def get(self, request, format=None):
return Response({
'users': reverse('user-list', request=request, format=format),
'snippets': reverse('snippet-list', request=request, format=format)
})
# Create, List, Detail, Delete Snippet Views
# Custom Highlight View for a particular Snippet's final HTML
class SnippetViewSet(viewsets.ModelViewSet):
"""
This viewset automatically provides the following actions
`list` `create` `retrieve` `update` and `destroy`
We additionally provide an extra `highlight` action
"""
queryset = Snippet.objects.all()
serializer_class = SnippetSerializer
permission_classes = [permissions.IsAuthenticatedOrReadOnly,
IsOwnerOrReadOnly]
@action(detail=True, renderer_classes=[renderers.StaticHTMLRenderer])
def highlight(self, request, *args, **kwargs):
snippet = self.get_object()
return Response(snippet.highlighted)
def perform_create(self, serializer):
serializer.save(owner=self.request.user)
class UserViewSet(viewsets.ReadOnlyModelViewSet):
"""
This viewset automatically provides `list` and `retrieve` actions
"""
queryset = User.objects.all()
serializer_class = UserSerializer
| [
"sdugaro@yahoo.com"
] | sdugaro@yahoo.com |
b766e18d258b7d090406b2c976ae0eb267fbddda | 3b4c4ed90545d848bc6fd536a36a8ccea065ed33 | /backend/home/admin/home_page.py | 19fff037cc67ec64caf463d0d920fbf3da7ea943 | [] | no_license | garpixcms/example-corp-templates | 43e2478687e3aa6d4b30985f6cb4e2566254bcdd | 56e0897dfc3d26853b2117cad22074e32e565af9 | refs/heads/master | 2023-07-19T00:17:26.397663 | 2021-09-18T19:10:40 | 2021-09-18T19:10:40 | 407,945,859 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 263 | py | from django.contrib import admin
from garpix_page.admin import BasePageAdmin
from ..models.home_page import HomePage
from .carousel_item import CarouselItemInline
@admin.register(HomePage)
class HomePageAdmin(BasePageAdmin):
    """Django-admin page for :class:`HomePage`; carousel items are edited inline."""
    inlines = (CarouselItemInline,)
| [
"crusat@yandex.ru"
] | crusat@yandex.ru |
50250c4996d256760a59ad157fd528f12325bcd7 | 19d47d47c9614dddcf2f8d744d883a90ade0ce82 | /pynsxt/swagger_client/models/lb_snat_translation.py | 55afbc1decee3f8c6fa2737cf58f6a9315d8a193 | [] | no_license | darshanhuang1/pynsxt-1 | 9ed7c0da9b3a64e837a26cbbd8b228e811cee823 | fb1091dff1af7f8b8f01aec715682dea60765eb8 | refs/heads/master | 2020-05-25T14:51:09.932853 | 2018-05-16T12:43:48 | 2018-05-16T12:43:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,382 | py | # coding: utf-8
"""
NSX API
VMware NSX REST API # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class LbSnatTranslation(object):
    """Model for load-balancer SNAT translation settings.

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually (except for the documented bug fix in
    ``get_real_child_model`` below).

    Attributes:
        swagger_types (dict): The key is attribute name
            and the value is attribute type.
        attribute_map (dict): The key is attribute name
            and the value is json key in definition.
    """
    swagger_types = {
        'type': 'str',
        'port_overload': 'int'
    }

    attribute_map = {
        'type': 'type',
        'port_overload': 'port_overload'
    }

    # Maps a discriminator value to the concrete child-model class name.
    discriminator_value_class_map = {
        'LbSnatIpPool': 'LbSnatIpPool',
        'LbSnatAutoMap': 'LbSnatAutoMap'
    }

    def __init__(self, type=None, port_overload=1):  # noqa: E501
        """LbSnatTranslation - a model defined in Swagger.

        :param type: discriminator; one of ``LbSnatAutoMap`` / ``LbSnatIpPool``
            (required — the setter raises ``ValueError`` for ``None``).
        :param port_overload: SNAT port-overload factor in [1, 32].
        """
        self._type = None
        self._port_overload = None
        self.discriminator = 'type'
        self.type = type  # runs the validating setter below
        if port_overload is not None:
            self.port_overload = port_overload

    @property
    def type(self):
        """Get the SNAT mode of this LbSnatTranslation (the discriminator value).

        LbSnatAutoMap uses the load balancer interface IP and an ephemeral port
        as the source of the server-side connection; LbSnatIpPool lets the user
        specify the IP addresses/subnets used for SNAT.

        :rtype: str
        """
        return self._type

    @type.setter
    def type(self, type):
        """Set the SNAT mode of this LbSnatTranslation.

        :raises ValueError: if ``type`` is ``None`` or not an allowed value.
        """
        if type is None:
            raise ValueError("Invalid value for `type`, must not be `None`")  # noqa: E501
        allowed_values = ["LbSnatAutoMap", "LbSnatIpPool"]  # noqa: E501
        if type not in allowed_values:
            raise ValueError(
                "Invalid value for `type` ({0}), must be one of {1}"  # noqa: E501
                .format(type, allowed_values)
            )
        self._type = type

    @property
    def port_overload(self):
        """Get the port-overload factor of this LbSnatTranslation.

        Port overloading allows the same SNAT IP and port to be reused for
        multiple backend connections as long as the post-SNAT 5-tuple is
        unique. The valid numbers are 1, 2, 4, 8, 16, 32.

        :rtype: int
        """
        return self._port_overload

    @port_overload.setter
    def port_overload(self, port_overload):
        """Set the port-overload factor of this LbSnatTranslation.

        :raises ValueError: if the value lies outside [1, 32].
        """
        if port_overload is not None and port_overload > 32:  # noqa: E501
            raise ValueError("Invalid value for `port_overload`, must be a value less than or equal to `32`")  # noqa: E501
        if port_overload is not None and port_overload < 1:  # noqa: E501
            raise ValueError("Invalid value for `port_overload`, must be a value greater than or equal to `1`")  # noqa: E501
        self._port_overload = port_overload

    def get_real_child_model(self, data):
        """Return the real child class name selected by the discriminator field.

        Bug fix: the generated code lowercased the discriminator value but
        looked it up in a CamelCase-keyed map, so the lookup always missed
        and returned ``None``. Both sides are now normalised to lower case;
        unknown values still return ``None``.
        """
        discriminator_value = data[self.discriminator].lower()
        lowered_map = {
            key.lower(): value
            for key, value in self.discriminator_value_class_map.items()
        }
        return lowered_map.get(discriminator_value)

    def to_dict(self):
        """Return the model properties as a dict."""
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Return true if both objects are equal."""
        if not isinstance(other, LbSnatTranslation):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Return true if both objects are not equal."""
        return not self == other
"tcraft@pivotal.io"
] | tcraft@pivotal.io |
d1003e312f68da8ca9e65ebc3e28108892f9de3b | 667cec9312ac63e329b0249bf57b2c4f4246b896 | /django_lair/urls.py | d9ce71a6079b9c6802c368637d329991f9f3f62c | [
"MIT"
] | permissive | narfman0/django_lair | 3cb09e3c4156476a3a181a595c0734b6d42440f7 | f731f4048463d6b0e2cf37e67ac1a3fa8062a5b1 | refs/heads/master | 2023-01-11T18:43:53.108839 | 2016-12-05T05:25:51 | 2016-12-05T05:25:51 | 75,147,732 | 1 | 0 | MIT | 2022-12-26T20:26:31 | 2016-11-30T03:32:06 | Python | UTF-8 | Python | false | false | 655 | py | # -*- coding: utf-8 -*-
from django.conf.urls import url
from django.views.decorators.csrf import csrf_exempt
from . import views
urlpatterns = [
    # GET /datum/ — list all Datum objects.
    url(
        regex="^datum/$",
        view=views.DatumListView.as_view(),
        name='datum_list',
    ),
    # POST /datum/create/ — create a Datum. csrf_exempt lets non-browser
    # clients post without a CSRF token.
    # NOTE(review): confirm this endpoint is intentionally open to
    # cross-site POSTs before shipping.
    url(
        regex="^datum/create/$",
        view=csrf_exempt(views.DatumCreateView.as_view()),
        name='datum_create',
    ),
    # GET /datum/<pk>/ — show a single Datum.
    url(
        regex="^datum/(?P<pk>\d+)/$",
        view=views.DatumDetailView.as_view(),
        name='datum_detail',
    ),
    # GET /user/<pk>/ — show a single user.
    url(
        regex="^user/(?P<pk>\d+)/$",
        view=views.UserDetailView.as_view(),
        name='user_detail',
    ),
]
| [
"narfman0@gmail.com"
] | narfman0@gmail.com |
a40f971d91c9c8f7a33407864a4035996e808564 | 3f80628d7fc96db14bd3e46f67a264d172d78bed | /project_name/settings_prod.py | 5aeda66c4d9a89a313cba8509ef692f6d2220b74 | [] | no_license | jeremyjbowers/django-project-template | a33de39fe83ee909d3ea907407d62a43b0dd6132 | 3631b6b8760b4fe629f0069784d3f84ae2c54d6a | refs/heads/master | 2021-01-15T10:06:17.070154 | 2013-12-10T02:01:28 | 2013-12-10T02:01:28 | 15,066,215 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 738 | py | DEBUG = False
DEVELOPMENT, PRODUCTION = False, True
DEBUG_TOOLBAR = False
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': '',
'USER': '',
'PASSWORD': '',
'HOST': 'localhost',
'PORT': '5433',
}
}
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': '127.0.0.1:11211',
'TIMEOUT': 60 * 30,
'OPTIONS': {
'MAX_ENTRIES': 1500
}
}
}
TEMPLATE_LOADERS = (
('django.template.loaders.cached.Loader', (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)),
)
STATIC_URL = ''
| [
"ben.welsh@gmail.com"
] | ben.welsh@gmail.com |
c7c17f1a1331a54d890e1aa3c688e33f09f0395f | 3775102a3f59bc8aac9b8121ba2aef87409724ee | /Medium/email_validation.py | a2d415d1ce133906ac158b4b47155fd08aa7e3b1 | [] | no_license | csikosdiana/CodeEval | a446ec6673e9f97439662bfccbd7454e5740d509 | 15cdd9ca454939e93c77d5ed5076595ecc7e4301 | refs/heads/master | 2016-08-11T14:49:27.565799 | 2016-03-22T17:48:20 | 2016-03-22T17:48:20 | 46,176,059 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,060 | py | data = ['foo@bar.com', 'this is not an email id', 'admin#codeeval.com', 'good123@bad.com', 'A@b@c@example.com', 'b@d.net', 'a"b(c)d,e:f;g<h>i[j\k]l@example.com',
'"very.unusual.@.unusual.com"@example.com', 'Abc.example.com']
import string
#print string.ascii_lowercase
#print string.ascii_uppercase
#import sys
#test_cases = open(sys.argv[1], 'r')
#data = test_cases.readlines()
for test in data:
address = test.rstrip()
#print address
if ' ' in address:
print 'false'
elif not address.endswith('.com') and not address.endswith('.net'):
print 'false'
elif '@' not in address:
print 'false'
else:
idx = address.rfind('@')
A = address[:idx]
if (A.startswith('"') and A.endswith('"')):
print 'true'
else:
spec_char = [".", "_", "-", "+", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
R = ''
for l in A:
if ((not l in string.ascii_lowercase) and (l not in string.ascii_uppercase) and (l not in spec_char)):
R = 'false'
break
if R == 'false':
print 'false'
else:
print 'true'
#test_cases.close() | [
"csikosdiana@gmail.com"
] | csikosdiana@gmail.com |
6b05463668a33fe1f943e09e558af61cf216efe7 | 52f4426d2776871cc7f119de258249f674064f78 | /baekjoon/bfs/3197.py | e95e691708be4642280c20ba66324c1f967e3be8 | [] | no_license | namhyun-gu/algorithm | 8ad98d336366351e715465643dcdd9f04eeb0ad2 | d99c44f9825576c16aaca731888e0c32f2ae6e96 | refs/heads/master | 2023-06-06T02:28:16.514422 | 2021-07-02T10:34:03 | 2021-07-02T10:34:03 | 288,646,740 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,983 | py | # region Input redirection
import io
import sys
example = """
8 17
...XXXXXX..XX.XXX
....XXXXXXXXX.XXX
...XXXXXXXXXXXX..
..XXXXX.LXXXXXX..
.XXXXXX..XXXXXX..
XXXXXXX...XXXX...
..XXXXX...XXX....
....XXXXX.XXXL...
"""
sys.stdin = io.StringIO(example.strip())
# endregion
#
# ⛔ DO NOT COPY ABOVE CONTENTS
#
import sys
from collections import deque
dirs = [(1, 0), (-1, 0), (0, 1), (0, -1)]  # 4-neighbour offsets: down, up, right, left
def melt_and_meet(lake: list, water: deque, swan: deque) -> int:
    """Simulate the lake (BOJ 3197 "Swans"): each day the ice bordering water
    melts, then one swan flood-fills through open water looking for the other.

    Returns the first day on which the two swans can meet.
    Relies on the module-level globals ``R``, ``C`` (grid size) and ``dirs``.
    """
    # One swan is the fixed target; the other expands its reachable region.
    swan_b = swan.pop()
    day = 1
    while swan:
        temp = deque()
        # Melt: every ice cell ('X') adjacent to the current water frontier
        # becomes water and forms the next day's frontier.
        while water:
            r, c = water.popleft()
            for dr, dc in dirs:
                nr, nc = r + dr, c + dc
                # NOTE(review): `nr in range(R)` is an O(1) membership test on a
                # range object, but `0 <= nr < R` would be the conventional form.
                if nr in range(R) and nc in range(C):
                    if lake[nr][nc] == "X":
                        lake[nr][nc] = "."
                        temp.append((nr, nc))
        water = temp
        # Meet: expand the moving swan's frontier through open water ('.') —
        # cells are stamped with the current day number to avoid revisits.
        temp = deque()
        while swan:
            swan_a = swan.popleft()
            r, c = swan_a
            if swan_a == swan_b:
                return day
            if lake[r][c] == "L":
                lake[r][c] = day
            for dr, dc in dirs:
                nr, nc = r + dr, c + dc
                if nr in range(R) and nc in range(C):
                    if lake[nr][nc] == "." or lake[nr][nc] == "L":
                        lake[nr][nc] = day
                        swan.append((nr, nc))
                        temp.append((nr, nc))
        swan = temp
        day += 1
    return day
if __name__ == "__main__":
    # Fast line reader (shadows the builtin `input` on purpose).
    input = sys.stdin.readline
    R, C = map(int, input().split())
    lake = []
    water = deque()
    swan = deque()
    for r in range(R):
        # Unpack the row string into a list of single characters.
        lake.append([*input().rstrip()])
        for c in range(C):
            # Swan cells ('L') are also water; both seed the melt frontier.
            if lake[r][c] == "L":
                swan.append((r, c))
                water.append((r, c))
            elif lake[r][c] == ".":
                water.append((r, c))
    print(melt_and_meet(lake, water, swan))
"mnhan0403@gmail.com"
] | mnhan0403@gmail.com |
08c411d4c50b99bfb273e6f1fc5b5aae83b99172 | d0ad071c259151bf8cce28d5a36c65ee53960b8d | /Session8E.py | 57b68ed71ed0801054f8bfdf3e64ea8cf7073cd4 | [] | no_license | ishantk/GW2020P1 | 705f44ddd634a408690db6a3ecfccb0a6801da8e | 682c57e930b38d6f597861e23d0ba5a3b409715a | refs/heads/master | 2022-11-23T18:13:14.453946 | 2020-07-31T03:13:40 | 2020-07-31T03:13:40 | 272,337,327 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 605 | py | # Conversions
numbers1 = (10, 20, 30, 40, 50, 20, 10)
print(numbers1, type(numbers1), id(numbers1))
print()
# Converting Tuple into list: Creating a new list out of tuple at new memory location
numbers2 = list(numbers1)
print(numbers2, type(numbers2), id(numbers2))
print()
numbers3 = set(numbers2)
print(numbers3, type(numbers3), id(numbers3))
# Below is Error: as dictionary works on key value pair
# numbers4 = dict(numbers3)
# print(numbers4, type(numbers4), id(numbers4))
# Explore, where dict() function will be used and how ?
print()
print(numbers1)
print(numbers1[::-1]) # Reverse the List
| [
"er.ishant@gmail.com"
] | er.ishant@gmail.com |
e4850e41d1bbf8e67a5984a07863c2a1f41ebb3a | df789505c99974c0ba45adc57e52fc7865ff2a28 | /面向对象/类的公有属性&私有属性.py | 4eb77736d2ed78f62971df23ee866940e46a806d | [] | no_license | zhiwenwei/python | 6fc231e47a9fbb555efa287ac121546e07b70f06 | 76d267e68f762ee9d7706e1800f160929544a0a3 | refs/heads/master | 2021-01-20T04:21:44.825752 | 2018-12-19T06:20:10 | 2018-12-19T06:20:10 | 89,676,097 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 643 | py | #-*- coding:utf-8 -*-
#Author:Kevin
'''
Attributes defined directly in the class body are called public (class) attributes.
'''
class Dog(object):
    kind = "哈士奇" # public class attribute, shared by all instances
    def __init__(self,name):
        self.N = name
        # Double-underscore prefix triggers name mangling (stored as _Dog__heart),
        # making the attribute "private" to the class.
        self.__heart = 'normal'
    def sayhi(self):
        #self.__heart = 'Die'
        print('i am a dog,my name is ',self.N,self.__heart)
    # def shiyan(self): # accessor that would expose the private attribute to callers
    #     return self.__heart
d = Dog('xiaohei')
print(d.kind)
d.kind = "秋田" # rebinds the class attribute on the class-wide name via the class? No —
               # this creates an instance attribute that shadows Dog.kind for `d` only.
print(d.kind)
d.sayhi()
print(d._Dog__heart) # force access to the private attribute via its mangled name
# print(d.shiyan())
'''
类的私有属性:
'''
| [
"ddzhiwenwei@163.com"
] | ddzhiwenwei@163.com |
dc4cfaf631cc0c2788c085ed481f39c32c2e3ff1 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/28/usersdata/133/8585/submittedfiles/serie1.py | 5633ea4ecf4bf2e4534779b5e71da2bd577544a1 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 262 | py | # -*- coding: utf-8 -*-
from __future__ import division
import math
n=input('Quantidade de termos:')
while ( n!=a):
n=input('Quantidade de termos:')
S=0
for i in range (1, n+1, 1):
S = S + (math.pow(-1, i+1))/ i
print(' %.5f' %S) | [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
82484e521db18fd2670726ff82cde13c8262b5bb | 954663b85752f2578c85bc146c123078addd42a5 | /oscar/apps/basket/migrations/0002_auto_20140729_1113.py | 06c793abab5cdea0b6269a7d31ca359248eee5ad | [
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] | permissive | marcoaacoliveira/labweb | d7ca4542315480fb801053d6052fe4d800180916 | 99be44ce4f09a753ee5d6202e19919e2b1eb7192 | refs/heads/master | 2021-05-28T00:02:06.672881 | 2014-08-05T21:03:03 | 2014-08-05T21:03:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,813 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated Django migration: adds basket vouchers and creates the
    Line / LineAttribute tables. Keep in sync with the models; do not hand-edit
    beyond comments."""
    dependencies = [
        ('catalogue', '0001_initial'),
        ('voucher', '0001_initial'),
        ('basket', '0001_initial'),
        ('partner', '0001_initial'),
    ]
    operations = [
        # M2M from Basket to the vouchers applied to it.
        migrations.AddField(
            model_name='basket',
            name='vouchers',
            field=models.ManyToManyField(to='voucher.Voucher', null=True, verbose_name='Vouchers', blank=True),
            preserve_default=True,
        ),
        # One Line per (basket, product, stockrecord) with a frozen price snapshot.
        migrations.CreateModel(
            name='Line',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('line_reference', models.SlugField(max_length=128, verbose_name='Line Reference')),
                ('quantity', models.PositiveIntegerField(default=1, verbose_name='Quantity')),
                ('price_currency', models.CharField(default=b'GBP', max_length=12, verbose_name='Currency')),
                ('price_excl_tax', models.DecimalField(null=True, verbose_name='Price excl. Tax', max_digits=12, decimal_places=2)),
                ('price_incl_tax', models.DecimalField(null=True, verbose_name='Price incl. Tax', max_digits=12, decimal_places=2)),
                ('date_created', models.DateTimeField(auto_now_add=True, verbose_name='Date Created')),
                ('basket', models.ForeignKey(verbose_name='Basket', to='basket.Basket')),
                ('product', models.ForeignKey(verbose_name='Product', to='catalogue.Product')),
                ('stockrecord', models.ForeignKey(to='partner.StockRecord')),
            ],
            options={
                'abstract': False,
                'verbose_name': 'Basket line',
                'verbose_name_plural': 'Basket lines',
            },
            bases=(models.Model,),
        ),
        # A basket may hold a given line reference only once.
        # (b'...' byte-string names are an artifact of the Python 2 generator.)
        migrations.AlterUniqueTogether(
            name='line',
            unique_together=set([(b'basket', b'line_reference')]),
        ),
        # Free-form option values attached to a line (e.g. product options).
        migrations.CreateModel(
            name='LineAttribute',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('value', models.CharField(max_length=255, verbose_name='Value')),
                ('line', models.ForeignKey(verbose_name='Line', to='basket.Line')),
                ('option', models.ForeignKey(verbose_name='Option', to='catalogue.Option')),
            ],
            options={
                'abstract': False,
                'verbose_name': 'Line attribute',
                'verbose_name_plural': 'Line attributes',
            },
            bases=(models.Model,),
        ),
    ]
| [
"m@maikhoepfel.de"
] | m@maikhoepfel.de |
47d11fd5c17b0c2cfe8a940cd224d8cc10045b97 | 387be4647a519caa396c4c16c78a6f69cc0999a4 | /tests/plugins/test_tags.py | 90d45cea34c8adcb6030995031d1c33a55cd23b4 | [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] | permissive | fedj/biggraphite | dba6aef439ea375aafaac7097734a9659aed5388 | 723cd0da70f4bd03c144b3c4ea4a78ef131810df | refs/heads/master | 2020-03-22T07:45:13.135235 | 2018-07-25T08:01:31 | 2018-07-25T08:01:31 | 139,722,687 | 0 | 0 | Apache-2.0 | 2018-07-04T13:00:35 | 2018-07-04T13:00:35 | null | UTF-8 | Python | false | false | 1,574 | py | #!/usr/bin/env python
# Copyright 2017 Criteo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific lanbg_guage governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import print_function
import unittest
from biggraphite import test_utils as bg_test_utils
# This needs to run before we import the plugin.
bg_test_utils.prepare_graphite()
try:
from biggraphite.plugins import tags as bg_tags # noqa
HAVE_TAGS = True
except ImportError:
HAVE_TAGS = False
@unittest.skipUnless(HAVE_TAGS, "This version of Graphite doesn't support tags")
class TestTags(bg_test_utils.TestCaseWithFakeAccessor):
    """Tests for the BigGraphite TagDB plugin, run against the fake accessor."""
    def setUp(self):
        super(TestTags, self).setUp()
        self.accessor.connect()
        # Imported here because Django settings are only configured once
        # bg_test_utils.prepare_graphite() has run at module import time.
        from django.conf import settings as django_settings
        self.tagdb = bg_tags.BigGraphiteTagDB(
            settings=django_settings,
            accessor=self.accessor,
            metadata_cache=self.metadata_cache,
        )
    def testBasic(self):
        # FIXME: add more tests when things are implemented
        pass
# Allow running this test module directly (python test_tags.py).
if __name__ == "__main__":
    unittest.main()
| [
"c.chary@criteo.com"
] | c.chary@criteo.com |
f3a4b28c98c1b1f793c86a89883d877934da3bc3 | 103ef307c8485355aef679c2b0e1010c10b7320b | /solutions/0417-Pacific-Atlantic-Water-Flow/0417.py | 45956d408c7a6162cca55e7f895367872c763720 | [
"MIT"
] | permissive | leetcode-notebook/wonz | 649d63551851d8a75470852945814cca678d4be1 | 9ffd2ce9b5f3a544ee958f5a0673215afd176c2b | refs/heads/master | 2021-05-19T00:03:10.261045 | 2020-04-12T14:41:47 | 2020-04-12T14:41:47 | 251,487,131 | 0 | 0 | MIT | 2020-04-12T14:41:48 | 2020-03-31T03:06:16 | null | UTF-8 | Python | false | false | 1,424 | py | from typing import List
class Solution:
    """LeetCode 417 — Pacific Atlantic Water Flow.

    Water flows from a cell to any 4-neighbour of equal or lower height.
    We walk *uphill* from each ocean border and report every cell reached
    from both oceans.
    """
    def pacificAtlantic(self, matrix: List[List[int]]) -> List[List[int]]:
        """Return [r, c] pairs (row-major order) reachable by both oceans."""
        if not matrix or not matrix[0]:
            return []
        rows, cols = len(matrix), len(matrix[0])
        pacific = [[False] * cols for _ in range(rows)]   # top + left borders
        atlantic = [[False] * cols for _ in range(rows)]  # bottom + right borders
        # Seed a flood fill from every border cell of each ocean.
        for r in range(rows):
            self.dfs(pacific, matrix, rows, cols, r, 0)
            self.dfs(atlantic, matrix, rows, cols, r, cols - 1)
        for c in range(cols):
            self.dfs(pacific, matrix, rows, cols, 0, c)
            self.dfs(atlantic, matrix, rows, cols, rows - 1, c)
        return [[r, c]
                for r in range(rows)
                for c in range(cols)
                if pacific[r][c] and atlantic[r][c]]
    def dfs(self, visited, matrix, m, n, i, j):
        """Mark every cell reachable from (i, j) along non-descending paths.

        Iterative (explicit stack) traversal, so deep grids cannot blow the
        recursion limit; the visited set produced is identical to a recursive
        depth-first search.
        """
        visited[i][j] = True
        pending = [(i, j)]
        offsets = ((-1, 0), (0, 1), (1, 0), (0, -1))
        while pending:
            r, c = pending.pop()
            for dr, dc in offsets:
                x, y = r + dr, c + dc
                if 0 <= x < m and 0 <= y < n and not visited[x][y] \
                        and matrix[x][y] >= matrix[r][c]:
                    visited[x][y] = True
                    pending.append((x, y))
if __name__ == "__main__":
    # LeetCode 417 sample grid; expected [[0,4],[1,3],[1,4],[2,2],[3,0],[3,1],[4,0]].
    matrix = [[1,2,2,3,5],[3,2,3,4,4],[2,4,5,3,1],[6,7,1,4,5],[5,1,1,2,4]]
    print(Solution().pacificAtlantic(matrix))
"wangzhou5130@gmail.com"
] | wangzhou5130@gmail.com |
ebd96819dbbb3ad1ee0c8c177ad14fc9ee15c4e5 | 53b47cbfea75afd22f37a2a9c8af4573165a0515 | /Week1/Week1Day4/sample-code/csv/csv_read.py | be9b816378084cfbb698c95cd5293db2eeb346f2 | [] | no_license | bmolina-nyc/ByteAcademyWork | d757ed04033e23a4ec7aa8d09283f65b4cebcb17 | b7a6790c2905afc9532b348149b730b7ea71de44 | refs/heads/master | 2022-12-06T19:17:02.164451 | 2019-03-11T15:31:10 | 2019-03-11T15:31:10 | 169,432,884 | 0 | 1 | null | 2022-11-18T15:08:12 | 2019-02-06T16:00:41 | Python | UTF-8 | Python | false | false | 399 | py | import csv
"""
with open('airtravel.csv', 'r') as file_object:
reader = csv.reader(file_object)
for row in reader:
print(row)
"""
with open('hurricanes.csv', 'r') as file_object:
reader = csv.DictReader(file_object)
for row in reader:
print(row['Month'])
for key, value in row.items():
print('{}={}'.format(key, value), end=', ')
print()
| [
"bruce.molina.81@gmail.com"
] | bruce.molina.81@gmail.com |
c86d45e58fed16be3cabbb5a71d138a77e3db976 | 69329efb6a668e3e11fe3d1104e076c635be63f1 | /sage/model_g_symbolic_integrator.py | fc15921a4963b53cd7868da99fb707356e47a600 | [
"MIT"
] | permissive | frostburn/tf2-model-g | 8354853a760fa60fd9cbebdf75cd0ecb0f40aa42 | ea90d84754265b47af383b17e347e4f76bddf10c | refs/heads/master | 2021-07-10T14:50:23.233075 | 2020-05-02T17:32:08 | 2020-05-02T17:32:08 | 239,732,547 | 0 | 3 | MIT | 2021-03-25T23:41:20 | 2020-02-11T10:15:48 | Python | UTF-8 | Python | false | false | 1,602 | py | from sage.all import *
G, X, Y, A, B, k2, k_2, k5 = var("G, X, Y, A, B, k2, k_2, k5")
G0 = A*(k5 + k_2)/(k2*k5)
X0 = A/k5
Y0 = B*k5/A
g = G + G0
x = X + X0
y = Y + Y0
gx_flow = k_2*x - k2*g
xy_flow = x*x*y - B*x
v_G = A + gx_flow
v_X = -gx_flow + xy_flow - k5 * x
v_Y = -xy_flow
t = var("t")
coefs_a = var(",".join("a%d" % i for i in range(5)))
coefs_b = var(",".join("b%d" % i for i in range(5)))
coefs_c = var(",".join("c%d" % i for i in range(5)))
poly_g = sum(a * t**i for i, a in enumerate(coefs_a))
poly_x = sum(b * t**i for i, b in enumerate(coefs_b))
poly_y = sum(c * t**i for i, c in enumerate(coefs_c))
v_poly_g = v_G.substitute(G==poly_g, X==poly_x, Y==poly_y).coefficients(t, sparse=False)
v_poly_x = v_X.substitute(G==poly_g, X==poly_x, Y==poly_y).coefficients(t, sparse=False)
v_poly_y = v_Y.substitute(G==poly_g, X==poly_x, Y==poly_y).coefficients(t, sparse=False)
d_poly_g = diff(poly_g, t).coefficients(t, sparse=False)
d_poly_x = diff(poly_x, t).coefficients(t, sparse=False)
d_poly_y = diff(poly_y, t).coefficients(t, sparse=False)
print("def polynomial_order_4_centered(a0, b0, c0, t, A, B, k2, k_2, k5):")
for i in range(4):
sol = solve(
[d_poly_g[i] == v_poly_g[i], d_poly_x[i] == v_poly_x[i], d_poly_y[i] == v_poly_y[i]],
coefs_a[i+1], coefs_b[i+1], coefs_c[i+1])
for s in sol[0]:
print(" " + str(s.expand()).replace("==", "=").replace("^", "**"))
print(""" return (
a0 + t * (a1 + t * (a2 + t * (a3 + t*a4))),
b0 + t * (b1 + t * (b2 + t * (b3 + t*b4))),
c0 + t * (c1 + t * (c2 + t * (c3 + t*c4))),
)""") | [
"lumi.pakkanen@gmail.com"
] | lumi.pakkanen@gmail.com |
461492da713d11f0820cfa38a68a4c1272591fb3 | b5fabc6c6de064690f8d4ee423001cf9365a3d9f | /flash/pointcloud/segmentation/open3d_ml/app.py | e9fbfd4c975b99b7eff4ee7347ef9cebb853b49d | [
"Apache-2.0"
] | permissive | dmarx/lightning-flash | 021dfd76bde6e30309f14feb5853020b0babe90d | 4cda031c1f9c8d8754fd36b5720d2a5a7d866765 | refs/heads/master | 2023-09-06T06:24:29.856354 | 2021-11-24T23:38:14 | 2021-11-24T23:38:14 | 422,352,910 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,017 | py | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from flash.core.data.data_module import DataModule
from flash.core.data.io.input import DataKeys
from flash.core.utilities.imports import _POINTCLOUD_AVAILABLE
if _POINTCLOUD_AVAILABLE:
from open3d._ml3d.torch.dataloaders import TorchDataloader
from open3d._ml3d.vis.visualizer import LabelLUT
from open3d._ml3d.vis.visualizer import Visualizer as Open3dVisualizer
else:
Open3dVisualizer = object
class Visualizer(Open3dVisualizer):
    # Subclasses Open3D's visualizer so the dataset's own color_map drives the
    # label colors and bounding boxes are consolidated before rendering.
    def visualize_dataset(self, dataset, split, indices=None, width=1024, height=768):
        """Visualize a dataset.
        Example:
            Minimal example for visualizing a dataset::
                import open3d.ml.torch as ml3d # or open3d.ml.tf as ml3d
                dataset = ml3d.datasets.SemanticKITTI(dataset_path='/path/to/SemanticKITTI/')
                vis = ml3d.vis.Visualizer()
                vis.visualize_dataset(dataset, 'all', indices=range(100))
        Args:
            dataset: The dataset to use for visualization.
            split: The dataset split to be used, such as 'training'
            indices: An iterable with a subset of the data points to visualize, such as [0,2,3,4].
            width: The width of the visualization window.
            height: The height of the visualization window.
        """
        # Setup the labels: one LUT entry per class, colored per the dataset's
        # color_map. (`id` shadows the builtin — kept as-is in this doc pass.)
        lut = LabelLUT()
        color_map = dataset.color_map
        for id, val in dataset.label_to_names.items():
            lut.add_label(val, id, color=color_map[id])
        self.set_lut("labels", lut)
        self._consolidate_bounding_boxes = True
        self._init_dataset(dataset, split, indices)
        self._visualize("Open3D - " + dataset.name, width, height)
class App:
    """Point-cloud visualization helper bound to a Flash ``DataModule``."""
    def __init__(self, datamodule: DataModule):
        self.datamodule = datamodule
        self._enabled = True  # not flash._IS_TESTING
    def get_dataset(self, stage: str = "train"):
        """Return the raw dataset behind the *stage* dataloader, unwrapping
        Open3D's ``TorchDataloader`` wrapper when present."""
        dataloader = getattr(self.datamodule, f"{stage}_dataloader")()
        dataset = dataloader.dataset.dataset
        if isinstance(dataset, TorchDataloader):
            return dataset.dataset
        return dataset
    def show_train_dataset(self, indices=None):
        """Open an Open3D window showing the train split (no-op when disabled)."""
        if self._enabled:
            dataset = self.get_dataset("train")
            viz = Visualizer()
            viz.visualize_dataset(dataset, "all", indices=indices)
    def show_predictions(self, predictions):
        """Render point clouds with ground-truth labels and predicted classes.

        ``predictions`` is an iterable of dicts keyed by ``DataKeys``;
        predicted class ids are shifted by +1 to line up with the label LUT.
        """
        if self._enabled:
            dataset = self.get_dataset("train")
            color_map = dataset.color_map
            predictions_visualizations = []
            for pred in predictions:
                predictions_visualizations.append(
                    {
                        "points": pred[DataKeys.INPUT],
                        "labels": pred[DataKeys.TARGET],
                        "predictions": torch.argmax(pred[DataKeys.PREDS], axis=-1) + 1,
                        "name": pred[DataKeys.METADATA]["name"],
                    }
                )
            viz = Visualizer()
            lut = LabelLUT()
            # NOTE(review): color_map is re-assigned here; the assignment above
            # the loop is unused.
            color_map = dataset.color_map
            for id, val in dataset.label_to_names.items():
                lut.add_label(val, id, color=color_map[id])
            # Use the same LUT for both the label and prediction color channels.
            viz.set_lut("labels", lut)
            viz.set_lut("predictions", lut)
            viz.visualize(predictions_visualizations)
viz.visualize(predictions_visualizations)
def launch_app(datamodule: DataModule) -> "App":
    """Return an :class:`App` point-cloud visualizer wrapping *datamodule*."""
    return App(datamodule)
| [
"noreply@github.com"
] | dmarx.noreply@github.com |
bbc6ca99f40557885574f31d97132d1f809b6cad | 2bc83e48bfeab7a164bf5406f7c19c08a3b86782 | /src/step_2/search_with_tag_name_and_other_attribute.py | b64fc57c68ec5c0a0c85b35fdbd4b3b67114b23f | [
"MIT"
] | permissive | Rezwanul-Haque/web_scraping_with_bs4_selenium | a5683e6acab2ce9cd73dbf39f66a8a767c3b4873 | c445200aa2f63a73f1e413151d5de9c671b74443 | refs/heads/master | 2020-04-26T08:11:32.053055 | 2019-03-28T05:03:21 | 2019-03-28T05:03:21 | 173,415,351 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 949 | py | from bs4 import BeautifulSoup
# Fixture document (the classic "three sisters" sample from the BS4 docs).
html_doc = """
<html lang="en" dir="ltr">
<head>
<meta charset="utf-8">
<title>The Dormouse's story</title>
</head>
<body>
<p class="title">
<b>The Dormouse's story</b>
</p>
<p class="story">Once upon a time there were three little sisters; and the.
<a href="http://example.com/elsie" class="sister" id='link1'>Elsie</a>,
<a href="http://example.com/lacie" class="sister" id='link2'>Lacie</a> and
<a href="http://example.com/tillie" class="sister" id='link3' name='tillie'>Tillie</a>;
and they lived at the bottom of a well.</p>
<p class="story">...</p>
</body>
</html>
"""
# Parse with the (faster, third-party) lxml parser.
soup = BeautifulSoup(html_doc, 'lxml')
# find_all filters by tag name plus an attribute dict: here every <a> whose
# given attribute matches the value.
# a = soup.find_all('a', {'id': 'link3'}) ## Search by html id attribute
a = soup.find_all('a', {'name': 'tillie'}) ## Search by html name attribute
print(a)
"rezwanul.cse@gmail.com"
] | rezwanul.cse@gmail.com |
dd2406b65e3b1a18b062bfb1b7a2892ae86324d2 | f445450ac693b466ca20b42f1ac82071d32dd991 | /generated_tempdir_2019_09_15_163300/generated_part004950.py | 05d936b5ea96b6a706d95d20168c983f2765c6f6 | [] | no_license | Upabjojr/rubi_generated | 76e43cbafe70b4e1516fb761cabd9e5257691374 | cd35e9e51722b04fb159ada3d5811d62a423e429 | refs/heads/master | 2020-07-25T17:26:19.227918 | 2019-09-15T15:41:48 | 2019-09-15T15:41:48 | 208,357,412 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,655 | py | from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher39782(CommutativeMatcher):
    """Auto-generated matchpy many-to-one matcher (do not edit by hand).

    Matches the single pattern ``x**j`` under commutative multiplication; the
    numbered ``# State`` comments correspond to states of the generated
    matching automaton.
    """
    _instance = None
    patterns = {
        0: (0, Multiset({0: 1}), [
            (VariableWithCount('i2.2.1.2.2.2.1.0', 1, 1, S(1)), Mul)
        ])
    }
    subjects = {}
    subjects_by_id = {}
    bipartite = BipartiteGraph()
    associative = Mul
    max_optional_count = 1
    anonymous_patterns = set()
    def __init__(self):
        self.add_subject(None)
    @staticmethod
    def get():
        # Lazily-created singleton instance of this matcher.
        if CommutativeMatcher39782._instance is None:
            CommutativeMatcher39782._instance = CommutativeMatcher39782()
        return CommutativeMatcher39782._instance
    @staticmethod
    def get_match_iter(subject):
        # Generator yielding (pattern_index, substitution) for each match;
        # subjects are restored (appendleft) on the way out so backtracking
        # leaves the deques unchanged.
        subjects = deque([subject]) if subject is not None else deque()
        subst0 = Substitution()
        # State 39781
        if len(subjects) >= 1 and isinstance(subjects[0], Pow):
            tmp1 = subjects.popleft()
            subjects2 = deque(tmp1._args)
            # State 39783
            if len(subjects2) >= 1:
                tmp3 = subjects2.popleft()
                subst1 = Substitution(subst0)
                try:
                    subst1.try_add_variable('i2.2.1.1', tmp3)
                except ValueError:
                    pass
                else:
                    pass
                    # State 39784
                    if len(subjects2) >= 1:
                        tmp5 = subjects2.popleft()
                        subst2 = Substitution(subst1)
                        try:
                            subst2.try_add_variable('i2.2.1.2', tmp5)
                        except ValueError:
                            pass
                        else:
                            pass
                            # State 39785
                            if len(subjects2) == 0:
                                pass
                                # State 39786
                                if len(subjects) == 0:
                                    pass
                                    # 0: x**j
                                    yield 0, subst2
                        subjects2.appendleft(tmp5)
                subjects2.appendleft(tmp3)
            subjects.appendleft(tmp1)
        return
        yield
from collections import deque | [
"franz.bonazzi@gmail.com"
] | franz.bonazzi@gmail.com |
6271b235d95319d9ce49d00c4d0d361e640f8dba | 48e124e97cc776feb0ad6d17b9ef1dfa24e2e474 | /sdk/python/pulumi_azure_native/network/v20191201/get_bastion_shareable_link.py | 08e9fbc511ce20444a5a465041764355e9f0164c | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | bpkgoud/pulumi-azure-native | 0817502630062efbc35134410c4a784b61a4736d | a3215fe1b87fba69294f248017b1591767c2b96c | refs/heads/master | 2023-08-29T22:39:49.984212 | 2021-11-15T12:43:41 | 2021-11-15T12:43:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,881 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._inputs import *
__all__ = [
'GetBastionShareableLinkResult',
'AwaitableGetBastionShareableLinkResult',
'get_bastion_shareable_link',
'get_bastion_shareable_link_output',
]
@pulumi.output_type
class GetBastionShareableLinkResult:
"""
Response for all the Bastion Shareable Link endpoints.
"""
def __init__(__self__, next_link=None, value=None):
if next_link and not isinstance(next_link, str):
raise TypeError("Expected argument 'next_link' to be a str")
pulumi.set(__self__, "next_link", next_link)
if value and not isinstance(value, list):
raise TypeError("Expected argument 'value' to be a list")
pulumi.set(__self__, "value", value)
@property
@pulumi.getter(name="nextLink")
def next_link(self) -> Optional[str]:
"""
The URL to get the next set of results.
"""
return pulumi.get(self, "next_link")
@property
@pulumi.getter
def value(self) -> Optional[Sequence['outputs.BastionShareableLinkResponse']]:
"""
List of Bastion Shareable Links for the request.
"""
return pulumi.get(self, "value")
class AwaitableGetBastionShareableLinkResult(GetBastionShareableLinkResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetBastionShareableLinkResult(
next_link=self.next_link,
value=self.value)
def get_bastion_shareable_link(bastion_host_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
vms: Optional[Sequence[pulumi.InputType['BastionShareableLink']]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetBastionShareableLinkResult:
"""
Response for all the Bastion Shareable Link endpoints.
:param str bastion_host_name: The name of the Bastion Host.
:param str resource_group_name: The name of the resource group.
:param Sequence[pulumi.InputType['BastionShareableLink']] vms: List of VM references.
"""
__args__ = dict()
__args__['bastionHostName'] = bastion_host_name
__args__['resourceGroupName'] = resource_group_name
__args__['vms'] = vms
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:network/v20191201:getBastionShareableLink', __args__, opts=opts, typ=GetBastionShareableLinkResult).value
return AwaitableGetBastionShareableLinkResult(
next_link=__ret__.next_link,
value=__ret__.value)
@_utilities.lift_output_func(get_bastion_shareable_link)
def get_bastion_shareable_link_output(bastion_host_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
vms: Optional[pulumi.Input[Optional[Sequence[pulumi.InputType['BastionShareableLink']]]]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetBastionShareableLinkResult]:
"""
Response for all the Bastion Shareable Link endpoints.
:param str bastion_host_name: The name of the Bastion Host.
:param str resource_group_name: The name of the resource group.
:param Sequence[pulumi.InputType['BastionShareableLink']] vms: List of VM references.
"""
...
| [
"noreply@github.com"
] | bpkgoud.noreply@github.com |
cc8d1da40b1bd5ed0b6ca2cffc6b84f94ace159c | 8c658165889758b346038a308c863bc93a26d0da | /old_xlvin/gnn_executor/sparse_models.py | f0068b2788357709f3acafb2165840a12bb0f6da | [] | no_license | andreeadeac22/logml_xlvin | 4fd3279b38e19e2a7451ce123491b469b2203013 | 880e78059e43c1f4e3cac9497a9e73f2543e1896 | refs/heads/main | 2023-06-20T06:26:16.635690 | 2021-07-15T10:33:46 | 2021-07-15T10:33:46 | 385,325,958 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,513 | py | import torch
import torch.nn as nn
from torch_scatter import scatter_max, scatter_sum
from collections import OrderedDict
class SparseMessagePassing(nn.Module):
def __init__(self,
node_features,
edge_features,
hidden_dim,
message_function=None,
message_function_depth=None,
neighbour_state_aggr='sum',
activation=False,
layernorm=False):
super().__init__()
self.node_features = node_features
self.hidden_dim = hidden_dim
self.message_function = message_function
self.message_function_depth = message_function_depth
self.neighbour_state_aggr = neighbour_state_aggr
if message_function == 'mpnn':
self.message_proj1 = nn.Linear(3 * hidden_dim, hidden_dim)
if message_function_depth == 2:
self.message_proj2 = nn.Linear(hidden_dim, hidden_dim)
#self.readout_node_proj = nn.Linear(hidden_dim, hidden_dim)
#self.readout_msg_proj = nn.Linear(hidden_dim, hidden_dim) # TODO: where?
self.activation = activation
if activation:
self.relu = nn.ReLU()
self.layernorm = layernorm
if layernorm:
self.ln = nn.LayerNorm(hidden_dim)
def forward(self, x, senders, receivers, edge_feat):
msg = self.message_proj1(torch.cat((x[senders], x[receivers], edge_feat), dim=-1))
if self.activation:
msg = self.relu(msg)
if self.neighbour_state_aggr == 'sum':
aggr_messages = torch.zeros((x.shape[0], self.hidden_dim), device=x.device) # -1, adj_rows, msg)
aggr_messages.index_add_(0, receivers, msg)
elif self.neighbour_state_aggr == 'max':
import warnings
warnings.filterwarnings("ignore")
aggr_messages = torch.ones((x.shape[0], self.hidden_dim), device=x.device) * -1e9 # -1, adj_rows, msg)
scatter_max(msg, receivers, dim=0, out=aggr_messages)
indegree = scatter_sum(torch.ones_like(msg), receivers, dim=0, out=torch.zeros_like(aggr_messages))
aggr_messages = aggr_messages * (indegree > 0)
else:
raise NotImplementedError
x = aggr_messages + x
if self.layernorm:
x = self.ln(x)
return x
class SparseMPNN(nn.Module):
def __init__(self,
node_features,
edge_features,
hidden_dim=None,
out_features=None,
message_function=None,
message_function_depth=None,
neighbour_state_aggr=None,
gnn_steps=1,
msg_activation=False,
layernorm=False):
super().__init__()
self.node_proj = nn.Sequential(
nn.Linear(node_features, hidden_dim, bias=False))
self.edge_proj = nn.Linear(edge_features, hidden_dim)
self.hidden_dim = hidden_dim
self.mps = SparseMessagePassing(node_features=node_features,
edge_features=edge_features,
hidden_dim=hidden_dim,
message_function=message_function,
message_function_depth=message_function_depth,
neighbour_state_aggr=neighbour_state_aggr, activation=msg_activation,
layernorm=layernorm)
self.fc = nn.Linear(in_features=hidden_dim, out_features=out_features)
self.gnn_steps = gnn_steps
def forward(self, data):
# node.shape: a, s, 2
x, adj, adj_mask = data
senders, receivers, edge_feat = adj
num_actions = x.shape[0]
num_states = x.shape[1]
outputs = []
enc_x = self.node_proj(x)
edge_feat = self.edge_proj(edge_feat)
prev_gnnstep_x = torch.zeros_like(enc_x)
for i in range(self.gnn_steps):
prev_gnnstep_x = prev_gnnstep_x + enc_x
onestepx = self.mps(prev_gnnstep_x.reshape(-1, self.hidden_dim), senders, receivers, edge_feat)
onestepx = onestepx.reshape(num_actions, num_states, -1)
onestepx, ind = torch.max(onestepx, dim=0, keepdim=True)
prev_gnnstep_x = onestepx
output = self.fc(onestepx.squeeze())
outputs += [output]
return outputs
| [
"andreeadeac22@gmail.com"
] | andreeadeac22@gmail.com |
a1f17c242e8eb47405eb4d4d4b496c641aa3dd71 | 0e8758b50a74cdbb5d97d988f2075112e07d4f00 | /src/zojax/cache/ram.py | 2215b5bfdb3632b382d213dbeb63fcd7a98328ba | [
"ZPL-2.1"
] | permissive | Zojax/zojax.cache | 513d7dc749603f3583acca4b994d80ea58ec6c25 | bd22dfef23a4da21ca9a1079fa32fd374a96a75e | refs/heads/master | 2020-04-06T04:31:32.346337 | 2014-02-05T10:26:44 | 2014-02-05T10:26:44 | 2,035,340 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 819 | py | ##############################################################################
#
# Copyright (c) 2009 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""
$Id$
"""
from zope.app.cache.ram import RAMCache
class RAMCache(RAMCache):
def __init__(self):
super(RAMCache, self).__init__()
self.update(maxAge=86400)
| [
"andrey.fedoseev@gmail.com"
] | andrey.fedoseev@gmail.com |
7870ca97acd70fe051a39ca5bbd5b94b3141d3cd | b5ef3b9da130f604f111bd469128b73e78d6ba9d | /bt5/erp5_payroll/SkinTemplateItem/portal_skins/erp5_payroll/AccountingTransactionModule_getPaySheetMovementMirrorSectionItemList.py | d45aca40fb53d879887519ab6da346b91bd519c6 | [] | no_license | soediro/erp5 | 154bb2057c4cd12c14018c1ab2a09a78b2d2386a | 3d1a8811007a363b7a43df4b295b5e0965c2d125 | refs/heads/master | 2021-01-11T00:31:05.445267 | 2016-10-05T09:28:05 | 2016-10-07T02:59:00 | 70,526,968 | 1 | 0 | null | 2016-10-10T20:40:41 | 2016-10-10T20:40:40 | null | UTF-8 | Python | false | false | 775 | py | item_list = [('', '')]
portal = context.getPortalObject()
getobject = portal.portal_catalog.getobject
for x in portal.portal_simulation.getInventoryList(
portal_type=('Pay Sheet Cell',
'Pay Sheet Line'),
group_by_resource=0,
group_by_section=0,
group_by_mirror_section=1):
mirror_section_uid = x.mirror_section_uid
if mirror_section_uid:
mirror_section = getobject(mirror_section_uid)
if mirror_section.getPortalType() == 'Organisation':
item_list.append((mirror_section.getTitle(),
mirror_section.getRelativeUrl()))
item_list.sort(key=lambda a:a[0])
return item_list
| [
"georgios.dagkakis@nexedi.com"
] | georgios.dagkakis@nexedi.com |
148a21faccd7c7c7aef62e51e326d413be656fc9 | b4eb7a3efff554ad76daeaf440721ab4011217af | /train_cifar.py | 0a69b749f25389f892e57b0fd6664af6b1b60e10 | [
"MIT"
] | permissive | Swall0w/dual-path-networks | 8c0b0012901735c565aa02007fd1c3fcb55a974d | dfd935b16a2fe7aa414ad280846a6cd2fd0fda12 | refs/heads/master | 2021-05-13T21:39:49.099834 | 2018-02-03T05:58:55 | 2018-02-03T05:58:55 | 116,468,778 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,312 | py | from __future__ import print_function
import argparse
import chainer
import chainer.links as L
from chainer import training
from chainer.training import extensions
from chainer.datasets import get_cifar10
from chainer.datasets import get_cifar100
import models.VGG
import models.dpn
def main():
parser = argparse.ArgumentParser(description='Chainer CIFAR example:')
parser.add_argument('--dataset', '-d', default='cifar10',
help='The dataset to use: cifar10 or cifar100')
parser.add_argument('--batchsize', '-b', type=int, default=64,
help='Number of images in each mini-batch')
parser.add_argument('--learnrate', '-l', type=float, default=0.05,
help='Learning rate for SGD')
parser.add_argument('--epoch', '-e', type=int, default=300,
help='Number of sweeps over the dataset to train')
parser.add_argument('--gpu', '-g', type=int, default=0,
help='GPU ID (negative value indicates CPU)')
parser.add_argument('--out', '-o', default='result',
help='Directory to output the result')
parser.add_argument('--resume', '-r', default='',
help='Resume the training from snapshot')
args = parser.parse_args()
print('GPU: {}'.format(args.gpu))
print('# Minibatch-size: {}'.format(args.batchsize))
print('# epoch: {}'.format(args.epoch))
print('')
# Set up a neural network to train.
# Classifier reports softmax cross entropy loss and accuracy at every
# iteration, which will be used by the PrintReport extension below.
if args.dataset == 'cifar10':
print('Using CIFAR10 dataset.')
class_labels = 10
train, test = get_cifar10()
elif args.dataset == 'cifar100':
print('Using CIFAR100 dataset.')
class_labels = 100
train, test = get_cifar100()
else:
raise RuntimeError('Invalid dataset choice.')
#model = L.Classifier(models.VGG.VGG(class_labels))
model = L.Classifier(models.dpn.DPN92(class_labels))
if args.gpu >= 0:
# Make a specified GPU current
chainer.cuda.get_device_from_id(args.gpu).use()
model.to_gpu() # Copy the model to the GPU
optimizer = chainer.optimizers.MomentumSGD(args.learnrate)
optimizer.setup(model)
optimizer.add_hook(chainer.optimizer.WeightDecay(5e-4))
train_iter = chainer.iterators.SerialIterator(train, args.batchsize)
test_iter = chainer.iterators.SerialIterator(test, args.batchsize,
repeat=False, shuffle=False)
# Set up a trainer
updater = training.StandardUpdater(train_iter, optimizer, device=args.gpu)
trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)
# Evaluate the model with the test dataset for each epoch
trainer.extend(extensions.Evaluator(test_iter, model, device=args.gpu))
# Reduce the learning rate by half every 25 epochs.
trainer.extend(extensions.ExponentialShift('lr', 0.5),
trigger=(25, 'epoch'))
# Dump a computational graph from 'loss' variable at the first iteration
# The "main" refers to the target link of the "main" optimizer.
trainer.extend(extensions.dump_graph('main/loss'))
# Take a snapshot at each epoch
trainer.extend(extensions.snapshot(), trigger=(args.epoch, 'epoch'))
# Write a log of evaluation statistics for each epoch
trainer.extend(extensions.LogReport())
# Print selected entries of the log to stdout
# Here "main" refers to the target link of the "main" optimizer again, and
# "validation" refers to the default name of the Evaluator extension.
# Entries other than 'epoch' are reported by the Classifier link, called by
# either the updater or the evaluator.
trainer.extend(extensions.PrintReport(
['epoch', 'main/loss', 'validation/main/loss',
'main/accuracy', 'validation/main/accuracy', 'elapsed_time']))
# Print a progress bar to stdout
trainer.extend(extensions.ProgressBar())
if args.resume:
# Resume from a snapshot
chainer.serializers.load_npz(args.resume, trainer)
# Run the training
trainer.run()
if __name__ == '__main__':
main()
| [
"technext.jpn@gmail.com"
] | technext.jpn@gmail.com |
6908a5eb239a6916514e4e5a69ee1b36c604ebba | 8dc64db8a0d7ddb8778c8eae2dac9075b9a90e2b | /env/Lib/site-packages/flask_simple_geoip.py | bf15adea0d557037690ccb3fe517dd69d69abb9b | [
"MIT"
] | permissive | theXtroyer1221/Cloud-buffer | c3992d1b543a1f11fde180f6f7d988d28b8f9684 | 37eabdd78c15172ea980b59d1aff65d8628cb845 | refs/heads/master | 2022-11-22T22:37:10.453923 | 2022-02-25T01:15:57 | 2022-02-25T01:15:57 | 240,901,269 | 1 | 1 | MIT | 2022-09-04T14:48:02 | 2020-02-16T14:00:32 | HTML | UTF-8 | Python | false | false | 2,198 | py | from os import environ
from simple_geoip import GeoIP
from flask import request
CONFIG_KEY = "GEOIPIFY_API_KEY"
import ipaddress
class SimpleGeoIP(object):
def __init__(self, app=None):
self.app = app
if app is not None:
self.init_app(app)
def init_app(self, app):
api_key = app.config.get(CONFIG_KEY) or environ.get(CONFIG_KEY)
if not api_key:
raise Exception(
"No API key was supplied for performing GeoIP lookups. Please set a value for GEOIPIFY_API_KEY.")
self.geoip_client = GeoIP(api_key)
def get_geoip_data(self, remote_addr=None):
"""
Performs a geoip lookup based on the requester's public IP address.
NOTE: This method will *always* return and never raise an exception --
it does this strategically because performing a geoip lookup should
never be a tier 1 stop-the-world exception.
:param remote_addr: IPv4 or IPv6 to search location by.
If None, it defaults to the request's public IP address
:type remote_addr: str or None
:rtype: dict or None
:returns: A dictionary containing the user's geolocation data, or None
if there was a problem.
"""
remote_addr = remote_addr if remote_addr else self.__resolve_remote_addr()
try:
data = self.geoip_client.lookup(remote_addr)
# Don't do anything if an exception arises -- since geolocation data isn't
# critical to a request being processed, we can always skip it in the worst
# case scenario.
#
# By default, the underlying `simple_geoip` library will handle retry logic
# for us ;)
except:
data = None
return data
def __resolve_remote_addr(self):
if request.environ.get('HTTP_X_FORWARDED_FOR') is not None:
x_forwarded_for = request.environ['HTTP_X_FORWARDED_FOR'].split(',')
addr = x_forwarded_for[0]
try:
ipaddress.ip_address(addr)
return addr
except:
pass
return request.remote_addr
| [
"jaddou2005@gmail.com"
] | jaddou2005@gmail.com |
a67d9defbdf633e3736721a0224c3c393d244d2a | dca0bd2e04dda3801d395c2a6ab2f9d95be79551 | /Python/SmallProject/improving_the_index.py | 00576b45bef129d99882d00405807d6d6ce25985 | [] | no_license | A-khateeb/Full-Stack-Development-Path | ab8c86abea2f983fb8e0046a65b99772416c754c | 5a5eaa198367cc95a6b5638e9740f4ad564dec23 | refs/heads/master | 2021-06-01T23:52:04.965494 | 2020-05-01T22:59:20 | 2020-05-01T22:59:20 | 89,286,943 | 2 | 0 | null | 2017-12-22T22:21:52 | 2017-04-24T21:04:07 | Shell | UTF-8 | Python | false | false | 3,096 | py | # The current index includes a url in the list of urls
# for a keyword multiple times if the keyword appears
# on that page more than once.
# It might be better to only include the same url
# once in the url list for a keyword, even if it appears
# many times.
# Modify add_to_index so that a given url is only
# included once in the url list for a keyword,
# no matter how many times that keyword appears.
def add_to_index(index, keyword, url):
for entry in index:
if entry[0] == keyword:
if url not in entry[1]:
entry[1].append(url)
return
# not found, add new keyword to index
index.append([keyword, [url]])
def get_page(url):
try:
if url == "http://www.udacity.com/cs101x/index.html":
return '''<html> <body> This is a test page for learning to crawl!
<p> It is a good idea to
<a href="http://www.udacity.com/cs101x/crawling.html">
learn to crawl</a> before you try to
<a href="http://www.udacity.com/cs101x/walking.html">walk</a> or
<a href="http://www.udacity.com/cs101x/flying.html">fly</a>.</p></body>
</html>'''
elif url == "http://www.udacity.com/cs101x/crawling.html":
return '''<html> <body> I have not learned to crawl yet, but I am
quite good at <a href="http://www.udacity.com/cs101x/kicking.html">kicking</a>.
</body> </html>'''
elif url == "http://www.udacity.com/cs101x/walking.html":
return '''<html> <body> I cant get enough
<a href="http://www.udacity.com/cs101x/index.html">crawling</a></body></html>'''
elif url == "http://www.udacity.com/cs101x/flying.html":
return '''<html>
<body>The magic words are Squeamish Ossifrage!</body></html>'''
except:
return ""
return ""
def union(a, b):
for e in b:
if e not in a:
a.append(e)
def get_next_target(page):
start_link = page.find('<a href=')
if start_link == -1:
return None, 0
start_quote = page.find('"', start_link)
end_quote = page.find('"', start_quote + 1)
url = page[start_quote + 1:end_quote]
return url, end_quote
def get_all_links(page):
links = []
while True:
url, endpos = get_next_target(page)
if url:
links.append(url)
page = page[endpos:]
else:
break
return links
def crawl_web(seed):
tocrawl = [seed]
crawled = []
index = []
while tocrawl:
page = tocrawl.pop()
if page not in crawled:
content = get_page(page)
add_page_to_index(index, page, content)
union(tocrawl, get_all_links(content))
crawled.append(page)
return index
def add_page_to_index(index, url, content):
words = content.split()
for word in words:
add_to_index(index, word, url)
def lookup(index, keyword):
for entry in index:
if entry[0] == keyword:
return entry[1]
return None
#index = crawl_web("http://www.udacity.com/cs101x/index.html")
#print lookup(index,"is")
#>>> ['http://www.udacity.com/cs101x/index.html']
| [
"khateebafeef@gmail.com"
] | khateebafeef@gmail.com |
c394a9cbdf35c8a5e98933cb392930c904b2e737 | 1446ba991034652370c9d64df08caa817735e1f6 | /office365/graph/teams/channel.py | fb5034e80178479d9b640e5bafa997aa01a376e3 | [
"MIT"
] | permissive | kakaruna/Office365-REST-Python-Client | 3ab949bc8d48a604e534c902883af62fd089a87d | 9f2f564972b988f1f41a3080c99684b4f6ce2164 | refs/heads/master | 2022-09-26T15:32:40.326036 | 2020-06-02T13:24:04 | 2020-06-02T13:24:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 636 | py | from office365.runtime.client_object import ClientObject
class Channel(ClientObject):
"""Teams are made up of channels, which are the conversations you have with your teammates"""
@property
def webUrl(self):
"""A hyperlink that will navigate to the channel in Microsoft Teams. This is the URL that you get when you
right-click a channel in Microsoft Teams and select Get link to channel. This URL should be treated as an
opaque blob, and not parsed. Read-only. """
if self.is_property_available('webUrl'):
return self.properties['webUrl']
else:
return None
| [
"vvgrem@gmail.com"
] | vvgrem@gmail.com |
0de52008434f22cd1e333b2eb912ca49397e2ea1 | 5e2284bff015e6b03e4ea346572b29aaaf79c7c2 | /tests/correct_programs/aoc2020/test_day_8_handheld_halting.py | 7a0b9706e46bcedf4a4b8a1b0b45bbe6e810e889 | [
"MIT"
] | permissive | LaurenDebruyn/aocdbc | bbfd7d832f9761ba5b8fb527151157742b2e4890 | b857e8deff87373039636c12a170c0086b19f04c | refs/heads/main | 2023-06-11T23:02:09.825705 | 2021-07-05T09:26:23 | 2021-07-05T09:26:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,390 | py | import textwrap
import unittest
import icontract_hypothesis
from correct_programs import common
from correct_programs.aoc2020 import day_8_handheld_halting
class TestWithIcontractHypothesis(unittest.TestCase):
def test_functions(self) -> None:
for func in [
day_8_handheld_halting.parse,
day_8_handheld_halting.execute_instructions,
]:
try:
icontract_hypothesis.test_with_inferred_strategy(func) # type: ignore
except Exception as error:
raise Exception(
f"Automatically testing {func} with icontract-hypothesis failed "
f"(please see the original error above)"
) from error
class TestManually(unittest.TestCase):
def test_case(self) -> None:
lines = common.Lines(
textwrap.dedent(
"""\
nop +0
acc +1
jmp +4
acc +3
jmp -3
acc -99
acc +1
jmp -4
acc +6
"""
).splitlines()
)
instructions = day_8_handheld_halting.parse(lines=lines)
acc = day_8_handheld_halting.execute_instructions(instructions=instructions)
self.assertEqual(5, acc)
if __name__ == "__main__":
unittest.main()
| [
"noreply@github.com"
] | LaurenDebruyn.noreply@github.com |
101e90cf679f5063e1980a9f9e9de772d5dc3146 | f915fd79447086ac70eac7de2aa7f12e1225b246 | /trimesh/interfaces/scad.py | f90038034166d7b2391819c575b994a47c8a9473 | [
"MIT"
] | permissive | drancom/trimesh | 55036fc37b20eebdd5c1bc5b0a1e47f81090cbcb | 9568557e00b501d242eb4de79f52c9a5e42a55af | refs/heads/master | 2021-01-18T02:25:46.446407 | 2016-03-20T19:54:58 | 2016-03-20T19:54:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,099 | py | import numpy as np
from .generic import MeshScript
from distutils.spawn import find_executable
def interface_scad(meshes, script):
'''
A way to interface with openSCAD which is itself an interface
to the CGAL CSG bindings.
CGAL is very stable if difficult to install/use, so this function provides a
tempfile- happy solution for getting the basic CGAL CSG functionality.
Arguments
---------
meshes: list of Trimesh objects
script: string of the script to send to scad.
Trimesh objects can be referenced in the script as
$mesh_0, $mesh_1, etc.
'''
with MeshScript(meshes = meshes, script = script) as scad:
result = scad.run('openscad $script -o $mesh_post')
return result
def boolean(meshes, operation='difference'):
'''
Run an operation on a set of meshes
'''
script = operation + '(){'
for i in range(len(meshes)):
script += 'import(\"$mesh_' + str(i) + '\");'
script += '}'
return interface_scad(meshes, script)
exists = find_executable('openscad') is not None
| [
"mik3dh@gmail.com"
] | mik3dh@gmail.com |
331a5cef6090eb593ac8ec2b91fc7963b4abc6ab | 40f3a05d15269914ca2a0adccb66a9938c4e68fb | /examples/feasible_example.py | dadf6fa17e36835f94fc22ec5c4aa2fe142ce49e | [] | no_license | slivingston/magnumSTL | 1cc15123d17f86a96359eb9d9822850765e52217 | acb505874ddad2a2518669a163aeeff6d85ea63f | refs/heads/master | 2021-07-19T06:39:12.929399 | 2017-10-28T02:24:18 | 2017-10-28T02:24:18 | 108,613,125 | 3 | 0 | null | 2017-10-28T02:21:15 | 2017-10-28T02:21:15 | null | UTF-8 | Python | false | false | 900 | py | import stl
from sympy import Symbol
from magnum import game as G
from magnum import io
import numpy as np
## Setup the Model
model = G.Model(
dt=0.01,
H=2,
vars=G.Vars(
state=(Symbol("x"),),
input=(Symbol("u"),),
env=()
),
t=0,
dyn=G.Dynamics(
A=np.array([[0]]),
B=np.array([[10]]),
C=np.array([[]])
)
)
# Setup the specificatoion
context = {
stl.parse("Init"): stl.parse("x = 0"),
stl.parse("ReachFive"): stl.parse("F(x > 5)", H=model.H),
}
spec = G.Specs(
obj=stl.utils.inline_context(stl.parse("ReachFive"), context),
init=stl.utils.inline_context(stl.parse("Init"), context),
bounds=stl.parse("G((u <= 1) & (u >= 0))", H=model.H),
learned=stl.TOP
)
meta = G.Meta(
pri={},
names={},
drdu=None,
drdw=None,
)
feasible_example = G.Game(spec=spec, model=model, meta=meta)
| [
"mvc@linux.com"
] | mvc@linux.com |
61b774c0fc27cb3364a70fc915e6c1269ebb8ec8 | 5e20a397bef40163792a185d392cbd6794fca648 | /common/models.py | 8dc24f6a2bdc0383f2048310bae469e5e5554a3a | [] | no_license | uralbash/django-hyango | aba92d2e0d558b439229d67dced980e5f19e0280 | 301ec384774e9ab585831a0df8847fac8c9a18cc | refs/heads/master | 2020-12-30T10:50:15.154925 | 2014-02-02T15:01:33 | 2014-02-02T15:01:33 | 16,456,402 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,481 | py | # -*- coding: utf-8 -*-
from django.db import models
from mptt.models import TreeManager
class VisibleTreeManager(TreeManager):
"""
Менеджер моделей с наследованием от менеджера MPTT
"""
def get_query_set(self):
return super(VisibleTreeManager, self).get_query_set()\
.filter(visible=True)
class VisibleManager(models.Manager):
def get_query_set(self):
return super(VisibleManager, self).get_query_set().filter(visible=True)
class VisibleObject(models.Model):
visible = visible = models.BooleanField(u'Показывать?', default=False)
allpages = models.Manager()
objects = VisibleManager()
class Meta:
abstract = True
class SEOModel(models.Model):
seo_title = models.CharField(u'SEO-заголовок (title)', max_length=255,
blank=True, null=True)
seo_meta = models.TextField(u'SEO-мета (keywords, description)',
blank=True, null=True,
help_text=u'Вставьте сюда HTML-код мета'
u' информации:\nНапример:'
u' <meta name="keywords"'
u' content="Ключевые слова" />')
class Meta:
abstract = True
class VisibleSEOModel(VisibleObject, SEOModel):
class Meta:
abstract = True
| [
"root@uralbash.ru"
] | root@uralbash.ru |
e66541a799c3600f25c6e3da839fb7744ba2058a | 7a8345e0a3b84614f9c8f31bb249f7e211cbd549 | /PycharmProjects/untitled/class/new.py | 83a8afd95ccd3a5278a4e7daa13f687ca849b118 | [] | no_license | oumingwang/ubuntupythoncode | 5ac5baf16acecec3cd518094a49f9766dc6a823b | 48dd76f848efedf13ba049c5d4ef9402d3285675 | refs/heads/master | 2020-07-04T03:42:16.080197 | 2016-11-19T15:21:49 | 2016-11-19T15:21:49 | 74,214,129 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 488 | py | class MyClass(object):
def __new__(cls, *args, **kwargs):
print '__new__ called'
return object.__new__(cls)
def __init__(self):
print '__init__ called '
self.a = 1
instance = MyClass()
print instance.a
class MyOtherClass(MyClass):
def __init__(self):
print 'MyOther class __init__ called'
super(MyOtherClass,self).__init__()
self.b = 2
instance1 = MyOtherClass()
print instance1.a
print instance1.b | [
"474978390@qq.com"
] | 474978390@qq.com |
b89ee8ae67353f7aba0cc5263abbd8c3a1ea7b6b | 49185bd5cf7e2f5190ce22b5189a09fe1ab6bb0f | /Proper/proper/examples/talbot_correct_demo.py | c4cf7d80eb8839cfe27c4007790db5db7e006ed6 | [
"MIT"
] | permissive | RupertDodkins/MEDIS | c3f55d8adb6a8c4120593ba6552c9dfe3784d4e2 | bdb1f00fb93506da2a1f251bc6780e70e97a16c5 | refs/heads/master | 2021-07-05T20:06:44.162517 | 2019-09-05T22:16:12 | 2019-09-05T22:16:12 | 160,850,558 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,810 | py | # Copyright 2016, 2017 California Institute of Technology
# Users must agree to abide by the restrictions listed in the
# file "LegalStuff.txt" in the PROPER library directory.
#
# PROPER developed at Jet Propulsion Laboratory/California Inst. Technology
# Original IDL version by John Krist
# Python translation by Navtej Saini, with Luis Marchen and Nikta Amiri
import proper
import numpy as np
import matplotlib.pylab as plt
def talbot_correct_demo():
diam = 0.1 # beam diameter in meters
period = 0.04 # period of cosine pattern (meters)
wavelength_microns = 0.5
wavelength_m = wavelength_microns * 1.e-6
n = 128
nseg = 9
talbot_length = 2 * period**2 / wavelength_m
delta_length = talbot_length / (nseg - 1.)
z = 0.
plt.close('all')
f = plt.figure(figsize = (8, 18))
for i in range(nseg):
(wavefront, sampling) = proper.prop_run('talbot_correct',
wavelength_microns, n,
PASSVALUE = {'diam': diam, 'period': period, 'dist': z})
# Extract central cross-section of array
wavefront = wavefront[:,n//2]
amp = np.abs(wavefront)
amp -= np.mean(amp)
phase = np.arctan2(wavefront.imag, wavefront.real)
phase -= np.mean(phase)
ax1 = f.add_subplot(nseg,2,2*i+1)
if i == 0:
ax1.set_title('Amplitude')
ax1.set_ylim(-0.0015, 0.0015)
ax1.plot(amp)
ax2 = f.add_subplot(nseg,2,2*i+2)
if i == 0:
ax2.set_title('Phase')
ax2.set_ylim(-0.25, 0.25)
ax2.plot(phase)
z += delta_length
plt.show()
return
if __name__ == '__main__':
talbot_correct_demo()
| [
"rupertdodkins@gmail.com"
] | rupertdodkins@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.